/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/gpio.h>
#include <linux/remote_spinlock.h>
#include <linux/pm_qos.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <mach/msm_spi.h>
#include <mach/sps.h>
#include <mach/dma.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include "spi_qsd.h"

static int msm_spi_pm_resume_runtime(struct device *device);
static int msm_spi_pm_suspend_runtime(struct device *device);


static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
					struct platform_device *pdev)
{
	struct resource *resource;
	unsigned long gsbi_mem_phys_addr;
	size_t gsbi_mem_size;
	void __iomem *gsbi_base;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!resource)
		return 0;

	gsbi_mem_phys_addr = resource->start;
	gsbi_mem_size = resource_size(resource);
	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size, SPI_DRV_NAME))
		return -ENXIO;

	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
					gsbi_mem_size);
	if (!gsbi_base)
		return -ENXIO;

	/* Set GSBI to SPI mode */
	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);

	return 0;
}

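/*
 * Reset the QUP core and clear the operational, config and I/O-mode
 * registers; when dd->qup_ver is set, the operational-mask register is
 * cleared as well.
 */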
static inline void msm_spi_register_init(struct msm_spi *dd)
{
	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
	if (dd->qup_ver)
		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
}

static inline int msm_spi_request_gpios(struct msm_spi *dd)
{
	int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0) {
			result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]);
			if (result) {
				dev_err(dd->dev, "%s: gpio_request for pin %d "
					"failed with error %d\n", __func__,
					dd->spi_gpios[i], result);
				goto error;
			}
		}
	}
	return 0;

error:
	for (; --i >= 0;) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}
	return result;
}

static inline void msm_spi_free_gpios(struct msm_spi *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
		if (dd->spi_gpios[i] >= 0)
			gpio_free(dd->spi_gpios[i]);
	}

	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
		if (dd->cs_gpios[i].valid) {
			gpio_free(dd->cs_gpios[i].gpio_num);
			dd->cs_gpios[i].valid = 0;
		}
	}
}

/**
 * msm_spi_clk_max_rate: finds the nearest lower rate for a clk
 * @clk the clock for which to find nearest lower rate
 * @rate clock frequency in Hz
 * @return nearest lower rate or negative error value
 *
 * Public clock API extends clk_round_rate which is a ceiling function. This
 * function is a floor function implemented as a binary search using the
 * ceiling function.
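 *
 * For example, if the clock tree (hypothetically) only supported 19.2, 25,
 * 50 and 100 MHz, a request for 60 MHz would return 50 MHz from this
 * function, whereas clk_round_rate() alone would round up to 100 MHz.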
 */
static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
{
	long lowest_available, nearest_low, step_size, cur;
	long step_direction = -1;
	long guess = rate;
	int max_steps = 10;

	cur = clk_round_rate(clk, rate);
	if (cur == rate)
		return rate;

	/* if we got here then: cur > rate */
	lowest_available = clk_round_rate(clk, 0);
	if (lowest_available > rate)
		return -EINVAL;

	step_size = (rate - lowest_available) >> 1;
	nearest_low = lowest_available;

	while (max_steps-- && step_size) {
		guess += step_size * step_direction;

		cur = clk_round_rate(clk, guess);

		if ((cur < rate) && (cur > nearest_low))
			nearest_low = cur;

		/*
		 * if we stepped too far, then start stepping in the other
		 * direction with half the step size
		 */
		if (((cur > rate) && (step_direction > 0))
		 || ((cur < rate) && (step_direction < 0))) {
			step_direction = -step_direction;
			step_size >>= 1;
		}
	}
	return nearest_low;
}

static void msm_spi_clock_set(struct msm_spi *dd, int speed)
{
	long rate;
	int rc;

	rate = msm_spi_clk_max_rate(dd->clk, speed);
	if (rate < 0) {
		dev_err(dd->dev,
			"%s: no match found for requested clock frequency:%d",
			__func__, speed);
		return;
	}

	rc = clk_set_rate(dd->clk, rate);
	if (!rc)
		dd->clock_speed = rate;
}

static void msm_spi_clk_path_vote(struct msm_spi *dd)
{
	if (dd->clk_path_vote.client_hdl)
		msm_bus_scale_client_update_request(
				dd->clk_path_vote.client_hdl,
				MSM_SPI_CLK_PATH_RESUME_VEC);
}

static void msm_spi_clk_path_unvote(struct msm_spi *dd)
{
	if (dd->clk_path_vote.client_hdl)
		msm_bus_scale_client_update_request(
				dd->clk_path_vote.client_hdl,
				MSM_SPI_CLK_PATH_SUSPEND_VEC);
}

static void msm_spi_clk_path_teardown(struct msm_spi *dd)
{
	if (dd->pdata->active_only)
		msm_spi_clk_path_unvote(dd);

	if (dd->clk_path_vote.client_hdl) {
		msm_bus_scale_unregister_client(dd->clk_path_vote.client_hdl);
		dd->clk_path_vote.client_hdl = 0;
	}
}

/**
 * msm_spi_clk_path_init_structs: internal impl detail of msm_spi_clk_path_init
 *
 * allocates and initializes the bus scaling vectors.
 */
static int msm_spi_clk_path_init_structs(struct msm_spi *dd)
{
	struct msm_bus_vectors *paths = NULL;
	struct msm_bus_paths *usecases = NULL;

	dev_dbg(dd->dev, "initialises path clock voting structs");

	paths = devm_kzalloc(dd->dev, sizeof(*paths) * 2, GFP_KERNEL);
	if (!paths) {
		dev_err(dd->dev,
			"msm_bus_paths.paths memory allocation failed");
		return -ENOMEM;
	}

	usecases = devm_kzalloc(dd->dev, sizeof(*usecases) * 2, GFP_KERNEL);
	if (!usecases) {
		dev_err(dd->dev,
			"msm_bus_scale_pdata.usecases memory allocation failed");
		goto path_init_err;
	}

	dd->clk_path_vote.pdata = devm_kzalloc(dd->dev,
					sizeof(*dd->clk_path_vote.pdata),
					GFP_KERNEL);
	if (!dd->clk_path_vote.pdata) {
		dev_err(dd->dev,
			"msm_bus_scale_pdata memory allocation failed");
		goto path_init_err;
	}

	paths[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) {
		.src = dd->pdata->master_id,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	};

	paths[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_vectors) {
		.src = dd->pdata->master_id,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = MSM_SPI_CLK_PATH_AVRG_BW(dd),
		.ib = MSM_SPI_CLK_PATH_BRST_BW(dd),
	};

	usecases[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) {
		.num_paths = 1,
		.vectors = &paths[MSM_SPI_CLK_PATH_SUSPEND_VEC],
	};

	usecases[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) {
		.num_paths = 1,
		.vectors = &paths[MSM_SPI_CLK_PATH_RESUME_VEC],
	};

	*dd->clk_path_vote.pdata = (struct msm_bus_scale_pdata) {
		.active_only = dd->pdata->active_only,
		.name = dev_name(dd->dev),
		.num_usecases = 2,
		.usecase = usecases,
	};

	return 0;

path_init_err:
	devm_kfree(dd->dev, paths);
	devm_kfree(dd->dev, usecases);
	devm_kfree(dd->dev, dd->clk_path_vote.pdata);
	dd->clk_path_vote.pdata = NULL;
	return -ENOMEM;
}

/**
 * msm_spi_clk_path_postponed_register: reg with bus-scaling after it is probed
 *
 * @return zero on success
 *
 * Workaround: the SPI driver may be probed before the bus scaling driver.
 * Calling msm_bus_scale_register_client() will fail if the bus scaling driver
 * is not ready yet. Thus, this function should be called not from probe but
 * from a later context. Also, this function may be called more than once
 * before registration succeeds. In that case only one error message will be
 * logged. At boot time all clocks are on, so earlier SPI transactions should
 * succeed.
 */
static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
{
	dd->clk_path_vote.client_hdl = msm_bus_scale_register_client(
						dd->clk_path_vote.pdata);

	if (dd->clk_path_vote.client_hdl) {
		if (dd->clk_path_vote.reg_err) {
			/* log a success message if an error msg was logged */
			dd->clk_path_vote.reg_err = false;
			dev_info(dd->dev,
				"msm_bus_scale_register_client(mstr-id:%d "
				"actv-only:%d):0x%x",
				dd->pdata->master_id, dd->pdata->active_only,
				dd->clk_path_vote.client_hdl);
		}

		if (dd->pdata->active_only)
			msm_spi_clk_path_vote(dd);
	} else {
		/* guard to log only one error on multiple failure */
		if (!dd->clk_path_vote.reg_err) {
			dd->clk_path_vote.reg_err = true;

			dev_info(dd->dev,
				"msm_bus_scale_register_client(mstr-id:%d "
				"actv-only:%d):0",
				dd->pdata->master_id, dd->pdata->active_only);
		}
	}

	return dd->clk_path_vote.client_hdl ? 0 : -EAGAIN;
}

static void msm_spi_clk_path_init(struct msm_spi *dd)
{
	/*
	 * bail out if path voting is disabled (master_id == 0) or if it is
	 * already registered (client_hdl != 0)
	 */
	if (!dd->pdata->master_id || dd->clk_path_vote.client_hdl)
		return;

	/* if it failed once then try no more */
	if (!dd->clk_path_vote.pdata && msm_spi_clk_path_init_structs(dd)) {
		dd->pdata->master_id = 0;
		return;
	}

	/* on failure try again later */
	if (msm_spi_clk_path_postponed_register(dd))
		return;

	if (dd->pdata->active_only)
		msm_spi_clk_path_vote(dd);
}

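/*
 * Decode the FIFO geometry advertised in SPI_IO_MODES: 'block' selects the
 * block size in 32-bit words (1, 4 or 8) and 'mult' the number of blocks per
 * FIFO (2, 4, 8 or 16). For example, block=1 and mult=2 yield a 16-byte block
 * and a 32-word FIFO.
 */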
static int msm_spi_calculate_size(int *fifo_size,
				int *block_size,
				int block,
				int mult)
{
	int words;

	switch (block) {
	case 0:
		words = 1; /* 4 bytes */
		break;
	case 1:
		words = 4; /* 16 bytes */
		break;
	case 2:
		words = 8; /* 32 bytes */
		break;
	default:
		return -EINVAL;
	}

	switch (mult) {
	case 0:
		*fifo_size = words * 2;
		break;
	case 1:
		*fifo_size = words * 4;
		break;
	case 2:
		*fifo_size = words * 8;
		break;
	case 3:
		*fifo_size = words * 16;
		break;
	default:
		return -EINVAL;
	}

	*block_size = words * sizeof(u32); /* in bytes */
	return 0;
}

static void get_next_transfer(struct msm_spi *dd)
{
	struct spi_transfer *t = dd->cur_transfer;

	if (t->transfer_list.next != &dd->cur_msg->transfers) {
		dd->cur_transfer = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
		dd->write_buf = dd->cur_transfer->tx_buf;
		dd->read_buf = dd->cur_transfer->rx_buf;
	}
}

static void __init msm_spi_calculate_fifo_size(struct msm_spi *dd)
{
	u32 spi_iom;
	int block;
	int mult;

	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);

	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
					block, mult)) {
		goto fifo_size_err;
	}

	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
	if (msm_spi_calculate_size(&dd->output_fifo_size,
					&dd->output_block_size, block, mult)) {
		goto fifo_size_err;
	}
	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
		/* DM mode is not available for this block size */
		if (dd->input_block_size == 4 || dd->output_block_size == 4)
			dd->use_dma = 0;

		if (dd->use_dma) {
			dd->input_burst_size = max(dd->input_block_size,
						DM_BURST_SIZE);
			dd->output_burst_size = max(dd->output_block_size,
						DM_BURST_SIZE);
		}
	}

	return;

fifo_size_err:
	dd->use_dma = 0;
	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
	return;
}

static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
{
	u32 data_in;
	int i;
	int shift;

	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
	if (dd->read_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->rx_bytes_remaining; i++) {
			/* The data format depends on bytes_per_word:
			   4 bytes: 0x12345678
			   3 bytes: 0x00123456
			   2 bytes: 0x00001234
			   1 byte : 0x00000012
			*/
			shift = 8 * (dd->bytes_per_word - i - 1);
			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
			dd->rx_bytes_remaining--;
		}
	} else {
		if (dd->rx_bytes_remaining >= dd->bytes_per_word)
			dd->rx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->rx_bytes_remaining = 0;
	}

	dd->read_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->rx_bytes_remaining)
			dd->read_xfr_cnt = 0;
		else if ((dd->read_xfr_cnt * dd->bytes_per_word) ==
						dd->read_len) {
			struct spi_transfer *t = dd->cur_rx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->read_buf = t->rx_buf;
				dd->read_len = t->len;
				dd->read_xfr_cnt = 0;
				dd->cur_rx_transfer = t;
			}
		}
	}
}

static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
{
	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);

	return spi_op & SPI_OP_STATE_VALID;
}

static inline void msm_spi_udelay(unsigned long delay_usecs)
{
	/*
	 * For smaller values of delay, context switch time
	 * would negate the usage of usleep
	 */
	if (delay_usecs > 20)
		usleep_range(delay_usecs, delay_usecs);
	else if (delay_usecs)
		udelay(delay_usecs);
}

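/*
 * Poll until the QUP reports a valid operational state. The poll interval
 * and overall timeout are scaled with the configured SPI clock speed, so
 * slower clocks get proportionally more time for state transitions.
 */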
static inline int msm_spi_wait_valid(struct msm_spi *dd)
{
	unsigned long delay = 0;
	unsigned long timeout = 0;

	if (dd->clock_speed == 0)
		return -EINVAL;
	/*
	 * Based on the SPI clock speed, sufficient time
	 * should be given for the SPI state transition
	 * to occur
	 */
	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
	/*
	 * For small delay values, the default timeout would
	 * be one jiffy
	 */
	if (delay < SPI_DELAY_THRESHOLD)
		delay = SPI_DELAY_THRESHOLD;

	/* Adding one to round off to the nearest jiffy */
	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
	while (!msm_spi_is_valid_state(dd)) {
		if (time_after(jiffies, timeout)) {
			if (!msm_spi_is_valid_state(dd)) {
				if (dd->cur_msg)
					dd->cur_msg->status = -EIO;
				dev_err(dd->dev, "%s: SPI operational state "
					"not valid\n", __func__);
				return -ETIMEDOUT;
			} else
				return 0;
		}
		msm_spi_udelay(delay);
	}
	return 0;
}

static inline int msm_spi_set_state(struct msm_spi *dd,
				    enum msm_spi_state state)
{
	enum msm_spi_state cur_state;
	if (msm_spi_wait_valid(dd))
		return -EIO;
	cur_state = readl_relaxed(dd->base + SPI_STATE);
	/* Per spec:
	   For PAUSE_STATE to RESET_STATE, two writes of (10) are required */
	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
			(state == SPI_OP_STATE_RESET)) {
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
	} else {
		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
				dd->base + SPI_STATE);
	}
	if (msm_spi_wait_valid(dd))
		return -EIO;

	return 0;
}

/**
 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
 */
static inline void
msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
{
	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);

	if (n != (*config & SPI_CFG_N))
		*config = (*config & ~SPI_CFG_N) | n;

	if (((dd->mode == SPI_DMOV_MODE) && (!dd->read_len))
			|| (dd->mode == SPI_BAM_MODE)) {
		if (dd->read_buf == NULL)
			*config |= SPI_NO_INPUT;
		if (dd->write_buf == NULL)
			*config |= SPI_NO_OUTPUT;
	}
}

/**
 * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
 * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
 * @return calculated value for SPI_CONFIG
 */
static u32
msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
{
	if (mode & SPI_LOOP)
		spi_config |= SPI_CFG_LOOPBACK;
	else
		spi_config &= ~SPI_CFG_LOOPBACK;

	if (mode & SPI_CPHA)
		spi_config &= ~SPI_CFG_INPUT_FIRST;
	else
		spi_config |= SPI_CFG_INPUT_FIRST;

	return spi_config;
}

/**
 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
 * next transfer
 */
static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
{
	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
					spi_config, dd->cur_msg->spi->mode);

	if (dd->qup_ver == SPI_QUP_VERSION_NONE)
		/* flags removed from SPI_CONFIG in QUP version-2 */
		msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
	else if (dd->mode == SPI_BAM_MODE)
		spi_config |= SPI_CFG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability for spi-clk high rates
	 * but is invalid in LOOPBACK mode.
	 */
	if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
	   !(dd->cur_msg->spi->mode & SPI_LOOP))
		spi_config |= SPI_CFG_HS_MODE;
	else
		spi_config &= ~SPI_CFG_HS_MODE;

	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
}

/**
 * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and the write count for
 * FIFO mode; set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for
 * BAM and DMOV modes.
 * @n_words The number of reads/writes of size N.
 */
static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
{
	/*
	 * n_words cannot exceed fifo_size, and only one READ COUNT
	 * interrupt is generated per transaction, so for transactions
	 * larger than fifo size READ COUNT must be disabled.
	 * For those transactions we usually move to Data Mover mode.
	 */
	if (dd->mode == SPI_FIFO_MODE) {
		if (n_words <= dd->input_fifo_size) {
			writel_relaxed(n_words,
				       dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, n_words);
		} else {
			writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
			msm_spi_set_write_count(dd, 0);
		}
		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
			/* must be zero for FIFO */
			writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		}
	} else {
		/* must be zero for BAM and DMOV */
		writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
		msm_spi_set_write_count(dd, 0);

		/*
		 * for DMA transfers, both QUP_MX_INPUT_COUNT and
		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
		 * That case is a non-balanced transfer when there is
		 * only a read_buf.
		 */
		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
			if (dd->write_buf)
				writel_relaxed(0,
					       dd->base + SPI_MX_INPUT_COUNT);
			else
				writel_relaxed(n_words,
					       dd->base + SPI_MX_INPUT_COUNT);

			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
		}
	}
}

/**
 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
 * using BAM.
 * @brief BAM can move at most SPI_MAX_TRFR_BTWN_RESETS bytes in a single
 * transfer. Between transfers the QUP must change to the reset state. A loop
 * issues a single BAM transfer at a time. If another transfer is required,
 * it waits for the current transfer to finish, moves to the reset state,
 * and then back to the run state to issue the next transfer.
 * The function does not wait for the last transfer to end; if only a single
 * transfer is required, it does not wait for it to end either.
 * @timeout max time in jiffies to wait for a transfer to finish.
 * @return zero on success
 */
static int
msm_spi_bam_begin_transfer(struct msm_spi *dd, u32 timeout, u8 bpw)
{
	u32 bytes_to_send, bytes_sent, n_words_xfr, cons_flags, prod_flags;
	int ret;
	/*
	 * QUP must move to reset mode every 64K-1 bytes of transfer
	 * (counter is 16 bit)
	 */
	if (dd->tx_bytes_remaining > SPI_MAX_TRFR_BTWN_RESETS) {
		/* assert chip select unconditionally */
		u32 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
		if (!(spi_ioc & SPI_IO_C_FORCE_CS))
			writel_relaxed(spi_ioc | SPI_IO_C_FORCE_CS,
				dd->base + SPI_IO_CONTROL);
	}

	/* Following flags are required since we are waiting on all transfers */
	cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
	/*
	 * on a balanced transaction, BAM will set the flags on the producer
	 * pipe based on the flags set on the consumer pipe
	 */
	prod_flags = (dd->write_buf) ? 0 : cons_flags;

	while (dd->tx_bytes_remaining > 0) {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		bytes_to_send = min_t(u32, dd->tx_bytes_remaining
						, SPI_MAX_TRFR_BTWN_RESETS);
		n_words_xfr = DIV_ROUND_UP(bytes_to_send
						, dd->bytes_per_word);

		msm_spi_set_mx_counts(dd, n_words_xfr);

		ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
		if (ret < 0) {
			dev_err(dd->dev,
				"%s: Failed to set QUP state to run",
				__func__);
			goto xfr_err;
		}

		/* enqueue read buffer in BAM */
		if (dd->read_buf) {
			ret = sps_transfer_one(dd->bam.prod.handle,
				dd->cur_transfer->rx_dma + bytes_sent,
				bytes_to_send, dd, prod_flags);
			if (ret < 0) {
				dev_err(dd->dev,
				"%s: Failed to queue producer BAM transfer",
				__func__);
				goto xfr_err;
			}
		}

		/* enqueue write buffer in BAM */
		if (dd->write_buf) {
			ret = sps_transfer_one(dd->bam.cons.handle,
				dd->cur_transfer->tx_dma + bytes_sent,
				bytes_to_send, dd, cons_flags);
			if (ret < 0) {
				dev_err(dd->dev,
				"%s: Failed to queue consumer BAM transfer",
				__func__);
				goto xfr_err;
			}
		}

		dd->tx_bytes_remaining -= bytes_to_send;

		/* move to reset state after SPI_MAX_TRFR_BTWN_RESETS */
		if (dd->tx_bytes_remaining > 0) {
			if (!wait_for_completion_timeout(
				&dd->transfer_complete, timeout)) {
				dev_err(dd->dev,
					"%s: SPI transaction timeout",
					__func__);
				dd->cur_msg->status = -EIO;
				ret = -EIO;
				goto xfr_err;
			}
			ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
			if (ret < 0) {
				dev_err(dd->dev,
					"%s: Failed to set QUP state to reset",
					__func__);
				goto xfr_err;
			}
			init_completion(&dd->transfer_complete);
		}
	}
	return 0;

xfr_err:
	return ret;
}

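/*
 * Data Mover (DMOV) transfers are described with two chained commands: a
 * 'box' command that moves the burst-aligned portion of the buffer, and a
 * padded 'single' command that moves any tail shorter than the burst size
 * through the driver's padding buffer.
 */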
static void msm_spi_setup_dm_transfer(struct msm_spi *dd)
{
	dmov_box *box;
	int bytes_to_send, bytes_sent;
	int tx_num_rows, rx_num_rows;
	u32 num_transfers;

	atomic_set(&dd->rx_irq_called, 0);
	atomic_set(&dd->tx_irq_called, 0);
	if (dd->write_len && !dd->read_len) {
		/* WR-WR transfer */
		bytes_sent = dd->cur_msg_len - dd->tx_bytes_remaining;
		dd->write_buf = dd->temp_buf;
	} else {
		bytes_sent = dd->cur_transfer->len - dd->tx_bytes_remaining;
		/* For WR-RD transfer, bytes_sent can be negative */
		if (bytes_sent < 0)
			bytes_sent = 0;
	}
	/* We'll send in chunks of SPI_MAX_LEN if the transfer is larger
	 * than 4K bytes, for targets that have only 12 bits in the
	 * QUP_MAX_OUTPUT_CNT register. If the target supports more than
	 * 12 bits, then we send the data in chunks of the infinite_mode
	 * value that is defined in the corresponding board file.
	 */
	if (!dd->pdata->infinite_mode)
		dd->max_trfr_len = SPI_MAX_LEN;
	else
		dd->max_trfr_len = (dd->pdata->infinite_mode) *
					(dd->bytes_per_word);

	bytes_to_send = min_t(u32, dd->tx_bytes_remaining,
				dd->max_trfr_len);

	num_transfers = DIV_ROUND_UP(bytes_to_send, dd->bytes_per_word);
	dd->tx_unaligned_len = bytes_to_send % dd->output_burst_size;
	dd->rx_unaligned_len = bytes_to_send % dd->input_burst_size;
	tx_num_rows = bytes_to_send / dd->output_burst_size;
	rx_num_rows = bytes_to_send / dd->input_burst_size;

	dd->mode = SPI_DMOV_MODE;

	if (tx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->tx_dmov_cmd->box;
		box->src_row_addr = dd->cur_transfer->tx_dma + bytes_sent;
		box->src_dst_len
			= (dd->output_burst_size << 16) | dd->output_burst_size;
		box->num_rows = (tx_num_rows << 16) | tx_num_rows;
		box->row_offset = (dd->output_burst_size << 16) | 0;

		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->tx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (rx_num_rows) {
		/* src in 16 MSB, dst in 16 LSB */
		box = &dd->rx_dmov_cmd->box;
		box->dst_row_addr = dd->cur_transfer->rx_dma + bytes_sent;
		box->src_dst_len
			= (dd->input_burst_size << 16) | dd->input_burst_size;
		box->num_rows = (rx_num_rows << 16) | rx_num_rows;
		box->row_offset = (0 << 16) | dd->input_burst_size;

		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, box));
	} else {
		dd->rx_dmov_cmd->cmd_ptr = CMD_PTR_LP |
				   DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
				   offsetof(struct spi_dmov_cmd, single_pad));
	}

	if (!dd->tx_unaligned_len) {
		dd->tx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *tx_cmd = &(dd->tx_dmov_cmd->single_pad);
		u32 tx_offset = dd->cur_transfer->len - dd->tx_unaligned_len;

		if ((dd->multi_xfr) && (dd->read_len <= 0))
			tx_offset = dd->cur_msg_len - dd->tx_unaligned_len;

		dd->tx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->tx_padding, 0, dd->output_burst_size);
		if (dd->write_buf)
			memcpy(dd->tx_padding, dd->write_buf + tx_offset,
			       dd->tx_unaligned_len);

		tx_cmd->src = dd->tx_padding_dma;
		tx_cmd->len = dd->output_burst_size;
	}

	if (!dd->rx_unaligned_len) {
		dd->rx_dmov_cmd->box.cmd |= CMD_LC;
	} else {
		dmov_s *rx_cmd = &(dd->rx_dmov_cmd->single_pad);
		dd->rx_dmov_cmd->box.cmd &= ~CMD_LC;

		memset(dd->rx_padding, 0, dd->input_burst_size);
		rx_cmd->dst = dd->rx_padding_dma;
		rx_cmd->len = dd->input_burst_size;
	}

	/* This also takes care of the padding dummy buf
	   Since this is set to the correct length, the
	   dummy bytes won't be actually sent */
	if (dd->multi_xfr) {
		u32 write_transfers = 0;
		u32 read_transfers = 0;

		if (dd->write_len > 0) {
			write_transfers = DIV_ROUND_UP(dd->write_len,
						       dd->bytes_per_word);
			writel_relaxed(write_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		}
		if (dd->read_len > 0) {
			/*
			 * The read following a write transfer must take
			 * into account that the bytes pertaining to
			 * the write transfer need to be discarded
			 * before the actual read begins.
			 */
			read_transfers = DIV_ROUND_UP(dd->read_len +
						      dd->write_len,
						      dd->bytes_per_word);
			writel_relaxed(read_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
		}
	} else {
		if (dd->write_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_OUTPUT_COUNT);
		if (dd->read_buf)
			writel_relaxed(num_transfers,
				       dd->base + SPI_MX_INPUT_COUNT);
	}
}

static void msm_spi_enqueue_dm_commands(struct msm_spi *dd)
{
	dma_coherent_pre_ops();
	if (dd->write_buf)
		msm_dmov_enqueue_cmd(dd->tx_dma_chan, &dd->tx_hdr);
	if (dd->read_buf)
		msm_dmov_enqueue_cmd(dd->rx_dma_chan, &dd->rx_hdr);
}

/* The SPI core on targets that do not support infinite mode can send a
   maximum of 4K or 64K per transfer, depending on the size of the
   MAX_OUTPUT_COUNT register; therefore, we send the data in several
   chunks. Upon completion we send the next chunk, or complete the
   transfer if everything is finished. On targets that support
   infinite mode, we send all the bytes in a single chunk.
*/
static int msm_spi_dm_send_next(struct msm_spi *dd)
{
	/* By now we should have sent all the bytes in FIFO mode,
	 * However to make things right, we'll check anyway.
	 */
	if (dd->mode != SPI_DMOV_MODE)
		return 0;

	/* On targets which do not support infinite mode,
	   we need to send more chunks if we sent the maximum last time */
	if (dd->tx_bytes_remaining > dd->max_trfr_len) {
		dd->tx_bytes_remaining -= dd->max_trfr_len;
		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
			return 0;
		dd->read_len = dd->write_len = 0;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	} else if (dd->read_len && dd->write_len) {
		dd->tx_bytes_remaining -= dd->cur_transfer->len;
		if (list_is_last(&dd->cur_transfer->transfer_list,
					&dd->cur_msg->transfers))
			return 0;
		get_next_transfer(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
			return 0;
		dd->tx_bytes_remaining = dd->read_len + dd->write_len;
		dd->read_buf = dd->temp_buf;
		dd->read_len = dd->write_len = -1;
		msm_spi_setup_dm_transfer(dd);
		msm_spi_enqueue_dm_commands(dd);
		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
			return 0;
		return 1;
	}
	return 0;
}

static inline void msm_spi_ack_transfer(struct msm_spi *dd)
{
	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
		       dd->base + SPI_OPERATIONAL);
	/* Ensure done flag was cleared before proceeding further */
	mb();
}

/* Figure out which IRQ occurred and call the relevant functions */
static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
{
	u32 op, ret = IRQ_NONE;
	struct msm_spi *dd = dev_id;

	if (pm_runtime_suspended(dd->dev)) {
		dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
		return ret;
	}
	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
		struct spi_master *master = dev_get_drvdata(dd->dev);
		ret |= msm_spi_error_irq(irq, master);
	}

	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
	if (op & SPI_OP_INPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_input_irq(irq, dev_id);
	}

	if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
		writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
			       dd->base + SPI_OPERATIONAL);
		/*
		 * Ensure service flag was cleared before further
		 * processing of interrupt.
		 */
		mb();
		ret |= msm_spi_output_irq(irq, dev_id);
	}

	if (dd->done) {
		complete(&dd->transfer_complete);
		dd->done = 0;
	}
	return ret;
}

static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_rx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		u32 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
		if ((!dd->read_buf || op & SPI_OP_MAX_INPUT_DONE_FLAG) &&
		    (!dd->write_buf || op & SPI_OP_MAX_OUTPUT_DONE_FLAG)) {
			msm_spi_ack_transfer(dd);
			if (dd->rx_unaligned_len == 0) {
				if (atomic_inc_return(&dd->rx_irq_called) == 1)
					return IRQ_HANDLED;
			}
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	if (dd->mode == SPI_FIFO_MODE) {
		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
			SPI_OP_IP_FIFO_NOT_EMPTY) &&
			(dd->rx_bytes_remaining > 0)) {
			msm_spi_read_word_from_fifo(dd);
		}
		if (dd->rx_bytes_remaining == 0)
			msm_spi_complete(dd);
	}

	return IRQ_HANDLED;
}

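/*
 * Pack up to bytes_per_word bytes from the write buffer into a single 32-bit
 * word, most significant byte first, mirroring the unpacking format used in
 * msm_spi_read_word_from_fifo(), and push it into the output FIFO.
 */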
static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
{
	u32 word;
	u8 byte;
	int i;

	word = 0;
	if (dd->write_buf) {
		for (i = 0; (i < dd->bytes_per_word) &&
			     dd->tx_bytes_remaining; i++) {
			dd->tx_bytes_remaining--;
			byte = *dd->write_buf++;
			word |= (byte << (BITS_PER_BYTE * (3 - i)));
		}
	} else
		if (dd->tx_bytes_remaining > dd->bytes_per_word)
			dd->tx_bytes_remaining -= dd->bytes_per_word;
		else
			dd->tx_bytes_remaining = 0;
	dd->write_xfr_cnt++;
	if (dd->multi_xfr) {
		if (!dd->tx_bytes_remaining)
			dd->write_xfr_cnt = 0;
		else if ((dd->write_xfr_cnt * dd->bytes_per_word) ==
						dd->write_len) {
			struct spi_transfer *t = dd->cur_tx_transfer;
			if (t->transfer_list.next != &dd->cur_msg->transfers) {
				t = list_entry(t->transfer_list.next,
						struct spi_transfer,
						transfer_list);
				dd->write_buf = t->tx_buf;
				dd->write_len = t->len;
				dd->write_xfr_cnt = 0;
				dd->cur_tx_transfer = t;
			}
		}
	}
	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
}

static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
	int count = 0;

	while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
	       !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
		SPI_OP_OUTPUT_FIFO_FULL)) {
		msm_spi_write_word_to_fifo(dd);
		count++;
	}
}

static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
{
	struct msm_spi *dd = dev_id;

	dd->stat_tx++;

	if (dd->mode == SPI_MODE_NONE)
		return IRQ_HANDLED;

	if (dd->mode == SPI_DMOV_MODE) {
		/* TX_ONLY transaction is handled here
		   This is the only place we send complete at tx and not rx */
		if (dd->read_buf == NULL &&
		    readl_relaxed(dd->base + SPI_OPERATIONAL) &
		    SPI_OP_MAX_OUTPUT_DONE_FLAG) {
			msm_spi_ack_transfer(dd);
			if (atomic_inc_return(&dd->tx_irq_called) == 1)
				return IRQ_HANDLED;
			msm_spi_complete(dd);
			return IRQ_HANDLED;
		}
		return IRQ_NONE;
	}

	/* Output FIFO is empty. Transmit any outstanding write data. */
	if (dd->mode == SPI_FIFO_MODE)
		msm_spi_write_rmn_to_fifo(dd);

	return IRQ_HANDLED;
}

static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct msm_spi *dd = spi_master_get_devdata(master);
	u32 spi_err;

	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output overrun error\n");
	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI input underrun error\n");
	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI output underrun error\n");
	msm_spi_get_clk_err(dd, &spi_err);
	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock overrun error\n");
	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
		dev_warn(master->dev.parent, "SPI clock underrun error\n");
	msm_spi_clear_error_flags(dd);
	msm_spi_ack_clk_err(dd);
	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
	mb();
	return IRQ_HANDLED;
}

/**
 * msm_spi_dma_map_buffers: prepares buffer for DMA transfer
 * @return zero on success or negative error code
 *
 * calls dma_map_single() on the read/write buffers, effectively invalidating
 * their cache entries. For WR-WR and WR-RD transfers, allocates a temporary
 * buffer and copies the data to/from the client buffers
 */
static int msm_spi_dma_map_buffers(struct msm_spi *dd)
{
	struct device *dev;
	struct spi_transfer *first_xfr;
	struct spi_transfer *nxt_xfr = NULL;
	void *tx_buf, *rx_buf;
	unsigned tx_len, rx_len;
	int ret = -EINVAL;

	dev = &dd->cur_msg->spi->dev;
	first_xfr = dd->cur_transfer;
	tx_buf = (void *)first_xfr->tx_buf;
	rx_buf = first_xfr->rx_buf;
	tx_len = rx_len = first_xfr->len;

	/*
	 * For WR-WR and WR-RD transfers, we allocate our own temporary
	 * buffer and copy the data to/from the client buffers.
	 */
	if (dd->multi_xfr) {
		dd->temp_buf = kzalloc(dd->cur_msg_len,
				       GFP_KERNEL | __GFP_DMA);
		if (!dd->temp_buf)
			return -ENOMEM;
		nxt_xfr = list_entry(first_xfr->transfer_list.next,
				     struct spi_transfer, transfer_list);

		if (dd->write_len && !dd->read_len) {
			if (!first_xfr->tx_buf || !nxt_xfr->tx_buf)
				goto error;

			memcpy(dd->temp_buf, first_xfr->tx_buf, first_xfr->len);
			memcpy(dd->temp_buf + first_xfr->len, nxt_xfr->tx_buf,
			       nxt_xfr->len);
			tx_buf = dd->temp_buf;
			tx_len = dd->cur_msg_len;
		} else {
			if (!first_xfr->tx_buf || !nxt_xfr->rx_buf)
				goto error;

			rx_buf = dd->temp_buf;
			rx_len = dd->cur_msg_len;
		}
	}
	if (tx_buf != NULL) {
		first_xfr->tx_dma = dma_map_single(dev, tx_buf,
						   tx_len, DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, first_xfr->tx_dma)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'T', tx_len);
			ret = -ENOMEM;
			goto error;
		}
	}
	if (rx_buf != NULL) {
		dma_addr_t dma_handle;
		dma_handle = dma_map_single(dev, rx_buf,
					    rx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(NULL, dma_handle)) {
			dev_err(dev, "dma %cX %d bytes error\n",
				'R', rx_len);
			if (tx_buf != NULL)
				dma_unmap_single(NULL, first_xfr->tx_dma,
						 tx_len, DMA_TO_DEVICE);
			ret = -ENOMEM;
			goto error;
		}
		if (dd->multi_xfr)
			nxt_xfr->rx_dma = dma_handle;
		else
			first_xfr->rx_dma = dma_handle;
	}
	return 0;

error:
	kfree(dd->temp_buf);
	dd->temp_buf = NULL;
	return ret;
}

static void msm_spi_dmov_unmap_buffers(struct msm_spi *dd)
{
	struct device *dev;
	u32 offset;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_msg->is_dma_mapped)
		goto unmap_end;

	if (dd->multi_xfr) {
		if (dd->write_len && !dd->read_len) {
			dma_unmap_single(dev,
					 dd->cur_transfer->tx_dma,
					 dd->cur_msg_len,
					 DMA_TO_DEVICE);
		} else {
			struct spi_transfer *prev_xfr;
			prev_xfr = list_entry(
				   dd->cur_transfer->transfer_list.prev,
				   struct spi_transfer,
				   transfer_list);
			if (dd->cur_transfer->rx_buf) {
				dma_unmap_single(dev,
						 dd->cur_transfer->rx_dma,
						 dd->cur_msg_len,
						 DMA_FROM_DEVICE);
			}
			if (prev_xfr->tx_buf) {
				dma_unmap_single(dev,
						 prev_xfr->tx_dma,
						 prev_xfr->len,
						 DMA_TO_DEVICE);
			}
			if (dd->rx_unaligned_len && dd->read_buf) {
				offset = dd->cur_msg_len - dd->rx_unaligned_len;
				dma_coherent_post_ops();
				memcpy(dd->read_buf + offset, dd->rx_padding,
				       dd->rx_unaligned_len);
				memcpy(dd->cur_transfer->rx_buf,
				       dd->read_buf + prev_xfr->len,
				       dd->cur_transfer->len);
			}
		}
		kfree(dd->temp_buf);
		dd->temp_buf = NULL;
		return;
	} else {
		if (dd->cur_transfer->rx_buf)
			dma_unmap_single(dev, dd->cur_transfer->rx_dma,
					 dd->cur_transfer->len,
					 DMA_FROM_DEVICE);
		if (dd->cur_transfer->tx_buf)
			dma_unmap_single(dev, dd->cur_transfer->tx_dma,
					 dd->cur_transfer->len,
					 DMA_TO_DEVICE);
	}

unmap_end:
	/* If we padded the transfer, we copy it from the padding buf */
	if (dd->rx_unaligned_len && dd->read_buf) {
		offset = dd->cur_transfer->len - dd->rx_unaligned_len;
		dma_coherent_post_ops();
		memcpy(dd->read_buf + offset, dd->rx_padding,
		       dd->rx_unaligned_len);
	}
}

static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
{
	struct device *dev;

	/* mapped by client */
	if (dd->cur_msg->is_dma_mapped)
		return;

	dev = &dd->cur_msg->spi->dev;
	if (dd->cur_transfer->rx_buf)
		dma_unmap_single(dev, dd->cur_transfer->rx_dma,
				 dd->cur_transfer->len,
				 DMA_FROM_DEVICE);

	if (dd->cur_transfer->tx_buf)
		dma_unmap_single(dev, dd->cur_transfer->tx_dma,
				 dd->cur_transfer->len,
				 DMA_TO_DEVICE);
}

static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
{
	if (dd->mode == SPI_DMOV_MODE)
		msm_spi_dmov_unmap_buffers(dd);
	else if (dd->mode == SPI_BAM_MODE)
		msm_spi_bam_unmap_buffers(dd);
}

/**
 * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
 * the given transfer
 * @dd: device
 * @tr: transfer
 *
 * Start using DMA if:
 * 1. Is supported by HW
 * 2. Is not disabled by platform data
 * 3. Transfer size is greater than 3*block size.
 * 4. Buffers are aligned to cache line.
 * 5. Bytes-per-word is 8, 16 or 32.
 */
static inline bool
msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
{
	if (!dd->use_dma)
		return false;

	/* check constraints from platform data */
	if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
		return false;

	if (dd->cur_msg_len < 3*dd->input_block_size)
		return false;

	if (dd->multi_xfr && !dd->read_len && !dd->write_len)
		return false;

	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
		u32 cache_line = dma_get_cache_alignment();

		if (tr->tx_buf) {
			if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
				return false;
		}
		if (tr->rx_buf) {
			if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
				return false;
		}

		if (tr->cs_change &&
		   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
			return false;
	}

	return true;
}

/**
 * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
 * prepares to process a transfer.
 */
static void
msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
{
	if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
		if (dd->qup_ver) {
			dd->mode = SPI_BAM_MODE;
		} else {
			dd->mode = SPI_DMOV_MODE;
			if (dd->write_len && dd->read_len) {
				dd->tx_bytes_remaining = dd->write_len;
				dd->rx_bytes_remaining = dd->read_len;
			}
		}
	} else {
		dd->mode = SPI_FIFO_MODE;
		if (dd->multi_xfr) {
			dd->read_len = dd->cur_transfer->len;
			dd->write_len = dd->cur_transfer->len;
		}
	}
}

/**
 * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
 * transfer
 */
static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
{
	u32 spi_iom;
	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
	/* Set input and output transfer mode: FIFO, DMOV, or BAM */
	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
	spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
	spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
	/* Turn on packing for data mover */
	if ((dd->mode == SPI_DMOV_MODE) || (dd->mode == SPI_BAM_MODE))
		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
	else
		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);

	/*if (dd->mode == SPI_BAM_MODE) {
		spi_iom |= SPI_IO_C_NO_TRI_STATE;
		spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY);
	}*/
	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
}

static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
{
	if (mode & SPI_CPOL)
		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
	return spi_ioc;
}

/**
 * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
 * next transfer
 * @return the new set value of SPI_IO_CONTROL
 */
static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
{
	u32 spi_ioc, spi_ioc_orig, chip_select;

	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
	spi_ioc_orig = spi_ioc;
	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
						, dd->cur_msg->spi->mode);
	/* Set chip-select */
	chip_select = dd->cur_msg->spi->chip_select << 2;
	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
	if (!dd->cur_transfer->cs_change)
		spi_ioc |= SPI_IO_C_MX_CS_MODE;

	if (spi_ioc != spi_ioc_orig)
		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);

	return spi_ioc;
}

/**
 * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
 * the next transfer
 */
static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
{
	/* mask INPUT and OUTPUT service flags to prevent IRQs on FIFO status
	 * change in BAM mode */
	u32 mask = (dd->mode == SPI_BAM_MODE) ?
		QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
		: 0;
	writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
}

static void msm_spi_process_transfer(struct msm_spi *dd)
{
	u8 bpw;
	u32 max_speed;
	u32 read_count;
	u32 timeout;
	u32 spi_ioc;
	u32 int_loopback = 0;

	dd->tx_bytes_remaining = dd->cur_msg_len;
	dd->rx_bytes_remaining = dd->cur_msg_len;
	dd->read_buf = dd->cur_transfer->rx_buf;
	dd->write_buf = dd->cur_transfer->tx_buf;
	init_completion(&dd->transfer_complete);
	if (dd->cur_transfer->bits_per_word)
		bpw = dd->cur_transfer->bits_per_word;
	else
		if (dd->cur_msg->spi->bits_per_word)
			bpw = dd->cur_msg->spi->bits_per_word;
		else
			bpw = 8;
	dd->bytes_per_word = (bpw + 7) / 8;

	if (dd->cur_transfer->speed_hz)
		max_speed = dd->cur_transfer->speed_hz;
	else
		max_speed = dd->cur_msg->spi->max_speed_hz;
	if (!dd->clock_speed || max_speed != dd->clock_speed)
		msm_spi_clock_set(dd, max_speed);

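	/*
	 * Allow roughly 100x the nominal transfer time before timing out:
	 * cur_msg_len * 8 bits clocked at max_speed, rounded up to whole
	 * milliseconds and converted to jiffies.
	 */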
Gilad Avidovd0262342012-10-24 16:52:30 -06001598 timeout = 100 * msecs_to_jiffies(
1599 DIV_ROUND_UP(dd->cur_msg_len * 8,
1600 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
1601
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001602 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
1603 if (dd->cur_msg->spi->mode & SPI_LOOP)
1604 int_loopback = 1;
1605 if (int_loopback && dd->multi_xfr &&
1606 (read_count > dd->input_fifo_size)) {
1607 if (dd->read_len && dd->write_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001608 pr_err(
1609 "%s:Internal Loopback does not support > fifo size"
1610 "for write-then-read transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001611 __func__);
1612 else if (dd->write_len && !dd->read_len)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001613 pr_err(
1614 "%s:Internal Loopback does not support > fifo size"
1615 "for write-then-write transactions\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001616 __func__);
1617 return;
1618 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001619
Gilad Avidovd0262342012-10-24 16:52:30 -06001620 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
1621 dev_err(dd->dev,
1622 "%s: Error setting QUP to reset-state",
1623 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001624
Gilad Avidovd0262342012-10-24 16:52:30 -06001625 msm_spi_set_transfer_mode(dd, bpw, read_count);
1626 msm_spi_set_mx_counts(dd, read_count);
1627 if ((dd->mode == SPI_BAM_MODE) || (dd->mode == SPI_DMOV_MODE))
1628 if (msm_spi_dma_map_buffers(dd) < 0) {
1629 pr_err("Mapping DMA buffers\n");
1630 return;
1631 }
1632 msm_spi_set_qup_io_modes(dd);
1633 msm_spi_set_spi_config(dd, bpw);
1634 msm_spi_set_qup_config(dd, bpw);
1635 spi_ioc = msm_spi_set_spi_io_control(dd);
1636 msm_spi_set_qup_op_mask(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001637
1638 if (dd->mode == SPI_DMOV_MODE) {
1639 msm_spi_setup_dm_transfer(dd);
1640 msm_spi_enqueue_dm_commands(dd);
1641 }
1642 /* The output fifo interrupt handler will handle all writes after
1643 the first. Restricting this to one write avoids contention
1644 issues and race conditions between this thread and the int handler
1645 */
1646 else if (dd->mode == SPI_FIFO_MODE) {
1647 if (msm_spi_prepare_for_write(dd))
1648 goto transfer_end;
1649 msm_spi_start_write(dd, read_count);
Gilad Avidovd0262342012-10-24 16:52:30 -06001650 } else if (dd->mode == SPI_BAM_MODE) {
1651 if ((msm_spi_bam_begin_transfer(dd, timeout, bpw)) < 0)
1652 dev_err(dd->dev, "%s: BAM transfer setup failed\n",
1653 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001654 }
1655
Gilad Avidovd0262342012-10-24 16:52:30 -06001656 /*
1657	 * In BAM mode, the QUP is already in the RUN state at this point.
1658	 * Otherwise, only enter the RUN state after the first word has been
1659	 * written into the output FIFO; the output FIFO EMPTY interrupt
1660	 * might fire before the first word is written, resulting in a
1661	 * possible race condition.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001662 */
Gilad Avidovd0262342012-10-24 16:52:30 -06001663 if (dd->mode != SPI_BAM_MODE)
1664 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
1665 dev_warn(dd->dev,
1666 "%s: Failed to set QUP to run-state. Mode:%d",
1667 __func__, dd->mode);
1668 goto transfer_end;
1669 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001670
1671 /* Assume success, this might change later upon transaction result */
1672 dd->cur_msg->status = 0;
1673 do {
1674 if (!wait_for_completion_timeout(&dd->transfer_complete,
1675 timeout)) {
Gilad Avidovd0262342012-10-24 16:52:30 -06001676 dev_err(dd->dev,
1677 "%s: SPI transaction timeout\n",
1678 __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001679 dd->cur_msg->status = -EIO;
1680 if (dd->mode == SPI_DMOV_MODE) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07001681 msm_dmov_flush(dd->tx_dma_chan, 1);
1682 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001683 }
1684 break;
1685 }
1686 } while (msm_spi_dm_send_next(dd));
1687
Sagar Dharia525593d2012-11-02 18:26:01 -06001688 msm_spi_udelay(dd->cur_transfer->delay_usecs);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689transfer_end:
Gilad Avidovd0262342012-10-24 16:52:30 -06001690 msm_spi_dma_unmap_buffers(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001691 dd->mode = SPI_MODE_NONE;
1692
1693 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1694 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1695 dd->base + SPI_IO_CONTROL);
1696}
1697
1698static void get_transfer_length(struct msm_spi *dd)
1699{
1700 struct spi_transfer *tr;
1701 int num_xfrs = 0;
1702 int readlen = 0;
1703 int writelen = 0;
1704
1705 dd->cur_msg_len = 0;
1706 dd->multi_xfr = 0;
1707 dd->read_len = dd->write_len = 0;
1708
1709 list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) {
1710 if (tr->tx_buf)
1711 writelen += tr->len;
1712 if (tr->rx_buf)
1713 readlen += tr->len;
1714 dd->cur_msg_len += tr->len;
1715 num_xfrs++;
1716 }
1717
1718 if (num_xfrs == 2) {
1719 struct spi_transfer *first_xfr = dd->cur_transfer;
1720
1721 dd->multi_xfr = 1;
1722 tr = list_entry(first_xfr->transfer_list.next,
1723 struct spi_transfer,
1724 transfer_list);
1725 /*
1726 * We update dd->read_len and dd->write_len only
1727 * for WR-WR and WR-RD transfers.
1728 */
1729 if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) {
1730 if (((tr->tx_buf) && (!tr->rx_buf)) ||
1731 ((!tr->tx_buf) && (tr->rx_buf))) {
1732 dd->read_len = readlen;
1733 dd->write_len = writelen;
1734 }
1735 }
1736 } else if (num_xfrs > 1)
1737 dd->multi_xfr = 1;
1738}
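/*
 * Example (illustrative): for a two-transfer message whose first transfer is
 * TX-only and whose second is TX-only or RX-only (write-then-write or
 * write-then-read), this sets dd->multi_xfr = 1, dd->write_len to the total
 * TX length and dd->read_len to the total RX length; any other combination
 * of two or more transfers only sets dd->multi_xfr.
 */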
1739
1740static inline int combine_transfers(struct msm_spi *dd)
1741{
1742 struct spi_transfer *t = dd->cur_transfer;
1743 struct spi_transfer *nxt;
1744 int xfrs_grped = 1;
1745
1746 dd->cur_msg_len = dd->cur_transfer->len;
1747 while (t->transfer_list.next != &dd->cur_msg->transfers) {
1748 nxt = list_entry(t->transfer_list.next,
1749 struct spi_transfer,
1750 transfer_list);
1751 if (t->cs_change != nxt->cs_change)
1752 return xfrs_grped;
1753 dd->cur_msg_len += nxt->len;
1754 xfrs_grped++;
1755 t = nxt;
1756 }
1757 return xfrs_grped;
1758}
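/*
 * Illustrative example (not driver logic): for transfers A, B, C where
 * A->cs_change == B->cs_change but C->cs_change differs, a call with
 * dd->cur_transfer == A returns xfrs_grped == 2 and leaves dd->cur_msg_len
 * equal to A->len + B->len; grouping stops at the first change in the
 * cs_change setting.
 */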
1759
Harini Jayaraman093938a2012-04-20 15:33:23 -06001760static inline void write_force_cs(struct msm_spi *dd, bool set_flag)
1761{
1762 u32 spi_ioc;
1763 u32 spi_ioc_orig;
1764
1765 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1766 spi_ioc_orig = spi_ioc;
1767 if (set_flag)
1768 spi_ioc |= SPI_IO_C_FORCE_CS;
1769 else
1770 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1771
1772 if (spi_ioc != spi_ioc_orig)
1773 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1774}
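/*
 * Usage note (see msm_spi_process_message() below): on QUP v2 the driver
 * asserts FORCE_CS before a transfer whose successor has the same cs_change
 * setting, keeping the chip-select asserted across the pair, and clears
 * FORCE_CS otherwise.
 */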
1775
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001776static void msm_spi_process_message(struct msm_spi *dd)
1777{
1778 int xfrs_grped = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001779 int cs_num;
1780 int rc;
Sagar Dharia525593d2012-11-02 18:26:01 -06001781 bool xfer_delay = false;
1782 struct spi_transfer *tr;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001783
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001784 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
Harini Jayaramane4c06192011-09-28 16:26:39 -06001785 cs_num = dd->cur_msg->spi->chip_select;
1786 if ((!(dd->cur_msg->spi->mode & SPI_LOOP)) &&
1787 (!(dd->cs_gpios[cs_num].valid)) &&
1788 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
1789 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
1790 spi_cs_rsrcs[cs_num]);
1791 if (rc) {
1792 dev_err(dd->dev, "gpio_request for pin %d failed with "
1793 "error %d\n", dd->cs_gpios[cs_num].gpio_num,
1794 rc);
1795 return;
1796 }
1797 dd->cs_gpios[cs_num].valid = 1;
1798 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001799
Sagar Dharia525593d2012-11-02 18:26:01 -06001800 list_for_each_entry(tr,
1801 &dd->cur_msg->transfers,
1802 transfer_list) {
1803 if (tr->delay_usecs) {
1804			dev_info(dd->dev, "SPI slave requests delay per txn: %d usecs",
1805 tr->delay_usecs);
1806 xfer_delay = true;
1807 break;
1808 }
1809 }
1810
1811 /* Don't combine xfers if delay is needed after every xfer */
1812 if (dd->qup_ver || xfer_delay) {
1813 if (dd->qup_ver)
1814 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001815 list_for_each_entry(dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001816 &dd->cur_msg->transfers,
1817 transfer_list) {
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001818 struct spi_transfer *t = dd->cur_transfer;
1819 struct spi_transfer *nxt;
1820
1821 if (t->transfer_list.next != &dd->cur_msg->transfers) {
1822 nxt = list_entry(t->transfer_list.next,
1823 struct spi_transfer,
1824 transfer_list);
1825
Sagar Dharia525593d2012-11-02 18:26:01 -06001826 if (dd->qup_ver &&
1827 t->cs_change == nxt->cs_change)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001828 write_force_cs(dd, 1);
Sagar Dharia525593d2012-11-02 18:26:01 -06001829 else if (dd->qup_ver)
Harini Jayaraman093938a2012-04-20 15:33:23 -06001830 write_force_cs(dd, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001831 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001832
1833 dd->cur_msg_len = dd->cur_transfer->len;
1834 msm_spi_process_transfer(dd);
1835 }
1836 } else {
1837 dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers,
1838 struct spi_transfer,
1839 transfer_list);
1840 get_transfer_length(dd);
1841 if (dd->multi_xfr && !dd->read_len && !dd->write_len) {
1842 /*
1843 * Handling of multi-transfers.
1844 * FIFO mode is used by default
1845 */
1846 list_for_each_entry(dd->cur_transfer,
1847 &dd->cur_msg->transfers,
1848 transfer_list) {
1849 if (!dd->cur_transfer->len)
1850 goto error;
1851 if (xfrs_grped) {
1852 xfrs_grped--;
1853 continue;
1854 } else {
1855 dd->read_len = dd->write_len = 0;
1856 xfrs_grped = combine_transfers(dd);
1857 }
1858
1859 dd->cur_tx_transfer = dd->cur_transfer;
1860 dd->cur_rx_transfer = dd->cur_transfer;
1861 msm_spi_process_transfer(dd);
1862 xfrs_grped--;
1863 }
1864 } else {
1865 /* Handling of a single transfer or
1866 * WR-WR or WR-RD transfers
1867 */
1868 if ((!dd->cur_msg->is_dma_mapped) &&
Gilad Avidovd0262342012-10-24 16:52:30 -06001869 (msm_spi_use_dma(dd, dd->cur_transfer,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001870 dd->cur_transfer->bits_per_word))) {
1871 /* Mapping of DMA buffers */
Gilad Avidovd0262342012-10-24 16:52:30 -06001872 int ret = msm_spi_dma_map_buffers(dd);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07001873 if (ret < 0) {
1874 dd->cur_msg->status = ret;
1875 goto error;
1876 }
1877 }
1878
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001879 dd->cur_tx_transfer = dd->cur_transfer;
1880 dd->cur_rx_transfer = dd->cur_transfer;
1881 msm_spi_process_transfer(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001882 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001883 }
Harini Jayaramane4c06192011-09-28 16:26:39 -06001884
1885 return;
1886
1887error:
1888 if (dd->cs_gpios[cs_num].valid) {
1889 gpio_free(dd->cs_gpios[cs_num].gpio_num);
1890 dd->cs_gpios[cs_num].valid = 0;
1891 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001892}
1893
1894/* workqueue - pull messages from queue & process */
1895static void msm_spi_workq(struct work_struct *work)
1896{
1897 struct msm_spi *dd =
1898 container_of(work, struct msm_spi, work_data);
1899 unsigned long flags;
1900 u32 status_error = 0;
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301901
1902 pm_runtime_get_sync(dd->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001903
1904 mutex_lock(&dd->core_lock);
1905
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301906 /*
1907 * Counter-part of system-suspend when runtime-pm is not enabled.
1908	 * This way, resume can be left empty and the device is put into
1909	 * active mode only when a client requests something on the bus.
1910 */
1911 if (!pm_runtime_enabled(dd->dev))
1912 msm_spi_pm_resume_runtime(dd->dev);
1913
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001914 if (dd->use_rlock)
1915 remote_mutex_lock(&dd->r_lock);
1916
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001917 if (!msm_spi_is_valid_state(dd)) {
1918 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1919 __func__);
1920 status_error = 1;
1921 }
1922
1923 spin_lock_irqsave(&dd->queue_lock, flags);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301924 dd->transfer_pending = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001925 while (!list_empty(&dd->queue)) {
1926 dd->cur_msg = list_entry(dd->queue.next,
1927 struct spi_message, queue);
1928 list_del_init(&dd->cur_msg->queue);
1929 spin_unlock_irqrestore(&dd->queue_lock, flags);
1930 if (status_error)
1931 dd->cur_msg->status = -EIO;
1932 else
1933 msm_spi_process_message(dd);
1934 if (dd->cur_msg->complete)
1935 dd->cur_msg->complete(dd->cur_msg->context);
1936 spin_lock_irqsave(&dd->queue_lock, flags);
1937 }
1938 dd->transfer_pending = 0;
1939 spin_unlock_irqrestore(&dd->queue_lock, flags);
1940
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001941 if (dd->use_rlock)
1942 remote_mutex_unlock(&dd->r_lock);
1943
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001944 mutex_unlock(&dd->core_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05301945
1946 pm_runtime_mark_last_busy(dd->dev);
1947 pm_runtime_put_autosuspend(dd->dev);
1948
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001949	/* If needed, this could be deferred until the current message completes,
1950	   and the work continued upon resume. There is no need for that now. */
1951 if (dd->suspended)
1952 wake_up_interruptible(&dd->continue_suspend);
1953}
1954
1955static int msm_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1956{
1957 struct msm_spi *dd;
1958 unsigned long flags;
1959 struct spi_transfer *tr;
1960
1961 dd = spi_master_get_devdata(spi->master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001962
1963 if (list_empty(&msg->transfers) || !msg->complete)
1964 return -EINVAL;
1965
1966 list_for_each_entry(tr, &msg->transfers, transfer_list) {
1967 /* Check message parameters */
1968 if (tr->speed_hz > dd->pdata->max_clock_speed ||
1969 (tr->bits_per_word &&
1970 (tr->bits_per_word < 4 || tr->bits_per_word > 32)) ||
1971 (tr->tx_buf == NULL && tr->rx_buf == NULL)) {
1972			dev_err(&spi->dev, "Invalid transfer: %d Hz, %d bpw, "
1973					   "tx=%p, rx=%p\n",
1974 tr->speed_hz, tr->bits_per_word,
1975 tr->tx_buf, tr->rx_buf);
1976 return -EINVAL;
1977 }
1978 }
1979
1980 spin_lock_irqsave(&dd->queue_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001981 list_add_tail(&msg->queue, &dd->queue);
1982 spin_unlock_irqrestore(&dd->queue_lock, flags);
1983 queue_work(dd->workqueue, &dd->work_data);
1984 return 0;
1985}
1986
1987static int msm_spi_setup(struct spi_device *spi)
1988{
1989 struct msm_spi *dd;
1990 int rc = 0;
1991 u32 spi_ioc;
1992 u32 spi_config;
1993 u32 mask;
1994
1995 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1996 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1997 __func__, spi->bits_per_word);
1998 rc = -EINVAL;
1999 }
2000 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
2001 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
2002 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
2003 rc = -EINVAL;
2004 }
2005
2006 if (rc)
2007 goto err_setup_exit;
2008
2009 dd = spi_master_get_devdata(spi->master);
2010
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302011 pm_runtime_get_sync(dd->dev);
2012
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002013 mutex_lock(&dd->core_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302014
2015 /* Counter-part of system-suspend when runtime-pm is not enabled. */
2016 if (!pm_runtime_enabled(dd->dev))
2017 msm_spi_pm_resume_runtime(dd->dev);
2018
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002019 if (dd->suspended) {
2020 mutex_unlock(&dd->core_lock);
2021 return -EBUSY;
2022 }
2023
2024 if (dd->use_rlock)
2025 remote_mutex_lock(&dd->r_lock);
2026
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002027 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
2028 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
2029 if (spi->mode & SPI_CS_HIGH)
2030 spi_ioc |= mask;
2031 else
2032 spi_ioc &= ~mask;
Gilad Avidovd0262342012-10-24 16:52:30 -06002033 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002034
2035 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
2036
2037 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
Gilad Avidovd0262342012-10-24 16:52:30 -06002038 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
2039 spi_config, spi->mode);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002040 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
2041
2042 /* Ensure previous write completed before disabling the clocks */
2043 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002044
2045 if (dd->use_rlock)
2046 remote_mutex_unlock(&dd->r_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302047
2048 /* Counter-part of system-resume when runtime-pm is not enabled. */
2049 if (!pm_runtime_enabled(dd->dev))
2050 msm_spi_pm_suspend_runtime(dd->dev);
2051
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002052 mutex_unlock(&dd->core_lock);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302053
2054 pm_runtime_mark_last_busy(dd->dev);
2055 pm_runtime_put_autosuspend(dd->dev);
2056
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002057err_setup_exit:
2058 return rc;
2059}
2060
2061#ifdef CONFIG_DEBUG_FS
2062static int debugfs_iomem_x32_set(void *data, u64 val)
2063{
2064 writel_relaxed(val, data);
2065 /* Ensure the previous write completed. */
2066 mb();
2067 return 0;
2068}
2069
2070static int debugfs_iomem_x32_get(void *data, u64 *val)
2071{
2072 *val = readl_relaxed(data);
2073 /* Ensure the previous read completed. */
2074 mb();
2075 return 0;
2076}
2077
2078DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
2079 debugfs_iomem_x32_set, "0x%08llx\n");
2080
2081static void spi_debugfs_init(struct msm_spi *dd)
2082{
2083 dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL);
2084 if (dd->dent_spi) {
2085 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002086
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002087 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
2088 dd->debugfs_spi_regs[i] =
2089 debugfs_create_file(
2090 debugfs_spi_regs[i].name,
2091 debugfs_spi_regs[i].mode,
2092 dd->dent_spi,
2093 dd->base + debugfs_spi_regs[i].offset,
2094 &fops_iomem_x32);
2095 }
2096 }
2097}
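/*
 * Usage sketch (paths and register names are illustrative): each register
 * listed in debugfs_spi_regs is exposed as a read/write hex file, e.g.
 *
 *   cat /sys/kernel/debug/<spi-device-name>/SPI_CONFIG
 *   echo 0x100 > /sys/kernel/debug/<spi-device-name>/SPI_CONFIG
 *
 * Reads go through debugfs_iomem_x32_get() and writes through
 * debugfs_iomem_x32_set() above.
 */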
2098
2099static void spi_debugfs_exit(struct msm_spi *dd)
2100{
2101 if (dd->dent_spi) {
2102 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002103
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002104 debugfs_remove_recursive(dd->dent_spi);
2105 dd->dent_spi = NULL;
2106 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
2107 dd->debugfs_spi_regs[i] = NULL;
2108 }
2109}
2110#else
2111static void spi_debugfs_init(struct msm_spi *dd) {}
2112static void spi_debugfs_exit(struct msm_spi *dd) {}
2113#endif
2114
2115/* ===Device attributes begin=== */
2116static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
2117 char *buf)
2118{
2119 struct spi_master *master = dev_get_drvdata(dev);
2120 struct msm_spi *dd = spi_master_get_devdata(master);
2121
2122 return snprintf(buf, PAGE_SIZE,
2123 "Device %s\n"
2124 "rx fifo_size = %d spi words\n"
2125 "tx fifo_size = %d spi words\n"
2126 "use_dma ? %s\n"
2127 "rx block size = %d bytes\n"
2128 "tx block size = %d bytes\n"
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302129 "input burst size = %d bytes\n"
2130 "output burst size = %d bytes\n"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002131 "DMA configuration:\n"
2132 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
2133 "--statistics--\n"
2134 "Rx isrs = %d\n"
2135 "Tx isrs = %d\n"
2136 "DMA error = %d\n"
2137 "--debug--\n"
2138 "NA yet\n",
2139 dev_name(dev),
2140 dd->input_fifo_size,
2141 dd->output_fifo_size,
2142 dd->use_dma ? "yes" : "no",
2143 dd->input_block_size,
2144 dd->output_block_size,
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302145 dd->input_burst_size,
2146 dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002147 dd->tx_dma_chan,
2148 dd->rx_dma_chan,
2149 dd->tx_dma_crci,
2150 dd->rx_dma_crci,
2151 dd->stat_rx + dd->stat_dmov_rx,
2152 dd->stat_tx + dd->stat_dmov_tx,
2153 dd->stat_dmov_tx_err + dd->stat_dmov_rx_err
2154 );
2155}
2156
2157/* Reset statistics on write */
2158static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
2159 const char *buf, size_t count)
2160{
2161 struct msm_spi *dd = dev_get_drvdata(dev);
2162 dd->stat_rx = 0;
2163 dd->stat_tx = 0;
2164 dd->stat_dmov_rx = 0;
2165 dd->stat_dmov_tx = 0;
2166 dd->stat_dmov_rx_err = 0;
2167 dd->stat_dmov_tx_err = 0;
2168 return count;
2169}
2170
2171static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
2172
2173static struct attribute *dev_attrs[] = {
2174 &dev_attr_stats.attr,
2175 NULL,
2176};
2177
2178static struct attribute_group dev_attr_grp = {
2179 .attrs = dev_attrs,
2180};
2181/* ===Device attributes end=== */
2182
2183/**
2184 * spi_dmov_tx_complete_func - DataMover tx completion callback
2185 *
2186 * Executed in IRQ context (Data Mover's IRQ) DataMover's
2187 * spinlock @msm_dmov_lock held.
2188 */
2189static void spi_dmov_tx_complete_func(struct msm_dmov_cmd *cmd,
2190 unsigned int result,
2191 struct msm_dmov_errdata *err)
2192{
2193 struct msm_spi *dd;
2194
2195 if (!(result & DMOV_RSLT_VALID)) {
2196 pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
2197 return;
2198 }
2199 /* restore original context */
2200 dd = container_of(cmd, struct msm_spi, tx_hdr);
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302201 if (result & DMOV_RSLT_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002202 dd->stat_dmov_tx++;
Kiran Gunda54eb06e2012-05-18 15:17:06 +05302203 if ((atomic_inc_return(&dd->tx_irq_called) == 1))
2204 return;
2205 complete(&dd->transfer_complete);
2206 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002207 /* Error or flush */
2208 if (result & DMOV_RSLT_ERROR) {
2209 dev_err(dd->dev, "DMA error (0x%08x)\n", result);
2210 dd->stat_dmov_tx_err++;
2211 }
2212 if (result & DMOV_RSLT_FLUSH) {
2213 /*
2214			 * Flushing normally happens during driver removal,
2215			 * while we are waiting for outstanding DMA commands
2216			 * to be flushed.
2217 */
2218 dev_info(dd->dev,
2219 "DMA channel flushed (0x%08x)\n", result);
2220 }
2221 if (err)
2222 dev_err(dd->dev,
2223 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2224 err->flush[0], err->flush[1], err->flush[2],
2225 err->flush[3], err->flush[4], err->flush[5]);
2226 dd->cur_msg->status = -EIO;
2227 complete(&dd->transfer_complete);
2228 }
2229}
2230
2231/**
2232 * spi_dmov_rx_complete_func - DataMover rx completion callback
2233 *
2234 * Executed in IRQ context (Data Mover's IRQ)
2235 * DataMover's spinlock @msm_dmov_lock held.
2236 */
2237static void spi_dmov_rx_complete_func(struct msm_dmov_cmd *cmd,
2238 unsigned int result,
2239 struct msm_dmov_errdata *err)
2240{
2241 struct msm_spi *dd;
2242
2243 if (!(result & DMOV_RSLT_VALID)) {
2244 pr_err("Invalid DMOV result(rc = 0x%08x, cmd = %p)",
2245 result, cmd);
2246 return;
2247 }
2248 /* restore original context */
2249 dd = container_of(cmd, struct msm_spi, rx_hdr);
2250 if (result & DMOV_RSLT_DONE) {
2251 dd->stat_dmov_rx++;
2252 if (atomic_inc_return(&dd->rx_irq_called) == 1)
2253 return;
2254 complete(&dd->transfer_complete);
2255 } else {
2256		/* Error or flush */
2257 if (result & DMOV_RSLT_ERROR) {
2258 dev_err(dd->dev, "DMA error(0x%08x)\n", result);
2259 dd->stat_dmov_rx_err++;
2260 }
2261 if (result & DMOV_RSLT_FLUSH) {
2262 dev_info(dd->dev,
2263 "DMA channel flushed(0x%08x)\n", result);
2264 }
2265 if (err)
2266 dev_err(dd->dev,
2267 "Flush data(%08x %08x %08x %08x %08x %08x)\n",
2268 err->flush[0], err->flush[1], err->flush[2],
2269 err->flush[3], err->flush[4], err->flush[5]);
2270 dd->cur_msg->status = -EIO;
2271 complete(&dd->transfer_complete);
2272 }
2273}
2274
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302275static inline u32 get_chunk_size(struct msm_spi *dd, int input_burst_size,
2276 int output_burst_size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002277{
2278 u32 cache_line = dma_get_cache_alignment();
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302279 int burst_size = (input_burst_size > output_burst_size) ?
2280 input_burst_size : output_burst_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002281
2282 return (roundup(sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN) +
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302283 roundup(burst_size, cache_line))*2;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002284}
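/*
 * Sketch of the single coherent allocation sized by get_chunk_size() and
 * carved up in msm_spi_dmov_init() (exact offsets depend on DM_BYTE_ALIGN
 * and the cache line size):
 *
 *   [tx_dmov_cmd][rx_dmov_cmd][tx_padding][rx_padding]
 *
 * The returned size budgets one spi_dmov_cmd rounded up to DM_BYTE_ALIGN
 * plus one burst-sized padding buffer rounded up to a cache line, doubled
 * to cover both the TX and RX halves.
 */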
2285
Gilad Avidovd0262342012-10-24 16:52:30 -06002286static void msm_spi_dmov_teardown(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002287{
2288 int limit = 0;
2289
2290 if (!dd->use_dma)
2291 return;
2292
2293 while (dd->mode == SPI_DMOV_MODE && limit++ < 50) {
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002294 msm_dmov_flush(dd->tx_dma_chan, 1);
2295 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002296 msleep(10);
2297 }
2298
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302299 dma_free_coherent(NULL,
2300 get_chunk_size(dd, dd->input_burst_size, dd->output_burst_size),
2301 dd->tx_dmov_cmd,
2302 dd->tx_dmov_cmd_dma);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002303 dd->tx_dmov_cmd = dd->rx_dmov_cmd = NULL;
2304 dd->tx_padding = dd->rx_padding = NULL;
2305}
2306
Gilad Avidovd0262342012-10-24 16:52:30 -06002307static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
2308 enum msm_spi_pipe_direction pipe_dir)
2309{
2310 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2311 (&dd->bam.prod) : (&dd->bam.cons);
2312 if (!pipe->teardown_required)
2313 return;
2314
2315 sps_disconnect(pipe->handle);
2316 dma_free_coherent(dd->dev, pipe->config.desc.size,
2317 pipe->config.desc.base, pipe->config.desc.phys_base);
2318 sps_free_endpoint(pipe->handle);
2319 pipe->handle = 0;
2320 pipe->teardown_required = false;
2321}
2322
2323static int msm_spi_bam_pipe_init(struct msm_spi *dd,
2324 enum msm_spi_pipe_direction pipe_dir)
2325{
2326 int rc = 0;
2327 struct sps_pipe *pipe_handle;
2328 struct sps_register_event event = {0};
2329 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2330 (&dd->bam.prod) : (&dd->bam.cons);
2331 struct sps_connect *pipe_conf = &pipe->config;
2332
2333 pipe->handle = 0;
2334 pipe_handle = sps_alloc_endpoint();
2335 if (!pipe_handle) {
2336 dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
2337 , __func__);
2338 return -ENOMEM;
2339 }
2340
2341 memset(pipe_conf, 0, sizeof(*pipe_conf));
2342 rc = sps_get_config(pipe_handle, pipe_conf);
2343 if (rc) {
2344 dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
2345 , __func__);
2346 goto config_err;
2347 }
2348
2349 if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
2350 pipe_conf->source = dd->bam.handle;
2351 pipe_conf->destination = SPS_DEV_HANDLE_MEM;
2352 pipe_conf->mode = SPS_MODE_SRC;
2353 pipe_conf->src_pipe_index =
2354 dd->pdata->bam_producer_pipe_index;
2355 pipe_conf->dest_pipe_index = 0;
2356 } else {
2357 pipe_conf->source = SPS_DEV_HANDLE_MEM;
2358 pipe_conf->destination = dd->bam.handle;
2359 pipe_conf->mode = SPS_MODE_DEST;
2360 pipe_conf->src_pipe_index = 0;
2361 pipe_conf->dest_pipe_index =
2362 dd->pdata->bam_consumer_pipe_index;
2363 }
2364 pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
2365 pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
2366 pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
2367 pipe_conf->desc.size,
2368 &pipe_conf->desc.phys_base,
2369 GFP_KERNEL);
2370 if (!pipe_conf->desc.base) {
2371		dev_err(dd->dev, "%s: Failed to allocate BAM pipe memory"
2372 , __func__);
2373 rc = -ENOMEM;
2374 goto config_err;
2375 }
2376
2377 memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
2378
2379 rc = sps_connect(pipe_handle, pipe_conf);
2380 if (rc) {
2381 dev_err(dd->dev, "%s: Failed to connect BAM pipe", __func__);
2382 goto connect_err;
2383 }
2384
2385 event.mode = SPS_TRIGGER_WAIT;
2386 event.options = SPS_O_EOT;
2387 event.xfer_done = &dd->transfer_complete;
2388 event.user = (void *)dd;
2389 rc = sps_register_event(pipe_handle, &event);
2390 if (rc) {
2391 dev_err(dd->dev, "%s: Failed to register BAM EOT event",
2392 __func__);
2393 goto register_err;
2394 }
2395
2396 pipe->handle = pipe_handle;
2397 pipe->teardown_required = true;
2398 return 0;
2399
2400register_err:
2401 sps_disconnect(pipe_handle);
2402connect_err:
2403 dma_free_coherent(dd->dev, pipe_conf->desc.size,
2404 pipe_conf->desc.base, pipe_conf->desc.phys_base);
2405config_err:
2406 sps_free_endpoint(pipe_handle);
2407
2408 return rc;
2409}
2410
2411static void msm_spi_bam_teardown(struct msm_spi *dd)
2412{
2413 msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
2414 msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
2415
2416 if (dd->bam.deregister_required) {
2417 sps_deregister_bam_device(dd->bam.handle);
2418 dd->bam.deregister_required = false;
2419 }
2420}
2421
2422static int msm_spi_bam_init(struct msm_spi *dd)
2423{
2424 struct sps_bam_props bam_props = {0};
2425 u32 bam_handle;
2426 int rc = 0;
2427
2428 rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
2429 if (rc || !bam_handle) {
2430 bam_props.phys_addr = dd->bam.phys_addr;
2431 bam_props.virt_addr = dd->bam.base;
2432 bam_props.irq = dd->bam.irq;
Gilad Avidovb0968052013-05-03 09:51:37 -06002433 bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
Gilad Avidovd0262342012-10-24 16:52:30 -06002434 bam_props.summing_threshold = 0x10;
2435
2436 rc = sps_register_bam_device(&bam_props, &bam_handle);
2437 if (rc) {
2438 dev_err(dd->dev,
2439 "%s: Failed to register BAM device",
2440 __func__);
2441 return rc;
2442 }
2443 dd->bam.deregister_required = true;
2444 }
2445
2446 dd->bam.handle = bam_handle;
2447
2448 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
2449 if (rc) {
2450 dev_err(dd->dev,
2451 "%s: Failed to init producer BAM-pipe",
2452 __func__);
2453 goto bam_init_error;
2454 }
2455
2456 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
2457 if (rc) {
2458 dev_err(dd->dev,
2459 "%s: Failed to init consumer BAM-pipe",
2460 __func__);
2461 goto bam_init_error;
2462 }
2463
2464 return 0;
2465
2466bam_init_error:
2467 msm_spi_bam_teardown(dd);
2468 return rc;
2469}
2470
2471static __init int msm_spi_dmov_init(struct msm_spi *dd)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002472{
2473 dmov_box *box;
2474 u32 cache_line = dma_get_cache_alignment();
2475
2476 /* Allocate all as one chunk, since all is smaller than page size */
2477
2478	/* We pass a NULL device, since a real device would need coherent_dma_mask
2479	   set in its definition; we're okay with using the system pool */
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302480 dd->tx_dmov_cmd
2481 = dma_alloc_coherent(NULL,
2482 get_chunk_size(dd, dd->input_burst_size,
2483 dd->output_burst_size),
2484 &dd->tx_dmov_cmd_dma, GFP_KERNEL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002485 if (dd->tx_dmov_cmd == NULL)
2486 return -ENOMEM;
2487
2488	/* DMA addresses should be 64-bit aligned */
2489 dd->rx_dmov_cmd = (struct spi_dmov_cmd *)
2490 ALIGN((size_t)&dd->tx_dmov_cmd[1], DM_BYTE_ALIGN);
2491 dd->rx_dmov_cmd_dma = ALIGN(dd->tx_dmov_cmd_dma +
2492 sizeof(struct spi_dmov_cmd), DM_BYTE_ALIGN);
2493
2494 /* Buffers should be aligned to cache line */
2495 dd->tx_padding = (u8 *)ALIGN((size_t)&dd->rx_dmov_cmd[1], cache_line);
2496 dd->tx_padding_dma = ALIGN(dd->rx_dmov_cmd_dma +
2497 sizeof(struct spi_dmov_cmd), cache_line);
Alok Chauhan3a2b4d92013-02-15 16:04:20 +05302498 dd->rx_padding = (u8 *)ALIGN((size_t)(dd->tx_padding +
2499 dd->output_burst_size), cache_line);
2500 dd->rx_padding_dma = ALIGN(dd->tx_padding_dma + dd->output_burst_size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002501 cache_line);
2502
2503 /* Setup DM commands */
2504 box = &(dd->rx_dmov_cmd->box);
2505 box->cmd = CMD_MODE_BOX | CMD_SRC_CRCI(dd->rx_dma_crci);
2506 box->src_row_addr = (uint32_t)dd->mem_phys_addr + SPI_INPUT_FIFO;
2507 dd->rx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2508 DMOV_CMD_ADDR(dd->rx_dmov_cmd_dma +
2509 offsetof(struct spi_dmov_cmd, cmd_ptr));
2510 dd->rx_hdr.complete_func = spi_dmov_rx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002511
2512 box = &(dd->tx_dmov_cmd->box);
2513 box->cmd = CMD_MODE_BOX | CMD_DST_CRCI(dd->tx_dma_crci);
2514 box->dst_row_addr = (uint32_t)dd->mem_phys_addr + SPI_OUTPUT_FIFO;
2515 dd->tx_hdr.cmdptr = DMOV_CMD_PTR_LIST |
2516 DMOV_CMD_ADDR(dd->tx_dmov_cmd_dma +
2517 offsetof(struct spi_dmov_cmd, cmd_ptr));
2518 dd->tx_hdr.complete_func = spi_dmov_tx_complete_func;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002519
2520 dd->tx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2521 CMD_DST_CRCI(dd->tx_dma_crci);
2522 dd->tx_dmov_cmd->single_pad.dst = (uint32_t)dd->mem_phys_addr +
2523 SPI_OUTPUT_FIFO;
2524 dd->rx_dmov_cmd->single_pad.cmd = CMD_MODE_SINGLE | CMD_LC |
2525 CMD_SRC_CRCI(dd->rx_dma_crci);
2526 dd->rx_dmov_cmd->single_pad.src = (uint32_t)dd->mem_phys_addr +
2527 SPI_INPUT_FIFO;
2528
2529 /* Clear remaining activities on channel */
Jeff Ohlstein6bf7b3a2012-04-27 12:27:53 -07002530 msm_dmov_flush(dd->tx_dma_chan, 1);
2531 msm_dmov_flush(dd->rx_dma_chan, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002532
2533 return 0;
2534}
2535
Gilad Avidov23350552013-05-21 09:26:46 -06002536enum msm_spi_dt_entry_status {
2537 DT_REQ, /* Required: fail if missing */
2538 DT_SGST, /* Suggested: warn if missing */
2539 DT_OPT, /* Optional: don't warn if missing */
2540};
2541
2542enum msm_spi_dt_entry_type {
2543 DT_U32,
2544 DT_GPIO,
2545 DT_BOOL,
2546};
2547
2548struct msm_spi_dt_to_pdata_map {
2549 const char *dt_name;
2550 void *ptr_data;
2551 enum msm_spi_dt_entry_status status;
2552 enum msm_spi_dt_entry_type type;
2553 int default_val;
2554};
2555
2556static int __init msm_spi_dt_to_pdata_populate(struct platform_device *pdev,
2557 struct msm_spi_platform_data *pdata,
2558 struct msm_spi_dt_to_pdata_map *itr)
2559{
2560 int ret, err = 0;
2561 struct device_node *node = pdev->dev.of_node;
2562
2563 for (; itr->dt_name ; ++itr) {
2564 switch (itr->type) {
2565 case DT_GPIO:
2566 ret = of_get_named_gpio(node, itr->dt_name, 0);
2567 if (ret >= 0) {
2568 *((int *) itr->ptr_data) = ret;
2569 ret = 0;
2570 }
2571 break;
2572 case DT_U32:
2573 ret = of_property_read_u32(node, itr->dt_name,
2574 (u32 *) itr->ptr_data);
2575 break;
2576 case DT_BOOL:
2577 *((bool *) itr->ptr_data) =
2578 of_property_read_bool(node, itr->dt_name);
2579 ret = 0;
2580 break;
2581 default:
2582 dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
2583 itr->type);
2584 ret = -EBADE;
2585 }
2586
2587 dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
2588 ret, itr->dt_name, *((int *)itr->ptr_data));
2589
2590 if (ret) {
2591 *((int *)itr->ptr_data) = itr->default_val;
2592
2593 if (itr->status < DT_OPT) {
2594 dev_err(&pdev->dev, "Missing '%s' DT entry\n",
2595 itr->dt_name);
2596
2597				/* continue on error to report all missing entries */
2598 if (itr->status == DT_REQ && !err)
2599 err = ret;
2600 }
2601 }
2602 }
2603
2604 return err;
2605}
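/*
 * Illustrative device-tree fragment (hypothetical node name and values)
 * containing entries consumed by the map in msm_spi_dt_to_pdata() below:
 *
 *   spi@f9923000 {
 *           compatible = "qcom,spi-qup-v2";
 *           spi-max-frequency = <19200000>;
 *           qcom,use-bam;
 *           qcom,bam-consumer-pipe-index = <12>;
 *           qcom,bam-producer-pipe-index = <13>;
 *   };
 *
 * DT_U32 entries are read with of_property_read_u32(), DT_BOOL entries with
 * of_property_read_bool() and DT_GPIO entries with of_get_named_gpio();
 * missing DT_REQ and DT_SGST entries are reported, but only a missing
 * DT_REQ entry makes the populate call fail.
 */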
2606
Gilad Avidovd0262342012-10-24 16:52:30 -06002607/**
Gilad Avidov002dba02013-05-21 18:06:32 -06002608 * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree
Gilad Avidovd0262342012-10-24 16:52:30 -06002609 */
Gilad Avidov002dba02013-05-21 18:06:32 -06002610struct msm_spi_platform_data * __init msm_spi_dt_to_pdata(
2611 struct platform_device *pdev, struct msm_spi *dd)
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002612{
Gilad Avidov002dba02013-05-21 18:06:32 -06002613 int i;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002614 struct msm_spi_platform_data *pdata;
2615
2616 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2617 if (!pdata) {
2618 pr_err("Unable to allocate platform data\n");
2619 return NULL;
Gilad Avidov23350552013-05-21 09:26:46 -06002620 } else {
2621 struct msm_spi_dt_to_pdata_map map[] = {
2622 {"spi-max-frequency",
Gilad Avidov002dba02013-05-21 18:06:32 -06002623 &pdata->max_clock_speed, DT_SGST, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002624 {"qcom,infinite-mode",
Gilad Avidov002dba02013-05-21 18:06:32 -06002625 &pdata->infinite_mode, DT_OPT, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002626 {"qcom,active-only",
Gilad Avidov002dba02013-05-21 18:06:32 -06002627 &pdata->active_only, DT_OPT, DT_BOOL, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002628 {"qcom,master-id",
Gilad Avidov002dba02013-05-21 18:06:32 -06002629 &pdata->master_id, DT_SGST, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002630 {"qcom,ver-reg-exists",
Gilad Avidov002dba02013-05-21 18:06:32 -06002631 &pdata->ver_reg_exists, DT_OPT, DT_BOOL, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002632 {"qcom,use-bam",
Gilad Avidov002dba02013-05-21 18:06:32 -06002633 &pdata->use_bam, DT_OPT, DT_BOOL, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002634 {"qcom,bam-consumer-pipe-index",
Gilad Avidov002dba02013-05-21 18:06:32 -06002635 &pdata->bam_consumer_pipe_index, DT_OPT, DT_U32, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002636 {"qcom,bam-producer-pipe-index",
Gilad Avidov002dba02013-05-21 18:06:32 -06002637 &pdata->bam_producer_pipe_index, DT_OPT, DT_U32, 0},
2638 {"qcom,gpio-clk",
2639 &dd->spi_gpios[0], DT_OPT, DT_GPIO, -1},
2640 {"qcom,gpio-miso",
2641 &dd->spi_gpios[1], DT_OPT, DT_GPIO, -1},
2642 {"qcom,gpio-mosi",
2643 &dd->spi_gpios[2], DT_OPT, DT_GPIO, -1},
2644 {"qcom,gpio-cs0",
2645 &dd->cs_gpios[0].gpio_num, DT_OPT, DT_GPIO, -1},
2646 {"qcom,gpio-cs1",
2647 &dd->cs_gpios[1].gpio_num, DT_OPT, DT_GPIO, -1},
2648 {"qcom,gpio-cs2",
2649 &dd->cs_gpios[2].gpio_num, DT_OPT, DT_GPIO, -1},
2650 {"qcom,gpio-cs3",
2651 &dd->cs_gpios[3].gpio_num, DT_OPT, DT_GPIO, -1},
2652 {NULL, NULL, 0, 0, 0},
Gilad Avidov23350552013-05-21 09:26:46 -06002653 };
2654
2655 if (msm_spi_dt_to_pdata_populate(pdev, pdata, map)) {
2656 devm_kfree(&pdev->dev, pdata);
2657 return NULL;
2658 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002659 }
2660
Gilad Avidovd0262342012-10-24 16:52:30 -06002661 if (pdata->use_bam) {
Gilad Avidov23350552013-05-21 09:26:46 -06002662 if (!pdata->bam_consumer_pipe_index) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002663 dev_warn(&pdev->dev,
2664 "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
2665 pdata->use_bam = false;
2666 }
2667
Gilad Avidov23350552013-05-21 09:26:46 -06002668		if (!pdata->bam_producer_pipe_index) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002669 dev_warn(&pdev->dev,
2670 "missing qcom,bam-producer-pipe-index entry in device-tree\n");
2671 pdata->use_bam = false;
2672 }
2673 }
Gilad Avidov002dba02013-05-21 18:06:32 -06002674
2675 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
2676 dd->cs_gpios[i].valid = (dd->cs_gpios[i].gpio_num >= 0);
2677
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002678 return pdata;
2679}
2680
Gilad Avidovd0262342012-10-24 16:52:30 -06002681static int __init msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
2682{
2683 u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
2684 return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
2685 : SPI_QUP_VERSION_NONE;
2686}
2687
2688static int __init msm_spi_bam_get_resources(struct msm_spi *dd,
2689 struct platform_device *pdev, struct spi_master *master)
2690{
2691 struct resource *resource;
2692 size_t bam_mem_size;
2693
2694 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2695 "spi_bam_physical");
2696 if (!resource) {
2697 dev_warn(&pdev->dev,
2698 "%s: Missing spi_bam_physical entry in DT",
2699 __func__);
2700 return -ENXIO;
2701 }
2702
2703 dd->bam.phys_addr = resource->start;
2704 bam_mem_size = resource_size(resource);
2705 dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
2706 bam_mem_size);
2707 if (!dd->bam.base) {
2708 dev_warn(&pdev->dev,
2709 "%s: Failed to ioremap(spi_bam_physical)",
2710 __func__);
2711 return -ENXIO;
2712 }
2713
2714 dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
2715 if (dd->bam.irq < 0) {
2716 dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
2717 __func__);
2718 return -EINVAL;
2719 }
2720
2721 dd->dma_init = msm_spi_bam_init;
2722 dd->dma_teardown = msm_spi_bam_teardown;
2723 return 0;
2724}
2725
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002726static int __init msm_spi_probe(struct platform_device *pdev)
2727{
2728 struct spi_master *master;
2729 struct msm_spi *dd;
2730 struct resource *resource;
2731 int rc = -ENXIO;
2732 int locked = 0;
2733 int i = 0;
2734 int clk_enabled = 0;
2735 int pclk_enabled = 0;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002736 struct msm_spi_platform_data *pdata;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002737
2738 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
2739 if (!master) {
2740 rc = -ENOMEM;
2741 dev_err(&pdev->dev, "master allocation failed\n");
2742 goto err_probe_exit;
2743 }
2744
2745 master->bus_num = pdev->id;
2746 master->mode_bits = SPI_SUPPORTED_MODES;
2747 master->num_chipselect = SPI_NUM_CHIPSELECTS;
2748 master->setup = msm_spi_setup;
2749 master->transfer = msm_spi_transfer;
2750 platform_set_drvdata(pdev, master);
2751 dd = spi_master_get_devdata(master);
2752
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002753 if (pdev->dev.of_node) {
2754 dd->qup_ver = SPI_QUP_VERSION_BFAM;
2755 master->dev.of_node = pdev->dev.of_node;
Gilad Avidov002dba02013-05-21 18:06:32 -06002756 pdata = msm_spi_dt_to_pdata(pdev, dd);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002757 if (!pdata) {
2758 rc = -ENOMEM;
2759 goto err_probe_exit;
2760 }
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002761
Gilad Avidov0697ea62013-02-11 16:46:38 -07002762 rc = of_alias_get_id(pdev->dev.of_node, "spi");
2763 if (rc < 0)
Kenneth Heitkeecc836b2012-08-11 20:53:01 -06002764 dev_warn(&pdev->dev,
2765 "using default bus_num %d\n", pdev->id);
2766 else
Gilad Avidov0697ea62013-02-11 16:46:38 -07002767 master->bus_num = pdev->id = rc;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002768 } else {
2769 pdata = pdev->dev.platform_data;
2770 dd->qup_ver = SPI_QUP_VERSION_NONE;
Sathish Ambleycd06bf32012-04-09 11:59:43 -07002771
2772 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2773 resource = platform_get_resource(pdev, IORESOURCE_IO,
2774 i);
2775 dd->spi_gpios[i] = resource ? resource->start : -1;
2776 }
2777
2778 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2779 resource = platform_get_resource(pdev, IORESOURCE_IO,
2780 i + ARRAY_SIZE(spi_rsrcs));
2781 dd->cs_gpios[i].gpio_num = resource ?
2782 resource->start : -1;
2783 dd->cs_gpios[i].valid = 0;
2784 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002785 }
2786
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002787 dd->pdata = pdata;
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002788 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002789 if (!resource) {
2790 rc = -ENXIO;
2791 goto err_probe_res;
2792 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002793
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002794 dd->mem_phys_addr = resource->start;
2795 dd->mem_size = resource_size(resource);
2796
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002797 if (pdata) {
2798 if (pdata->dma_config) {
2799 rc = pdata->dma_config();
2800 if (rc) {
2801 dev_warn(&pdev->dev,
2802 "%s: DM mode not supported\n",
2803 __func__);
2804 dd->use_dma = 0;
2805 goto skip_dma_resources;
2806 }
2807 }
Gilad Avidovd0262342012-10-24 16:52:30 -06002808 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
2809 resource = platform_get_resource(pdev,
2810 IORESOURCE_DMA, 0);
2811 if (resource) {
2812 dd->rx_dma_chan = resource->start;
2813 dd->tx_dma_chan = resource->end;
2814 resource = platform_get_resource(pdev,
2815 IORESOURCE_DMA, 1);
2816 if (!resource) {
2817 rc = -ENXIO;
2818 goto err_probe_res;
2819 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002820
Gilad Avidovd0262342012-10-24 16:52:30 -06002821 dd->rx_dma_crci = resource->start;
2822 dd->tx_dma_crci = resource->end;
2823 dd->use_dma = 1;
2824 master->dma_alignment =
2825 dma_get_cache_alignment();
2826				dd->dma_init = msm_spi_dmov_init;
2827 dd->dma_teardown = msm_spi_dmov_teardown;
2828 }
2829 } else {
2830 if (!dd->pdata->use_bam)
2831 goto skip_dma_resources;
2832
2833 rc = msm_spi_bam_get_resources(dd, pdev, master);
2834 if (rc) {
2835 dev_warn(dd->dev,
2836				 "%s: Failed to get BAM resources",
2837 __func__);
2838 goto skip_dma_resources;
2839 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002840 dd->use_dma = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002841 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002842 }
2843
Alok Chauhan66554a12012-08-22 19:54:45 +05302844skip_dma_resources:
Harini Jayaramane4c06192011-09-28 16:26:39 -06002845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002846 spin_lock_init(&dd->queue_lock);
2847 mutex_init(&dd->core_lock);
2848 INIT_LIST_HEAD(&dd->queue);
2849 INIT_WORK(&dd->work_data, msm_spi_workq);
2850 init_waitqueue_head(&dd->continue_suspend);
2851 dd->workqueue = create_singlethread_workqueue(
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002852 dev_name(master->dev.parent));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002853 if (!dd->workqueue)
2854 goto err_probe_workq;
2855
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002856 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2857 dd->mem_size, SPI_DRV_NAME)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002858 rc = -ENXIO;
2859 goto err_probe_reqmem;
2860 }
2861
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002862 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2863 if (!dd->base) {
2864 rc = -ENOMEM;
2865 goto err_probe_reqmem;
2866 }
2867
Gilad Avidovd0262342012-10-24 16:52:30 -06002868 if (pdata && pdata->ver_reg_exists) {
2869 enum msm_spi_qup_version ver =
2870 msm_spi_get_qup_hw_ver(&pdev->dev, dd);
2871 if (dd->qup_ver != ver)
2872 dev_warn(&pdev->dev,
2873				 "%s: HW version differs from the one initially assumed by probe",
2874 __func__);
2875 }
2876
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877 if (pdata && pdata->rsl_id) {
2878 struct remote_mutex_id rmid;
2879 rmid.r_spinlock_id = pdata->rsl_id;
2880 rmid.delay_us = SPI_TRYLOCK_DELAY;
2881
2882 rc = remote_mutex_init(&dd->r_lock, &rmid);
2883 if (rc) {
2884 dev_err(&pdev->dev, "%s: unable to init remote_mutex "
2885				"(%s), (rc=%d)\n", __func__,
2886				rmid.r_spinlock_id, rc);
2887 goto err_probe_rlock_init;
2888 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002889
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002890 dd->use_rlock = 1;
2891 dd->pm_lat = pdata->pm_lat;
Alok Chauhan66554a12012-08-22 19:54:45 +05302892 pm_qos_add_request(&qos_req_list, PM_QOS_CPU_DMA_LATENCY,
Gilad Avidovd0262342012-10-24 16:52:30 -06002893 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002894 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002895
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002896 mutex_lock(&dd->core_lock);
2897 if (dd->use_rlock)
2898 remote_mutex_lock(&dd->r_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002899
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002900 locked = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002901 dd->dev = &pdev->dev;
Matt Wagantallac294852011-08-17 15:44:58 -07002902 dd->clk = clk_get(&pdev->dev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002903 if (IS_ERR(dd->clk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002904 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002905 rc = PTR_ERR(dd->clk);
2906 goto err_probe_clk_get;
2907 }
2908
Matt Wagantallac294852011-08-17 15:44:58 -07002909 dd->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002910 if (IS_ERR(dd->pclk)) {
Matt Wagantallac294852011-08-17 15:44:58 -07002911 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002912 rc = PTR_ERR(dd->pclk);
2913 goto err_probe_pclk_get;
2914 }
2915
2916 if (pdata && pdata->max_clock_speed)
2917 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2918
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002919 rc = clk_prepare_enable(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002920 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002921 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002922 __func__);
2923 goto err_probe_clk_enable;
2924 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002925
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002926 clk_enabled = 1;
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002927 rc = clk_prepare_enable(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002928 if (rc) {
Matt Wagantallac294852011-08-17 15:44:58 -07002929 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002930 __func__);
2931 goto err_probe_pclk_enable;
2932 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002933
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002934 pclk_enabled = 1;
Gilad Avidovd0262342012-10-24 16:52:30 -06002935	/* GSBI does not exist on B-family MSM chips */
2936 if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
2937 rc = msm_spi_configure_gsbi(dd, pdev);
2938 if (rc)
2939 goto err_probe_gsbi;
2940 }
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002941
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002942 msm_spi_calculate_fifo_size(dd);
2943 if (dd->use_dma) {
Gilad Avidovd0262342012-10-24 16:52:30 -06002944 rc = dd->dma_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002945 if (rc)
2946 goto err_probe_dma;
2947 }
2948
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002949 msm_spi_register_init(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002950 /*
2951 * The SPI core generates a bogus input overrun error on some targets,
2952 * when a transition from run to reset state occurs and if the FIFO has
2953 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
2954 * bit.
2955 */
2956 msm_spi_enable_error_flags(dd);
2957
2958 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2959 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2960 if (rc)
2961 goto err_probe_state;
2962
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07002963 clk_disable_unprepare(dd->clk);
2964 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002965 clk_enabled = 0;
2966 pclk_enabled = 0;
2967
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302968 dd->suspended = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002969 dd->transfer_pending = 0;
2970 dd->multi_xfr = 0;
2971 dd->mode = SPI_MODE_NONE;
2972
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07002973 rc = msm_spi_request_irq(dd, pdev, master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002974 if (rc)
2975 goto err_probe_irq;
2976
2977 msm_spi_disable_irqs(dd);
2978 if (dd->use_rlock)
2979 remote_mutex_unlock(&dd->r_lock);
2980
2981 mutex_unlock(&dd->core_lock);
2982 locked = 0;
2983
Alok Chauhan7fd3add2013-03-12 18:34:43 +05302984 pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
2985 pm_runtime_use_autosuspend(&pdev->dev);
2986 pm_runtime_enable(&pdev->dev);
2987
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002988 rc = spi_register_master(master);
2989 if (rc)
2990 goto err_probe_reg_master;
2991
2992 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2993 if (rc) {
2994 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2995 goto err_attrs;
2996 }
2997
2998 spi_debugfs_init(dd);
Kiran Gundac5fbd7f2012-07-30 13:22:39 +05302999
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003000 return 0;
3001
3002err_attrs:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003003 spi_unregister_master(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003004err_probe_reg_master:
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303005 pm_runtime_disable(&pdev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003006err_probe_irq:
3007err_probe_state:
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08003008 if (dd->dma_teardown)
3009 dd->dma_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003010err_probe_dma:
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003011err_probe_gsbi:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003012 if (pclk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07003013 clk_disable_unprepare(dd->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003014err_probe_pclk_enable:
3015 if (clk_enabled)
Harini Jayaraman4266ddf2012-01-23 16:51:02 -07003016 clk_disable_unprepare(dd->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003017err_probe_clk_enable:
3018 clk_put(dd->pclk);
3019err_probe_pclk_get:
3020 clk_put(dd->clk);
3021err_probe_clk_get:
3022 if (locked) {
3023 if (dd->use_rlock)
3024 remote_mutex_unlock(&dd->r_lock);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003025
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003026 mutex_unlock(&dd->core_lock);
3027 }
3028err_probe_rlock_init:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003029err_probe_reqmem:
3030 destroy_workqueue(dd->workqueue);
3031err_probe_workq:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003032err_probe_res:
3033 spi_master_put(master);
3034err_probe_exit:
3035 return rc;
3036}
3037
3038#ifdef CONFIG_PM
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303039static int msm_spi_pm_suspend_runtime(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003040{
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303041 struct platform_device *pdev = to_platform_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003042 struct spi_master *master = platform_get_drvdata(pdev);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303043 struct msm_spi *dd;
3044 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003045
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303046 dev_dbg(device, "pm_runtime: suspending...\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003047 if (!master)
3048 goto suspend_exit;
3049 dd = spi_master_get_devdata(master);
3050 if (!dd)
3051 goto suspend_exit;
3052
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303053 if (dd->suspended)
3054 return 0;
3055
3056 /*
3057 * Make sure nothing is added to the queue while we're
3058 * suspending
3059 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003060 spin_lock_irqsave(&dd->queue_lock, flags);
3061 dd->suspended = 1;
3062 spin_unlock_irqrestore(&dd->queue_lock, flags);
3063
3064 /* Wait for transactions to end, or time out */
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303065 wait_event_interruptible(dd->continue_suspend,
3066 !dd->transfer_pending);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003067
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303068 msm_spi_disable_irqs(dd);
3069 clk_disable_unprepare(dd->clk);
3070 clk_disable_unprepare(dd->pclk);
Gilad Avidov23350552013-05-21 09:26:46 -06003071 if (!dd->pdata->active_only)
3072 msm_spi_clk_path_unvote(dd);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303073
3074 /* Free the spi clk, miso, mosi, cs gpio */
3075 if (dd->pdata && dd->pdata->gpio_release)
3076 dd->pdata->gpio_release();
3077
3078 msm_spi_free_gpios(dd);
3079
3080 if (pm_qos_request_active(&qos_req_list))
3081 pm_qos_update_request(&qos_req_list,
3082 PM_QOS_DEFAULT_VALUE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003083suspend_exit:
3084 return 0;
3085}
3086
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303087static int msm_spi_pm_resume_runtime(struct device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003088{
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303089 struct platform_device *pdev = to_platform_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003090 struct spi_master *master = platform_get_drvdata(pdev);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303091 struct msm_spi *dd;
3092 int ret = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003093
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303094 dev_dbg(device, "pm_runtime: resuming...\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003095 if (!master)
3096 goto resume_exit;
3097 dd = spi_master_get_devdata(master);
3098 if (!dd)
3099 goto resume_exit;
3100
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303101 if (!dd->suspended)
3102 return 0;
3103
3104 if (pm_qos_request_active(&qos_req_list))
3105 pm_qos_update_request(&qos_req_list,
3106 dd->pm_lat);
3107
3108 /* Configure the spi clk, miso, mosi and cs gpio */
3109 if (dd->pdata->gpio_config) {
3110 ret = dd->pdata->gpio_config();
3111 if (ret) {
3112 dev_err(dd->dev,
3113 "%s: error configuring GPIOs\n",
3114 __func__);
3115 return ret;
3116 }
3117 }
3118
3119 ret = msm_spi_request_gpios(dd);
3120 if (ret)
3121 return ret;
3122
Gilad Avidov23350552013-05-21 09:26:46 -06003123 msm_spi_clk_path_init(dd);
3124 if (!dd->pdata->active_only)
3125 msm_spi_clk_path_vote(dd);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303126 clk_prepare_enable(dd->clk);
3127 clk_prepare_enable(dd->pclk);
3128 msm_spi_enable_irqs(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003129 dd->suspended = 0;
Gilad Avidov23350552013-05-21 09:26:46 -06003130
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003131resume_exit:
3132 return 0;
3133}
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303134
3135static int msm_spi_suspend(struct device *device)
3136{
3137 if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
3138 struct platform_device *pdev = to_platform_device(device);
3139 struct spi_master *master = platform_get_drvdata(pdev);
3140 struct msm_spi *dd;
3141
3142 dev_dbg(device, "system suspend");
3143 if (!master)
3144 goto suspend_exit;
3145 dd = spi_master_get_devdata(master);
3146 if (!dd)
3147 goto suspend_exit;
3148 msm_spi_pm_suspend_runtime(device);
3149 }
3150suspend_exit:
3151 return 0;
3152}
3153
3154static int msm_spi_resume(struct device *device)
3155{
3156 /*
3157 * Rely on runtime-PM to call resume in case it is enabled
3158 * Even if it's not enabled, rely on 1st client transaction to do
3159 * clock ON and gpio configuration
3160 */
3161 dev_dbg(device, "system resume");
3162 return 0;
3163}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003164#else
3165#define msm_spi_suspend NULL
3166#define msm_spi_resume NULL
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303167#define msm_spi_pm_suspend_runtime NULL
3168#define msm_spi_pm_resume_runtime NULL
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003169#endif /* CONFIG_PM */
3170
3171static int __devexit msm_spi_remove(struct platform_device *pdev)
3172{
3173 struct spi_master *master = platform_get_drvdata(pdev);
3174 struct msm_spi *dd = spi_master_get_devdata(master);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003175
3176 pm_qos_remove_request(&qos_req_list);
3177 spi_debugfs_exit(dd);
3178 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
3179
Stepan Moskovchenko37b70d62012-11-28 13:27:49 -08003180 if (dd->dma_teardown)
3181 dd->dma_teardown(dd);
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303182 pm_runtime_disable(&pdev->dev);
3183 pm_runtime_set_suspended(&pdev->dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003184 clk_put(dd->clk);
3185 clk_put(dd->pclk);
Gilad Avidov23350552013-05-21 09:26:46 -06003186 msm_spi_clk_path_teardown(dd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003187 destroy_workqueue(dd->workqueue);
3188 platform_set_drvdata(pdev, 0);
3189 spi_unregister_master(master);
3190 spi_master_put(master);
3191
3192 return 0;
3193}
3194
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003195static struct of_device_id msm_spi_dt_match[] = {
3196 {
3197 .compatible = "qcom,spi-qup-v2",
3198 },
3199 {}
3200};
3201
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303202static const struct dev_pm_ops msm_spi_dev_pm_ops = {
3203 SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
3204 SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
3205 msm_spi_pm_resume_runtime, NULL)
3206};
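/*
 * Runtime-PM flow (summary of the code above): client paths such as
 * msm_spi_workq() and msm_spi_setup() call pm_runtime_get_sync(), which
 * resumes the hardware through msm_spi_pm_resume_runtime(); the matching
 * pm_runtime_put_autosuspend() lets msm_spi_pm_suspend_runtime() run once
 * the autosuspend delay (MSEC_PER_SEC, set in probe) expires. System
 * suspend reuses the runtime-suspend path when the device is still active.
 */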
3207
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003208static struct platform_driver msm_spi_driver = {
3209 .driver = {
3210 .name = SPI_DRV_NAME,
3211 .owner = THIS_MODULE,
Alok Chauhan7fd3add2013-03-12 18:34:43 +05303212 .pm = &msm_spi_dev_pm_ops,
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003213 .of_match_table = msm_spi_dt_match,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003214 },
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003215 .remove = __exit_p(msm_spi_remove),
3216};
3217
3218static int __init msm_spi_init(void)
3219{
3220 return platform_driver_probe(&msm_spi_driver, msm_spi_probe);
3221}
3222module_init(msm_spi_init);
3223
3224static void __exit msm_spi_exit(void)
3225{
3226 platform_driver_unregister(&msm_spi_driver);
3227}
3228module_exit(msm_spi_exit);
Harini Jayaramanc710a5e2011-11-22 12:02:43 -07003229
3230MODULE_LICENSE("GPL v2");
3231MODULE_VERSION("0.4");
3232MODULE_ALIAS("platform:"SPI_DRV_NAME);