/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * SPI driver for Qualcomm MSM platforms
 *
 */
17
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/spinlock.h>
23#include <linux/list.h>
24#include <linux/irq.h>
25#include <linux/platform_device.h>
26#include <linux/spi/spi.h>
27#include <linux/interrupt.h>
28#include <linux/err.h>
29#include <linux/clk.h>
30#include <linux/delay.h>
31#include <linux/workqueue.h>
32#include <linux/io.h>
33#include <linux/debugfs.h>
34#include <linux/gpio.h>
35#include <linux/of.h>
36#include <linux/of_gpio.h>
37#include <linux/dma-mapping.h>
38#include <linux/sched.h>
39#include <linux/mutex.h>
40#include <linux/atomic.h>
41#include <linux/pm_runtime.h>
42#include <linux/spi/qcom-spi.h>
43#include <linux/msm-sps.h>
44#include <linux/msm-bus.h>
45#include <linux/msm-bus-board.h>
46#include "spi_qsd.h"
47
48#define SPI_MAX_BYTES_PER_WORD (4)
49
50static int msm_spi_pm_resume_runtime(struct device *device);
51static int msm_spi_pm_suspend_runtime(struct device *device);
52static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
53static int get_local_resources(struct msm_spi *dd);
54static void put_local_resources(struct msm_spi *dd);
55
56static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
57 struct platform_device *pdev)
58{
59 struct resource *resource;
60 unsigned long gsbi_mem_phys_addr;
61 size_t gsbi_mem_size;
62 void __iomem *gsbi_base;
63
64 resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
65 if (!resource)
66 return 0;
67
68 gsbi_mem_phys_addr = resource->start;
69 gsbi_mem_size = resource_size(resource);
70 if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
71 gsbi_mem_size, SPI_DRV_NAME))
72 return -ENXIO;
73
74 gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
75 gsbi_mem_size);
76 if (!gsbi_base)
77 return -ENXIO;
78
79 /* Set GSBI to SPI mode */
80 writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
81
82 return 0;
83}
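
/*
 * Note: the second MEM resource, when present, maps the GSBI wrapper that
 * muxes the shared serial core between protocols; the write above switches
 * that GSBI port into SPI mode. Platforms that do not describe the resource
 * simply skip this step, which is why a missing resource is treated as
 * success.
 */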
84
85static inline void msm_spi_register_init(struct msm_spi *dd)
86{
87 writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
88 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
89 writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
90 writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
91 writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
92 if (dd->qup_ver)
93 writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
94}
95
96static int msm_spi_pinctrl_init(struct msm_spi *dd)
97{
98 dd->pinctrl = devm_pinctrl_get(dd->dev);
99 if (IS_ERR_OR_NULL(dd->pinctrl)) {
100 dev_err(dd->dev, "Failed to get pin ctrl\n");
101 return PTR_ERR(dd->pinctrl);
102 }
103 dd->pins_active = pinctrl_lookup_state(dd->pinctrl,
104 SPI_PINCTRL_STATE_DEFAULT);
105 if (IS_ERR_OR_NULL(dd->pins_active)) {
106 dev_err(dd->dev, "Failed to lookup pinctrl default state\n");
107 return PTR_ERR(dd->pins_active);
108 }
109
110 dd->pins_sleep = pinctrl_lookup_state(dd->pinctrl,
111 SPI_PINCTRL_STATE_SLEEP);
112 if (IS_ERR_OR_NULL(dd->pins_sleep)) {
113 dev_err(dd->dev, "Failed to lookup pinctrl sleep state\n");
114 return PTR_ERR(dd->pins_sleep);
115 }
116
117 return 0;
118}
119
120static inline int msm_spi_request_gpios(struct msm_spi *dd)
121{
122 int i = 0;
123 int result = 0;
124
125 if (!dd->pdata->use_pinctrl) {
126 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
127 if (dd->spi_gpios[i] >= 0) {
128 result = gpio_request(dd->spi_gpios[i],
129 spi_rsrcs[i]);
130 if (result) {
131 dev_err(dd->dev,
132 "error %d gpio_request for pin %d\n",
133 result, dd->spi_gpios[i]);
134 goto error;
135 }
136 }
137 }
138 } else {
139 result = pinctrl_select_state(dd->pinctrl, dd->pins_active);
140 if (result) {
141 dev_err(dd->dev, "%s: Can not set %s pins\n",
142 __func__, SPI_PINCTRL_STATE_DEFAULT);
143 goto error;
144 }
145 }
146 return 0;
147error:
148 if (!dd->pdata->use_pinctrl) {
149 for (; --i >= 0;) {
150 if (dd->spi_gpios[i] >= 0)
151 gpio_free(dd->spi_gpios[i]);
152 }
153 }
154 return result;
155}
156
157static inline void msm_spi_free_gpios(struct msm_spi *dd)
158{
159 int i;
160 int result = 0;
161
162 if (!dd->pdata->use_pinctrl) {
163 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
164 if (dd->spi_gpios[i] >= 0)
165 gpio_free(dd->spi_gpios[i]);
166 }
167
168 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
169 if (dd->cs_gpios[i].valid) {
170 gpio_free(dd->cs_gpios[i].gpio_num);
171 dd->cs_gpios[i].valid = 0;
172 }
173 }
174 } else {
175 result = pinctrl_select_state(dd->pinctrl, dd->pins_sleep);
176 if (result)
177 dev_err(dd->dev, "%s: Can not set %s pins\n",
178 __func__, SPI_PINCTRL_STATE_SLEEP);
179 }
180}
181
182static inline int msm_spi_request_cs_gpio(struct msm_spi *dd)
183{
184 int cs_num;
185 int rc;
186
187 cs_num = dd->spi->chip_select;
188 if (!(dd->spi->mode & SPI_LOOP)) {
189 if (!dd->pdata->use_pinctrl) {
190 if ((!(dd->cs_gpios[cs_num].valid)) &&
191 (dd->cs_gpios[cs_num].gpio_num >= 0)) {
192 rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
193 spi_cs_rsrcs[cs_num]);
194
195 if (rc) {
196 dev_err(dd->dev,
197 "gpio_request for pin %d failed,error %d\n",
198 dd->cs_gpios[cs_num].gpio_num, rc);
199 return rc;
200 }
201 dd->cs_gpios[cs_num].valid = 1;
202 }
203 }
204 }
205 return 0;
206}
207
208static inline void msm_spi_free_cs_gpio(struct msm_spi *dd)
209{
210 int cs_num;
211
212 cs_num = dd->spi->chip_select;
213 if (!dd->pdata->use_pinctrl) {
214 if (dd->cs_gpios[cs_num].valid) {
215 gpio_free(dd->cs_gpios[cs_num].gpio_num);
216 dd->cs_gpios[cs_num].valid = 0;
217 }
218 }
219}
220
221
/**
 * msm_spi_clk_max_rate: find the nearest rate at or below the requested one
 * @clk: the clock for which to find the nearest lower rate
 * @rate: requested clock frequency in Hz
 * @return: nearest lower rate, or a negative error value
 *
 * The public clock API's clk_round_rate() is a ceiling function. This
 * function provides the matching floor, implemented as a binary search
 * built on top of that ceiling.
 */
232static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
233{
234 long lowest_available, nearest_low, step_size, cur;
235 long step_direction = -1;
236 long guess = rate;
237 int max_steps = 10;
238
239 cur = clk_round_rate(clk, rate);
240 if (cur == rate)
241 return rate;
242
243 /* if we got here then: cur > rate */
244 lowest_available = clk_round_rate(clk, 0);
245 if (lowest_available > rate)
246 return -EINVAL;
247
248 step_size = (rate - lowest_available) >> 1;
249 nearest_low = lowest_available;
250
251 while (max_steps-- && step_size) {
252 guess += step_size * step_direction;
253
254 cur = clk_round_rate(clk, guess);
255
256 if ((cur < rate) && (cur > nearest_low))
257 nearest_low = cur;
258
259 /*
260 * if we stepped too far, then start stepping in the other
261 * direction with half the step size
262 */
263 if (((cur > rate) && (step_direction > 0))
264 || ((cur < rate) && (step_direction < 0))) {
265 step_direction = -step_direction;
266 step_size >>= 1;
267 }
268 }
269 return nearest_low;
270}
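
/*
 * Illustrative example (rates are made up): if the clock supports 960000,
 * 4800000 and 9600000 Hz and the caller asks for 5000000 Hz,
 * clk_round_rate() rounds up to 9600000, while the search above converges
 * on 4800000, the nearest rate that does not exceed the request.
 */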
271
272static void msm_spi_clock_set(struct msm_spi *dd, int speed)
273{
274 long rate;
275 int rc;
276
277 rate = msm_spi_clk_max_rate(dd->clk, speed);
278 if (rate < 0) {
279 dev_err(dd->dev,
280 "%s: no match found for requested clock frequency:%d",
281 __func__, speed);
282 return;
283 }
284
285 rc = clk_set_rate(dd->clk, rate);
286 if (!rc)
287 dd->clock_speed = rate;
288}
289
290static void msm_spi_clk_path_vote(struct msm_spi *dd, u32 rate)
291{
292 if (dd->bus_cl_hdl) {
293 u64 ib = rate * dd->pdata->bus_width;
294
295 msm_bus_scale_update_bw(dd->bus_cl_hdl, 0, ib);
296 }
297}
298
299static void msm_spi_clk_path_teardown(struct msm_spi *dd)
300{
301 msm_spi_clk_path_vote(dd, 0);
302
303 if (dd->bus_cl_hdl) {
304 msm_bus_scale_unregister(dd->bus_cl_hdl);
305 dd->bus_cl_hdl = NULL;
306 }
307}
308
/**
 * msm_spi_clk_path_postponed_register: register with bus scaling after it
 * has been probed
 *
 * @return: zero on success
 *
 * Workaround: the SPI driver may be probed before the bus scaling driver.
 * Calling msm_bus_scale_register_client() fails if the bus scaling driver is
 * not ready yet, so this function is called from a later context rather than
 * from probe. It may also be called more than once before registration
 * succeeds; in that case only one error message is logged. At boot time all
 * clocks are on, so earlier SPI transactions should still succeed.
 */
321static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
322{
323 int ret = 0;
324
325 dd->bus_cl_hdl = msm_bus_scale_register(dd->pdata->master_id,
326 MSM_BUS_SLAVE_EBI_CH0,
327 (char *)dev_name(dd->dev),
328 false);
329
330 if (IS_ERR_OR_NULL(dd->bus_cl_hdl)) {
331 ret = (dd->bus_cl_hdl ? PTR_ERR(dd->bus_cl_hdl) : -EAGAIN);
332 dev_err(dd->dev, "Failed bus registration Err %d", ret);
333 }
334
335 return ret;
336}
337
338static void msm_spi_clk_path_init(struct msm_spi *dd)
339{
	/*
	 * Bail out if path voting is disabled (master_id == 0) or if the
	 * client is already registered (bus_cl_hdl is set).
	 */
344 if (!dd->pdata->master_id || dd->bus_cl_hdl)
345 return;
346
347 /* on failure try again later */
348 if (msm_spi_clk_path_postponed_register(dd))
349 return;
350
351}
352
353static int msm_spi_calculate_size(int *fifo_size,
354 int *block_size,
355 int block,
356 int mult)
357{
358 int words;
359
360 switch (block) {
361 case 0:
362 words = 1; /* 4 bytes */
363 break;
364 case 1:
365 words = 4; /* 16 bytes */
366 break;
367 case 2:
368 words = 8; /* 32 bytes */
369 break;
370 default:
371 return -EINVAL;
372 }
373
374 switch (mult) {
375 case 0:
376 *fifo_size = words * 2;
377 break;
378 case 1:
379 *fifo_size = words * 4;
380 break;
381 case 2:
382 *fifo_size = words * 8;
383 break;
384 case 3:
385 *fifo_size = words * 16;
386 break;
387 default:
388 return -EINVAL;
389 }
390
391 *block_size = words * sizeof(u32); /* in bytes */
392 return 0;
393}
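
/*
 * Worked example (illustrative): a block field of 1 selects 4-word (16-byte)
 * blocks, and a FIFO-size multiplier field of 2 means the FIFO holds 8 such
 * blocks, so *fifo_size becomes 32 words and *block_size becomes 16 bytes.
 */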
394
395static void msm_spi_calculate_fifo_size(struct msm_spi *dd)
396{
397 u32 spi_iom;
398 int block;
399 int mult;
400
401 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
402
403 block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
404 mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
405 if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
406 block, mult)) {
407 goto fifo_size_err;
408 }
409
410 block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
411 mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
412 if (msm_spi_calculate_size(&dd->output_fifo_size,
413 &dd->output_block_size, block, mult)) {
414 goto fifo_size_err;
415 }
416 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
417 /* DM mode is not available for this block size */
418 if (dd->input_block_size == 4 || dd->output_block_size == 4)
419 dd->use_dma = 0;
420
421 if (dd->use_dma) {
422 dd->input_burst_size = max(dd->input_block_size,
423 DM_BURST_SIZE);
424 dd->output_burst_size = max(dd->output_block_size,
425 DM_BURST_SIZE);
426 }
427 }
428
429 return;
430
431fifo_size_err:
432 dd->use_dma = 0;
433 pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
434}
435
436static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
437{
438 u32 data_in;
439 int i;
440 int shift;
441 int read_bytes = (dd->pack_words ?
442 SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
443
444 data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
445 if (dd->read_buf) {
446 for (i = 0; (i < read_bytes) &&
447 dd->rx_bytes_remaining; i++) {
448 /* The data format depends on bytes_per_word:
449 * 4 bytes: 0x12345678
450 * 3 bytes: 0x00123456
451 * 2 bytes: 0x00001234
452 * 1 byte : 0x00000012
453 */
454 shift = BITS_PER_BYTE * i;
455 *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
456 dd->rx_bytes_remaining--;
457 }
458 } else {
459 if (dd->rx_bytes_remaining >= read_bytes)
460 dd->rx_bytes_remaining -= read_bytes;
461 else
462 dd->rx_bytes_remaining = 0;
463 }
464
465 dd->read_xfr_cnt++;
466}
467
468static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
469{
470 u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
471
472 return spi_op & SPI_OP_STATE_VALID;
473}
474
475static inline void msm_spi_udelay(unsigned int delay_usecs)
476{
	/*
	 * For small delays the context-switch overhead of usleep_range()
	 * outweighs its benefit, so busy-wait with udelay() instead.
	 */
481 if (delay_usecs > 20)
482 usleep_range(delay_usecs, delay_usecs + 1);
483 else if (delay_usecs)
484 udelay(delay_usecs);
485}
486
487static inline int msm_spi_wait_valid(struct msm_spi *dd)
488{
489 unsigned int delay = 0;
490 unsigned long timeout = 0;
491
492 if (dd->clock_speed == 0)
493 return -EINVAL;
494 /*
495 * Based on the SPI clock speed, sufficient time
496 * should be given for the SPI state transition
497 * to occur
498 */
499 delay = (10 * USEC_PER_SEC) / dd->clock_speed;
500 /*
501 * For small delay values, the default timeout would
502 * be one jiffy
503 */
504 if (delay < SPI_DELAY_THRESHOLD)
505 delay = SPI_DELAY_THRESHOLD;
506
507 /* Adding one to round off to the nearest jiffy */
508 timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
509 while (!msm_spi_is_valid_state(dd)) {
510 if (time_after(jiffies, timeout)) {
511 if (!msm_spi_is_valid_state(dd)) {
512 dev_err(dd->dev, "Invalid SPI operational state\n");
513 return -ETIMEDOUT;
514 } else
515 return 0;
516 }
517 msm_spi_udelay(delay);
518 }
519 return 0;
520}
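
/*
 * For example (illustrative): at a 1 MHz SPI clock the poll interval works
 * out to 10 * USEC_PER_SEC / 1000000 = 10 us. Very small intervals are
 * clamped to SPI_DELAY_THRESHOLD, and the interval scaled by
 * SPI_DEFAULT_TIMEOUT bounds how long we poll for a valid state.
 */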
521
522static inline int msm_spi_set_state(struct msm_spi *dd,
523 enum msm_spi_state state)
524{
525 enum msm_spi_state cur_state;
526
527 if (msm_spi_wait_valid(dd))
528 return -EIO;
529 cur_state = readl_relaxed(dd->base + SPI_STATE);
530 /* Per spec:
531 * For PAUSE_STATE to RESET_STATE, two writes of (10) are required
532 */
533 if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
534 (state == SPI_OP_STATE_RESET)) {
535 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
536 writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
537 } else {
538 writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
539 dd->base + SPI_STATE);
540 }
541 if (msm_spi_wait_valid(dd))
542 return -EIO;
543
544 return 0;
545}
546
547/**
548 * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
549 */
550static inline void
551msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
552{
553 *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
554
555 if (n != (*config & SPI_CFG_N))
556 *config = (*config & ~SPI_CFG_N) | n;
557
558 if (dd->tx_mode == SPI_BAM_MODE) {
559 if (dd->read_buf == NULL)
560 *config |= SPI_NO_INPUT;
561 if (dd->write_buf == NULL)
562 *config |= SPI_NO_OUTPUT;
563 }
564}
565
/**
 * msm_spi_calc_spi_config_loopback_and_input_first: calculate the values that
 * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
 * @return: calculated value for SPI_CONFIG
 */
571static u32
572msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
573{
574 if (mode & SPI_LOOP)
575 spi_config |= SPI_CFG_LOOPBACK;
576 else
577 spi_config &= ~SPI_CFG_LOOPBACK;
578
579 if (mode & SPI_CPHA)
580 spi_config &= ~SPI_CFG_INPUT_FIRST;
581 else
582 spi_config |= SPI_CFG_INPUT_FIRST;
583
584 return spi_config;
585}
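
/*
 * In other words: SPI_LOOP controls the internal loopback bit, while CPHA
 * selects INPUT_FIRST: SPI modes 0 and 2 (CPHA = 0) keep SPI_CFG_INPUT_FIRST
 * set, and modes 1 and 3 clear it.
 */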
586
587/**
588 * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
589 * next transfer
590 */
591static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
592{
593 u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
594
595 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
596 spi_config, dd->spi->mode);
597
598 if (dd->qup_ver == SPI_QUP_VERSION_NONE)
599 /* flags removed from SPI_CONFIG in QUP version-2 */
600 msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
601
602 /*
603 * HS_MODE improves signal stability for spi-clk high rates
604 * but is invalid in LOOPBACK mode.
605 */
606 if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
607 !(dd->spi->mode & SPI_LOOP))
608 spi_config |= SPI_CFG_HS_MODE;
609 else
610 spi_config &= ~SPI_CFG_HS_MODE;
611
612 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
613}
614
/**
 * msm_spi_set_mx_counts: set the QUP read/write counts for FIFO mode;
 * set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for BAM and DMOV modes.
 * @n_words: the number of reads/writes of size N.
 */
621static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
622{
	/*
	 * For FIFO mode:
	 *   - Set the MX_OUTPUT_COUNT/MX_INPUT_COUNT registers to 0.
	 *   - Set the READ/WRITE_COUNT registers to 0 (infinite mode), or to
	 *     the number of words (finite mode) when the transfer fits within
	 *     one FIFO.
	 * For Block mode:
	 *   - Set the MX_OUTPUT/MX_INPUT_COUNT registers to the number of
	 *     words to transfer.
	 *   - Set the READ/WRITE_COUNT registers to 0.
	 */
632 if (dd->tx_mode != SPI_BAM_MODE) {
633 if (dd->tx_mode == SPI_FIFO_MODE) {
634 if (n_words <= dd->input_fifo_size)
635 msm_spi_set_write_count(dd, n_words);
636 else
637 msm_spi_set_write_count(dd, 0);
638 writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
639 } else
640 writel_relaxed(n_words, dd->base + SPI_MX_OUTPUT_COUNT);
641
642 if (dd->rx_mode == SPI_FIFO_MODE) {
643 if (n_words <= dd->input_fifo_size)
644 writel_relaxed(n_words,
645 dd->base + SPI_MX_READ_COUNT);
646 else
647 writel_relaxed(0,
648 dd->base + SPI_MX_READ_COUNT);
649 writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
650 } else
651 writel_relaxed(n_words, dd->base + SPI_MX_INPUT_COUNT);
652 } else {
653 /* must be zero for BAM and DMOV */
654 writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
655 msm_spi_set_write_count(dd, 0);
656
		/*
		 * For DMA transfers, both QUP_MX_INPUT_COUNT and
		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one:
		 * an unbalanced transfer where there is only a read_buf.
		 */
663 if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
664 if (dd->write_buf)
665 writel_relaxed(0,
666 dd->base + SPI_MX_INPUT_COUNT);
667 else
668 writel_relaxed(n_words,
669 dd->base + SPI_MX_INPUT_COUNT);
670
671 writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
672 }
673 }
674}
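
/*
 * Example (illustrative): a 24-word FIFO-mode read on a QUP whose input FIFO
 * holds 32 words programs SPI_MX_READ_COUNT = 24 (finite mode) and leaves
 * SPI_MX_INPUT_COUNT at 0, while a BAM transfer zeroes the read/write counts
 * and lets the queued descriptors bound the transfer instead.
 */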
675
676static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd,
677 struct msm_spi_bam_pipe *pipe)
678{
679 int ret = sps_disconnect(pipe->handle);
680
681 if (ret) {
682 dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n",
683 __func__, pipe->name);
684 return ret;
685 }
686 return 0;
687}
688
689static int msm_spi_bam_pipe_connect(struct msm_spi *dd,
690 struct msm_spi_bam_pipe *pipe, struct sps_connect *config)
691{
692 int ret;
693 struct sps_register_event event = {
694 .mode = SPS_TRIGGER_WAIT,
695 .options = SPS_O_EOT,
696 };
697
698 if (pipe == &dd->bam.prod)
699 event.xfer_done = &dd->rx_transfer_complete;
700 else if (pipe == &dd->bam.cons)
701 event.xfer_done = &dd->tx_transfer_complete;
702
703 ret = sps_connect(pipe->handle, config);
704 if (ret) {
705 dev_err(dd->dev, "%s: sps_connect(%s:0x%pK):%d",
706 __func__, pipe->name, pipe->handle, ret);
707 return ret;
708 }
709
710 ret = sps_register_event(pipe->handle, &event);
711 if (ret) {
712 dev_err(dd->dev, "%s sps_register_event(hndl:0x%pK %s):%d",
713 __func__, pipe->handle, pipe->name, ret);
714 msm_spi_bam_pipe_disconnect(dd, pipe);
715 return ret;
716 }
717
718 pipe->teardown_required = true;
719 return 0;
720}
721
722
723static void msm_spi_bam_pipe_flush(struct msm_spi *dd,
724 enum msm_spi_pipe_direction pipe_dir)
725{
726 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
727 (&dd->bam.prod) : (&dd->bam.cons);
728 struct sps_connect config = pipe->config;
729 int ret;
730
731 ret = msm_spi_bam_pipe_disconnect(dd, pipe);
732 if (ret)
733 return;
734
735 ret = msm_spi_bam_pipe_connect(dd, pipe, &config);
736 if (ret)
737 return;
738}
739
740static void msm_spi_bam_flush(struct msm_spi *dd)
741{
742 dev_dbg(dd->dev, "%s flushing bam for recovery\n", __func__);
743
744 msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE);
745 msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
746}
747
748static int
749msm_spi_bam_process_rx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
750{
751 int ret = 0;
752 u32 data_xfr_size = 0, rem_bc = 0;
753 u32 prod_flags = 0;
754
755 rem_bc = dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd;
756 data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
757
758 /*
759 * set flags for last descriptor only
760 */
761 if ((desc_cnt == 1)
762 || (*bytes_to_send == data_xfr_size))
763 prod_flags = (dd->write_buf)
764 ? 0 : (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
765
766 /*
767 * enqueue read buffer in BAM
768 */
769 ret = sps_transfer_one(dd->bam.prod.handle,
770 dd->cur_rx_transfer->rx_dma
771 + dd->bam.curr_rx_bytes_recvd,
772 data_xfr_size, dd, prod_flags);
773 if (ret < 0) {
774 dev_err(dd->dev,
775 "%s: Failed to queue producer BAM transfer",
776 __func__);
777 return ret;
778 }
779
780 dd->bam.curr_rx_bytes_recvd += data_xfr_size;
781 *bytes_to_send -= data_xfr_size;
782 dd->bam.bam_rx_len -= data_xfr_size;
783 return data_xfr_size;
784}
785
786static int
787msm_spi_bam_process_tx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
788{
789 int ret = 0;
790 u32 data_xfr_size = 0, rem_bc = 0;
791 u32 cons_flags = 0;
792
793 rem_bc = dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent;
794 data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
795
796 /*
797 * set flags for last descriptor only
798 */
799 if ((desc_cnt == 1)
800 || (*bytes_to_send == data_xfr_size))
801 cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
802
803 /*
804 * enqueue write buffer in BAM
805 */
806 ret = sps_transfer_one(dd->bam.cons.handle,
807 dd->cur_tx_transfer->tx_dma
808 + dd->bam.curr_tx_bytes_sent,
809 data_xfr_size, dd, cons_flags);
810 if (ret < 0) {
811 dev_err(dd->dev,
812 "%s: Failed to queue consumer BAM transfer",
813 __func__);
814 return ret;
815 }
816
817 dd->bam.curr_tx_bytes_sent += data_xfr_size;
818 *bytes_to_send -= data_xfr_size;
819 dd->bam.bam_tx_len -= data_xfr_size;
820 return data_xfr_size;
821}
822
823
/**
 * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
 * using BAM.
 * @brief BAM can move at most SPI_MAX_TRFR_BTWN_RESETS bytes in a single
 * transfer; between transfers the QUP must pass through the reset state.
 * The loop below issues one BAM transfer at a time.
 * @return: zero on success
 */
832static int
833msm_spi_bam_begin_transfer(struct msm_spi *dd)
834{
835 u32 tx_bytes_to_send = 0, rx_bytes_to_recv = 0;
836 u32 n_words_xfr;
837 s32 ret = 0;
838 u32 prod_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
839 u32 cons_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
840 u32 byte_count = 0;
841
842 rx_bytes_to_recv = min_t(u32, dd->bam.bam_rx_len,
843 SPI_MAX_TRFR_BTWN_RESETS);
844 tx_bytes_to_send = min_t(u32, dd->bam.bam_tx_len,
845 SPI_MAX_TRFR_BTWN_RESETS);
846 n_words_xfr = DIV_ROUND_UP(rx_bytes_to_recv,
847 dd->bytes_per_word);
848
849 msm_spi_set_mx_counts(dd, n_words_xfr);
850 ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
851 if (ret < 0) {
852 dev_err(dd->dev,
853 "%s: Failed to set QUP state to run",
854 __func__);
855 goto xfr_err;
856 }
857
858 while ((rx_bytes_to_recv + tx_bytes_to_send) &&
859 ((cons_desc_cnt + prod_desc_cnt) > 0)) {
860 struct spi_transfer *t = NULL;
861
862 if (dd->read_buf && (prod_desc_cnt > 0)) {
863 ret = msm_spi_bam_process_rx(dd, &rx_bytes_to_recv,
864 prod_desc_cnt);
865 if (ret < 0)
866 goto xfr_err;
867
868 if (!(dd->cur_rx_transfer->len
869 - dd->bam.curr_rx_bytes_recvd))
870 t = dd->cur_rx_transfer;
871 prod_desc_cnt--;
872 }
873
874 if (dd->write_buf && (cons_desc_cnt > 0)) {
875 ret = msm_spi_bam_process_tx(dd, &tx_bytes_to_send,
876 cons_desc_cnt);
877 if (ret < 0)
878 goto xfr_err;
879
880 if (!(dd->cur_tx_transfer->len
881 - dd->bam.curr_tx_bytes_sent))
882 t = dd->cur_tx_transfer;
883 cons_desc_cnt--;
884 }
885
886 byte_count += ret;
887 }
888
889 dd->tx_bytes_remaining -= min_t(u32, byte_count,
890 SPI_MAX_TRFR_BTWN_RESETS);
891 return 0;
892xfr_err:
893 return ret;
894}
895
896static int
897msm_spi_bam_next_transfer(struct msm_spi *dd)
898{
899 if (dd->tx_mode != SPI_BAM_MODE)
900 return 0;
901
902 if (dd->tx_bytes_remaining > 0) {
903 if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
904 return 0;
905 if ((msm_spi_bam_begin_transfer(dd)) < 0) {
906 dev_err(dd->dev, "%s: BAM transfer setup failed\n",
907 __func__);
908 return 0;
909 }
910 return 1;
911 }
912 return 0;
913}
914
915static int msm_spi_dma_send_next(struct msm_spi *dd)
916{
917 int ret = 0;
918
919 if (dd->tx_mode == SPI_BAM_MODE)
920 ret = msm_spi_bam_next_transfer(dd);
921 return ret;
922}
923
924static inline void msm_spi_ack_transfer(struct msm_spi *dd)
925{
926 writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
927 SPI_OP_MAX_OUTPUT_DONE_FLAG,
928 dd->base + SPI_OPERATIONAL);
929 /* Ensure done flag was cleared before proceeding further */
930 mb();
931}
932
/* Figure out which IRQ occurred and call the relevant handlers */
934static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
935{
936 u32 op, ret = IRQ_NONE;
937 struct msm_spi *dd = dev_id;
938
939 if (pm_runtime_suspended(dd->dev)) {
940 dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
941 return ret;
942 }
943 if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
944 readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
945 struct spi_master *master = dev_get_drvdata(dd->dev);
946
947 ret |= msm_spi_error_irq(irq, master);
948 }
949
950 op = readl_relaxed(dd->base + SPI_OPERATIONAL);
951 writel_relaxed(op, dd->base + SPI_OPERATIONAL);
952 /*
953 * Ensure service flag was cleared before further
954 * processing of interrupt.
955 */
956 mb();
957 if (op & SPI_OP_INPUT_SERVICE_FLAG)
958 ret |= msm_spi_input_irq(irq, dev_id);
959
960 if (op & SPI_OP_OUTPUT_SERVICE_FLAG)
961 ret |= msm_spi_output_irq(irq, dev_id);
962
963 if (dd->tx_mode != SPI_BAM_MODE) {
964 if (!dd->rx_done) {
965 if (dd->rx_bytes_remaining == 0)
966 dd->rx_done = true;
967 }
968 if (!dd->tx_done) {
969 if (!dd->tx_bytes_remaining &&
970 (op & SPI_OP_IP_FIFO_NOT_EMPTY)) {
971 dd->tx_done = true;
972 }
973 }
974 }
975 if (dd->tx_done && dd->rx_done) {
976 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
977 dd->tx_done = false;
978 dd->rx_done = false;
979 complete(&dd->rx_transfer_complete);
980 complete(&dd->tx_transfer_complete);
981 }
982 return ret;
983}
984
985static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
986{
987 struct msm_spi *dd = dev_id;
988
989 dd->stat_rx++;
990
991 if (dd->rx_mode == SPI_MODE_NONE)
992 return IRQ_HANDLED;
993
994 if (dd->rx_mode == SPI_FIFO_MODE) {
995 while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
996 SPI_OP_IP_FIFO_NOT_EMPTY) &&
997 (dd->rx_bytes_remaining > 0)) {
998 msm_spi_read_word_from_fifo(dd);
999 }
1000 } else if (dd->rx_mode == SPI_BLOCK_MODE) {
1001 int count = 0;
1002
1003 while (dd->rx_bytes_remaining &&
1004 (count < dd->input_block_size)) {
1005 msm_spi_read_word_from_fifo(dd);
1006 count += SPI_MAX_BYTES_PER_WORD;
1007 }
1008 }
1009
1010 return IRQ_HANDLED;
1011}
1012
1013static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
1014{
1015 u32 word;
1016 u8 byte;
1017 int i;
1018 int write_bytes =
1019 (dd->pack_words ? SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
1020
1021 word = 0;
1022 if (dd->write_buf) {
1023 for (i = 0; (i < write_bytes) &&
1024 dd->tx_bytes_remaining; i++) {
1025 dd->tx_bytes_remaining--;
1026 byte = *dd->write_buf++;
1027 word |= (byte << (BITS_PER_BYTE * i));
1028 }
1029 } else
1030 if (dd->tx_bytes_remaining > write_bytes)
1031 dd->tx_bytes_remaining -= write_bytes;
1032 else
1033 dd->tx_bytes_remaining = 0;
1034 dd->write_xfr_cnt++;
1035
1036 writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
1037}
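
/*
 * Packing example (illustrative): with two bytes per word and write_buf
 * holding { 0x12, 0x34 }, the loop above assembles 0x00003412 before pushing
 * it into SPI_OUTPUT_FIFO; the first buffer byte always lands in the least
 * significant byte, mirroring the layout documented in
 * msm_spi_read_word_from_fifo().
 */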
1038
1039static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
1040{
1041 int count = 0;
1042
1043 if (dd->tx_mode == SPI_FIFO_MODE) {
1044 while ((dd->tx_bytes_remaining > 0) &&
1045 (count < dd->input_fifo_size) &&
1046 !(readl_relaxed(dd->base + SPI_OPERATIONAL)
1047 & SPI_OP_OUTPUT_FIFO_FULL)) {
1048 msm_spi_write_word_to_fifo(dd);
1049 count++;
1050 }
1051 }
1052
1053 if (dd->tx_mode == SPI_BLOCK_MODE) {
1054 while (dd->tx_bytes_remaining &&
1055 (count < dd->output_block_size)) {
1056 msm_spi_write_word_to_fifo(dd);
1057 count += SPI_MAX_BYTES_PER_WORD;
1058 }
1059 }
1060}
1061
1062static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
1063{
1064 struct msm_spi *dd = dev_id;
1065
1066 dd->stat_tx++;
1067
1068 if (dd->tx_mode == SPI_MODE_NONE)
1069 return IRQ_HANDLED;
1070
1071 /* Output FIFO is empty. Transmit any outstanding write data. */
1072 if ((dd->tx_mode == SPI_FIFO_MODE) || (dd->tx_mode == SPI_BLOCK_MODE))
1073 msm_spi_write_rmn_to_fifo(dd);
1074
1075 return IRQ_HANDLED;
1076}
1077
1078static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
1079{
1080 struct spi_master *master = dev_id;
1081 struct msm_spi *dd = spi_master_get_devdata(master);
1082 u32 spi_err;
1083
1084 spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
1085 if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
1086 dev_warn(master->dev.parent, "SPI output overrun error\n");
1087 if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
1088 dev_warn(master->dev.parent, "SPI input underrun error\n");
1089 if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
1090 dev_warn(master->dev.parent, "SPI output underrun error\n");
1091 msm_spi_get_clk_err(dd, &spi_err);
1092 if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
1093 dev_warn(master->dev.parent, "SPI clock overrun error\n");
1094 if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
1095 dev_warn(master->dev.parent, "SPI clock underrun error\n");
1096 msm_spi_clear_error_flags(dd);
1097 msm_spi_ack_clk_err(dd);
1098 /* Ensure clearing of QUP_ERROR_FLAGS was completed */
1099 mb();
1100 return IRQ_HANDLED;
1101}
1102
1103static int msm_spi_bam_map_buffers(struct msm_spi *dd)
1104{
1105 int ret = -EINVAL;
1106 struct device *dev;
1107 struct spi_transfer *xfr;
1108 void *tx_buf, *rx_buf;
1109 u32 tx_len, rx_len;
1110
1111 dev = dd->dev;
1112 xfr = dd->cur_transfer;
1113
1114 tx_buf = (void *)xfr->tx_buf;
1115 rx_buf = xfr->rx_buf;
1116 tx_len = rx_len = xfr->len;
1117 if (tx_buf != NULL) {
1118 xfr->tx_dma = dma_map_single(dev, tx_buf,
1119 tx_len, DMA_TO_DEVICE);
1120 if (dma_mapping_error(dev, xfr->tx_dma)) {
1121 ret = -ENOMEM;
1122 goto error;
1123 }
1124 }
1125
1126 if (rx_buf != NULL) {
1127 xfr->rx_dma = dma_map_single(dev, rx_buf, rx_len,
1128 DMA_FROM_DEVICE);
1129 if (dma_mapping_error(dev, xfr->rx_dma)) {
1130 if (tx_buf != NULL)
1131 dma_unmap_single(dev,
1132 xfr->tx_dma,
1133 tx_len, DMA_TO_DEVICE);
1134 ret = -ENOMEM;
1135 goto error;
1136 }
1137 }
1138
1139 return 0;
1140error:
1141 msm_spi_dma_unmap_buffers(dd);
1142 return ret;
1143}
1144
1145static int msm_spi_dma_map_buffers(struct msm_spi *dd)
1146{
1147 int ret = 0;
1148
1149 if (dd->tx_mode == SPI_BAM_MODE)
1150 ret = msm_spi_bam_map_buffers(dd);
1151 return ret;
1152}
1153
1154static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
1155{
1156 struct device *dev;
1157 struct spi_transfer *xfr;
1158 void *tx_buf, *rx_buf;
1159 u32 tx_len, rx_len;
1160
1161 dev = dd->dev;
1162 xfr = dd->cur_transfer;
1163
1164 tx_buf = (void *)xfr->tx_buf;
1165 rx_buf = xfr->rx_buf;
1166 tx_len = rx_len = xfr->len;
1167 if (tx_buf != NULL)
1168 dma_unmap_single(dev, xfr->tx_dma,
1169 tx_len, DMA_TO_DEVICE);
1170
1171 if (rx_buf != NULL)
1172 dma_unmap_single(dev, xfr->rx_dma,
1173 rx_len, DMA_FROM_DEVICE);
1174}
1175
1176static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
1177{
1178 if (dd->tx_mode == SPI_BAM_MODE)
1179 msm_spi_bam_unmap_buffers(dd);
1180}
1181
/**
 * msm_spi_use_dma - decide whether to use the Data Mover or BAM for
 * the given transfer
 * @dd: device
 * @tr: transfer
 *
 * Use DMA only if:
 * 1. It is supported by the hardware.
 * 2. It is not disabled by platform data.
 * 3. The transfer size is at least 3 * block size.
 * 4. The buffers are cache-line aligned.
 * 5. Bits-per-word is 8, 16 or 32.
 */
1195static inline bool
1196msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
1197{
1198 if (!dd->use_dma)
1199 return false;
1200
1201 /* check constraints from platform data */
1202 if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
1203 return false;
1204
1205 if (dd->cur_msg_len < 3*dd->input_block_size)
1206 return false;
1207
1208 if ((dd->qup_ver != SPI_QUP_VERSION_BFAM) &&
1209 !dd->read_len && !dd->write_len)
1210 return false;
1211
1212 if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
1213 u32 cache_line = dma_get_cache_alignment();
1214
1215 if (tr->tx_buf) {
1216 if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
				return false;
1218 }
1219 if (tr->rx_buf) {
1220 if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
1221 return false;
1222 }
1223
1224 if (tr->cs_change &&
1225 ((bpw != 8) && (bpw != 16) && (bpw != 32)))
1226 return false;
1227 }
1228
1229 return true;
1230}
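
/*
 * For instance (illustrative): with a 16-byte input block size, only
 * transfers of 48 bytes or more are considered for BAM/DMA; shorter
 * transfers fall back to FIFO or block mode where the setup cost is lower.
 */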
1231
/**
 * msm_spi_set_transfer_mode: choose the optimal transfer mode. Sets
 * dd->tx_mode/dd->rx_mode and prepares to process a transfer.
 */
1236static void
1237msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
1238{
1239 if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
1240 dd->tx_mode = SPI_BAM_MODE;
1241 dd->rx_mode = SPI_BAM_MODE;
1242 } else {
1243 dd->rx_mode = SPI_FIFO_MODE;
1244 dd->tx_mode = SPI_FIFO_MODE;
1245 dd->read_len = dd->cur_transfer->len;
1246 dd->write_len = dd->cur_transfer->len;
1247 }
1248}
1249
1250/**
1251 * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
1252 * transfer
1253 */
1254static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
1255{
1256 u32 spi_iom;
1257
1258 spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
1259 /* Set input and output transfer mode: FIFO, DMOV, or BAM */
1260 spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
1261 spi_iom = (spi_iom | (dd->tx_mode << OUTPUT_MODE_SHIFT));
1262 spi_iom = (spi_iom | (dd->rx_mode << INPUT_MODE_SHIFT));
1263
	/*
	 * Always enable packing in BAM mode; in non-BAM mode enable it only
	 * when bits-per-word is a multiple of 8 and the transfer length is a
	 * multiple of 4 bytes.
	 */
1267 if (dd->tx_mode == SPI_BAM_MODE ||
1268 ((dd->cur_msg_len % SPI_MAX_BYTES_PER_WORD == 0) &&
1269 (dd->cur_transfer->bits_per_word) &&
1270 (dd->cur_transfer->bits_per_word <= 32) &&
1271 (dd->cur_transfer->bits_per_word % 8 == 0))) {
1272 spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
1273 dd->pack_words = true;
1274 } else {
1275 spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
1276 spi_iom |= SPI_IO_M_OUTPUT_BIT_SHIFT_EN;
1277 dd->pack_words = false;
1278 }
1279
1280 writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
1281}
1282
1283static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
1284{
1285 if (mode & SPI_CPOL)
1286 spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
1287 else
1288 spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
1289 return spi_ioc;
1290}
1291
1292/**
1293 * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
1294 * next transfer
1295 * @return the new set value of SPI_IO_CONTROL
1296 */
1297static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
1298{
1299 u32 spi_ioc, spi_ioc_orig, chip_select;
1300
1301 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1302 spi_ioc_orig = spi_ioc;
1303 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
1304 , dd->spi->mode);
1305 /* Set chip-select */
1306 chip_select = dd->spi->chip_select << 2;
1307 if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
1308 spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
1309 if (!dd->cur_transfer->cs_change)
1310 spi_ioc |= SPI_IO_C_MX_CS_MODE;
1311
1312 if (spi_ioc != spi_ioc_orig)
1313 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1314
1315 /*
1316 * Ensure that the IO control mode register gets written
1317 * before proceeding with the transfer.
1318 */
1319 mb();
1320 return spi_ioc;
1321}
1322
1323/**
1324 * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
1325 * the next transfer
1326 */
1327static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
1328{
	/*
	 * Mask the INPUT and OUTPUT service flags to prevent IRQs on FIFO
	 * status changes in BAM mode.
	 */
1332 u32 mask = (dd->tx_mode == SPI_BAM_MODE) ?
1333 QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
1334 : 0;
1335 writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
1336}
1337
1338static void get_transfer_length(struct msm_spi *dd)
1339{
1340 struct spi_transfer *xfer = dd->cur_transfer;
1341
1342 dd->cur_msg_len = 0;
1343 dd->read_len = dd->write_len = 0;
1344 dd->bam.bam_tx_len = dd->bam.bam_rx_len = 0;
1345
1346 if (xfer->tx_buf)
1347 dd->bam.bam_tx_len = dd->write_len = xfer->len;
1348 if (xfer->rx_buf)
1349 dd->bam.bam_rx_len = dd->read_len = xfer->len;
1350 dd->cur_msg_len = xfer->len;
1351}
1352
1353static int msm_spi_process_transfer(struct msm_spi *dd)
1354{
1355 u8 bpw;
1356 u32 max_speed;
1357 u32 read_count;
1358 u32 timeout;
1359 u32 spi_ioc;
1360 u32 int_loopback = 0;
1361 int ret;
1362 int status = 0;
1363
1364 get_transfer_length(dd);
1365 dd->cur_tx_transfer = dd->cur_transfer;
1366 dd->cur_rx_transfer = dd->cur_transfer;
1367 dd->bam.curr_rx_bytes_recvd = dd->bam.curr_tx_bytes_sent = 0;
1368 dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
1369 dd->tx_bytes_remaining = dd->cur_msg_len;
1370 dd->rx_bytes_remaining = dd->cur_msg_len;
1371 dd->read_buf = dd->cur_transfer->rx_buf;
1372 dd->write_buf = dd->cur_transfer->tx_buf;
1373 dd->tx_done = false;
1374 dd->rx_done = false;
1375 init_completion(&dd->tx_transfer_complete);
1376 init_completion(&dd->rx_transfer_complete);
1377 if (dd->cur_transfer->bits_per_word)
1378 bpw = dd->cur_transfer->bits_per_word;
1379 else
1380 bpw = 8;
1381 dd->bytes_per_word = (bpw + 7) / 8;
1382
1383 if (dd->cur_transfer->speed_hz)
1384 max_speed = dd->cur_transfer->speed_hz;
1385 else
1386 max_speed = dd->spi->max_speed_hz;
1387 if (!dd->clock_speed || max_speed != dd->clock_speed)
1388 msm_spi_clock_set(dd, max_speed);
1389
1390 timeout = 100 * msecs_to_jiffies(
1391 DIV_ROUND_UP(dd->cur_msg_len * 8,
1392 DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
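	/*
	 * Illustrative sizing: a 1000-byte (8000-bit) transfer at 8 MHz needs
	 * about 1 ms on the wire, so the 100x factor above yields roughly a
	 * 100 ms timeout; the margin absorbs scheduling latency and setup
	 * overhead.
	 */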
1393
1394 read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
1395 if (dd->spi->mode & SPI_LOOP)
1396 int_loopback = 1;
1397
1398 ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1399 if (ret < 0) {
1400 dev_err(dd->dev,
1401 "%s: Error setting QUP to reset-state",
1402 __func__);
1403 return ret;
1404 }
1405
1406 msm_spi_set_transfer_mode(dd, bpw, read_count);
1407 msm_spi_set_mx_counts(dd, read_count);
1408 if (dd->tx_mode == SPI_BAM_MODE) {
1409 ret = msm_spi_dma_map_buffers(dd);
1410 if (ret < 0) {
			pr_err("Error mapping DMA buffers\n");
1412 dd->tx_mode = SPI_MODE_NONE;
1413 dd->rx_mode = SPI_MODE_NONE;
1414 return ret;
1415 }
1416 }
1417 msm_spi_set_qup_io_modes(dd);
1418 msm_spi_set_spi_config(dd, bpw);
1419 msm_spi_set_qup_config(dd, bpw);
1420 spi_ioc = msm_spi_set_spi_io_control(dd);
1421 msm_spi_set_qup_op_mask(dd);
1422
	/*
	 * The output FIFO interrupt handler will handle all writes after the
	 * first. Restricting this to one write avoids contention and race
	 * conditions between this thread and the interrupt handler.
	 */
1427 if (dd->tx_mode != SPI_BAM_MODE) {
1428 if (msm_spi_prepare_for_write(dd))
1429 goto transfer_end;
1430 msm_spi_start_write(dd, read_count);
1431 } else {
1432 if ((msm_spi_bam_begin_transfer(dd)) < 0) {
1433 dev_err(dd->dev, "%s: BAM transfer setup failed\n",
1434 __func__);
1435 status = -EIO;
1436 goto transfer_end;
1437 }
1438 }
1439
	/*
	 * In BAM mode the QUP is already in the RUN state at this point.
	 * Otherwise, only enter the RUN state after the first word has been
	 * written into the output FIFO; if RUN is entered earlier, the output
	 * FIFO EMPTY interrupt might fire before the first word is written,
	 * creating a possible race condition.
	 */
1447 if (dd->tx_mode != SPI_BAM_MODE)
1448 if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
1449 dev_warn(dd->dev,
1450 "%s: Failed to set QUP to run-state. Mode:%d",
1451 __func__, dd->tx_mode);
1452 goto transfer_end;
1453 }
1454
1455 /* Assume success, this might change later upon transaction result */
1456 do {
1457 if (dd->write_buf &&
1458 !wait_for_completion_timeout(&dd->tx_transfer_complete,
1459 timeout)) {
1460 dev_err(dd->dev, "%s: SPI Tx transaction timeout\n",
1461 __func__);
1462 status = -EIO;
1463 break;
1464 }
1465
1466 if (dd->read_buf &&
1467 !wait_for_completion_timeout(&dd->rx_transfer_complete,
1468 timeout)) {
1469 dev_err(dd->dev, "%s: SPI Rx transaction timeout\n",
1470 __func__);
1471 status = -EIO;
1472 break;
1473 }
1474 } while (msm_spi_dma_send_next(dd));
1475
1476 msm_spi_udelay(dd->xfrs_delay_usec);
1477
1478transfer_end:
1479 if ((dd->tx_mode == SPI_BAM_MODE) && status)
1480 msm_spi_bam_flush(dd);
1481 msm_spi_dma_unmap_buffers(dd);
1482 dd->tx_mode = SPI_MODE_NONE;
1483 dd->rx_mode = SPI_MODE_NONE;
1484
1485 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1486 if (!dd->cur_transfer->cs_change)
1487 writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
1488 dd->base + SPI_IO_CONTROL);
1489 return status;
1490}
1491
1492
1493static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
1494{
1495 struct msm_spi *dd = spi_master_get_devdata(spi->master);
1496 u32 spi_ioc;
1497 u32 spi_ioc_orig;
1498 int rc = 0;
1499
1500 rc = pm_runtime_get_sync(dd->dev);
1501 if (rc < 0) {
1502 dev_err(dd->dev, "Failure during runtime get,rc=%d", rc);
1503 return;
1504 }
1505
1506 if (dd->pdata->is_shared) {
1507 rc = get_local_resources(dd);
1508 if (rc)
1509 return;
1510 }
1511
1512 msm_spi_clk_path_vote(dd, spi->max_speed_hz);
1513
1514 if (!(spi->mode & SPI_CS_HIGH))
1515 set_flag = !set_flag;
1516
	/* Serve only under the mutex, as runtime suspend may cause a race */
1518 mutex_lock(&dd->core_lock);
1519 if (dd->suspended) {
1520 dev_err(dd->dev, "%s: SPI operational state=%d Invalid\n",
1521 __func__, dd->suspended);
1522 mutex_unlock(&dd->core_lock);
1523 return;
1524 }
1525
1526 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1527 spi_ioc_orig = spi_ioc;
1528 if (set_flag)
1529 spi_ioc |= SPI_IO_C_FORCE_CS;
1530 else
1531 spi_ioc &= ~SPI_IO_C_FORCE_CS;
1532
1533 if (spi_ioc != spi_ioc_orig)
1534 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1535 if (dd->pdata->is_shared)
1536 put_local_resources(dd);
1537 mutex_unlock(&dd->core_lock);
1538
1539 pm_runtime_mark_last_busy(dd->dev);
1540 pm_runtime_put_autosuspend(dd->dev);
1541}
1542
1543static void reset_core(struct msm_spi *dd)
1544{
1545 u32 spi_ioc;
1546
1547 msm_spi_register_init(dd);
1548 /*
1549 * The SPI core generates a bogus input overrun error on some targets,
1550 * when a transition from run to reset state occurs and if the FIFO has
1551 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
1552 * bit.
1553 */
1554 msm_spi_enable_error_flags(dd);
1555
1556 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1557 spi_ioc |= SPI_IO_C_NO_TRI_STATE;
1558 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1559 /*
1560 * Ensure that the IO control is written to before returning.
1561 */
1562 mb();
1563 msm_spi_set_state(dd, SPI_OP_STATE_RESET);
1564}
1565
1566static void put_local_resources(struct msm_spi *dd)
1567{
1568
1569 if (IS_ERR_OR_NULL(dd->clk) || IS_ERR_OR_NULL(dd->pclk)) {
1570 dev_err(dd->dev,
1571 "%s: error clk put\n",
1572 __func__);
1573 return;
1574 }
1575 msm_spi_disable_irqs(dd);
1576 clk_disable_unprepare(dd->clk);
1577 dd->clock_speed = 0;
1578 clk_disable_unprepare(dd->pclk);
1579
1580 /* Free the spi clk, miso, mosi, cs gpio */
1581 if (dd->pdata && dd->pdata->gpio_release)
1582 dd->pdata->gpio_release();
1583
1584 msm_spi_free_gpios(dd);
1585}
1586
1587static int get_local_resources(struct msm_spi *dd)
1588{
1589 int ret = -EINVAL;
1590
1591 if (IS_ERR_OR_NULL(dd->clk) || IS_ERR_OR_NULL(dd->pclk)) {
1592 dev_err(dd->dev,
			"%s: invalid clock handle\n",
1594 __func__);
1595 return ret;
1596 }
1597
1598 /* Configure the spi clk, miso, mosi and cs gpio */
1599 if (dd->pdata->gpio_config) {
1600 ret = dd->pdata->gpio_config();
1601 if (ret) {
1602 dev_err(dd->dev,
1603 "%s: error configuring GPIOs\n",
1604 __func__);
1605 return ret;
1606 }
1607 }
1608
1609 ret = msm_spi_request_gpios(dd);
1610 if (ret)
1611 return ret;
1612
1613 ret = clk_prepare_enable(dd->clk);
1614 if (ret)
1615 goto clk0_err;
1616 ret = clk_prepare_enable(dd->pclk);
1617 if (ret)
1618 goto clk1_err;
1619 msm_spi_enable_irqs(dd);
1620
1621 return 0;
1622
1623clk1_err:
1624 clk_disable_unprepare(dd->clk);
1625clk0_err:
1626 msm_spi_free_gpios(dd);
1627 return ret;
1628}
1629
/**
 * msm_spi_transfer_one: process one SPI transfer at a time
 * @master: SPI master controller reference
 * @spi: SPI slave device the transfer is addressed to
 * @xfer: the transfer to process
 * @return: zero on success or negative error value
 */
1637static int msm_spi_transfer_one(struct spi_master *master,
1638 struct spi_device *spi,
1639 struct spi_transfer *xfer)
1640{
1641 struct msm_spi *dd;
1642 unsigned long flags;
1643 u32 status_error = 0;
1644
1645 dd = spi_master_get_devdata(master);
1646
1647 /* Check message parameters */
1648 if (xfer->speed_hz > dd->pdata->max_clock_speed ||
1649 (xfer->bits_per_word &&
1650 (xfer->bits_per_word < 4 || xfer->bits_per_word > 32)) ||
1651 (xfer->tx_buf == NULL && xfer->rx_buf == NULL)) {
1652 dev_err(dd->dev,
1653 "Invalid transfer: %d Hz, %d bpw tx=%pK, rx=%pK\n",
1654 xfer->speed_hz, xfer->bits_per_word,
1655 xfer->tx_buf, xfer->rx_buf);
1656 return -EINVAL;
1657 }
1658 dd->spi = spi;
1659 dd->cur_transfer = xfer;
1660
1661 mutex_lock(&dd->core_lock);
1662
1663 spin_lock_irqsave(&dd->queue_lock, flags);
1664 dd->transfer_pending = 1;
1665 spin_unlock_irqrestore(&dd->queue_lock, flags);
	/*
	 * Get local resources for each transfer to ensure we're in a good
	 * state and not interfering with other EEs (execution environments)
	 * using this device.
	 */
1670 if (dd->pdata->is_shared) {
1671 if (get_local_resources(dd)) {
1672 mutex_unlock(&dd->core_lock);
1673 spi_finalize_current_message(master);
1674 return -EINVAL;
1675 }
1676
1677 reset_core(dd);
1678 if (dd->use_dma) {
1679 msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
1680 &dd->bam.prod.config);
1681 msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
1682 &dd->bam.cons.config);
1683 }
1684 }
1685
1686 if (dd->suspended || !msm_spi_is_valid_state(dd)) {
1687 dev_err(dd->dev, "%s: SPI operational state not valid\n",
1688 __func__);
1689 status_error = 1;
1690 }
1691
1692
1693 if (!status_error)
1694 status_error =
1695 msm_spi_process_transfer(dd);
1696
1697 spin_lock_irqsave(&dd->queue_lock, flags);
1698 dd->transfer_pending = 0;
1699 spin_unlock_irqrestore(&dd->queue_lock, flags);
1700
	/*
	 * Put local resources prior to calling finalize to ensure the hw is
	 * in a known state before notifying the calling thread (which is a
	 * different context since we're running in the spi kthread here) to
	 * prevent race conditions between us and any other EEs using this hw.
	 */
1707 if (dd->pdata->is_shared) {
1708 if (dd->use_dma) {
1709 msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
1710 msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
1711 }
1712 put_local_resources(dd);
1713 }
1714 mutex_unlock(&dd->core_lock);
1715 if (dd->suspended)
1716 wake_up_interruptible(&dd->continue_suspend);
1717 return status_error;
1718}
1719
1720static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
1721{
1722 struct msm_spi *dd = spi_master_get_devdata(master);
1723 int resume_state = 0;
1724
1725 resume_state = pm_runtime_get_sync(dd->dev);
1726 if (resume_state < 0)
1727 goto spi_finalize;
1728
1729 /*
1730 * Counter-part of system-suspend when runtime-pm is not enabled.
1731 * This way, resume can be left empty and device will be put in
1732 * active mode only if client requests anything on the bus
1733 */
1734 if (!pm_runtime_enabled(dd->dev))
1735 resume_state = msm_spi_pm_resume_runtime(dd->dev);
1736 if (resume_state < 0)
1737 goto spi_finalize;
1738 if (dd->suspended) {
1739 resume_state = -EBUSY;
1740 goto spi_finalize;
1741 }
1742 return 0;
1743
1744spi_finalize:
1745 spi_finalize_current_message(master);
1746 return resume_state;
1747}
1748
1749static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
1750{
1751 struct msm_spi *dd = spi_master_get_devdata(master);
1752
1753 pm_runtime_mark_last_busy(dd->dev);
1754 pm_runtime_put_autosuspend(dd->dev);
1755 return 0;
1756}
1757
1758static int msm_spi_setup(struct spi_device *spi)
1759{
1760 struct msm_spi *dd;
1761 int rc = 0;
1762 u32 spi_ioc;
1763 u32 spi_config;
1764 u32 mask;
1765
1766 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
1767 dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
1768 __func__, spi->bits_per_word);
1769 return -EINVAL;
1770 }
1771 if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
1772 dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
1773 __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
1774 return -EINVAL;
1775 }
1776
1777 dd = spi_master_get_devdata(spi->master);
1778
1779 rc = pm_runtime_get_sync(dd->dev);
1780 if (rc < 0 && !dd->is_init_complete &&
1781 pm_runtime_enabled(dd->dev)) {
1782 pm_runtime_set_suspended(dd->dev);
1783 pm_runtime_put_sync(dd->dev);
1784 rc = 0;
1785 goto err_setup_exit;
1786 } else
1787 rc = 0;
1788
1789 mutex_lock(&dd->core_lock);
1790
1791 /* Counter-part of system-suspend when runtime-pm is not enabled. */
1792 if (!pm_runtime_enabled(dd->dev)) {
1793 rc = msm_spi_pm_resume_runtime(dd->dev);
1794 if (rc < 0 && !dd->is_init_complete) {
1795 rc = 0;
1796 mutex_unlock(&dd->core_lock);
1797 goto err_setup_exit;
1798 }
1799 }
1800
1801 if (dd->suspended) {
1802 rc = -EBUSY;
1803 mutex_unlock(&dd->core_lock);
1804 goto err_setup_exit;
1805 }
1806
1807 if (dd->pdata->is_shared) {
1808 rc = get_local_resources(dd);
1809 if (rc)
1810 goto no_resources;
1811 }
1812
1813 spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
1814 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
1815 if (spi->mode & SPI_CS_HIGH)
1816 spi_ioc |= mask;
1817 else
1818 spi_ioc &= ~mask;
1819 spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
1820
1821 writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
1822
1823 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
1824 spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
1825 spi_config, spi->mode);
1826 writel_relaxed(spi_config, dd->base + SPI_CONFIG);
1827
1828 /* Ensure previous write completed before disabling the clocks */
1829 mb();
1830 if (dd->pdata->is_shared)
1831 put_local_resources(dd);
1832 /* Counter-part of system-resume when runtime-pm is not enabled. */
1833 if (!pm_runtime_enabled(dd->dev))
1834 msm_spi_pm_suspend_runtime(dd->dev);
1835
1836no_resources:
1837 mutex_unlock(&dd->core_lock);
1838 pm_runtime_mark_last_busy(dd->dev);
1839 pm_runtime_put_autosuspend(dd->dev);
1840
1841err_setup_exit:
1842 return rc;
1843}
1844
1845#ifdef CONFIG_DEBUG_FS
1846
1847
1848static int debugfs_iomem_x32_set(void *data, u64 val)
1849{
1850 struct msm_spi_debugfs_data *reg = (struct msm_spi_debugfs_data *)data;
1851 struct msm_spi *dd = reg->dd;
1852 int ret;
1853
1854 ret = pm_runtime_get_sync(dd->dev);
1855 if (ret < 0)
1856 return ret;
1857
1858 writel_relaxed(val, (dd->base + reg->offset));
1859 /* Ensure the previous write completed. */
1860 mb();
1861
1862 pm_runtime_mark_last_busy(dd->dev);
1863 pm_runtime_put_autosuspend(dd->dev);
1864 return 0;
1865}
1866
1867static int debugfs_iomem_x32_get(void *data, u64 *val)
1868{
1869 struct msm_spi_debugfs_data *reg = (struct msm_spi_debugfs_data *)data;
1870 struct msm_spi *dd = reg->dd;
1871 int ret;
1872
1873 ret = pm_runtime_get_sync(dd->dev);
1874 if (ret < 0)
1875 return ret;
1876 *val = readl_relaxed(dd->base + reg->offset);
1877 /* Ensure the previous read completed. */
1878 mb();
1879
1880 pm_runtime_mark_last_busy(dd->dev);
1881 pm_runtime_put_autosuspend(dd->dev);
1882 return 0;
1883}
1884
1885DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
1886 debugfs_iomem_x32_set, "0x%08llx\n");
1887
1888static void spi_debugfs_init(struct msm_spi *dd)
1889{
1890 char dir_name[20];
1891
1892 scnprintf(dir_name, sizeof(dir_name), "%s_dbg", dev_name(dd->dev));
1893 dd->dent_spi = debugfs_create_dir(dir_name, NULL);
1894 if (dd->dent_spi) {
1895 int i;
1896
1897 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
1898 dd->reg_data[i].offset = debugfs_spi_regs[i].offset;
1899 dd->reg_data[i].dd = dd;
1900 dd->debugfs_spi_regs[i] =
1901 debugfs_create_file(
1902 debugfs_spi_regs[i].name,
1903 debugfs_spi_regs[i].mode,
1904 dd->dent_spi, &dd->reg_data[i],
1905 &fops_iomem_x32);
1906 }
1907 }
1908}
1909
1910static void spi_debugfs_exit(struct msm_spi *dd)
1911{
1912 if (dd->dent_spi) {
1913 int i;
1914
1915 debugfs_remove_recursive(dd->dent_spi);
1916 dd->dent_spi = NULL;
1917 for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
1918 dd->debugfs_spi_regs[i] = NULL;
1919 }
1920}
1921#else
1922static void spi_debugfs_init(struct msm_spi *dd) {}
1923static void spi_debugfs_exit(struct msm_spi *dd) {}
1924#endif
1925
1926/* ===Device attributes begin=== */
1927static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
1928 char *buf)
1929{
1930 struct spi_master *master = dev_get_drvdata(dev);
1931 struct msm_spi *dd = spi_master_get_devdata(master);
1932
1933 return snprintf(buf, PAGE_SIZE,
1934 "Device %s\n"
1935 "rx fifo_size = %d spi words\n"
1936 "tx fifo_size = %d spi words\n"
1937 "use_dma ? %s\n"
1938 "rx block size = %d bytes\n"
1939 "tx block size = %d bytes\n"
1940 "input burst size = %d bytes\n"
1941 "output burst size = %d bytes\n"
1942 "DMA configuration:\n"
1943 "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
1944 "--statistics--\n"
1945 "Rx isrs = %d\n"
1946 "Tx isrs = %d\n"
1947 "--debug--\n"
1948 "NA yet\n",
1949 dev_name(dev),
1950 dd->input_fifo_size,
1951 dd->output_fifo_size,
1952 dd->use_dma ? "yes" : "no",
1953 dd->input_block_size,
1954 dd->output_block_size,
1955 dd->input_burst_size,
1956 dd->output_burst_size,
1957 dd->tx_dma_chan,
1958 dd->rx_dma_chan,
1959 dd->tx_dma_crci,
1960 dd->rx_dma_crci,
1961 dd->stat_rx,
1962 dd->stat_tx
1963 );
1964}
1965
1966/* Reset statistics on write */
1967static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
1968 const char *buf, size_t count)
1969{
1970 struct msm_spi *dd = dev_get_drvdata(dev);
1971
1972 dd->stat_rx = 0;
1973 dd->stat_tx = 0;
1974 return count;
1975}
1976
1977static DEVICE_ATTR(stats, 0644, show_stats, set_stats);
1978
1979static struct attribute *dev_attrs[] = {
1980 &dev_attr_stats.attr,
1981 NULL,
1982};
1983
1984static struct attribute_group dev_attr_grp = {
1985 .attrs = dev_attrs,
1986};
1987/* ===Device attributes end=== */
1988
1989static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
1990 enum msm_spi_pipe_direction pipe_dir)
1991{
1992 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
1993 (&dd->bam.prod) : (&dd->bam.cons);
1994 if (!pipe->teardown_required)
1995 return;
1996
1997 msm_spi_bam_pipe_disconnect(dd, pipe);
1998 dma_free_coherent(dd->dev, pipe->config.desc.size,
1999 pipe->config.desc.base, pipe->config.desc.phys_base);
2000 sps_free_endpoint(pipe->handle);
2001 pipe->handle = NULL;
2002 pipe->teardown_required = false;
2003}
2004
2005static int msm_spi_bam_pipe_init(struct msm_spi *dd,
2006 enum msm_spi_pipe_direction pipe_dir)
2007{
2008 int rc = 0;
2009 struct sps_pipe *pipe_handle;
2010 struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
2011 (&dd->bam.prod) : (&dd->bam.cons);
2012 struct sps_connect *pipe_conf = &pipe->config;
2013
2014 pipe->name = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? "cons" : "prod";
2015 pipe->handle = NULL;
2016 pipe_handle = sps_alloc_endpoint();
2017 if (!pipe_handle) {
2018 dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
2019 , __func__);
2020 return -ENOMEM;
2021 }
2022
2023 memset(pipe_conf, 0, sizeof(*pipe_conf));
2024 rc = sps_get_config(pipe_handle, pipe_conf);
2025 if (rc) {
2026 dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
2027 , __func__);
2028 goto config_err;
2029 }
2030
2031 if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
2032 pipe_conf->source = dd->bam.handle;
2033 pipe_conf->destination = SPS_DEV_HANDLE_MEM;
2034 pipe_conf->mode = SPS_MODE_SRC;
2035 pipe_conf->src_pipe_index =
2036 dd->pdata->bam_producer_pipe_index;
2037 pipe_conf->dest_pipe_index = 0;
2038 } else {
2039 pipe_conf->source = SPS_DEV_HANDLE_MEM;
2040 pipe_conf->destination = dd->bam.handle;
2041 pipe_conf->mode = SPS_MODE_DEST;
2042 pipe_conf->src_pipe_index = 0;
2043 pipe_conf->dest_pipe_index =
2044 dd->pdata->bam_consumer_pipe_index;
2045 }
2046 pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
2047 pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
2048 pipe_conf->desc.base = dma_alloc_coherent(dd->dev,
2049 pipe_conf->desc.size,
2050 &pipe_conf->desc.phys_base,
2051 GFP_KERNEL);
2052 if (!pipe_conf->desc.base) {
2053 dev_err(dd->dev, "%s: Failed allocate BAM pipe memory"
2054 , __func__);
2055 rc = -ENOMEM;
2056 goto config_err;
2057 }
2058 /* zero descriptor FIFO for convenient debugging of first descs */
2059 memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
2060
2061 pipe->handle = pipe_handle;
2062
2063 return 0;
2064
2065config_err:
2066 sps_free_endpoint(pipe_handle);
2067
2068 return rc;
2069}
2070
2071static void msm_spi_bam_teardown(struct msm_spi *dd)
2072{
2073 msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
2074 msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
2075
2076 if (dd->bam.deregister_required) {
2077 sps_deregister_bam_device(dd->bam.handle);
2078 dd->bam.deregister_required = false;
2079 }
2080}
2081
2082static int msm_spi_bam_init(struct msm_spi *dd)
2083{
2084 struct sps_bam_props bam_props = {0};
2085 uintptr_t bam_handle;
2086 int rc = 0;
2087
2088 rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
2089 if (rc || !bam_handle) {
2090 bam_props.phys_addr = dd->bam.phys_addr;
2091 bam_props.virt_addr = dd->bam.base;
2092 bam_props.irq = dd->bam.irq;
2093 bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
2094 bam_props.summing_threshold = 0x10;
2095
2096 rc = sps_register_bam_device(&bam_props, &bam_handle);
2097 if (rc) {
2098 dev_err(dd->dev,
2099 "%s: Failed to register BAM device",
2100 __func__);
2101 return rc;
2102 }
2103 dd->bam.deregister_required = true;
2104 }
2105
2106 dd->bam.handle = bam_handle;
2107
2108 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
2109 if (rc) {
2110 dev_err(dd->dev,
2111 "%s: Failed to init producer BAM-pipe",
2112 __func__);
2113 goto bam_init_error;
2114 }
2115
2116 rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
2117 if (rc) {
2118 dev_err(dd->dev,
2119 "%s: Failed to init consumer BAM-pipe",
2120 __func__);
2121 goto bam_init_error;
2122 }
2123
2124 return 0;
2125
2126bam_init_error:
2127 msm_spi_bam_teardown(dd);
2128 return rc;
2129}
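/*
 * Note on the flow above: sps_phy2h() first checks whether a BAM at this
 * physical address has already been registered with the SPS driver (for
 * instance by an earlier user of the same BAM).  Only when no handle exists
 * is the BAM registered here, with SPS_BAM_MGR_DEVICE_REMOTE indicating that
 * the BAM device itself is managed remotely rather than by this driver; the
 * deregister_required flag then makes sure teardown only deregisters a BAM
 * that this driver registered itself.
 */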
2130
2131enum msm_spi_dt_entry_status {
2132 DT_REQ, /* Required: fail if missing */
2133 DT_SGST, /* Suggested: warn if missing */
2134 DT_OPT, /* Optional: don't warn if missing */
2135};
2136
2137enum msm_spi_dt_entry_type {
2138 DT_U32,
2139 DT_GPIO,
2140 DT_BOOL,
2141};
2142
2143struct msm_spi_dt_to_pdata_map {
2144 const char *dt_name;
2145 void *ptr_data;
2146 enum msm_spi_dt_entry_status status;
2147 enum msm_spi_dt_entry_type type;
2148 int default_val;
2149};
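/*
 * Each row of the map[] table built in msm_spi_dt_to_pdata() below ties one
 * device-tree property to its destination: the property name, a pointer to
 * the field to fill (in the platform data, or in struct msm_spi for GPIOs),
 * whether the entry is required/suggested/optional, its type, and a default
 * used when the property is absent.  A hypothetical extra entry would look
 * like:
 *
 *	{"qcom,example-prop", &pdata->example_field, DT_OPT, DT_U32, 0},
 *
 * where both the property name and example_field are illustrative only.
 */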
2150
2151static int msm_spi_dt_to_pdata_populate(struct platform_device *pdev,
2152 struct msm_spi_platform_data *pdata,
2153 struct msm_spi_dt_to_pdata_map *itr)
2154{
2155 int ret, err = 0;
2156 struct device_node *node = pdev->dev.of_node;
2157
2158 for (; itr->dt_name; ++itr) {
2159 switch (itr->type) {
2160 case DT_GPIO:
2161 ret = of_get_named_gpio(node, itr->dt_name, 0);
2162 if (ret >= 0) {
2163 *((int *) itr->ptr_data) = ret;
2164 ret = 0;
2165 }
2166 break;
2167 case DT_U32:
2168 ret = of_property_read_u32(node, itr->dt_name,
2169 (u32 *) itr->ptr_data);
2170 break;
2171 case DT_BOOL:
2172 *((bool *) itr->ptr_data) =
2173 of_property_read_bool(node, itr->dt_name);
2174 ret = 0;
2175 break;
2176 default:
2177 dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
2178 itr->type);
2179 ret = -EBADE;
2180 }
2181
2182 dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
2183 ret, itr->dt_name, *((int *)itr->ptr_data));
2184
2185 if (ret) {
2186 *((int *)itr->ptr_data) = itr->default_val;
2187
2188 if (itr->status < DT_OPT) {
2189 dev_err(&pdev->dev, "Missing '%s' DT entry\n",
2190 itr->dt_name);
2191
				/* continue on error to report all missing entries */
2193 if (itr->status == DT_REQ && !err)
2194 err = ret;
2195 }
2196 }
2197 }
2198
2199 return err;
2200}
2201
2202/**
 * msm_spi_dt_to_pdata - create platform data and read GPIO config from the device tree
2204 */
2205static struct msm_spi_platform_data *msm_spi_dt_to_pdata(
2206 struct platform_device *pdev, struct msm_spi *dd)
2207{
2208 struct msm_spi_platform_data *pdata;
2209
2210 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2211 if (!pdata)
2212 return NULL;
2213
2214 if (pdata) {
2215 struct msm_spi_dt_to_pdata_map map[] = {
2216 {"spi-max-frequency",
2217 &pdata->max_clock_speed, DT_SGST, DT_U32, 0},
2218 {"qcom,infinite-mode",
2219 &pdata->infinite_mode, DT_OPT, DT_U32, 0},
2220 {"qcom,master-id",
2221 &pdata->master_id, DT_SGST, DT_U32, 0},
2222 {"qcom,bus-width",
2223 &pdata->bus_width, DT_OPT, DT_U32, 8},
2224 {"qcom,ver-reg-exists",
2225 &pdata->ver_reg_exists, DT_OPT, DT_BOOL, 0},
2226 {"qcom,use-bam",
2227 &pdata->use_bam, DT_OPT, DT_BOOL, 0},
2228 {"qcom,use-pinctrl",
2229 &pdata->use_pinctrl, DT_OPT, DT_BOOL, 0},
2230 {"qcom,bam-consumer-pipe-index",
2231 &pdata->bam_consumer_pipe_index, DT_OPT, DT_U32, 0},
2232 {"qcom,bam-producer-pipe-index",
2233 &pdata->bam_producer_pipe_index, DT_OPT, DT_U32, 0},
2234 {"qcom,gpio-clk",
2235 &dd->spi_gpios[0], DT_OPT, DT_GPIO, -1},
2236 {"qcom,gpio-miso",
2237 &dd->spi_gpios[1], DT_OPT, DT_GPIO, -1},
2238 {"qcom,gpio-mosi",
2239 &dd->spi_gpios[2], DT_OPT, DT_GPIO, -1},
2240 {"qcom,gpio-cs0",
2241 &dd->cs_gpios[0].gpio_num, DT_OPT, DT_GPIO, -1},
2242 {"qcom,gpio-cs1",
2243 &dd->cs_gpios[1].gpio_num, DT_OPT, DT_GPIO, -1},
2244 {"qcom,gpio-cs2",
2245 &dd->cs_gpios[2].gpio_num, DT_OPT, DT_GPIO, -1},
2246 {"qcom,gpio-cs3",
2247 &dd->cs_gpios[3].gpio_num, DT_OPT, DT_GPIO, -1},
2248 {"qcom,rt-priority",
2249 &pdata->rt_priority, DT_OPT, DT_BOOL, 0},
2250 {"qcom,shared",
2251 &pdata->is_shared, DT_OPT, DT_BOOL, 0},
2252 {NULL, NULL, 0, 0, 0},
2253 };
2254
2255 if (msm_spi_dt_to_pdata_populate(pdev, pdata, map)) {
2256 devm_kfree(&pdev->dev, pdata);
2257 return NULL;
2258 }
2259 }
2260
2261 if (pdata->use_bam) {
2262 if (!pdata->bam_consumer_pipe_index) {
2263 dev_warn(&pdev->dev,
2264 "missing qcom,bam-consumer-pipe-index entry in device-tree\n");
2265 pdata->use_bam = false;
2266 }
2267
2268 if (!pdata->bam_producer_pipe_index) {
2269 dev_warn(&pdev->dev,
2270 "missing qcom,bam-producer-pipe-index entry in device-tree\n");
2271 pdata->use_bam = false;
2272 }
2273 }
2274 return pdata;
2275}
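/*
 * Illustrative device-tree fragment for this driver (a sketch only: the unit
 * address, frequency, pipe indices and master id below are placeholder
 * values, and the usual reg/interrupt/clock properties are omitted):
 *
 *	spi@f9923000 {
 *		compatible = "qcom,spi-qup-v2";
 *		spi-max-frequency = <19200000>;
 *		qcom,use-pinctrl;
 *		qcom,use-bam;
 *		qcom,bam-consumer-pipe-index = <12>;
 *		qcom,bam-producer-pipe-index = <13>;
 *		qcom,master-id = <86>;
 *	};
 */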
2276
2277static int msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
2278{
2279 u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
2280
2281 return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
2282 : SPI_QUP_VERSION_NONE;
2283}
2284
2285static int msm_spi_bam_get_resources(struct msm_spi *dd,
2286 struct platform_device *pdev, struct spi_master *master)
2287{
2288 struct resource *resource;
2289 size_t bam_mem_size;
2290
2291 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2292 "spi_bam_physical");
2293 if (!resource) {
2294 dev_warn(&pdev->dev,
2295 "%s: Missing spi_bam_physical entry in DT",
2296 __func__);
2297 return -ENXIO;
2298 }
2299
2300 dd->bam.phys_addr = resource->start;
2301 bam_mem_size = resource_size(resource);
2302 dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
2303 bam_mem_size);
2304 if (!dd->bam.base) {
2305 dev_warn(&pdev->dev,
2306 "%s: Failed to ioremap(spi_bam_physical)",
2307 __func__);
2308 return -ENXIO;
2309 }
2310
2311 dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
2312 if (dd->bam.irq < 0) {
2313 dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT",
2314 __func__);
2315 return -EINVAL;
2316 }
2317
2318 dd->dma_init = msm_spi_bam_init;
2319 dd->dma_teardown = msm_spi_bam_teardown;
2320 return 0;
2321}
2322
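/*
 * init_resources() below performs the heavier, clock-gated part of the
 * bring-up: pinctrl (when requested), core/iface clocks, the GSBI-to-SPI
 * configuration on pre-B-family parts, FIFO sizing, optional DMA/BAM setup,
 * register reset and IRQ registration.  It is not called from probe();
 * instead it is deferred until the first runtime resume (see
 * msm_spi_pm_resume_runtime() and dd->is_init_complete), so the controller
 * stays powered down until a client actually uses the bus.
 */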
2323static int init_resources(struct platform_device *pdev)
2324{
2325 struct spi_master *master = platform_get_drvdata(pdev);
2326 struct msm_spi *dd;
2327 int rc = -ENXIO;
2328 int clk_enabled = 0;
2329 int pclk_enabled = 0;
2330
2331 dd = spi_master_get_devdata(master);
2332
2333 if (dd->pdata && dd->pdata->use_pinctrl) {
2334 rc = msm_spi_pinctrl_init(dd);
2335 if (rc) {
2336 dev_err(&pdev->dev, "%s: pinctrl init failed\n",
2337 __func__);
2338 return rc;
2339 }
2340 }
2341
2342 mutex_lock(&dd->core_lock);
2343
2344 dd->clk = clk_get(&pdev->dev, "core_clk");
2345 if (IS_ERR(dd->clk)) {
2346 dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
2347 rc = PTR_ERR(dd->clk);
2348 goto err_clk_get;
2349 }
2350
2351 dd->pclk = clk_get(&pdev->dev, "iface_clk");
2352 if (IS_ERR(dd->pclk)) {
2353 dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
2354 rc = PTR_ERR(dd->pclk);
2355 goto err_pclk_get;
2356 }
2357
2358 if (dd->pdata && dd->pdata->max_clock_speed)
2359 msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
2360
2361 rc = clk_prepare_enable(dd->clk);
2362 if (rc) {
2363 dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
2364 __func__);
2365 goto err_clk_enable;
2366 }
2367
2368 clk_enabled = 1;
2369 rc = clk_prepare_enable(dd->pclk);
2370 if (rc) {
2371 dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
2372 __func__);
2373 goto err_pclk_enable;
2374 }
2375
2376 pclk_enabled = 1;
2377
2378 if (dd->pdata && dd->pdata->ver_reg_exists) {
2379 enum msm_spi_qup_version ver =
2380 msm_spi_get_qup_hw_ver(&pdev->dev, dd);
2381 if (dd->qup_ver != ver)
2382 dev_warn(&pdev->dev,
2383 "%s: HW version different then initially assumed by probe",
2384 __func__);
2385 }
2386
	/* GSBI does not exist on B-family MSM chips */
2388 if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
2389 rc = msm_spi_configure_gsbi(dd, pdev);
2390 if (rc)
2391 goto err_config_gsbi;
2392 }
2393
2394 msm_spi_calculate_fifo_size(dd);
2395 if (dd->use_dma) {
2396 rc = dd->dma_init(dd);
2397 if (rc) {
2398 dev_err(&pdev->dev,
2399 "%s: failed to init DMA. Disabling DMA mode\n",
2400 __func__);
2401 dd->use_dma = 0;
2402 }
2403 }
2404
2405 msm_spi_register_init(dd);
	/*
	 * The SPI core generates a bogus input overrun error on some targets
	 * when a transition from the run state to the reset state occurs
	 * while the FIFO holds an odd number of entries.  Hence we disable
	 * the INPUT_OVER_RUN_ERR_EN bit.
	 */
2412 msm_spi_enable_error_flags(dd);
2413
2414 writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
2415 rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
2416 if (rc)
2417 goto err_spi_state;
2418
2419 clk_disable_unprepare(dd->clk);
2420 clk_disable_unprepare(dd->pclk);
2421 clk_enabled = 0;
2422 pclk_enabled = 0;
2423
2424 dd->transfer_pending = 0;
2425 dd->tx_mode = SPI_MODE_NONE;
2426 dd->rx_mode = SPI_MODE_NONE;
2427
2428 rc = msm_spi_request_irq(dd, pdev, master);
2429 if (rc)
2430 goto err_irq;
2431
2432 msm_spi_disable_irqs(dd);
2433
2434 mutex_unlock(&dd->core_lock);
2435 return 0;
2436
2437err_irq:
2438err_spi_state:
2439 if (dd->use_dma && dd->dma_teardown)
2440 dd->dma_teardown(dd);
2441err_config_gsbi:
2442 if (pclk_enabled)
2443 clk_disable_unprepare(dd->pclk);
2444err_pclk_enable:
2445 if (clk_enabled)
2446 clk_disable_unprepare(dd->clk);
2447err_clk_enable:
2448 clk_put(dd->pclk);
2449err_pclk_get:
2450 clk_put(dd->clk);
2451err_clk_get:
2452 mutex_unlock(&dd->core_lock);
2453 return rc;
2454}
2455
2456static int msm_spi_probe(struct platform_device *pdev)
2457{
2458 struct spi_master *master;
2459 struct msm_spi *dd;
2460 struct resource *resource;
2461 int i = 0;
2462 int rc = -ENXIO;
2463 struct msm_spi_platform_data *pdata;
2464
2465 master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
2466 if (!master) {
2467 rc = -ENOMEM;
2468 dev_err(&pdev->dev, "master allocation failed\n");
2469 goto err_probe_exit;
2470 }
2471
2472 master->bus_num = pdev->id;
2473 master->mode_bits = SPI_SUPPORTED_MODES;
2474 master->num_chipselect = SPI_NUM_CHIPSELECTS;
2475 master->set_cs = msm_spi_set_cs;
2476 master->setup = msm_spi_setup;
2477 master->prepare_transfer_hardware = msm_spi_prepare_transfer_hardware;
2478 master->transfer_one = msm_spi_transfer_one;
2479 master->unprepare_transfer_hardware
2480 = msm_spi_unprepare_transfer_hardware;
2481
2482 platform_set_drvdata(pdev, master);
2483 dd = spi_master_get_devdata(master);
2484
2485 if (pdev->dev.of_node) {
2486 dd->qup_ver = SPI_QUP_VERSION_BFAM;
2487 master->dev.of_node = pdev->dev.of_node;
2488 pdata = msm_spi_dt_to_pdata(pdev, dd);
2489 if (!pdata) {
2490 dev_err(&pdev->dev, "platform data allocation failed\n");
2491 rc = -ENOMEM;
2492 goto err_probe_exit;
2493 }
2494
2495 rc = of_alias_get_id(pdev->dev.of_node, "spi");
2496 if (rc < 0)
2497 dev_warn(&pdev->dev,
2498 "using default bus_num %d\n", pdev->id);
2499 else
2500 master->bus_num = pdev->id = rc;
2501 } else {
2502 pdata = pdev->dev.platform_data;
2503 dd->qup_ver = SPI_QUP_VERSION_NONE;
2504
2505 for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
2506 resource = platform_get_resource(pdev, IORESOURCE_IO,
2507 i);
2508 dd->spi_gpios[i] = resource ? resource->start : -1;
2509 }
2510
2511 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
2512 resource = platform_get_resource(pdev, IORESOURCE_IO,
2513 i + ARRAY_SIZE(spi_rsrcs));
2514 dd->cs_gpios[i].gpio_num = resource ?
2515 resource->start : -1;
2516 }
2517 }
2518
2519 for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
2520 dd->cs_gpios[i].valid = 0;
2521
2522 dd->pdata = pdata;
2523 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2524 if (!resource) {
2525 rc = -ENXIO;
2526 goto err_probe_res;
2527 }
2528
2529 dd->mem_phys_addr = resource->start;
2530 dd->mem_size = resource_size(resource);
2531 dd->dev = &pdev->dev;
2532
2533 if (pdata) {
2534 master->rt = pdata->rt_priority;
2535 if (pdata->dma_config) {
2536 rc = pdata->dma_config();
2537 if (rc) {
2538 dev_warn(&pdev->dev,
2539 "%s: DM mode not supported\n",
2540 __func__);
2541 dd->use_dma = 0;
2542 goto skip_dma_resources;
2543 }
2544 }
2545 if (!dd->pdata->use_bam)
2546 goto skip_dma_resources;
2547
2548 rc = msm_spi_bam_get_resources(dd, pdev, master);
2549 if (rc) {
2550 dev_warn(dd->dev,
2551 "%s: Failed to get BAM resources",
2552 __func__);
2553 goto skip_dma_resources;
2554 }
2555 dd->use_dma = 1;
2556 }
2557
2558 spi_dma_mask(&pdev->dev);
2559skip_dma_resources:
2560
2561 spin_lock_init(&dd->queue_lock);
2562 mutex_init(&dd->core_lock);
2563 init_waitqueue_head(&dd->continue_suspend);
2564
2565 if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
2566 dd->mem_size, SPI_DRV_NAME)) {
2567 rc = -ENXIO;
2568 goto err_probe_reqmem;
2569 }
2570
2571 dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
2572 if (!dd->base) {
2573 rc = -ENOMEM;
2574 goto err_probe_reqmem;
2575 }
2576
2577 pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
2578 pm_runtime_use_autosuspend(&pdev->dev);
2579 pm_runtime_enable(&pdev->dev);
2580
2581 dd->suspended = 1;
2582 rc = spi_register_master(master);
2583 if (rc)
2584 goto err_probe_reg_master;
2585
2586 rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
2587 if (rc) {
2588 dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
2589 goto err_attrs;
2590 }
2591 spi_debugfs_init(dd);
2592
2593 return 0;
2594
2595err_attrs:
2596 spi_unregister_master(master);
2597err_probe_reg_master:
2598 pm_runtime_disable(&pdev->dev);
2599err_probe_reqmem:
2600err_probe_res:
2601 spi_master_put(master);
2602err_probe_exit:
2603 return rc;
2604}
2605
2606static int msm_spi_pm_suspend_runtime(struct device *device)
2607{
2608 struct platform_device *pdev = to_platform_device(device);
2609 struct spi_master *master = platform_get_drvdata(pdev);
2610 struct msm_spi *dd;
2611 unsigned long flags;
2612
2613 dev_dbg(device, "pm_runtime: suspending...\n");
2614 if (!master)
2615 goto suspend_exit;
2616 dd = spi_master_get_devdata(master);
2617 if (!dd)
2618 goto suspend_exit;
2619
2620 if (dd->suspended)
2621 return 0;
2622
2623 /*
2624 * Make sure nothing is added to the queue while we're
2625 * suspending
2626 */
2627 spin_lock_irqsave(&dd->queue_lock, flags);
2628 dd->suspended = 1;
2629 spin_unlock_irqrestore(&dd->queue_lock, flags);
2630
2631 /* Wait for transactions to end, or time out */
2632 wait_event_interruptible(dd->continue_suspend,
2633 !dd->transfer_pending);
2634
2635 mutex_lock(&dd->core_lock);
2636 if (dd->pdata && !dd->pdata->is_shared && dd->use_dma) {
2637 msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
2638 msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
2639 }
2640 if (dd->pdata && !dd->pdata->is_shared)
2641 put_local_resources(dd);
2642
2643 if (dd->pdata)
2644 msm_spi_clk_path_vote(dd, 0);
2645 mutex_unlock(&dd->core_lock);
2646
2647suspend_exit:
2648 return 0;
2649}
2650
2651static int msm_spi_pm_resume_runtime(struct device *device)
2652{
2653 struct platform_device *pdev = to_platform_device(device);
2654 struct spi_master *master = platform_get_drvdata(pdev);
2655 struct msm_spi *dd;
2656 int ret = 0;
2657
2658 dev_dbg(device, "pm_runtime: resuming...\n");
2659 if (!master)
2660 goto resume_exit;
2661 dd = spi_master_get_devdata(master);
2662 if (!dd)
2663 goto resume_exit;
2664
2665 if (!dd->suspended)
2666 return 0;
2667 if (!dd->is_init_complete) {
2668 ret = init_resources(pdev);
2669 if (ret != 0)
2670 return ret;
2671
2672 dd->is_init_complete = true;
2673 }
2674 msm_spi_clk_path_init(dd);
2675 msm_spi_clk_path_vote(dd, dd->pdata->max_clock_speed);
2676
2677 if (!dd->pdata->is_shared) {
2678 ret = get_local_resources(dd);
2679 if (ret)
2680 return ret;
2681 }
2682 if (!dd->pdata->is_shared && dd->use_dma) {
2683 msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
2684 &dd->bam.prod.config);
2685 msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
2686 &dd->bam.cons.config);
2687 }
2688 dd->suspended = 0;
2689
2690resume_exit:
2691 return 0;
2692}
2693
2694#ifdef CONFIG_PM_SLEEP
2695static int msm_spi_suspend(struct device *device)
2696{
2697 if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
2698 struct platform_device *pdev = to_platform_device(device);
2699 struct spi_master *master = platform_get_drvdata(pdev);
2700 struct msm_spi *dd;
2701
		dev_dbg(device, "system suspend\n");
2703 if (!master)
2704 goto suspend_exit;
2705 dd = spi_master_get_devdata(master);
2706 if (!dd)
2707 goto suspend_exit;
2708 msm_spi_pm_suspend_runtime(device);
2709
2710 /*
2711 * set the device's runtime PM status to 'suspended'
2712 */
2713 pm_runtime_disable(device);
2714 pm_runtime_set_suspended(device);
2715 pm_runtime_enable(device);
2716 }
2717suspend_exit:
2718 return 0;
2719}
2720
2721static int msm_spi_resume(struct device *device)
2722{
	/*
	 * Rely on runtime PM to call resume when it is enabled.  Even if it
	 * is not enabled, the first client transaction will turn the clocks
	 * on and configure the GPIOs.
	 */
	dev_dbg(device, "system resume\n");
2729 return 0;
2730}
2731#else
2732#define msm_spi_suspend NULL
2733#define msm_spi_resume NULL
2734#endif
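/*
 * Power-management summary (descriptive only): runtime PM does the real
 * work -- msm_spi_pm_suspend_runtime() waits for pending transfers, then
 * (for a non-shared controller) disconnects the BAM pipes and releases the
 * local resources, and drops the clock-path vote, while
 * msm_spi_pm_resume_runtime() reacquires them and runs the deferred
 * init_resources() on first use.  System sleep merely forces the runtime
 * status to "suspended" so that the first transfer after resume goes
 * through the runtime-resume path again.
 */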
2735
2736
2737static int msm_spi_remove(struct platform_device *pdev)
2738{
2739 struct spi_master *master = platform_get_drvdata(pdev);
2740 struct msm_spi *dd = spi_master_get_devdata(master);
2741
2742 spi_debugfs_exit(dd);
2743 sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
2744
2745 if (dd->dma_teardown)
2746 dd->dma_teardown(dd);
2747 pm_runtime_disable(&pdev->dev);
2748 pm_runtime_set_suspended(&pdev->dev);
2749 clk_put(dd->clk);
2750 clk_put(dd->pclk);
2751 msm_spi_clk_path_teardown(dd);
2752 platform_set_drvdata(pdev, NULL);
2753 spi_unregister_master(master);
2754 spi_master_put(master);
2755
2756 return 0;
2757}
2758
2759static const struct of_device_id msm_spi_dt_match[] = {
2760 {
2761 .compatible = "qcom,spi-qup-v2",
2762 },
2763 {}
2764};
2765
2766static const struct dev_pm_ops msm_spi_dev_pm_ops = {
2767 SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
2768 SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
2769 msm_spi_pm_resume_runtime, NULL)
2770};
2771
2772static struct platform_driver msm_spi_driver = {
2773 .driver = {
2774 .name = SPI_DRV_NAME,
2775 .owner = THIS_MODULE,
2776 .pm = &msm_spi_dev_pm_ops,
2777 .of_match_table = msm_spi_dt_match,
2778 },
2779 .probe = msm_spi_probe,
2780 .remove = msm_spi_remove,
2781};
2782
2783module_platform_driver(msm_spi_driver);
2784
2785MODULE_LICENSE("GPL v2");
2786MODULE_ALIAS("platform:"SPI_DRV_NAME);