blob: 957d6428427cc8aa17fae586323aea18ca1edd0e [file] [log] [blame]
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301/*
2 * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/debugfs.h>
18#include <linux/delay.h>
19#include <linux/bitops.h>
20#include <linux/spi/spi.h>
21#include <linux/regmap.h>
22#include <linux/component.h>
23#include <linux/ratelimit.h>
24#include <sound/wcd-dsp-mgr.h>
25#include <sound/wcd-spi.h>
26#include "wcd-spi-registers.h"
27
/* Byte manipulations */
#define SHIFT_1_BYTES (8)
#define SHIFT_2_BYTES (16)
#define SHIFT_3_BYTES (24)

/*
 * Command opcodes.
 * Opcodes that are shifted into the top byte of a 32-bit frame are
 * defined unsigned: 0x83 << 24 on a plain (signed) int shifts into
 * the sign bit, which is undefined behavior in C.
 */
#define WCD_SPI_CMD_NOP (0x00)
#define WCD_SPI_CMD_WREN (0x06)
#define WCD_SPI_CMD_CLKREQ (0xDA)
#define WCD_SPI_CMD_RDSR (0x05)
#define WCD_SPI_CMD_IRR (0x81)
#define WCD_SPI_CMD_IRW (0x82)
#define WCD_SPI_CMD_MIOR (0x83U)
#define WCD_SPI_CMD_FREAD (0x0BU)
#define WCD_SPI_CMD_MIOW (0x02U)
#define WCD_SPI_WRITE_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
#define WCD_SPI_READ_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
#define WCD_SPI_FREAD_FRAME_OPCODE \
	(WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)
49
50/* Command lengths */
51#define WCD_SPI_OPCODE_LEN (0x01)
52#define WCD_SPI_CMD_NOP_LEN (0x01)
53#define WCD_SPI_CMD_WREN_LEN (0x01)
54#define WCD_SPI_CMD_CLKREQ_LEN (0x04)
55#define WCD_SPI_CMD_IRR_LEN (0x04)
56#define WCD_SPI_CMD_IRW_LEN (0x06)
57#define WCD_SPI_WRITE_SINGLE_LEN (0x08)
58#define WCD_SPI_READ_SINGLE_LEN (0x13)
59#define WCD_SPI_CMD_FREAD_LEN (0x13)
60
61/* Command delays */
62#define WCD_SPI_CLKREQ_DELAY_USECS (500)
63#define WCD_SPI_CLK_OFF_TIMER_MS (500)
64#define WCD_SPI_RESUME_TIMEOUT_MS 100
65
66/* Command masks */
67#define WCD_CMD_ADDR_MASK \
68 (0xFF | \
69 (0xFF << SHIFT_1_BYTES) | \
70 (0xFF << SHIFT_2_BYTES))
71
72/* Clock ctrl request related */
73#define WCD_SPI_CLK_ENABLE true
74#define WCD_SPI_CLK_DISABLE false
75#define WCD_SPI_CLK_FLAG_DELAYED (1 << 0)
76#define WCD_SPI_CLK_FLAG_IMMEDIATE (1 << 1)
77
78/* Internal addresses */
79#define WCD_SPI_ADDR_IPC_CTL_HOST (0x012014)
80
81/* Word sizes and min/max lengths */
82#define WCD_SPI_WORD_BYTE_CNT (4)
83#define WCD_SPI_RW_MULTI_MIN_LEN (16)
84
85/* Max size is 32 bytes less than 64Kbytes */
86#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)
87
88/*
89 * Max size for the pre-allocated buffers is the max
90 * possible read/write length + 32 bytes for the SPI
91 * read/write command header itself.
92 */
93#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)
94
95/* Alignment requirements */
96#define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
97#define WCD_SPI_RW_MULTI_ALIGN (16)
98
99/* Status mask bits */
100#define WCD_SPI_CLK_STATE_ENABLED BIT(0)
101#define WCD_SPI_IS_SUSPENDED BIT(1)
102
103/* Locking related */
/*
 * Lock/unlock helpers that trace every mutex operation at verbose
 * debug level. Wrapped in do { } while (0) so they expand safely as a
 * single statement, e.g. inside an unbraced if/else.
 */
#define WCD_SPI_MUTEX_LOCK(spi, lock)                        \
do {                                                         \
	dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n",          \
		 __func__, __stringify_1(lock));             \
	mutex_lock(&lock);                                   \
} while (0)

#define WCD_SPI_MUTEX_UNLOCK(spi, lock)                      \
do {                                                         \
	dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n",        \
		 __func__, __stringify_1(lock));             \
	mutex_unlock(&lock);                                 \
} while (0)
117
/* debugfs state: directory handle plus the user-programmed window for
 * the "mem_read" entry (see wcd_spi_debugfs_mem_read).
 */
struct wcd_spi_debug_data {
	struct dentry *dir;	/* "wcd_spi" debugfs directory */
	u32 addr;		/* remote address to read, set via "addr" */
	u32 size;		/* number of bytes to read, set via "size" */
};
123
/* Per-device driver state for the WCD SPI slave */
struct wcd_spi_priv {
	struct spi_device *spi;	/* underlying SPI device */
	u32 mem_base_addr;	/* added to image section addresses */

	struct regmap *regmap;	/* register map over the IRR/IRW bus ops */

	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;

	/* Message for two transfers */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];

	/* Register access related */
	u32 reg_bytes;
	u32 val_bytes;

	/* Clock requests related */
	struct mutex clk_mutex;
	int clk_users;
	unsigned long status_mask;
	struct delayed_work clk_dwork;

	/* Transaction related */
	struct mutex xfer_mutex;

	/* WDSP manager device and its ops table */
	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;

	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;

	/* Completion object to indicate system resume completion */
	struct completion resume_comp;

	/* Buffers to hold memory used for transfers */
	void *tx_buf;
	void *rx_buf;
};
164
enum xfer_request {
	WCD_SPI_XFER_WRITE,
	WCD_SPI_XFER_READ,
};


/*
 * Map a transfer request to a printable name (for log messages).
 * Returns a string literal, hence the const-qualified return type.
 */
static const char *wcd_spi_xfer_req_str(enum xfer_request req)
{
	if (req == WCD_SPI_XFER_WRITE)
		return "xfer_write";
	else if (req == WCD_SPI_XFER_READ)
		return "xfer_read";
	else
		return "xfer_invalid";
}
180
181static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
182{
183 xfer->tx_buf = NULL;
184 xfer->rx_buf = NULL;
185 xfer->delay_usecs = 0;
186 xfer->len = 0;
187}
188
189static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
190{
191 return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
192}
193
194static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
195{
196 struct spi_device *spi = wcd_spi->spi;
197
198 if (wcd_spi->clk_users > 0 ||
199 test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
200 dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
201 __func__, wcd_spi->clk_users);
202 return false;
203 }
204
205 return true;
206}
207
/*
 * Block until the SPI bus has resumed from system suspend.
 *
 * Returns 0 immediately if not suspended; otherwise waits up to
 * WCD_SPI_RESUME_TIMEOUT_MS for the resume path to signal
 * resume_comp. Returns 0 on resume, -EIO on timeout.
 */
static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))
		goto done;

	/* If suspended then wait for resume to happen */
	reinit_completion(&wcd_spi->resume_comp);
	/* Drop the lock so the resume path can take it and complete us */
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
			msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (rc == 0) {
		/* wait_for_completion_timeout() returns 0 on timeout */
		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
		rc = -EIO;
		goto done;
	}

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
	rc = 0;
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
237
/*
 * Read one 32-bit word from @remote_addr on the slave.
 *
 * Builds a big-endian MIOR frame (opcode in the top byte, 24-bit
 * address below it) in the pre-allocated tx_buf and issues a
 * two-transfer message: the command, then a 4-byte read into @val.
 * @val receives the raw bus bytes; no endian conversion is done here.
 */
static int wcd_spi_read_single(struct spi_device *spi,
			       u32 remote_addr, u32 *val)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u32 frame = 0;
	int ret;

	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
		__func__, remote_addr);

	if (!tx_buf) {
		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
			__func__);
		return -ENOMEM;
	}

	frame |= WCD_SPI_READ_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	wcd_spi_reinit_xfer(tx_xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	tx_xfer->tx_buf = tx_buf;
	/*
	 * The command length (0x13) is larger than the 4-byte frame;
	 * only the frame is written here, the remaining tx_buf bytes
	 * are sent as-is. NOTE(review): presumably don't-care padding
	 * for the slave - confirm against the hardware command format.
	 */
	tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = val;
	rx_xfer->len = sizeof(*val);

	ret = spi_sync(spi, &wcd_spi->msg2);

	return ret;
}
274
/*
 * Read @len bytes from @remote_addr using the fast-read (FREAD)
 * command in a single full-duplex transfer.
 *
 * The first WCD_SPI_CMD_FREAD_LEN received bytes are skipped; the
 * payload that follows is copied into @data.
 * NOTE(review): @len is assumed to be bounded by the caller (multi
 * transfers are capped at WCD_SPI_RW_MULTI_MAX_LEN) so that
 * header + len fits the WCD_SPI_RW_MAX_BUF_SIZE buffers - confirm.
 */
static int wcd_spi_read_multi(struct spi_device *spi,
			      u32 remote_addr, u8 *data,
			      size_t len)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	u32 frame = 0;
	int ret;

	dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
		__func__, remote_addr, len);

	/* Frame: FREAD opcode in the top byte, 24-bit address below it */
	frame |= WCD_SPI_FREAD_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	if (!tx_buf || !rx_buf) {
		dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
			(!tx_buf) ? "tx_buf" : "rx_buf");
		return -ENOMEM;
	}

	wcd_spi_reinit_xfer(xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	xfer->tx_buf = tx_buf;
	xfer->rx_buf = rx_buf;
	xfer->len = WCD_SPI_CMD_FREAD_LEN + len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (ret) {
		dev_err(&spi->dev, "%s: failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Skip the command/dummy header; payload starts after it */
	memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
done:
	return ret;
}
316
/*
 * Write one 32-bit word @val to @remote_addr on the slave.
 *
 * Builds the 8-byte command locally: a big-endian MIOW frame
 * (opcode + 24-bit address) followed by the data word, which is
 * copied in native CPU byte order (no endian conversion applied).
 * NOTE(review): buf is stack memory handed to spi_sync(); SPI
 * transfer buffers are generally required to be DMA-safe - confirm
 * this is acceptable for the controller in use.
 */
static int wcd_spi_write_single(struct spi_device *spi,
				u32 remote_addr, u32 val)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 buf[WCD_SPI_WRITE_SINGLE_LEN];
	u32 frame = 0;

	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
		__func__, remote_addr, val);

	memset(buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
	frame |= (remote_addr & WCD_CMD_ADDR_MASK);

	frame = cpu_to_be32(frame);
	memcpy(buf, &frame, sizeof(frame));
	memcpy(buf + sizeof(frame), &val, sizeof(val));

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = buf;
	xfer->len = WCD_SPI_WRITE_SINGLE_LEN;

	return spi_sync(spi, &wcd_spi->msg1);
}
342
343static int wcd_spi_write_multi(struct spi_device *spi,
344 u32 remote_addr, u8 *data,
345 size_t len)
346{
347 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
348 struct spi_transfer *xfer = &wcd_spi->xfer1;
349 u32 frame = 0;
350 u8 *tx_buf = wcd_spi->tx_buf;
351 int xfer_len, ret;
352
353 dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
354 __func__, remote_addr, len);
355
356 frame |= WCD_SPI_WRITE_FRAME_OPCODE;
357 frame |= (remote_addr & WCD_CMD_ADDR_MASK);
358
359 frame = cpu_to_be32(frame);
360 xfer_len = len + sizeof(frame);
361
362 if (!tx_buf) {
363 dev_err(&spi->dev, "%s: tx_buf not allocated\n",
364 __func__);
365 return -ENOMEM;
366 }
367
368 memcpy(tx_buf, &frame, sizeof(frame));
369 memcpy(tx_buf + sizeof(frame), data, len);
370
371 wcd_spi_reinit_xfer(xfer);
372 xfer->tx_buf = tx_buf;
373 xfer->len = xfer_len;
374
375 ret = spi_sync(spi, &wcd_spi->msg1);
376 if (ret < 0)
377 dev_err(&spi->dev,
378 "%s: Failed, addr = 0x%x, len = %zd\n",
379 __func__, remote_addr, len);
380 return ret;
381}
382
/*
 * Split one logical read/write into bus-sized operations.
 *
 * The transfer runs in up to four phases:
 *  1. single-word ops until @addr reaches WCD_SPI_RW_MULTI_ALIGN
 *  2. maximum-length multi ops (WCD_SPI_RW_MULTI_MAX_LEN each)
 *  3. one multi op for the largest remaining multiple of
 *     WCD_SPI_RW_MULTI_MIN_LEN
 *  4. single-word ops for whatever is left
 *
 * Caller (__wcd_spi_data_xfer) has already validated that the address
 * is word-aligned and the length is a multiple of 4 bytes.
 */
static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
{
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single writes until multi word alignment is met */
	loop_cnt = 1;
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*(u32 *)data));
		else
			ret = wcd_spi_read_single(spi, addr,
						  (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

	/* Perform multi writes for max allowed multi writes */
	loop_cnt = 1;
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
		else
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
		loop_cnt++;
	}

	/*
	 * Perform write for max possible data that is multiple
	 * of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
	    to_xfer > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
		else
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail write addr (0x%x), size (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				addr, to_xfer);
			goto done;
		}

		addr += to_xfer;
		data += to_xfer;
		remain_size -= to_xfer;
	}

	/* Perform single writes for the last remaining data */
	loop_cnt = 1;
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
		else
			ret = wcd_spi_read_single(spi, addr, (u32 *) data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

done:
	return ret;
}
487
488static int wcd_spi_cmd_nop(struct spi_device *spi)
489{
490 u8 nop = WCD_SPI_CMD_NOP;
491
492 return spi_write(spi, &nop, WCD_SPI_CMD_NOP_LEN);
493}
494
/*
 * Send the 4-byte clock request command, then delay for
 * WCD_SPI_CLKREQ_DELAY_USECS after the transfer (via delay_usecs)
 * to give the slave time to honor the request.
 * NOTE(review): the 0xBA/0x80/0x00 payload bytes follow the hardware
 * command format; their meaning is not documented here. cmd is a
 * stack buffer handed to spi_sync() - confirm DMA-safety.
 */
static int wcd_spi_cmd_clkreq(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
		WCD_SPI_CMD_CLKREQ,
		0xBA, 0x80, 0x00};

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = cmd;
	xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
	xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;

	return spi_sync(spi, &wcd_spi->msg1);
}
510
511static int wcd_spi_cmd_wr_en(struct spi_device *spi)
512{
513 u8 wr_en = WCD_SPI_CMD_WREN;
514
515 return spi_write(spi, &wr_en, WCD_SPI_CMD_WREN_LEN);
516}
517
/*
 * Read the slave status register (RDSR).
 *
 * Two-transfer message: the 1-byte RDSR opcode, then a 4-byte status
 * read. The raw status is converted from big-endian wire order.
 * @rdsr_status is written only when the function returns success.
 */
static int wcd_spi_cmd_rdsr(struct spi_device *spi,
			    u32 *rdsr_status)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 rdsr_cmd;
	u32 status;
	int ret;

	rdsr_cmd = WCD_SPI_CMD_RDSR;
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = &rdsr_cmd;
	tx_xfer->len = sizeof(rdsr_cmd);


	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = &status;
	rx_xfer->len = sizeof(status);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	*rdsr_status = be32_to_cpu(status);

	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
		__func__, *rdsr_status);
done:
	return ret;
}
552
553static int wcd_spi_clk_enable(struct spi_device *spi)
554{
555 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
556 int ret;
557 u32 rd_status = 0;
558
559 ret = wcd_spi_cmd_nop(spi);
560 if (ret < 0) {
561 dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
562 __func__, ret);
563 goto done;
564 }
565
566 ret = wcd_spi_cmd_clkreq(spi);
567 if (ret < 0) {
568 dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
569 __func__, ret);
570 goto done;
571 }
572
573 ret = wcd_spi_cmd_nop(spi);
574 if (ret < 0) {
575 dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
576 __func__, ret);
577 goto done;
578 }
579 wcd_spi_cmd_rdsr(spi, &rd_status);
580 /*
581 * Read status zero means reads are not
582 * happenning on the bus, possibly because
583 * clock request failed.
584 */
585 if (rd_status) {
586 set_bit(WCD_SPI_CLK_STATE_ENABLED,
587 &wcd_spi->status_mask);
588 } else {
589 dev_err(&spi->dev, "%s: RDSR status is zero\n",
590 __func__);
591 ret = -EIO;
592 }
593done:
594 return ret;
595}
596
597static int wcd_spi_clk_disable(struct spi_device *spi)
598{
599 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
600 int ret;
601
602 ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
603 if (ret < 0)
604 dev_err(&spi->dev, "%s: Failed, err = %d\n",
605 __func__, ret);
606 else
607 clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);
608
609 return ret;
610}
611
/*
 * wcd_spi_clk_ctrl: reference-counted enable/disable of the slave clock.
 * @spi: spi device
 * @request: WCD_SPI_CLK_ENABLE or WCD_SPI_CLK_DISABLE
 * @flags: for disable only - WCD_SPI_CLK_FLAG_DELAYED defers the
 *	   actual disable by WCD_SPI_CLK_OFF_TIMER_MS via clk_dwork,
 *	   WCD_SPI_CLK_FLAG_IMMEDIATE disables right away.
 *
 * The hardware clock is enabled on the first vote and disabled when
 * the last vote is dropped. Unbalanced disables reset clk_users to 0
 * and return -EINVAL.
 */
static int wcd_spi_clk_ctrl(struct spi_device *spi,
			    bool request, u32 flags)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;
	const char *delay_str;

	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
		    "delayed" : "immediate";

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	/* Reject any unbalanced disable request */
	if (wcd_spi->clk_users < 0 ||
	    (!request && wcd_spi->clk_users == 0)) {
		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
			__func__, wcd_spi->clk_users,
			request ? "enable" : "disable");
		ret = -EINVAL;

		/* Reset the clk_users to 0 */
		wcd_spi->clk_users = 0;

		goto done;
	}

	if (request == WCD_SPI_CLK_ENABLE) {
		/*
		 * If the SPI bus is suspended, then return error
		 * as the transaction cannot be completed.
		 */
		if (wcd_spi_is_suspended(wcd_spi)) {
			dev_err(&spi->dev,
				"%s: SPI suspended, cannot enable clk\n",
				__func__);
			ret = -EIO;
			goto done;
		}

		/*
		 * Cancel the disable clk work. Drop clk_mutex around
		 * the synchronous cancel: the work handler itself takes
		 * clk_mutex, so cancelling under the lock could deadlock.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

		wcd_spi->clk_users++;

		/*
		 * If clk state is already set,
		 * then clk wasnt really disabled
		 */
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			goto done;
		else if (wcd_spi->clk_users == 1)
			ret = wcd_spi_clk_enable(spi);

	} else {
		wcd_spi->clk_users--;

		/* Clock is still voted for */
		if (wcd_spi->clk_users > 0)
			goto done;

		/*
		 * If we are here, clk_users must be 0 and needs
		 * to be disabled. Call the disable based on the
		 * flags.
		 */
		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
			schedule_delayed_work(&wcd_spi->clk_dwork,
				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
		} else {
			ret = wcd_spi_clk_disable(spi);
			if (ret < 0)
				dev_err(&spi->dev,
					"%s: Failed to disable clk err = %d\n",
					__func__, ret);
		}
	}

done:
	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
		request ? "" : delay_str);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return ret;
}
699
/*
 * One-time slave init: enable the clock, send write-enable, re-sync
 * the regcache to hardware, program the slave config and the maximum
 * transfer length, then drop the clock vote.
 *
 * Note: err_wr_en is reached on both success and failure of the steps
 * after the clock vote, so the clock is always released.
 */
static int wcd_spi_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0)
		goto done;

	ret = wcd_spi_cmd_wr_en(spi);
	if (ret < 0)
		goto err_wr_en;

	/*
	 * In case spi_init is called after component deinit,
	 * it is possible hardware register state is also reset.
	 * Sync the regcache here so hardware state is updated
	 * to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
		     0x0F3D0800);

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);
err_wr_en:
	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
done:
	return ret;
}
735
736static void wcd_spi_clk_work(struct work_struct *work)
737{
738 struct delayed_work *dwork;
739 struct wcd_spi_priv *wcd_spi;
740 struct spi_device *spi;
741 int ret;
742
743 dwork = to_delayed_work(work);
744 wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
745 spi = wcd_spi->spi;
746
747 WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
748 ret = wcd_spi_clk_disable(spi);
749 if (ret < 0)
750 dev_err(&spi->dev,
751 "%s: Failed to disable clk, err = %d\n",
752 __func__, ret);
753 WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
754}
755
/*
 * Core transfer entry: validate alignment, then dispatch to a single
 * word transfer or a split transfer under xfer_mutex.
 *
 * Requirements on @msg: remote_addr 4-byte aligned, len a multiple of
 * 4 bytes. This function does not vote for the SPI clock; callers
 * arrange for it (see wcd_spi_data_xfer and the PRE_DLOAD events).
 */
static int __wcd_spi_data_xfer(struct spi_device *spi,
			       struct wcd_spi_msg *msg,
			       enum xfer_request xfer_req)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	/* Check for minimum alignment requirements */
	if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
		dev_err(&spi->dev,
			"%s addr 0x%x is not aligned to 0x%x\n",
			__func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
		return -EINVAL;
	} else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
		dev_err(&spi->dev,
			"%s len 0x%zx is not multiple of %d\n",
			__func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
		return -EINVAL;
	}

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
	if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
		/* Exactly one word: use the cheaper single transfer */
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, msg->remote_addr,
						   (*((u32 *)msg->data)));
		else
			ret = wcd_spi_read_single(spi, msg->remote_addr,
						  (u32 *) msg->data);
	} else {
		ret = wcd_spi_transfer_split(spi, msg, xfer_req);
	}
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);

	return ret;
}
791
792static int wcd_spi_data_xfer(struct spi_device *spi,
793 struct wcd_spi_msg *msg,
794 enum xfer_request req)
795{
796 int ret, ret1;
797
798 if (msg->len <= 0) {
799 dev_err(&spi->dev, "%s: Invalid size %zd\n",
800 __func__, msg->len);
801 return -EINVAL;
802 }
803
804 /* Request for clock */
805 ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
806 WCD_SPI_CLK_FLAG_IMMEDIATE);
807 if (ret < 0) {
808 dev_err(&spi->dev, "%s: clk enable failed %d\n",
809 __func__, ret);
810 goto done;
811 }
812
813 /* Perform the transaction */
814 ret = __wcd_spi_data_xfer(spi, msg, req);
815 if (ret < 0)
816 dev_err(&spi->dev,
817 "%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
818 __func__, wcd_spi_xfer_req_str(req),
819 msg->remote_addr, msg->len, ret);
820
821 /* Release the clock even if xfer failed */
822 ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
823 WCD_SPI_CLK_FLAG_DELAYED);
824 if (ret1 < 0)
825 dev_err(&spi->dev, "%s: clk disable failed %d\n",
826 __func__, ret1);
827done:
828 return ret;
829}
830
831/*
832 * wcd_spi_data_write: Write data to WCD SPI
833 * @spi: spi_device struct
834 * @msg: msg that needs to be written to WCD
835 *
836 * This API writes length of data to address specified. These details
837 * about the write are encapsulated in @msg. Write size should be multiple
838 * of 4 bytes and write address should be 4-byte aligned.
839 */
840static int wcd_spi_data_write(struct spi_device *spi,
841 struct wcd_spi_msg *msg)
842{
843 if (!spi || !msg) {
844 pr_err("%s: Invalid %s\n", __func__,
845 (!spi) ? "spi device" : "msg");
846 return -EINVAL;
847 }
848
849 dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
850 __func__, msg->remote_addr, msg->len);
851 return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
852}
853
854/*
855 * wcd_spi_data_read: Read data from WCD SPI
856 * @spi: spi_device struct
857 * @msg: msg that needs to be read from WCD
858 *
859 * This API reads length of data from address specified. These details
860 * about the read are encapsulated in @msg. Read size should be multiple
861 * of 4 bytes and read address should be 4-byte aligned.
862 */
863static int wcd_spi_data_read(struct spi_device *spi,
864 struct wcd_spi_msg *msg)
865{
866 if (!spi || !msg) {
867 pr_err("%s: Invalid %s\n", __func__,
868 (!spi) ? "spi device" : "msg");
869 return -EINVAL;
870 }
871
872 dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x,len = %zu\n",
873 __func__, msg->remote_addr, msg->len);
874 return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
875}
876
877static int wdsp_spi_dload_section(struct spi_device *spi,
878 void *data)
879{
880 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
881 struct wdsp_img_section *sec = data;
882 struct wcd_spi_msg msg;
883 int ret;
884
885 dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
886 __func__, sec->addr, sec->size);
887
888 msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
889 msg.data = sec->data;
890 msg.len = sec->size;
891
892 ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
893 if (ret < 0)
894 dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
895 __func__, msg.remote_addr, msg.len);
896 return ret;
897}
898
899static int wdsp_spi_read_section(struct spi_device *spi, void *data)
900{
901 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
902 struct wdsp_img_section *sec = data;
903 struct wcd_spi_msg msg;
904 int ret;
905
906 msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
907 msg.data = sec->data;
908 msg.len = sec->size;
909
910 dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
911 __func__, msg.remote_addr, msg.len);
912
913 ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
914 if (ret < 0)
915 dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
916 __func__, msg.remote_addr, msg.len);
917 return ret;
918}
919
/*
 * wdsp_spi_event_handler: dispatch WDSP manager events to SPI actions.
 * @dev: the SPI device registered with the WDSP manager
 * @priv_data: manager private data (unused here)
 * @event: event being delivered
 * @data: event-specific payload (image section, ops struct, ...)
 *
 * Returns 0, or a negative errno from the underlying operation.
 */
static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_ops *spi_ops;
	int ret = 0;

	dev_dbg(&spi->dev, "%s: event type %d\n",
		__func__, event);

	switch (event) {
	case WDSP_EVENT_POST_SHUTDOWN:
		/* Force the clock off and drop all outstanding votes */
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		/* Hold a clock vote for the duration of the download */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk_req failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:

		/* Drop the download clock vote (also on failure) */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);
		break;

	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);
		break;

	case WDSP_EVENT_SUSPEND:
		/* Refuse suspend while the clock is still voted for */
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
			ret = -EBUSY;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);
		break;

	case WDSP_EVENT_GET_DEVOPS:
		/* Hand out this device's read/write ops to the manager */
		if (!data) {
			dev_err(&spi->dev, "%s: invalid data\n",
				__func__);
			ret = -EINVAL;
			break;
		}

		spi_ops = (struct wcd_spi_ops *) data;
		spi_ops->spi_dev = spi;
		spi_ops->read_dev = wcd_spi_data_read;
		spi_ops->write_dev = wcd_spi_data_write;
		break;

	default:
		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
			__func__, event);
		break;
	}

	return ret;
}
1003
/*
 * regmap gather-write callback: write one register via the IRW command.
 *
 * Command layout (WCD_SPI_CMD_IRW_LEN bytes): opcode, register
 * address, then the value at offset WCD_SPI_OPCODE_LEN + reg_len.
 * NOTE(review): only the first register byte is copied (tx_buf[1])
 * even though the value offset scales with reg_len - correct while
 * reg_bytes == 1; confirm reg_bytes can never exceed 1.
 */
static int wcd_spi_bus_gwrite(void *context, const void *reg,
			      size_t reg_len, const void *val,
			      size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	u8 tx_buf[WCD_SPI_CMD_IRW_LEN];

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	tx_buf[0] = WCD_SPI_CMD_IRW;
	tx_buf[1] = *((u8 *)reg);
	memcpy(&tx_buf[WCD_SPI_OPCODE_LEN + reg_len],
	       val, val_len);

	return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
}
1028
1029static int wcd_spi_bus_write(void *context, const void *data,
1030 size_t count)
1031{
1032 struct device *dev = context;
1033 struct spi_device *spi = to_spi_device(dev);
1034 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1035
1036 if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
1037 dev_err(&spi->dev, "%s: Invalid size %zd\n",
1038 __func__, count);
1039 WARN_ON(1);
1040 return -EINVAL;
1041 }
1042
1043 return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
1044 data + wcd_spi->reg_bytes,
1045 count - wcd_spi->reg_bytes);
1046}
1047
/*
 * regmap read callback: read one register via the IRR command.
 *
 * First transfer sends the IRR opcode and register address padded to
 * WCD_SPI_CMD_IRR_LEN bytes; the second clocks val_len bytes into @val.
 * NOTE(review): the memset clears only WCD_SPI_OPCODE_LEN (1) byte,
 * which is immediately overwritten; tx_buf bytes 2..3 go out
 * uninitialized - presumably don't-care padding, confirm.
 */
static int wcd_spi_bus_read(void *context, const void *reg,
			    size_t reg_len, void *val,
			    size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 tx_buf[WCD_SPI_CMD_IRR_LEN];

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	memset(tx_buf, 0, WCD_SPI_OPCODE_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRR;
	tx_buf[1] = *((u8 *)reg);

	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->rx_buf = NULL;
	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;

	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->tx_buf = NULL;
	rx_xfer->rx_buf = val;
	rx_xfer->len = val_len;

	return spi_sync(spi, &wcd_spi->msg2);
}
1083
/*
 * Regmap bus glue: register access goes over SPI using the IRR/IRW
 * commands above. Register addresses are formatted in native
 * endianness, values in big-endian wire order.
 */
static struct regmap_bus wcd_spi_regmap_bus = {
	.write = wcd_spi_bus_write,
	.gather_write = wcd_spi_bus_gwrite,
	.read = wcd_spi_bus_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
1091
1092static int wcd_spi_state_show(struct seq_file *f, void *ptr)
1093{
1094 struct spi_device *spi = f->private;
1095 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1096 const char *clk_state, *clk_mutex, *xfer_mutex;
1097
1098 if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
1099 clk_state = "enabled";
1100 else
1101 clk_state = "disabled";
1102
1103 clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
1104 "locked" : "unlocked";
1105
1106 xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
1107 "locked" : "unlocked";
1108
1109 seq_printf(f, "clk_state = %s\nclk_users = %d\n"
1110 "clk_mutex = %s\nxfer_mutex = %s\n",
1111 clk_state, wcd_spi->clk_users, clk_mutex,
1112 xfer_mutex);
1113 return 0;
1114}
1115
/* debugfs open: route to single_open with the spi device as private. */
static int wcd_spi_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, wcd_spi_state_show, inode->i_private);
}
1120
/* File operations for the debugfs "state" entry */
static const struct file_operations state_fops = {
	.open = wcd_spi_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1127
/*
 * debugfs "mem_read": read up to debug_data.size bytes from the
 * remote address configured via the "addr"/"size" debugfs entries.
 *
 * NOTE(review): every call re-reads the same window starting at
 * dbg_data->addr while *ppos advances into the local copy; when the
 * user buffer is smaller than dbg_data->size, data past the first
 * chunk is never returned - confirm this is intended.
 */
static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
{
	struct spi_device *spi = file->private_data;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
	struct wcd_spi_msg msg;
	ssize_t buf_size, read_count = 0;
	char *buf;
	int ret;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* Both addr and size must be programmed before reading */
	if (dbg_data->size == 0 || dbg_data->addr == 0) {
		dev_err(&spi->dev,
			"%s: Invalid request, size = %u, addr = 0x%x\n",
			__func__, dbg_data->size, dbg_data->addr);
		return 0;
	}

	buf_size = count < dbg_data->size ? count : dbg_data->size;
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	msg.data = buf;
	msg.remote_addr = dbg_data->addr;
	msg.len = buf_size;
	msg.flags = 0;

	ret = wcd_spi_data_read(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev,
			"%s: Failed to read %zu bytes from addr 0x%x\n",
			__func__, buf_size, msg.remote_addr);
		goto done;
	}

	read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);

done:
	kfree(buf);
	if (ret < 0)
		return ret;
	else
		return read_count;
}
1176
/* File operations for the read-only debugfs "mem_read" file. */
static const struct file_operations mem_read_fops = {
	.open = simple_open,
	.read = wcd_spi_debugfs_mem_read,
};
1181
1182static int wcd_spi_debugfs_init(struct spi_device *spi)
1183{
1184 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1185 struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
1186 int rc = 0;
1187
1188 dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
1189 if (IS_ERR_OR_NULL(dbg_data->dir)) {
1190 dbg_data->dir = NULL;
1191 rc = -ENODEV;
1192 goto done;
1193 }
1194
1195 debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
1196 debugfs_create_u32("addr", 0644, dbg_data->dir,
1197 &dbg_data->addr);
1198 debugfs_create_u32("size", 0644, dbg_data->dir,
1199 &dbg_data->size);
1200
1201 debugfs_create_file("mem_read", 0444, dbg_data->dir,
1202 spi, &mem_read_fops);
1203done:
1204 return rc;
1205}
1206
1207
/* Power-on default values for the SPI slave registers (regcache seed). */
static const struct reg_default wcd_spi_defaults[] = {
	{WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
	{WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
	{WCD_SPI_SLAVE_STATUS, 0x80100000},
	{WCD_SPI_SLAVE_CONFIG, 0x0F200808},
	{WCD_SPI_SLAVE_SW_RESET, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
	{WCD_SPI_SLAVE_TX, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
	{WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
	{WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
	{WCD_SPI_SLAVE_GENERICS, 0x80000000},
	{WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
};
1232
1233static bool wcd_spi_is_volatile_reg(struct device *dev,
1234 unsigned int reg)
1235{
1236 switch (reg) {
1237 case WCD_SPI_SLAVE_SANITY:
1238 case WCD_SPI_SLAVE_STATUS:
1239 case WCD_SPI_SLAVE_IRQ_STATUS:
1240 case WCD_SPI_SLAVE_TX:
1241 case WCD_SPI_SLAVE_SW_RST_IRQ:
1242 case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
1243 case WCD_SPI_SLAVE_FIFO_LEVEL:
1244 case WCD_SPI_SLAVE_GENERICS:
1245 return true;
1246 }
1247
1248 return false;
1249}
1250
1251static bool wcd_spi_is_readable_reg(struct device *dev,
1252 unsigned int reg)
1253{
1254 switch (reg) {
1255 case WCD_SPI_SLAVE_SW_RESET:
1256 case WCD_SPI_SLAVE_IRQ_CLR:
1257 case WCD_SPI_SLAVE_IRQ_FORCE:
1258 return false;
1259 }
1260
1261 return true;
1262}
1263
/*
 * Regmap configuration: 8-bit register addresses, 32-bit values,
 * rbtree cache seeded from wcd_spi_defaults.
 * NOTE(review): no writer of this struct is visible here; it could
 * likely be const — confirm nothing elsewhere in the file mutates it.
 */
static struct regmap_config wcd_spi_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 32,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wcd_spi_defaults,
	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
	.max_register = WCD_SPI_MAX_REGISTER,
	.volatile_reg = wcd_spi_is_volatile_reg,
	.readable_reg = wcd_spi_is_readable_reg,
};
1274
1275static int wdsp_spi_init(struct device *dev, void *priv_data)
1276{
1277 struct spi_device *spi = to_spi_device(dev);
1278 int ret;
1279
1280 ret = wcd_spi_init(spi);
1281 if (ret < 0)
1282 dev_err(&spi->dev, "%s: Init failed, err = %d\n",
1283 __func__, ret);
1284 return ret;
1285}
1286
1287static int wdsp_spi_deinit(struct device *dev, void *priv_data)
1288{
1289 struct spi_device *spi = to_spi_device(dev);
1290 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1291
1292 /*
1293 * Deinit means the hardware is reset. Mark the cache
1294 * as dirty here, so init will sync the cache
1295 */
1296 regcache_mark_dirty(wcd_spi->regmap);
1297
1298 return 0;
1299}
1300
/* Component callbacks registered with the wdsp manager during bind. */
static struct wdsp_cmpnt_ops wdsp_spi_ops = {
	.init = wdsp_spi_init,
	.deinit = wdsp_spi_deinit,
	.event_handler = wdsp_spi_event_handler,
};
1306
1307static int wcd_spi_component_bind(struct device *dev,
1308 struct device *master,
1309 void *data)
1310{
1311 struct spi_device *spi = to_spi_device(dev);
1312 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1313 int ret = 0;
1314
1315 wcd_spi->m_dev = master;
1316 wcd_spi->m_ops = data;
1317
1318 if (wcd_spi->m_ops &&
1319 wcd_spi->m_ops->register_cmpnt_ops)
1320 ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
1321 wcd_spi,
1322 &wdsp_spi_ops);
1323 if (ret) {
1324 dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
1325 __func__, ret);
1326 goto done;
1327 }
1328
1329 wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
1330 wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
1331
1332 wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
1333 &spi->dev, &wcd_spi_regmap_cfg);
1334 if (IS_ERR(wcd_spi->regmap)) {
1335 ret = PTR_ERR(wcd_spi->regmap);
1336 dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
1337 __func__, ret);
1338 goto done;
1339 }
1340
1341 if (wcd_spi_debugfs_init(spi))
1342 dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
1343
1344 spi_message_init(&wcd_spi->msg1);
1345 spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
1346
1347 spi_message_init(&wcd_spi->msg2);
1348 spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
1349 spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
1350
1351 /* Pre-allocate the buffers */
1352 wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
1353 GFP_KERNEL | GFP_DMA);
1354 if (!wcd_spi->tx_buf) {
1355 ret = -ENOMEM;
1356 goto done;
1357 }
1358
1359 wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
1360 GFP_KERNEL | GFP_DMA);
1361 if (!wcd_spi->rx_buf) {
1362 kfree(wcd_spi->tx_buf);
1363 wcd_spi->tx_buf = NULL;
1364 ret = -ENOMEM;
1365 goto done;
1366 }
1367done:
1368 return ret;
1369}
1370
1371static void wcd_spi_component_unbind(struct device *dev,
1372 struct device *master,
1373 void *data)
1374{
1375 struct spi_device *spi = to_spi_device(dev);
1376 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1377
1378 wcd_spi->m_dev = NULL;
1379 wcd_spi->m_ops = NULL;
1380
1381 spi_transfer_del(&wcd_spi->xfer1);
1382 spi_transfer_del(&wcd_spi->xfer2[0]);
1383 spi_transfer_del(&wcd_spi->xfer2[1]);
1384
1385 kfree(wcd_spi->tx_buf);
1386 kfree(wcd_spi->rx_buf);
1387 wcd_spi->tx_buf = NULL;
1388 wcd_spi->rx_buf = NULL;
1389}
1390
/* Bind/unbind callbacks for the component framework. */
static const struct component_ops wcd_spi_component_ops = {
	.bind = wcd_spi_component_bind,
	.unbind = wcd_spi_component_unbind,
};
1395
1396static int wcd_spi_probe(struct spi_device *spi)
1397{
1398 struct wcd_spi_priv *wcd_spi;
1399 int ret = 0;
1400
1401 wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
1402 GFP_KERNEL);
1403 if (!wcd_spi)
1404 return -ENOMEM;
1405
1406 ret = of_property_read_u32(spi->dev.of_node,
1407 "qcom,mem-base-addr",
1408 &wcd_spi->mem_base_addr);
1409 if (ret < 0) {
1410 dev_err(&spi->dev, "%s: Missing %s DT entry",
1411 __func__, "qcom,mem-base-addr");
1412 goto err_ret;
1413 }
1414
1415 dev_dbg(&spi->dev,
1416 "%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);
1417
1418 mutex_init(&wcd_spi->clk_mutex);
1419 mutex_init(&wcd_spi->xfer_mutex);
1420 INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
1421 init_completion(&wcd_spi->resume_comp);
1422
1423 wcd_spi->spi = spi;
1424 spi_set_drvdata(spi, wcd_spi);
1425
1426 ret = component_add(&spi->dev, &wcd_spi_component_ops);
1427 if (ret) {
1428 dev_err(&spi->dev, "%s: component_add failed err = %d\n",
1429 __func__, ret);
1430 goto err_component_add;
1431 }
1432
1433 return ret;
1434
1435err_component_add:
1436 mutex_destroy(&wcd_spi->clk_mutex);
1437 mutex_destroy(&wcd_spi->xfer_mutex);
1438err_ret:
1439 devm_kfree(&spi->dev, wcd_spi);
1440 spi_set_drvdata(spi, NULL);
1441 return ret;
1442}
1443
1444static int wcd_spi_remove(struct spi_device *spi)
1445{
1446 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1447
1448 component_del(&spi->dev, &wcd_spi_component_ops);
1449
1450 mutex_destroy(&wcd_spi->clk_mutex);
1451 mutex_destroy(&wcd_spi->xfer_mutex);
1452
1453 devm_kfree(&spi->dev, wcd_spi);
1454 spi_set_drvdata(spi, NULL);
1455
1456 return 0;
1457}
1458
1459#ifdef CONFIG_PM
/*
 * System suspend: refuse (-EBUSY) while the SPI clock is still in use,
 * otherwise notify the wdsp master so its other components can suspend,
 * then mark this driver as suspended.
 */
static int wcd_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {
		rc = -EBUSY;
		goto done;
	}

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But, still need to notify
	 * the master to make sure all other components can suspend
	 * as well.
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		/*
		 * clk_mutex is dropped so the master's suspend callback
		 * can take it again via this driver's own ops.
		 * NOTE(review): wcd_spi_can_suspend() is not rechecked
		 * after the mutex is retaken, so a clock request racing
		 * in this window is not detected — confirm intentional.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	}

	if (rc == 0)
		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	else
		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
			__func__, rc);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
1494
/*
 * System resume: clear the suspended flag under clk_mutex and signal
 * resume_comp so any transfer waiting for resume can proceed.
 */
static int wcd_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	complete(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return 0;
}
1507
/* System sleep PM callbacks (compiled only when CONFIG_PM is set). */
static const struct dev_pm_ops wcd_spi_pm_ops = {
	.suspend = wcd_spi_suspend,
	.resume = wcd_spi_resume,
};
1512#endif
1513
/* Device-tree match table; exported for module autoloading. */
static const struct of_device_id wcd_spi_of_match[] = {
	{ .compatible = "qcom,wcd-spi-v2", },
	{ }
};
MODULE_DEVICE_TABLE(of, wcd_spi_of_match);
1519
/* SPI driver registration for the WCD SPI slave. */
static struct spi_driver wcd_spi_driver = {
	.driver = {
		.name = "wcd-spi-v2",
		.of_match_table = wcd_spi_of_match,
#ifdef CONFIG_PM
		.pm = &wcd_spi_pm_ops,
#endif
	},
	.probe = wcd_spi_probe,
	.remove = wcd_spi_remove,
};
1531
/* Standard module boilerplate: register/unregister on load/unload. */
module_spi_driver(wcd_spi_driver);

MODULE_DESCRIPTION("WCD SPI driver");
MODULE_LICENSE("GPL v2");