blob: 0173e151cbb8bf8a2edcc842674d07bdffe1a48f [file] [log] [blame]
Meng Wang43bbb872018-12-10 12:32:05 +08001// SPDX-License-Identifier: GPL-2.0-only
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05302/*
Xiaoyu Ye89cc8892018-05-29 17:03:55 -07003 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05304 */
5
6#include <linux/init.h>
7#include <linux/module.h>
8#include <linux/of.h>
9#include <linux/debugfs.h>
10#include <linux/delay.h>
Xiaoyu Ye89cc8892018-05-29 17:03:55 -070011#include <linux/dma-mapping.h>
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053012#include <linux/bitops.h>
13#include <linux/spi/spi.h>
14#include <linux/regmap.h>
15#include <linux/component.h>
16#include <linux/ratelimit.h>
Bhalchandra Gajarec5f43a12018-03-19 14:35:42 -070017#include <linux/platform_device.h>
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053018#include <sound/wcd-dsp-mgr.h>
19#include <sound/wcd-spi.h>
Bhalchandra Gajarec5f43a12018-03-19 14:35:42 -070020#include <soc/wcd-spi-ac.h>
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053021#include "wcd-spi-registers.h"
22
/* Byte manipulations */
#define SHIFT_1_BYTES (8)
#define SHIFT_2_BYTES (16)
#define SHIFT_3_BYTES (24)

/* Command opcodes */
#define WCD_SPI_CMD_NOP (0x00)
#define WCD_SPI_CMD_WREN (0x06)
#define WCD_SPI_CMD_CLKREQ (0xDA)
#define WCD_SPI_CMD_RDSR (0x05)
#define WCD_SPI_CMD_IRR (0x81)
#define WCD_SPI_CMD_IRW (0x82)
#define WCD_SPI_CMD_MIOR (0x83)
#define WCD_SPI_CMD_FREAD (0x0B)
#define WCD_SPI_CMD_MIOW (0x02)
/* Frame words carry the opcode in the most significant byte */
#define WCD_SPI_WRITE_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
#define WCD_SPI_READ_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
#define WCD_SPI_FREAD_FRAME_OPCODE \
	(WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)

/* Command lengths (bytes on the wire, including the opcode) */
#define WCD_SPI_OPCODE_LEN (0x01)
#define WCD_SPI_CMD_NOP_LEN (0x01)
#define WCD_SPI_CMD_WREN_LEN (0x01)
#define WCD_SPI_CMD_CLKREQ_LEN (0x04)
#define WCD_SPI_CMD_IRR_LEN (0x04)
#define WCD_SPI_CMD_IRW_LEN (0x06)
#define WCD_SPI_WRITE_SINGLE_LEN (0x08)
#define WCD_SPI_READ_SINGLE_LEN (0x13)
#define WCD_SPI_CMD_FREAD_LEN (0x13)

/* Command delays */
#define WCD_SPI_CLKREQ_DELAY_USECS (500)
#define WCD_SPI_CLK_OFF_TIMER_MS (500)
#define WCD_SPI_RESUME_TIMEOUT_MS 100

/* Command masks */
/* Covers the low 24 bits (3 bytes) of a frame's address field */
#define WCD_CMD_ADDR_MASK \
	(0xFF | \
	 (0xFF << SHIFT_1_BYTES) | \
	 (0xFF << SHIFT_2_BYTES))

/* Clock ctrl request related */
#define WCD_SPI_CLK_ENABLE true
#define WCD_SPI_CLK_DISABLE false
#define WCD_SPI_CLK_FLAG_DELAYED (1 << 0)
#define WCD_SPI_CLK_FLAG_IMMEDIATE (1 << 1)

/* Internal addresses */
#define WCD_SPI_ADDR_IPC_CTL_HOST (0x012014)

/* Word sizes and min/max lengths */
#define WCD_SPI_WORD_BYTE_CNT (4)
#define WCD_SPI_RW_MULTI_MIN_LEN (16)

/* Max size is 32 bytes less than 64Kbytes */
#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)

/*
 * Max size for the pre-allocated buffers is the max
 * possible read/write length + 32 bytes for the SPI
 * read/write command header itself.
 */
#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)

/* Alignment requirements */
#define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
#define WCD_SPI_RW_MULTI_ALIGN (16)

/* Status mask bits (bit positions in wcd_spi_priv.status_mask) */
#define WCD_SPI_CLK_STATE_ENABLED BIT(0)
#define WCD_SPI_IS_SUSPENDED BIT(1)
/* Locking related */
/*
 * Lock/unlock wrappers that emit a verbose debug trace before taking or
 * releasing the mutex.  Wrapped in do-while(0) so each expands to a
 * single statement and is safe inside unbraced if/else bodies (the bare
 * { } form would otherwise leave a dangling ';' statement).
 */
#define WCD_SPI_MUTEX_LOCK(spi, lock)                     \
do {                                                      \
	dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n",       \
		 __func__, __stringify_1(lock));          \
	mutex_lock(&lock);                                \
} while (0)

#define WCD_SPI_MUTEX_UNLOCK(spi, lock)                   \
do {                                                      \
	dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n",     \
		 __func__, __stringify_1(lock));          \
	mutex_unlock(&lock);                              \
} while (0)
112
/* State for the debugfs interface exposed by this driver */
struct wcd_spi_debug_data {
	struct dentry *dir;	/* debugfs directory for this device */
	u32 addr;		/* remote address used by debugfs access */
	u32 size;		/* number of bytes for debugfs access */
};

/* Per-device driver state, stored as SPI drvdata */
struct wcd_spi_priv {
	struct spi_device *spi;
	/* Base of the remote WDSP memory window; added to section offsets */
	u32 mem_base_addr;

	struct regmap *regmap;

	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;

	/* Message for two transfers (e.g. command + response) */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];

	/* Register access related */
	u32 reg_bytes;
	u32 val_bytes;

	/* Clock requests related; clk_mutex guards clk_users/status_mask */
	struct mutex clk_mutex;
	int clk_users;
	unsigned long status_mask;
	struct delayed_work clk_dwork;

	/* Transaction related; serializes data transfers */
	struct mutex xfer_mutex;

	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;

	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;

	/* Completion object to indicate system resume completion */
	struct completion resume_comp;

	/* Buffers to hold memory used for transfers */
	void *tx_buf;
	void *rx_buf;

	/* DMA handles for transfer buffers */
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
	/* Handle to child (qmi client) device */
	struct device *ac_dev;
};
165
/* Direction of a data transfer request */
enum xfer_request {
	WCD_SPI_XFER_WRITE,
	WCD_SPI_XFER_READ,
};

/* Map a transfer request to a printable name for log messages. */
static char *wcd_spi_xfer_req_str(enum xfer_request req)
{
	switch (req) {
	case WCD_SPI_XFER_WRITE:
		return "xfer_write";
	case WCD_SPI_XFER_READ:
		return "xfer_read";
	default:
		return "xfer_invalid";
	}
}
181
182static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
183{
184 xfer->tx_buf = NULL;
185 xfer->rx_buf = NULL;
186 xfer->delay_usecs = 0;
187 xfer->len = 0;
188}
189
/* True when the SPI device has been marked suspended in status_mask. */
static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
{
	return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
}
194
195static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
196{
197 struct spi_device *spi = wcd_spi->spi;
198
199 if (wcd_spi->clk_users > 0 ||
200 test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
201 dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
202 __func__, wcd_spi->clk_users);
203 return false;
204 }
205
206 return true;
207}
208
/*
 * Block until the system has resumed, or time out.
 *
 * If the device is not suspended, returns 0 immediately.  Otherwise the
 * clk_mutex is dropped while waiting on resume_comp (signalled by the
 * resume path) so the resume handler can make progress.
 *
 * Returns 0 on success, -EIO if resume did not complete within
 * WCD_SPI_RESUME_TIMEOUT_MS.
 */
static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))
		goto done;

	/* If suspended then wait for resume to happen */
	reinit_completion(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	/* rc is remaining jiffies; 0 means the wait timed out */
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
		msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (rc == 0) {
		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
		rc = -EIO;
		goto done;
	}

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
	rc = 0;
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
238
239static int wcd_spi_read_single(struct spi_device *spi,
240 u32 remote_addr, u32 *val)
241{
242 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
243 struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
244 struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
245 u8 *tx_buf = wcd_spi->tx_buf;
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700246 u8 *rx_buf = wcd_spi->rx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530247 u32 frame = 0;
248 int ret;
249
250 dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
251 __func__, remote_addr);
252
253 if (!tx_buf) {
254 dev_err(&spi->dev, "%s: tx_buf not allocated\n",
255 __func__);
256 return -ENOMEM;
257 }
258
259 frame |= WCD_SPI_READ_FRAME_OPCODE;
260 frame |= remote_addr & WCD_CMD_ADDR_MASK;
261
262 wcd_spi_reinit_xfer(tx_xfer);
263 frame = cpu_to_be32(frame);
264 memcpy(tx_buf, &frame, sizeof(frame));
265 tx_xfer->tx_buf = tx_buf;
266 tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;
267
268 wcd_spi_reinit_xfer(rx_xfer);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700269 rx_xfer->rx_buf = rx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530270 rx_xfer->len = sizeof(*val);
271
272 ret = spi_sync(spi, &wcd_spi->msg2);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700273 if (ret)
274 dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
275 __func__, ret);
276 else
277 memcpy((u8*) val, rx_buf, sizeof(*val));
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530278
279 return ret;
280}
281
282static int wcd_spi_read_multi(struct spi_device *spi,
283 u32 remote_addr, u8 *data,
284 size_t len)
285{
286 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
287 struct spi_transfer *xfer = &wcd_spi->xfer1;
288 u8 *tx_buf = wcd_spi->tx_buf;
289 u8 *rx_buf = wcd_spi->rx_buf;
290 u32 frame = 0;
291 int ret;
292
293 dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
294 __func__, remote_addr, len);
295
296 frame |= WCD_SPI_FREAD_FRAME_OPCODE;
297 frame |= remote_addr & WCD_CMD_ADDR_MASK;
298
299 if (!tx_buf || !rx_buf) {
300 dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
301 (!tx_buf) ? "tx_buf" : "rx_buf");
302 return -ENOMEM;
303 }
304
305 wcd_spi_reinit_xfer(xfer);
306 frame = cpu_to_be32(frame);
307 memcpy(tx_buf, &frame, sizeof(frame));
308 xfer->tx_buf = tx_buf;
309 xfer->rx_buf = rx_buf;
310 xfer->len = WCD_SPI_CMD_FREAD_LEN + len;
311
312 ret = spi_sync(spi, &wcd_spi->msg1);
313 if (ret) {
314 dev_err(&spi->dev, "%s: failed, err = %d\n",
315 __func__, ret);
316 goto done;
317 }
318
319 memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
320done:
321 return ret;
322}
323
324static int wcd_spi_write_single(struct spi_device *spi,
325 u32 remote_addr, u32 val)
326{
327 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
328 struct spi_transfer *xfer = &wcd_spi->xfer1;
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700329 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530330 u32 frame = 0;
331
332 dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
333 __func__, remote_addr, val);
334
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700335 memset(tx_buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530336 frame |= WCD_SPI_WRITE_FRAME_OPCODE;
337 frame |= (remote_addr & WCD_CMD_ADDR_MASK);
338
339 frame = cpu_to_be32(frame);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700340 memcpy(tx_buf, &frame, sizeof(frame));
341 memcpy(tx_buf + sizeof(frame), &val, sizeof(val));
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530342
343 wcd_spi_reinit_xfer(xfer);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700344 xfer->tx_buf = tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530345 xfer->len = WCD_SPI_WRITE_SINGLE_LEN;
346
347 return spi_sync(spi, &wcd_spi->msg1);
348}
349
350static int wcd_spi_write_multi(struct spi_device *spi,
351 u32 remote_addr, u8 *data,
352 size_t len)
353{
354 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
355 struct spi_transfer *xfer = &wcd_spi->xfer1;
356 u32 frame = 0;
357 u8 *tx_buf = wcd_spi->tx_buf;
358 int xfer_len, ret;
359
360 dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
361 __func__, remote_addr, len);
362
363 frame |= WCD_SPI_WRITE_FRAME_OPCODE;
364 frame |= (remote_addr & WCD_CMD_ADDR_MASK);
365
366 frame = cpu_to_be32(frame);
367 xfer_len = len + sizeof(frame);
368
369 if (!tx_buf) {
370 dev_err(&spi->dev, "%s: tx_buf not allocated\n",
371 __func__);
372 return -ENOMEM;
373 }
374
375 memcpy(tx_buf, &frame, sizeof(frame));
376 memcpy(tx_buf + sizeof(frame), data, len);
377
378 wcd_spi_reinit_xfer(xfer);
379 xfer->tx_buf = tx_buf;
380 xfer->len = xfer_len;
381
382 ret = spi_sync(spi, &wcd_spi->msg1);
383 if (ret < 0)
384 dev_err(&spi->dev,
385 "%s: Failed, addr = 0x%x, len = %zd\n",
386 __func__, remote_addr, len);
387 return ret;
388}
389
/*
 * Split one logical transfer into hardware-friendly chunks:
 *   1. single-word ops until the address reaches 16-byte alignment,
 *   2. maximum-size multi ops (WCD_SPI_RW_MULTI_MAX_LEN) while possible,
 *   3. one multi op for the largest remaining multiple of the multi
 *      minimum length,
 *   4. single-word ops for whatever is left.
 *
 * Caller (__wcd_spi_data_xfer) has already verified that len is a
 * multiple of WCD_SPI_WORD_BYTE_CNT and the address is word aligned.
 * Returns 0 on success or the first failing sub-transfer's error.
 */
static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
{
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single writes until multi word alignment is met */
	loop_cnt = 1;
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*(u32 *)data));
		else
			ret = wcd_spi_read_single(spi, addr,
						  (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

	/* Perform multi writes for max allowed multi writes */
	loop_cnt = 1;
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
		else
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
		loop_cnt++;
	}

	/*
	 * Perform write for max possible data that is multiple
	 * of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
	    to_xfer > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
		else
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail write addr (0x%x), size (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				addr, to_xfer);
			goto done;
		}

		addr += to_xfer;
		data += to_xfer;
		remain_size -= to_xfer;
	}

	/* Perform single writes for the last remaining data */
	loop_cnt = 1;
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
		else
			ret = wcd_spi_read_single(spi, addr, (u32 *) data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

done:
	return ret;
}
494
495static int wcd_spi_cmd_nop(struct spi_device *spi)
496{
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700497 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
498 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530499
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700500 tx_buf[0] = WCD_SPI_CMD_NOP;
501
502 return spi_write(spi, tx_buf, WCD_SPI_CMD_NOP_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530503}
504
505static int wcd_spi_cmd_clkreq(struct spi_device *spi)
506{
507 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
508 struct spi_transfer *xfer = &wcd_spi->xfer1;
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700509 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530510 u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
511 WCD_SPI_CMD_CLKREQ,
512 0xBA, 0x80, 0x00};
513
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700514 memcpy(tx_buf, cmd, WCD_SPI_CMD_CLKREQ_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530515 wcd_spi_reinit_xfer(xfer);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700516 xfer->tx_buf = tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530517 xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
518 xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;
519
520 return spi_sync(spi, &wcd_spi->msg1);
521}
522
523static int wcd_spi_cmd_wr_en(struct spi_device *spi)
524{
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700525 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
526 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530527
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700528 tx_buf[0] = WCD_SPI_CMD_WREN;
529
530 return spi_write(spi, tx_buf, WCD_SPI_CMD_WREN_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530531}
532
/*
 * Read the slave's status register (RDSR): send the 1-byte opcode,
 * then clock in a 32-bit big-endian status word into *rdsr_status.
 * Returns 0 on success or a negative error code.
 */
static int wcd_spi_cmd_rdsr(struct spi_device *spi,
			    u32 *rdsr_status)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret;

	tx_buf[0] = WCD_SPI_CMD_RDSR;
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->len = WCD_SPI_OPCODE_LEN;

	/* Pre-clear so a short read cannot report stale status */
	memset(rx_buf, 0, sizeof(*rdsr_status));
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = sizeof(*rdsr_status);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* rx_buf is the DMA-coherent buffer; assumed 4-byte aligned */
	*rdsr_status = be32_to_cpu(*((u32*)rx_buf));

	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
		 __func__, *rdsr_status);
done:
	return ret;
}
567
568static int wcd_spi_clk_enable(struct spi_device *spi)
569{
570 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
571 int ret;
572 u32 rd_status = 0;
573
Bhalchandra Gajarec5f43a12018-03-19 14:35:42 -0700574 /* Get the SPI access first */
575 if (wcd_spi->ac_dev) {
576 ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
577 WCD_SPI_ACCESS_REQUEST,
578 WCD_SPI_AC_DATA_TRANSFER);
579 if (ret) {
580 dev_err(&spi->dev,
581 "%s: Can't get spi access, err = %d\n",
582 __func__, ret);
583 return ret;
584 }
585 }
586
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530587 ret = wcd_spi_cmd_nop(spi);
588 if (ret < 0) {
589 dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
590 __func__, ret);
591 goto done;
592 }
593
594 ret = wcd_spi_cmd_clkreq(spi);
595 if (ret < 0) {
596 dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
597 __func__, ret);
598 goto done;
599 }
600
601 ret = wcd_spi_cmd_nop(spi);
602 if (ret < 0) {
603 dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
604 __func__, ret);
605 goto done;
606 }
607 wcd_spi_cmd_rdsr(spi, &rd_status);
608 /*
609 * Read status zero means reads are not
610 * happenning on the bus, possibly because
611 * clock request failed.
612 */
613 if (rd_status) {
614 set_bit(WCD_SPI_CLK_STATE_ENABLED,
615 &wcd_spi->status_mask);
616 } else {
617 dev_err(&spi->dev, "%s: RDSR status is zero\n",
618 __func__);
619 ret = -EIO;
620 }
621done:
622 return ret;
623}
624
/*
 * Disable the slave's SPI clock by writing the IPC control register,
 * then release SPI access (if an access-control child device exists).
 * Caller holds clk_mutex.
 *
 * NOTE(review): if the register write fails but the access release
 * succeeds, ret is overwritten and 0 is returned — looks intentional
 * (best effort), but confirm before relying on the return value.
 */
static int wcd_spi_clk_disable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
	if (ret < 0)
		dev_err(&spi->dev, "%s: Failed, err = %d\n",
			__func__, ret);
	/*
	 * clear this bit even if clock disable failed
	 * as the source clocks might get turned off.
	 */
	clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);

	/* once the clock is released, SPI access can be released as well */
	if (wcd_spi->ac_dev) {
		ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
					 WCD_SPI_ACCESS_RELEASE,
					 WCD_SPI_AC_DATA_TRANSFER);
		if (ret)
			dev_err(&spi->dev,
				"%s: SPI access release failed, err = %d\n",
				__func__, ret);
	}

	return ret;
}
653
654static int wcd_spi_clk_ctrl(struct spi_device *spi,
655 bool request, u32 flags)
656{
657 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
658 int ret = 0;
659 const char *delay_str;
660
661 delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
662 "delayed" : "immediate";
663
664 WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
665
666 /* Reject any unbalanced disable request */
667 if (wcd_spi->clk_users < 0 ||
668 (!request && wcd_spi->clk_users == 0)) {
669 dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
670 __func__, wcd_spi->clk_users,
671 request ? "enable" : "disable");
672 ret = -EINVAL;
673
674 /* Reset the clk_users to 0 */
675 wcd_spi->clk_users = 0;
676
677 goto done;
678 }
679
680 if (request == WCD_SPI_CLK_ENABLE) {
681 /*
682 * If the SPI bus is suspended, then return error
683 * as the transaction cannot be completed.
684 */
685 if (wcd_spi_is_suspended(wcd_spi)) {
686 dev_err(&spi->dev,
687 "%s: SPI suspended, cannot enable clk\n",
688 __func__);
689 ret = -EIO;
690 goto done;
691 }
692
693 /* Cancel the disable clk work */
694 WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
695 cancel_delayed_work_sync(&wcd_spi->clk_dwork);
696 WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
697
698 wcd_spi->clk_users++;
699
700 /*
701 * If clk state is already set,
702 * then clk wasnt really disabled
703 */
704 if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
705 goto done;
706 else if (wcd_spi->clk_users == 1)
707 ret = wcd_spi_clk_enable(spi);
708
709 } else {
710 wcd_spi->clk_users--;
711
712 /* Clock is still voted for */
713 if (wcd_spi->clk_users > 0)
714 goto done;
715
716 /*
717 * If we are here, clk_users must be 0 and needs
718 * to be disabled. Call the disable based on the
719 * flags.
720 */
721 if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
722 schedule_delayed_work(&wcd_spi->clk_dwork,
723 msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
724 } else {
725 ret = wcd_spi_clk_disable(spi);
726 if (ret < 0)
727 dev_err(&spi->dev,
728 "%s: Failed to disable clk err = %d\n",
729 __func__, ret);
730 }
731 }
732
733done:
734 dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
735 __func__, wcd_spi->clk_users, request ? "enable" : "disable",
736 request ? "" : delay_str);
737 WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
738
739 return ret;
740}
741
/*
 * One-time slave initialization: enable the clock, send write-enable,
 * re-sync the regcache to hardware and program the slave config and
 * maximum transfer length registers.  The clock vote is dropped again
 * before returning, on both success and failure paths.
 */
static int wcd_spi_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0)
		goto done;

	ret = wcd_spi_cmd_wr_en(spi);
	if (ret < 0)
		goto err_wr_en;

	/*
	 * In case spi_init is called after component deinit,
	 * it is possible hardware register state is also reset.
	 * Sync the regcache here so hardware state is updated
	 * to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
		     0x0F3D0800);

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);
err_wr_en:
	/* Falls through from success as well: drop the init clock vote */
	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
done:
	return ret;
}
777
778static void wcd_spi_clk_work(struct work_struct *work)
779{
780 struct delayed_work *dwork;
781 struct wcd_spi_priv *wcd_spi;
782 struct spi_device *spi;
783 int ret;
784
785 dwork = to_delayed_work(work);
786 wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
787 spi = wcd_spi->spi;
788
789 WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
790 ret = wcd_spi_clk_disable(spi);
791 if (ret < 0)
792 dev_err(&spi->dev,
793 "%s: Failed to disable clk, err = %d\n",
794 __func__, ret);
795 WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
796}
797
/*
 * Core transfer path, without clock voting: validate alignment/length,
 * then either do one single-word transfer (len == 4) or split the
 * request into chunks via wcd_spi_transfer_split().  Serialized by
 * xfer_mutex.  Returns 0 on success or a negative error code.
 */
static int __wcd_spi_data_xfer(struct spi_device *spi,
			       struct wcd_spi_msg *msg,
			       enum xfer_request xfer_req)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	/* Check for minimum alignment requirements */
	if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
		dev_err(&spi->dev,
			"%s addr 0x%x is not aligned to 0x%x\n",
			__func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
		return -EINVAL;
	} else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
		dev_err(&spi->dev,
			"%s len 0x%zx is not multiple of %d\n",
			__func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
		return -EINVAL;
	}

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
	if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, msg->remote_addr,
						   (*((u32 *)msg->data)));
		else
			ret = wcd_spi_read_single(spi, msg->remote_addr,
						  (u32 *) msg->data);
	} else {
		ret = wcd_spi_transfer_split(spi, msg, xfer_req);
	}
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);

	return ret;
}
833
834static int wcd_spi_data_xfer(struct spi_device *spi,
835 struct wcd_spi_msg *msg,
836 enum xfer_request req)
837{
838 int ret, ret1;
839
840 if (msg->len <= 0) {
841 dev_err(&spi->dev, "%s: Invalid size %zd\n",
842 __func__, msg->len);
843 return -EINVAL;
844 }
845
846 /* Request for clock */
847 ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
848 WCD_SPI_CLK_FLAG_IMMEDIATE);
849 if (ret < 0) {
850 dev_err(&spi->dev, "%s: clk enable failed %d\n",
851 __func__, ret);
852 goto done;
853 }
854
855 /* Perform the transaction */
856 ret = __wcd_spi_data_xfer(spi, msg, req);
857 if (ret < 0)
858 dev_err(&spi->dev,
859 "%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
860 __func__, wcd_spi_xfer_req_str(req),
861 msg->remote_addr, msg->len, ret);
862
863 /* Release the clock even if xfer failed */
864 ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
865 WCD_SPI_CLK_FLAG_DELAYED);
866 if (ret1 < 0)
867 dev_err(&spi->dev, "%s: clk disable failed %d\n",
868 __func__, ret1);
869done:
870 return ret;
871}
872
873/*
874 * wcd_spi_data_write: Write data to WCD SPI
875 * @spi: spi_device struct
876 * @msg: msg that needs to be written to WCD
877 *
878 * This API writes length of data to address specified. These details
879 * about the write are encapsulated in @msg. Write size should be multiple
880 * of 4 bytes and write address should be 4-byte aligned.
881 */
882static int wcd_spi_data_write(struct spi_device *spi,
883 struct wcd_spi_msg *msg)
884{
885 if (!spi || !msg) {
886 pr_err("%s: Invalid %s\n", __func__,
887 (!spi) ? "spi device" : "msg");
888 return -EINVAL;
889 }
890
891 dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
892 __func__, msg->remote_addr, msg->len);
893 return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
894}
895
896/*
897 * wcd_spi_data_read: Read data from WCD SPI
898 * @spi: spi_device struct
899 * @msg: msg that needs to be read from WCD
900 *
901 * This API reads length of data from address specified. These details
902 * about the read are encapsulated in @msg. Read size should be multiple
903 * of 4 bytes and read address should be 4-byte aligned.
904 */
905static int wcd_spi_data_read(struct spi_device *spi,
906 struct wcd_spi_msg *msg)
907{
908 if (!spi || !msg) {
909 pr_err("%s: Invalid %s\n", __func__,
910 (!spi) ? "spi device" : "msg");
911 return -EINVAL;
912 }
913
914 dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x,len = %zu\n",
915 __func__, msg->remote_addr, msg->len);
916 return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
917}
918
919static int wdsp_spi_dload_section(struct spi_device *spi,
920 void *data)
921{
922 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
923 struct wdsp_img_section *sec = data;
924 struct wcd_spi_msg msg;
925 int ret;
926
927 dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
928 __func__, sec->addr, sec->size);
929
930 msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
931 msg.data = sec->data;
932 msg.len = sec->size;
933
934 ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
935 if (ret < 0)
936 dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
937 __func__, msg.remote_addr, msg.len);
938 return ret;
939}
940
941static int wdsp_spi_read_section(struct spi_device *spi, void *data)
942{
943 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
944 struct wdsp_img_section *sec = data;
945 struct wcd_spi_msg msg;
946 int ret;
947
948 msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
949 msg.data = sec->data;
950 msg.len = sec->size;
951
952 dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
953 __func__, msg.remote_addr, msg.len);
954
955 ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
956 if (ret < 0)
957 dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
958 __func__, msg.remote_addr, msg.len);
959 return ret;
960}
961
/*
 * Event callback registered with the WDSP manager.  Handles lifecycle
 * events (shutdown/bootup, firmware download, suspend/resume) and hands
 * out this driver's read/write ops on WDSP_EVENT_GET_DEVOPS.
 * Returns 0 or a negative error code per event.
 */
static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_ops *spi_ops;
	int ret = 0;

	dev_dbg(&spi->dev, "%s: event type %d\n",
		__func__, event);

	switch (event) {
	/* Hold SPI access while the remote goes down */
	case WDSP_EVENT_PRE_SHUTDOWN:
		if (wcd_spi->ac_dev) {
			ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
						 WCD_SPI_ACCESS_REQUEST,
						 WCD_SPI_AC_REMOTE_DOWN);
			if (ret)
				dev_err(&spi->dev,
					"%s: request access failed %d\n",
					__func__, ret);
		}
		break;

	/* Remote is down: force the clock off and reset vote accounting */
	case WDSP_EVENT_POST_SHUTDOWN:
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	/* Remote is back up: release the access taken at PRE_SHUTDOWN */
	case WDSP_EVENT_POST_BOOTUP:
		if (wcd_spi->ac_dev) {
			ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
						 WCD_SPI_ACCESS_RELEASE,
						 WCD_SPI_AC_REMOTE_DOWN);
			if (ret)
				dev_err(&spi->dev,
					"%s: release access failed %d\n",
					__func__, ret);
		}
		break;

	/* Vote the clock on for the whole download sequence */
	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk_req failed %d\n",
				__func__, ret);
		break;

	/* Drop the download clock vote, on success or failure */
	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:

		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);
		break;

	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);
		break;

	/* Veto suspend while clock votes are outstanding */
	case WDSP_EVENT_SUSPEND:
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
			ret = -EBUSY;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);
		break;

	/* Populate the caller-supplied ops with our transfer entry points */
	case WDSP_EVENT_GET_DEVOPS:
		if (!data) {
			dev_err(&spi->dev, "%s: invalid data\n",
				__func__);
			ret = -EINVAL;
			break;
		}

		spi_ops = (struct wcd_spi_ops *) data;
		spi_ops->spi_dev = spi;
		spi_ops->read_dev = wcd_spi_data_read;
		spi_ops->write_dev = wcd_spi_data_write;
		break;

	default:
		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
			__func__, event);
		break;
	}

	return ret;
}
1069
1070static int wcd_spi_bus_gwrite(void *context, const void *reg,
1071 size_t reg_len, const void *val,
1072 size_t val_len)
1073{
1074 struct device *dev = context;
1075 struct spi_device *spi = to_spi_device(dev);
1076 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001077 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301078
1079 if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
1080 val_len != wcd_spi->val_bytes) {
1081 dev_err(&spi->dev,
1082 "%s: Invalid input, reg_len = %zd, val_len = %zd",
1083 __func__, reg_len, val_len);
1084 return -EINVAL;
1085 }
1086
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001087 memset(tx_buf, 0, WCD_SPI_CMD_IRW_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301088 tx_buf[0] = WCD_SPI_CMD_IRW;
1089 tx_buf[1] = *((u8 *)reg);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001090 memcpy(tx_buf + WCD_SPI_OPCODE_LEN + reg_len,
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301091 val, val_len);
1092
1093 return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
1094}
1095
1096static int wcd_spi_bus_write(void *context, const void *data,
1097 size_t count)
1098{
1099 struct device *dev = context;
1100 struct spi_device *spi = to_spi_device(dev);
1101 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1102
1103 if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
1104 dev_err(&spi->dev, "%s: Invalid size %zd\n",
1105 __func__, count);
1106 WARN_ON(1);
1107 return -EINVAL;
1108 }
1109
1110 return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
1111 data + wcd_spi->reg_bytes,
1112 count - wcd_spi->reg_bytes);
1113}
1114
/*
 * Regmap bus read callback: read one register via the internal
 * register read (IRR) command, issued as two chained SPI transfers
 * (command out, value in) using the pre-built msg2 message and the
 * pre-allocated DMA-coherent buffers.
 */
static int wcd_spi_bus_read(void *context, const void *reg,
			    size_t reg_len, void *val,
			    size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret = 0;

	/* Only single-register reads of the regmap's exact widths */
	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	/* Command frame: IRR opcode followed by the register address */
	memset(tx_buf, 0, WCD_SPI_CMD_IRR_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRR;
	tx_buf[1] = *((u8 *)reg);

	/* First transfer sends the command ... */
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->rx_buf = NULL;
	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;

	/* ... second transfer clocks the value back in */
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->tx_buf = NULL;
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = val_len;

	/* msg2 was populated with both transfers at component bind time */
	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret) {
		dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
			__func__, ret);
		goto done;
	}

	/* Copy out of the DMA buffer into the caller's location */
	memcpy(val, rx_buf, val_len);

done:
	return ret;
}
1162
/*
 * Custom regmap bus: register accesses are tunneled over SPI using
 * the IRR/IRW command frames; values are big-endian on the wire.
 */
static struct regmap_bus wcd_spi_regmap_bus = {
	.write = wcd_spi_bus_write,
	.gather_write = wcd_spi_bus_gwrite,
	.read = wcd_spi_bus_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
1170
1171static int wcd_spi_state_show(struct seq_file *f, void *ptr)
1172{
1173 struct spi_device *spi = f->private;
1174 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1175 const char *clk_state, *clk_mutex, *xfer_mutex;
1176
1177 if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
1178 clk_state = "enabled";
1179 else
1180 clk_state = "disabled";
1181
1182 clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
1183 "locked" : "unlocked";
1184
1185 xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
1186 "locked" : "unlocked";
1187
1188 seq_printf(f, "clk_state = %s\nclk_users = %d\n"
1189 "clk_mutex = %s\nxfer_mutex = %s\n",
1190 clk_state, wcd_spi->clk_users, clk_mutex,
1191 xfer_mutex);
1192 return 0;
1193}
1194
/* debugfs open: bind wcd_spi_state_show to the spi_device in i_private */
static int wcd_spi_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, wcd_spi_state_show, inode->i_private);
}
1199
/* debugfs "state" file ops: read-only seq_file view of driver state */
static const struct file_operations state_fops = {
	.open = wcd_spi_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1206
/*
 * debugfs "mem_read" handler: read up to "count" bytes of remote
 * memory over SPI, starting at the address configured through the
 * "addr" debugfs control and bounded by the "size" control, then
 * copy the result to userspace.
 */
static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
{
	struct spi_device *spi = file->private_data;
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
	struct wcd_spi_msg msg;
	ssize_t buf_size, read_count = 0;
	char *buf;
	int ret;

	if (*ppos < 0 || !count)
		return -EINVAL;

	/* "addr" and "size" must both be set via debugfs before reading */
	if (dbg_data->size == 0 || dbg_data->addr == 0) {
		dev_err(&spi->dev,
			"%s: Invalid request, size = %u, addr = 0x%x\n",
			__func__, dbg_data->size, dbg_data->addr);
		return 0;
	}

	/* Never fetch more than the configured window size */
	buf_size = count < dbg_data->size ? count : dbg_data->size;
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	msg.data = buf;
	msg.remote_addr = dbg_data->addr;
	msg.len = buf_size;
	msg.flags = 0;

	ret = wcd_spi_data_read(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev,
			"%s: Failed to read %zu bytes from addr 0x%x\n",
			__func__, buf_size, msg.remote_addr);
		goto done;
	}

	/* Handles the ppos offset and partial-copy bookkeeping */
	read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);

done:
	kfree(buf);
	if (ret < 0)
		return ret;
	else
		return read_count;
}
1255
/* debugfs "mem_read" file ops: read remote WDSP memory over SPI */
static const struct file_operations mem_read_fops = {
	.open = simple_open,
	.read = wcd_spi_debugfs_mem_read,
};
1260
1261static int wcd_spi_debugfs_init(struct spi_device *spi)
1262{
1263 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1264 struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
1265 int rc = 0;
1266
1267 dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
1268 if (IS_ERR_OR_NULL(dbg_data->dir)) {
1269 dbg_data->dir = NULL;
1270 rc = -ENODEV;
1271 goto done;
1272 }
1273
1274 debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
1275 debugfs_create_u32("addr", 0644, dbg_data->dir,
1276 &dbg_data->addr);
1277 debugfs_create_u32("size", 0644, dbg_data->dir,
1278 &dbg_data->size);
1279
1280 debugfs_create_file("mem_read", 0444, dbg_data->dir,
1281 spi, &mem_read_fops);
1282done:
1283 return rc;
1284}
1285
1286
/*
 * Hardware default values for the slave registers; used to seed
 * the regmap cache (see wcd_spi_regmap_cfg.reg_defaults).
 */
static const struct reg_default wcd_spi_defaults[] = {
	{WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
	{WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
	{WCD_SPI_SLAVE_STATUS, 0x80100000},
	{WCD_SPI_SLAVE_CONFIG, 0x0F200808},
	{WCD_SPI_SLAVE_SW_RESET, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
	{WCD_SPI_SLAVE_TX, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
	{WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
	{WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
	{WCD_SPI_SLAVE_GENERICS, 0x80000000},
	{WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
};
1311
1312static bool wcd_spi_is_volatile_reg(struct device *dev,
1313 unsigned int reg)
1314{
1315 switch (reg) {
1316 case WCD_SPI_SLAVE_SANITY:
1317 case WCD_SPI_SLAVE_STATUS:
1318 case WCD_SPI_SLAVE_IRQ_STATUS:
1319 case WCD_SPI_SLAVE_TX:
1320 case WCD_SPI_SLAVE_SW_RST_IRQ:
1321 case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
1322 case WCD_SPI_SLAVE_FIFO_LEVEL:
1323 case WCD_SPI_SLAVE_GENERICS:
1324 return true;
1325 }
1326
1327 return false;
1328}
1329
1330static bool wcd_spi_is_readable_reg(struct device *dev,
1331 unsigned int reg)
1332{
1333 switch (reg) {
1334 case WCD_SPI_SLAVE_SW_RESET:
1335 case WCD_SPI_SLAVE_IRQ_CLR:
1336 case WCD_SPI_SLAVE_IRQ_FORCE:
1337 return false;
1338 }
1339
1340 return true;
1341}
1342
/*
 * Regmap configuration: 8-bit register addresses, 32-bit values,
 * rbtree cache seeded from wcd_spi_defaults so the cache can be
 * re-synced to hardware after a reset.
 */
static struct regmap_config wcd_spi_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 32,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wcd_spi_defaults,
	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
	.max_register = WCD_SPI_MAX_REGISTER,
	.volatile_reg = wcd_spi_is_volatile_reg,
	.readable_reg = wcd_spi_is_readable_reg,
};
1353
Bhalchandra Gajarec5f43a12018-03-19 14:35:42 -07001354static int wcd_spi_add_ac_dev(struct device *dev,
1355 struct device_node *node)
1356{
1357 struct spi_device *spi = to_spi_device(dev);
1358 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1359 struct platform_device *pdev;
1360 int ret = 0;
1361
1362 pdev = platform_device_alloc("wcd-spi-ac", -1);
1363 if (IS_ERR_OR_NULL(pdev)) {
1364 ret = PTR_ERR(pdev);
1365 dev_err(dev, "%s: pdev alloc failed, ret = %d\n",
1366 __func__, ret);
1367 return ret;
1368 }
1369
1370 pdev->dev.parent = dev;
1371 pdev->dev.of_node = node;
1372
1373 ret = platform_device_add(pdev);
1374 if (ret) {
1375 dev_err(dev, "%s: pdev add failed, ret = %d\n",
1376 __func__, ret);
1377 goto dealloc_pdev;
1378 }
1379
1380 wcd_spi->ac_dev = &pdev->dev;
1381 return 0;
1382
1383dealloc_pdev:
1384 platform_device_put(pdev);
1385 return ret;
1386}
1387
/*
 * .init hook registered in wdsp_spi_ops: create the optional
 * access-control child device described in DT, then initialize
 * the SPI slave.
 */
static int wdsp_spi_init(struct device *dev, void *priv_data)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;
	struct device_node *node;

	/* AC child creation failure is non-fatal; init continues */
	for_each_child_of_node(dev->of_node, node) {
		if (!strcmp(node->name, "wcd_spi_ac"))
			wcd_spi_add_ac_dev(dev, node);
	}

	ret = wcd_spi_init(spi);
	if (ret < 0)
		dev_err(&spi->dev, "%s: Init failed, err = %d\n",
			__func__, ret);
	return ret;
}
1405
/*
 * .deinit hook registered in wdsp_spi_ops: invalidate the regmap
 * cache so the next init re-syncs register state to hardware.
 */
static int wdsp_spi_deinit(struct device *dev, void *priv_data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	/*
	 * Deinit means the hardware is reset. Mark the cache
	 * as dirty here, so init will sync the cache
	 */
	regcache_mark_dirty(wcd_spi->regmap);

	return 0;
}
1419
/* Component ops registered with the WDSP manager for this SPI device */
static struct wdsp_cmpnt_ops wdsp_spi_ops = {
	.init = wdsp_spi_init,
	.deinit = wdsp_spi_deinit,
	.event_handler = wdsp_spi_event_handler,
};
1425
/*
 * Component bind callback: register component ops with the WDSP
 * master, create the regmap over the custom SPI bus, build the
 * reusable SPI messages and pre-allocate the DMA-coherent transfer
 * buffers used by the read/write paths.
 */
static int wcd_spi_component_bind(struct device *dev,
				  struct device *master,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;

	wcd_spi->m_dev = master;
	wcd_spi->m_ops = data;

	if (wcd_spi->m_ops &&
	    wcd_spi->m_ops->register_cmpnt_ops)
		ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
							 wcd_spi,
							 &wdsp_spi_ops);
	if (ret) {
		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Byte widths derived from the regmap config (8-bit reg, 32-bit val) */
	wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
	wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);

	wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
					   &spi->dev, &wcd_spi_regmap_cfg);
	if (IS_ERR(wcd_spi->regmap)) {
		ret = PTR_ERR(wcd_spi->regmap);
		dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* debugfs failure is logged but does not fail the bind */
	if (wcd_spi_debugfs_init(spi))
		dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);

	/* msg1: single transfer; msg2: chained command + data transfers */
	spi_message_init(&wcd_spi->msg1);
	spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);

	spi_message_init(&wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);

	/* Pre-allocate the buffers */
	wcd_spi->tx_buf = dma_zalloc_coherent(&spi->dev,
					      WCD_SPI_RW_MAX_BUF_SIZE,
					      &wcd_spi->tx_dma, GFP_KERNEL);
	if (!wcd_spi->tx_buf) {
		ret = -ENOMEM;
		goto done;
	}

	wcd_spi->rx_buf = dma_zalloc_coherent(&spi->dev,
					      WCD_SPI_RW_MAX_BUF_SIZE,
					      &wcd_spi->rx_dma, GFP_KERNEL);
	if (!wcd_spi->rx_buf) {
		/* Roll back the tx buffer so the pair stays consistent */
		dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
				  wcd_spi->tx_buf, wcd_spi->tx_dma);
		wcd_spi->tx_buf = NULL;
		ret = -ENOMEM;
		goto done;
	}
done:
	return ret;
}
1492
1493static void wcd_spi_component_unbind(struct device *dev,
1494 struct device *master,
1495 void *data)
1496{
1497 struct spi_device *spi = to_spi_device(dev);
1498 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
Laxminath Kasam8f7ccc22017-08-28 17:35:04 +05301499 struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
1500
1501 debugfs_remove_recursive(dbg_data->dir);
1502 dbg_data->dir = NULL;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301503
1504 wcd_spi->m_dev = NULL;
1505 wcd_spi->m_ops = NULL;
1506
1507 spi_transfer_del(&wcd_spi->xfer1);
1508 spi_transfer_del(&wcd_spi->xfer2[0]);
1509 spi_transfer_del(&wcd_spi->xfer2[1]);
1510
Xiaoyu Ye89cc8892018-05-29 17:03:55 -07001511 dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
1512 wcd_spi->tx_buf, wcd_spi->tx_dma);
1513 dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
1514 wcd_spi->rx_buf, wcd_spi->rx_dma);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301515 wcd_spi->tx_buf = NULL;
1516 wcd_spi->rx_buf = NULL;
1517}
1518
/* Component framework hooks used when binding to the WDSP master */
static const struct component_ops wcd_spi_component_ops = {
	.bind = wcd_spi_component_bind,
	.unbind = wcd_spi_component_unbind,
};
1523
/*
 * SPI probe: allocate driver state, read the mandatory DT memory
 * base address, initialize locks/work/completion and register with
 * the component framework (regmap and DMA buffers are set up later,
 * in the component bind callback).
 */
static int wcd_spi_probe(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi;
	int ret = 0;

	wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
			       GFP_KERNEL);
	if (!wcd_spi)
		return -ENOMEM;

	/* Remote memory base address is a mandatory DT property */
	ret = of_property_read_u32(spi->dev.of_node,
				   "qcom,mem-base-addr",
				   &wcd_spi->mem_base_addr);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: Missing %s DT entry",
			__func__, "qcom,mem-base-addr");
		goto err_ret;
	}

	dev_dbg(&spi->dev,
		"%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);

	mutex_init(&wcd_spi->clk_mutex);
	mutex_init(&wcd_spi->xfer_mutex);
	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
	init_completion(&wcd_spi->resume_comp);
	/* Set up DMA ops so coherent buffers can be allocated at bind */
	arch_setup_dma_ops(&spi->dev, 0, 0, NULL, true);

	/* drvdata must be set before component_add: bind reads it */
	wcd_spi->spi = spi;
	spi_set_drvdata(spi, wcd_spi);

	ret = component_add(&spi->dev, &wcd_spi_component_ops);
	if (ret) {
		dev_err(&spi->dev, "%s: component_add failed err = %d\n",
			__func__, ret);
		goto err_component_add;
	}

	return ret;

err_component_add:
	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);
err_ret:
	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);
	return ret;
}
1572
1573static int wcd_spi_remove(struct spi_device *spi)
1574{
1575 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1576
1577 component_del(&spi->dev, &wcd_spi_component_ops);
1578
1579 mutex_destroy(&wcd_spi->clk_mutex);
1580 mutex_destroy(&wcd_spi->xfer_mutex);
1581
1582 devm_kfree(&spi->dev, wcd_spi);
1583 spi_set_drvdata(spi, NULL);
1584
1585 return 0;
1586}
1587
1588#ifdef CONFIG_PM
/*
 * System suspend handler: refuse suspend (-EBUSY) while the SPI
 * clock is still in use; otherwise give the WDSP master a chance
 * to suspend its other components before marking this device
 * suspended.
 */
static int wcd_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {
		rc = -EBUSY;
		goto done;
	}

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But, still need to notify
	 * the master to make sure all other components can suspend
	 * as well.
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		/*
		 * NOTE(review): clk_mutex is dropped around the master
		 * callback — presumably it can re-enter this driver;
		 * confirm against the WDSP manager implementation.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	}

	if (rc == 0)
		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	else
		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
			__func__, rc);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
1623
1624static int wcd_spi_resume(struct device *dev)
1625{
1626 struct spi_device *spi = to_spi_device(dev);
1627 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1628
1629 WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
1630 clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
1631 complete(&wcd_spi->resume_comp);
1632 WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
1633
1634 return 0;
1635}
1636
/* System PM callbacks; suspend may return -EBUSY while SPI is in use */
static const struct dev_pm_ops wcd_spi_pm_ops = {
	.suspend = wcd_spi_suspend,
	.resume = wcd_spi_resume,
};
1641#endif
1642
/* Device tree match table */
static const struct of_device_id wcd_spi_of_match[] = {
	{ .compatible = "qcom,wcd-spi-v2", },
	{ }
};
MODULE_DEVICE_TABLE(of, wcd_spi_of_match);
1648
/* SPI driver registration; PM ops included only when CONFIG_PM is set */
static struct spi_driver wcd_spi_driver = {
	.driver = {
		.name = "wcd-spi-v2",
		.of_match_table = wcd_spi_of_match,
#ifdef CONFIG_PM
		.pm = &wcd_spi_pm_ops,
#endif
	},
	.probe = wcd_spi_probe,
	.remove = wcd_spi_remove,
};

module_spi_driver(wcd_spi_driver);

MODULE_DESCRIPTION("WCD SPI driver");
MODULE_LICENSE("GPL v2");