blob: bb9333485e3bcf6c76f1a8440f30cad07460aea8 [file] [log] [blame]
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301/*
Bhalchandra Gajareb8306ec2018-01-08 18:41:02 -08002 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05303 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/debugfs.h>
18#include <linux/delay.h>
19#include <linux/bitops.h>
20#include <linux/spi/spi.h>
21#include <linux/regmap.h>
22#include <linux/component.h>
23#include <linux/ratelimit.h>
24#include <sound/wcd-dsp-mgr.h>
25#include <sound/wcd-spi.h>
26#include "wcd-spi-registers.h"
27
/* Byte manipulations: shift counts in bits for byte positions in a u32 */
#define SHIFT_1_BYTES (8)
#define SHIFT_2_BYTES (16)
#define SHIFT_3_BYTES (24)

/* Command opcodes understood by the WCD SPI slave */
#define WCD_SPI_CMD_NOP (0x00)
#define WCD_SPI_CMD_WREN (0x06)
#define WCD_SPI_CMD_CLKREQ (0xDA)
#define WCD_SPI_CMD_RDSR (0x05)
#define WCD_SPI_CMD_IRR (0x81)
#define WCD_SPI_CMD_IRW (0x82)
#define WCD_SPI_CMD_MIOR (0x83)
#define WCD_SPI_CMD_FREAD (0x0B)
#define WCD_SPI_CMD_MIOW (0x02)
/* Frame opcodes occupy the most-significant byte of the 32-bit frame word */
#define WCD_SPI_WRITE_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
#define WCD_SPI_READ_FRAME_OPCODE \
	(WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
#define WCD_SPI_FREAD_FRAME_OPCODE \
	(WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)

/* Command lengths (in bytes) for each command on the wire */
#define WCD_SPI_OPCODE_LEN (0x01)
#define WCD_SPI_CMD_NOP_LEN (0x01)
#define WCD_SPI_CMD_WREN_LEN (0x01)
#define WCD_SPI_CMD_CLKREQ_LEN (0x04)
#define WCD_SPI_CMD_IRR_LEN (0x04)
#define WCD_SPI_CMD_IRW_LEN (0x06)
#define WCD_SPI_WRITE_SINGLE_LEN (0x08)
#define WCD_SPI_READ_SINGLE_LEN (0x13)
#define WCD_SPI_CMD_FREAD_LEN (0x13)

/* Command delays */
#define WCD_SPI_CLKREQ_DELAY_USECS (500)
#define WCD_SPI_CLK_OFF_TIMER_MS (500)
#define WCD_SPI_RESUME_TIMEOUT_MS 100

/* Command masks: low three bytes of the frame carry the remote address */
#define WCD_CMD_ADDR_MASK \
	(0xFF | \
	 (0xFF << SHIFT_1_BYTES) | \
	 (0xFF << SHIFT_2_BYTES))

/* Clock ctrl request related */
#define WCD_SPI_CLK_ENABLE true
#define WCD_SPI_CLK_DISABLE false
#define WCD_SPI_CLK_FLAG_DELAYED (1 << 0)
#define WCD_SPI_CLK_FLAG_IMMEDIATE (1 << 1)

/* Internal addresses */
#define WCD_SPI_ADDR_IPC_CTL_HOST (0x012014)

/* Word sizes and min/max lengths */
#define WCD_SPI_WORD_BYTE_CNT (4)
#define WCD_SPI_RW_MULTI_MIN_LEN (16)

/* Max size is 32 bytes less than 64Kbytes */
#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)

/*
 * Max size for the pre-allocated buffers is the max
 * possible read/write length + 32 bytes for the SPI
 * read/write command header itself.
 */
#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)

/* Alignment requirements */
#define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
#define WCD_SPI_RW_MULTI_ALIGN (16)

/* Status mask bits tracked in wcd_spi_priv.status_mask */
#define WCD_SPI_CLK_STATE_ENABLED BIT(0)
#define WCD_SPI_IS_SUSPENDED BIT(1)

/* Locking related: lock/unlock wrappers that trace acquisition for debug */
#define WCD_SPI_MUTEX_LOCK(spi, lock) \
{ \
	dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n", \
		 __func__, __stringify_1(lock)); \
	mutex_lock(&lock); \
}

#define WCD_SPI_MUTEX_UNLOCK(spi, lock) \
{ \
	dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n", \
		 __func__, __stringify_1(lock)); \
	mutex_unlock(&lock); \
}
117
/* Debugfs state: directory handle plus the user-selected address/size */
struct wcd_spi_debug_data {
	struct dentry *dir;
	u32 addr;
	u32 size;
};

/* Per-device driver state, stored as spi drvdata */
struct wcd_spi_priv {
	struct spi_device *spi;
	/* Base of the remote WDSP memory; added to section offsets */
	u32 mem_base_addr;

	struct regmap *regmap;

	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;

	/* Message for two transfers (e.g. command followed by response) */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];

	/* Register access related: widths used by the regmap bus callbacks */
	u32 reg_bytes;
	u32 val_bytes;

	/* Clock requests related */
	struct mutex clk_mutex;
	/* Reference count of clock votes; protected by clk_mutex */
	int clk_users;
	/* Bitmask of WCD_SPI_CLK_STATE_ENABLED / WCD_SPI_IS_SUSPENDED */
	unsigned long status_mask;
	/* Deferred clock-off work, scheduled for delayed disables */
	struct delayed_work clk_dwork;

	/* Transaction related: serializes all data transfers */
	struct mutex xfer_mutex;

	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;

	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;

	/* Completion object to indicate system resume completion */
	struct completion resume_comp;

	/* Buffers to hold memory used for transfers */
	void *tx_buf;
	void *rx_buf;
};
164
/* Direction of a data transfer request */
enum xfer_request {
	WCD_SPI_XFER_WRITE,
	WCD_SPI_XFER_READ,
};

/* Map a transfer direction to a human-readable name for log messages. */
static char *wcd_spi_xfer_req_str(enum xfer_request req)
{
	switch (req) {
	case WCD_SPI_XFER_WRITE:
		return "xfer_write";
	case WCD_SPI_XFER_READ:
		return "xfer_read";
	default:
		return "xfer_invalid";
	}
}
180
181static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
182{
183 xfer->tx_buf = NULL;
184 xfer->rx_buf = NULL;
185 xfer->delay_usecs = 0;
186 xfer->len = 0;
187}
188
189static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
190{
191 return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
192}
193
194static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
195{
196 struct spi_device *spi = wcd_spi->spi;
197
198 if (wcd_spi->clk_users > 0 ||
199 test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
200 dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
201 __func__, wcd_spi->clk_users);
202 return false;
203 }
204
205 return true;
206}
207
/*
 * Block until the driver leaves the suspended state, or time out.
 *
 * Returns 0 when already resumed or when resume completes within
 * WCD_SPI_RESUME_TIMEOUT_MS, -EIO on timeout.
 *
 * clk_mutex is dropped around the wait so the resume path (which
 * signals resume_comp) is not blocked on it.
 */
static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))
		goto done;

	/* If suspended then wait for resume to happen */
	reinit_completion(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	/* wait_for_completion_timeout returns 0 on timeout */
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
			msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (rc == 0) {
		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
		rc = -EIO;
		goto done;
	}

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
	rc = 0;
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
237
238static int wcd_spi_read_single(struct spi_device *spi,
239 u32 remote_addr, u32 *val)
240{
241 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
242 struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
243 struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
244 u8 *tx_buf = wcd_spi->tx_buf;
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700245 u8 *rx_buf = wcd_spi->rx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530246 u32 frame = 0;
247 int ret;
248
249 dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
250 __func__, remote_addr);
251
252 if (!tx_buf) {
253 dev_err(&spi->dev, "%s: tx_buf not allocated\n",
254 __func__);
255 return -ENOMEM;
256 }
257
258 frame |= WCD_SPI_READ_FRAME_OPCODE;
259 frame |= remote_addr & WCD_CMD_ADDR_MASK;
260
261 wcd_spi_reinit_xfer(tx_xfer);
262 frame = cpu_to_be32(frame);
263 memcpy(tx_buf, &frame, sizeof(frame));
264 tx_xfer->tx_buf = tx_buf;
265 tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;
266
267 wcd_spi_reinit_xfer(rx_xfer);
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700268 rx_xfer->rx_buf = rx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530269 rx_xfer->len = sizeof(*val);
270
271 ret = spi_sync(spi, &wcd_spi->msg2);
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700272 if (ret)
273 dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
274 __func__, ret);
275 else
276 memcpy((u8*) val, rx_buf, sizeof(*val));
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530277
278 return ret;
279}
280
/*
 * Read @len bytes from @remote_addr using the fast-read (FREAD)
 * command in a single full-duplex transfer.
 *
 * The rx buffer receives WCD_SPI_CMD_FREAD_LEN header bytes before
 * the payload, so the payload is copied out from that offset.
 * Callers are expected to bound @len so the header + payload fits in
 * the pre-allocated WCD_SPI_RW_MAX_BUF_SIZE buffers.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int wcd_spi_read_multi(struct spi_device *spi,
			      u32 remote_addr, u8 *data,
			      size_t len)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	u32 frame = 0;
	int ret;

	dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
		__func__, remote_addr, len);

	/* Frame: FREAD opcode in the MSB, remote address in low 3 bytes */
	frame |= WCD_SPI_FREAD_FRAME_OPCODE;
	frame |= remote_addr & WCD_CMD_ADDR_MASK;

	if (!tx_buf || !rx_buf) {
		dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
			(!tx_buf) ? "tx_buf" : "rx_buf");
		return -ENOMEM;
	}

	wcd_spi_reinit_xfer(xfer);
	frame = cpu_to_be32(frame);
	memcpy(tx_buf, &frame, sizeof(frame));
	xfer->tx_buf = tx_buf;
	xfer->rx_buf = rx_buf;
	/* Full-duplex: clock out header + dummy bytes while data clocks in */
	xfer->len = WCD_SPI_CMD_FREAD_LEN + len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (ret) {
		dev_err(&spi->dev, "%s: failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Payload starts after the FREAD command header in the rx stream */
	memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
done:
	return ret;
}
322
323static int wcd_spi_write_single(struct spi_device *spi,
324 u32 remote_addr, u32 val)
325{
326 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
327 struct spi_transfer *xfer = &wcd_spi->xfer1;
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700328 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530329 u32 frame = 0;
330
331 dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
332 __func__, remote_addr, val);
333
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700334 memset(tx_buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530335 frame |= WCD_SPI_WRITE_FRAME_OPCODE;
336 frame |= (remote_addr & WCD_CMD_ADDR_MASK);
337
338 frame = cpu_to_be32(frame);
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700339 memcpy(tx_buf, &frame, sizeof(frame));
340 memcpy(tx_buf + sizeof(frame), &val, sizeof(val));
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530341
342 wcd_spi_reinit_xfer(xfer);
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700343 xfer->tx_buf = tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530344 xfer->len = WCD_SPI_WRITE_SINGLE_LEN;
345
346 return spi_sync(spi, &wcd_spi->msg1);
347}
348
/*
 * Write @len bytes to @remote_addr using a multi-word MIOW frame:
 * 4-byte command header followed by the payload in one transfer.
 * Callers are expected to bound @len to WCD_SPI_RW_MULTI_MAX_LEN so
 * header + payload fits the pre-allocated buffer.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int wcd_spi_write_multi(struct spi_device *spi,
			       u32 remote_addr, u8 *data,
			       size_t len)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u32 frame = 0;
	u8 *tx_buf = wcd_spi->tx_buf;
	int xfer_len, ret;

	dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
		__func__, remote_addr, len);

	/* Frame: MIOW opcode in the MSB, remote address in low 3 bytes */
	frame |= WCD_SPI_WRITE_FRAME_OPCODE;
	frame |= (remote_addr & WCD_CMD_ADDR_MASK);

	frame = cpu_to_be32(frame);
	xfer_len = len + sizeof(frame);

	if (!tx_buf) {
		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
			__func__);
		return -ENOMEM;
	}

	memcpy(tx_buf, &frame, sizeof(frame));
	memcpy(tx_buf + sizeof(frame), data, len);

	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = tx_buf;
	xfer->len = xfer_len;

	ret = spi_sync(spi, &wcd_spi->msg1);
	if (ret < 0)
		dev_err(&spi->dev,
			"%s: Failed, addr = 0x%x, len = %zd\n",
			__func__, remote_addr, len);
	return ret;
}
388
/*
 * Split an arbitrary (4-byte-aligned, multiple-of-4-length) transfer
 * into hardware-friendly pieces, in four stages:
 *   1. single-word xfers until the address is 16-byte aligned,
 *   2. max-size multi xfers while >= WCD_SPI_RW_MULTI_MAX_LEN remains,
 *   3. one multi xfer for the largest multiple of
 *      WCD_SPI_RW_MULTI_MIN_LEN remaining,
 *   4. single-word xfers for the tail.
 *
 * Caller (__wcd_spi_data_xfer) has already validated alignment and
 * that len is a multiple of WCD_SPI_WORD_BYTE_CNT.
 *
 * Returns 0 on success, or the first failing sub-transfer's error.
 */
static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
{
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single writes until multi word alignment is met */
	loop_cnt = 1;
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*(u32 *)data));
		else
			ret = wcd_spi_read_single(spi, addr,
						  (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

	/* Perform multi writes for max allowed multi writes */
	loop_cnt = 1;
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
		else
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
		loop_cnt++;
	}

	/*
	 * Perform write for max possible data that is multiple
	 * of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
	    to_xfer > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
		else
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail write addr (0x%x), size (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				addr, to_xfer);
			goto done;
		}

		addr += to_xfer;
		data += to_xfer;
		remain_size -= to_xfer;
	}

	/* Perform single writes for the last remaining data */
	loop_cnt = 1;
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
		else
			ret = wcd_spi_read_single(spi, addr, (u32 *) data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

done:
	return ret;
}
493
494static int wcd_spi_cmd_nop(struct spi_device *spi)
495{
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700496 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
497 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530498
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700499 tx_buf[0] = WCD_SPI_CMD_NOP;
500
501 return spi_write(spi, tx_buf, WCD_SPI_CMD_NOP_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530502}
503
/*
 * Issue the clock-request command and hold CS for
 * WCD_SPI_CLKREQ_DELAY_USECS afterwards (via xfer->delay_usecs) to
 * give the slave time to bring the clock up.
 */
static int wcd_spi_cmd_clkreq(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *xfer = &wcd_spi->xfer1;
	u8 *tx_buf = wcd_spi->tx_buf;
	/* CLKREQ opcode followed by fixed magic payload bytes */
	u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
		WCD_SPI_CMD_CLKREQ,
		0xBA, 0x80, 0x00};

	memcpy(tx_buf, cmd, WCD_SPI_CMD_CLKREQ_LEN);
	wcd_spi_reinit_xfer(xfer);
	xfer->tx_buf = tx_buf;
	xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
	xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;

	return spi_sync(spi, &wcd_spi->msg1);
}
521
522static int wcd_spi_cmd_wr_en(struct spi_device *spi)
523{
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700524 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
525 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530526
Xiaoyu Yeb9ff33c2018-06-12 17:28:00 -0700527 tx_buf[0] = WCD_SPI_CMD_WREN;
528
529 return spi_write(spi, tx_buf, WCD_SPI_CMD_WREN_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530530}
531
/*
 * Read the slave status register (RDSR) using a two-transfer message:
 * one opcode byte out, then a 32-bit big-endian status word in.
 *
 * Returns 0 on success with *rdsr_status filled in (host byte order),
 * negative error otherwise.
 */
static int wcd_spi_cmd_rdsr(struct spi_device *spi,
			    u32 *rdsr_status)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret;

	tx_buf[0] = WCD_SPI_CMD_RDSR;
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->len = WCD_SPI_OPCODE_LEN;

	/* Clear the rx area so a failed read cannot leak stale status */
	memset(rx_buf, 0, sizeof(*rdsr_status));
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = sizeof(*rdsr_status);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Status arrives big-endian on the wire */
	*rdsr_status = be32_to_cpu(*((u32*)rx_buf));

	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
		 __func__, *rdsr_status);
done:
	return ret;
}
566
567static int wcd_spi_clk_enable(struct spi_device *spi)
568{
569 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
570 int ret;
571 u32 rd_status = 0;
572
573 ret = wcd_spi_cmd_nop(spi);
574 if (ret < 0) {
575 dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
576 __func__, ret);
577 goto done;
578 }
579
580 ret = wcd_spi_cmd_clkreq(spi);
581 if (ret < 0) {
582 dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
583 __func__, ret);
584 goto done;
585 }
586
587 ret = wcd_spi_cmd_nop(spi);
588 if (ret < 0) {
589 dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
590 __func__, ret);
591 goto done;
592 }
593 wcd_spi_cmd_rdsr(spi, &rd_status);
594 /*
595 * Read status zero means reads are not
596 * happenning on the bus, possibly because
597 * clock request failed.
598 */
599 if (rd_status) {
600 set_bit(WCD_SPI_CLK_STATE_ENABLED,
601 &wcd_spi->status_mask);
602 } else {
603 dev_err(&spi->dev, "%s: RDSR status is zero\n",
604 __func__);
605 ret = -EIO;
606 }
607done:
608 return ret;
609}
610
/*
 * Ask the slave to release the SPI clock by writing the IPC control
 * register, and clear the local clock-enabled state.
 *
 * Returns the result of the register write; the state bit is cleared
 * regardless of failure (see comment below).
 */
static int wcd_spi_clk_disable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	/* Writing 0x01 to the host IPC control register requests clock off */
	ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
	if (ret < 0)
		dev_err(&spi->dev, "%s: Failed, err = %d\n",
			__func__, ret);
	/*
	 * clear this bit even if clock disable failed
	 * as the source clocks might get turned off.
	 */
	clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);

	return ret;
}
628
/*
 * Reference-counted clock vote/unvote.
 *
 * @request: WCD_SPI_CLK_ENABLE to vote, WCD_SPI_CLK_DISABLE to unvote.
 * @flags:   for disable, WCD_SPI_CLK_FLAG_DELAYED defers the actual
 *           clock-off by WCD_SPI_CLK_OFF_TIMER_MS via clk_dwork;
 *           WCD_SPI_CLK_FLAG_IMMEDIATE disables right away.
 *
 * The clock is physically enabled only on the 0 -> 1 vote transition
 * and disabled only on the 1 -> 0 transition. Unbalanced disables
 * reset clk_users to 0 and return -EINVAL.
 */
static int wcd_spi_clk_ctrl(struct spi_device *spi,
			    bool request, u32 flags)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;
	const char *delay_str;

	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
		    "delayed" : "immediate";

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	/* Reject any unbalanced disable request */
	if (wcd_spi->clk_users < 0 ||
	    (!request && wcd_spi->clk_users == 0)) {
		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
			 __func__, wcd_spi->clk_users,
			request ? "enable" : "disable");
		ret = -EINVAL;

		/* Reset the clk_users to 0 */
		wcd_spi->clk_users = 0;

		goto done;
	}

	if (request == WCD_SPI_CLK_ENABLE) {
		/*
		 * If the SPI bus is suspended, then return error
		 * as the transaction cannot be completed.
		 */
		if (wcd_spi_is_suspended(wcd_spi)) {
			dev_err(&spi->dev,
				"%s: SPI suspended, cannot enable clk\n",
				__func__);
			ret = -EIO;
			goto done;
		}

		/*
		 * Cancel the disable clk work. The mutex must be dropped
		 * around the sync cancel because clk_dwork itself takes
		 * clk_mutex (deadlock otherwise).
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

		wcd_spi->clk_users++;

		/*
		 * If clk state is already set,
		 * then clk wasnt really disabled
		 */
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			goto done;
		else if (wcd_spi->clk_users == 1)
			ret = wcd_spi_clk_enable(spi);

	} else {
		wcd_spi->clk_users--;

		/* Clock is still voted for */
		if (wcd_spi->clk_users > 0)
			goto done;

		/*
		 * If we are here, clk_users must be 0 and needs
		 * to be disabled. Call the disable based on the
		 * flags.
		 */
		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
			schedule_delayed_work(&wcd_spi->clk_dwork,
				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
		} else {
			ret = wcd_spi_clk_disable(spi);
			if (ret < 0)
				dev_err(&spi->dev,
					"%s: Failed to disable clk err = %d\n",
					__func__, ret);
		}
	}

done:
	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
		request ? "" : delay_str);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return ret;
}
716
/*
 * One-time slave initialization: vote the clock, enable writes, sync
 * the regcache to hardware and program slave config / max MTU.
 *
 * Note: the err_wr_en label is deliberately shared by the error and
 * success paths — the clock vote taken above is always released
 * before returning.
 */
static int wcd_spi_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0)
		goto done;

	ret = wcd_spi_cmd_wr_en(spi);
	if (ret < 0)
		goto err_wr_en;

	/*
	 * In case spi_init is called after component deinit,
	 * it is possible hardware register state is also reset.
	 * Sync the regcache here so hardware state is updated
	 * to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
		     0x0F3D0800);

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);
err_wr_en:
	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
done:
	return ret;
}
752
753static void wcd_spi_clk_work(struct work_struct *work)
754{
755 struct delayed_work *dwork;
756 struct wcd_spi_priv *wcd_spi;
757 struct spi_device *spi;
758 int ret;
759
760 dwork = to_delayed_work(work);
761 wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
762 spi = wcd_spi->spi;
763
764 WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
765 ret = wcd_spi_clk_disable(spi);
766 if (ret < 0)
767 dev_err(&spi->dev,
768 "%s: Failed to disable clk, err = %d\n",
769 __func__, ret);
770 WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
771}
772
/*
 * Core transfer entry: validate alignment/length, then dispatch a
 * single-word transfer directly or hand larger transfers to
 * wcd_spi_transfer_split. Serialized by xfer_mutex.
 *
 * Requires remote_addr 4-byte aligned and len a multiple of 4.
 * Callers are responsible for holding a clock vote.
 */
static int __wcd_spi_data_xfer(struct spi_device *spi,
			       struct wcd_spi_msg *msg,
			       enum xfer_request xfer_req)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	/* Check for minimum alignment requirements */
	if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
		dev_err(&spi->dev,
			"%s addr 0x%x is not aligned to 0x%x\n",
			__func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
		return -EINVAL;
	} else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
		dev_err(&spi->dev,
			"%s len 0x%zx is not multiple of %d\n",
			__func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
		return -EINVAL;
	}

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
	if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, msg->remote_addr,
						   (*((u32 *)msg->data)));
		else
			ret = wcd_spi_read_single(spi,  msg->remote_addr,
						  (u32 *) msg->data);
	} else {
		ret = wcd_spi_transfer_split(spi, msg, xfer_req);
	}
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);

	return ret;
}
808
809static int wcd_spi_data_xfer(struct spi_device *spi,
810 struct wcd_spi_msg *msg,
811 enum xfer_request req)
812{
813 int ret, ret1;
814
815 if (msg->len <= 0) {
816 dev_err(&spi->dev, "%s: Invalid size %zd\n",
817 __func__, msg->len);
818 return -EINVAL;
819 }
820
821 /* Request for clock */
822 ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
823 WCD_SPI_CLK_FLAG_IMMEDIATE);
824 if (ret < 0) {
825 dev_err(&spi->dev, "%s: clk enable failed %d\n",
826 __func__, ret);
827 goto done;
828 }
829
830 /* Perform the transaction */
831 ret = __wcd_spi_data_xfer(spi, msg, req);
832 if (ret < 0)
833 dev_err(&spi->dev,
834 "%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
835 __func__, wcd_spi_xfer_req_str(req),
836 msg->remote_addr, msg->len, ret);
837
838 /* Release the clock even if xfer failed */
839 ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
840 WCD_SPI_CLK_FLAG_DELAYED);
841 if (ret1 < 0)
842 dev_err(&spi->dev, "%s: clk disable failed %d\n",
843 __func__, ret1);
844done:
845 return ret;
846}
847
848/*
849 * wcd_spi_data_write: Write data to WCD SPI
850 * @spi: spi_device struct
851 * @msg: msg that needs to be written to WCD
852 *
853 * This API writes length of data to address specified. These details
854 * about the write are encapsulated in @msg. Write size should be multiple
855 * of 4 bytes and write address should be 4-byte aligned.
856 */
857static int wcd_spi_data_write(struct spi_device *spi,
858 struct wcd_spi_msg *msg)
859{
860 if (!spi || !msg) {
861 pr_err("%s: Invalid %s\n", __func__,
862 (!spi) ? "spi device" : "msg");
863 return -EINVAL;
864 }
865
866 dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
867 __func__, msg->remote_addr, msg->len);
868 return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
869}
870
871/*
872 * wcd_spi_data_read: Read data from WCD SPI
873 * @spi: spi_device struct
874 * @msg: msg that needs to be read from WCD
875 *
876 * This API reads length of data from address specified. These details
877 * about the read are encapsulated in @msg. Read size should be multiple
878 * of 4 bytes and read address should be 4-byte aligned.
879 */
880static int wcd_spi_data_read(struct spi_device *spi,
881 struct wcd_spi_msg *msg)
882{
883 if (!spi || !msg) {
884 pr_err("%s: Invalid %s\n", __func__,
885 (!spi) ? "spi device" : "msg");
886 return -EINVAL;
887 }
888
889 dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x,len = %zu\n",
890 __func__, msg->remote_addr, msg->len);
891 return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
892}
893
894static int wdsp_spi_dload_section(struct spi_device *spi,
895 void *data)
896{
897 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
898 struct wdsp_img_section *sec = data;
899 struct wcd_spi_msg msg;
900 int ret;
901
902 dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
903 __func__, sec->addr, sec->size);
904
905 msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
906 msg.data = sec->data;
907 msg.len = sec->size;
908
909 ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
910 if (ret < 0)
911 dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
912 __func__, msg.remote_addr, msg.len);
913 return ret;
914}
915
916static int wdsp_spi_read_section(struct spi_device *spi, void *data)
917{
918 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
919 struct wdsp_img_section *sec = data;
920 struct wcd_spi_msg msg;
921 int ret;
922
923 msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
924 msg.data = sec->data;
925 msg.len = sec->size;
926
927 dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
928 __func__, msg.remote_addr, msg.len);
929
930 ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
931 if (ret < 0)
932 dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
933 __func__, msg.remote_addr, msg.len);
934 return ret;
935}
936
/*
 * Event callback registered with the WDSP manager. Dispatches
 * lifecycle events (shutdown, firmware download, suspend/resume,
 * ops discovery) to the corresponding driver actions.
 *
 * Returns 0 on success or a negative error for the failing event.
 */
static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_ops *spi_ops;
	int ret = 0;

	dev_dbg(&spi->dev, "%s: event type %d\n",
		__func__, event);

	switch (event) {
	case WDSP_EVENT_POST_SHUTDOWN:
		/* Force the clock off and drop all outstanding votes */
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		/* Hold a clock vote across the whole download */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk_req failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:

		/* Release the download-time clock vote */
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);
		break;

	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);
		break;

	case WDSP_EVENT_SUSPEND:
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
			ret = -EBUSY;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);
		break;

	case WDSP_EVENT_GET_DEVOPS:
		/* Hand our read/write ops back to the WDSP manager */
		if (!data) {
			dev_err(&spi->dev, "%s: invalid data\n",
				__func__);
			ret = -EINVAL;
			break;
		}

		spi_ops = (struct wcd_spi_ops *) data;
		spi_ops->spi_dev = spi;
		spi_ops->read_dev = wcd_spi_data_read;
		spi_ops->write_dev = wcd_spi_data_write;
		break;

	default:
		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
			__func__, event);
		break;
	}

	return ret;
}
1020
/*
 * regmap_bus gather_write callback: build an IRW (internal register
 * write) command of opcode + register + value and send it.
 *
 * NOTE(review): only one register byte is copied (tx_buf[1]), while
 * the value is placed at offset OPCODE_LEN + reg_len — this assumes
 * reg_bytes == 1; confirm against the regmap config at probe.
 */
static int wcd_spi_bus_gwrite(void *context, const void *reg,
			      size_t reg_len, const void *val,
			      size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	u8 *tx_buf = wcd_spi->tx_buf;

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	memset(tx_buf, 0, WCD_SPI_CMD_IRW_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRW;
	tx_buf[1] = *((u8 *)reg);
	memcpy(tx_buf + WCD_SPI_OPCODE_LEN + reg_len,
	       val, val_len);

	return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
}
1046
1047static int wcd_spi_bus_write(void *context, const void *data,
1048 size_t count)
1049{
1050 struct device *dev = context;
1051 struct spi_device *spi = to_spi_device(dev);
1052 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1053
1054 if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
1055 dev_err(&spi->dev, "%s: Invalid size %zd\n",
1056 __func__, count);
1057 WARN_ON(1);
1058 return -EINVAL;
1059 }
1060
1061 return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
1062 data + wcd_spi->reg_bytes,
1063 count - wcd_spi->reg_bytes);
1064}
1065
/*
 * regmap_bus read callback: issue an IRR (internal register read)
 * command, then clock in val_len response bytes in a second transfer.
 *
 * Returns 0 on success with *val filled in, negative error otherwise.
 */
static int wcd_spi_bus_read(void *context, const void *reg,
			    size_t reg_len, void *val,
			    size_t val_len)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret = 0;

	if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
	    val_len != wcd_spi->val_bytes) {
		dev_err(&spi->dev,
			"%s: Invalid input, reg_len = %zd, val_len = %zd",
			__func__, reg_len, val_len);
		return -EINVAL;
	}

	/* Command transfer: IRR opcode + register, zero-padded to length */
	memset(tx_buf, 0, WCD_SPI_CMD_IRR_LEN);
	tx_buf[0] = WCD_SPI_CMD_IRR;
	tx_buf[1] = *((u8 *)reg);

	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->rx_buf = NULL;
	tx_xfer->len = WCD_SPI_CMD_IRR_LEN;

	/* Response transfer: val_len bytes clocked in */
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->tx_buf = NULL;
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = val_len;

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret) {
		dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
			__func__, ret);
		goto done;
	}

	memcpy(val, rx_buf, val_len);

done:
	return ret;
}
1113
/*
 * Regmap bus glue: register accesses go over SPI via the IRR/IRW
 * command helpers above. Register addresses are native-endian;
 * values are big-endian on the wire.
 */
static struct regmap_bus wcd_spi_regmap_bus = {
	.write = wcd_spi_bus_write,
	.gather_write = wcd_spi_bus_gwrite,
	.read = wcd_spi_bus_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
1121
1122static int wcd_spi_state_show(struct seq_file *f, void *ptr)
1123{
1124 struct spi_device *spi = f->private;
1125 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1126 const char *clk_state, *clk_mutex, *xfer_mutex;
1127
1128 if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
1129 clk_state = "enabled";
1130 else
1131 clk_state = "disabled";
1132
1133 clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
1134 "locked" : "unlocked";
1135
1136 xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
1137 "locked" : "unlocked";
1138
1139 seq_printf(f, "clk_state = %s\nclk_users = %d\n"
1140 "clk_mutex = %s\nxfer_mutex = %s\n",
1141 clk_state, wcd_spi->clk_users, clk_mutex,
1142 xfer_mutex);
1143 return 0;
1144}
1145
/* Debugfs open hook: wire "state" file reads to wcd_spi_state_show() */
static int wcd_spi_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, wcd_spi_state_show, inode->i_private);
}
1150
/* seq_file-based file operations for the debugfs "state" entry */
static const struct file_operations state_fops = {
	.open = wcd_spi_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1157
1158static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
1159 size_t count, loff_t *ppos)
1160{
1161 struct spi_device *spi = file->private_data;
1162 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1163 struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
1164 struct wcd_spi_msg msg;
1165 ssize_t buf_size, read_count = 0;
1166 char *buf;
1167 int ret;
1168
1169 if (*ppos < 0 || !count)
1170 return -EINVAL;
1171
1172 if (dbg_data->size == 0 || dbg_data->addr == 0) {
1173 dev_err(&spi->dev,
1174 "%s: Invalid request, size = %u, addr = 0x%x\n",
1175 __func__, dbg_data->size, dbg_data->addr);
1176 return 0;
1177 }
1178
1179 buf_size = count < dbg_data->size ? count : dbg_data->size;
1180 buf = kzalloc(buf_size, GFP_KERNEL);
1181 if (!buf)
1182 return -ENOMEM;
1183
1184 msg.data = buf;
1185 msg.remote_addr = dbg_data->addr;
1186 msg.len = buf_size;
1187 msg.flags = 0;
1188
1189 ret = wcd_spi_data_read(spi, &msg);
1190 if (ret < 0) {
1191 dev_err(&spi->dev,
1192 "%s: Failed to read %zu bytes from addr 0x%x\n",
1193 __func__, buf_size, msg.remote_addr);
1194 goto done;
1195 }
1196
1197 read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);
1198
1199done:
1200 kfree(buf);
1201 if (ret < 0)
1202 return ret;
1203 else
1204 return read_count;
1205}
1206
/* File operations for the debugfs "mem_read" entry */
static const struct file_operations mem_read_fops = {
	.open = simple_open,
	.read = wcd_spi_debugfs_mem_read,
};
1211
1212static int wcd_spi_debugfs_init(struct spi_device *spi)
1213{
1214 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1215 struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
1216 int rc = 0;
1217
1218 dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
1219 if (IS_ERR_OR_NULL(dbg_data->dir)) {
1220 dbg_data->dir = NULL;
1221 rc = -ENODEV;
1222 goto done;
1223 }
1224
1225 debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
1226 debugfs_create_u32("addr", 0644, dbg_data->dir,
1227 &dbg_data->addr);
1228 debugfs_create_u32("size", 0644, dbg_data->dir,
1229 &dbg_data->size);
1230
1231 debugfs_create_file("mem_read", 0444, dbg_data->dir,
1232 spi, &mem_read_fops);
1233done:
1234 return rc;
1235}
1236
1237
/* Hardware default values for the slave registers, used to seed the
 * regcache so un-read registers have sane cached contents.
 */
static const struct reg_default wcd_spi_defaults[] = {
	{WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
	{WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
	{WCD_SPI_SLAVE_STATUS, 0x80100000},
	{WCD_SPI_SLAVE_CONFIG, 0x0F200808},
	{WCD_SPI_SLAVE_SW_RESET, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
	{WCD_SPI_SLAVE_TX, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
	{WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
	{WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
	{WCD_SPI_SLAVE_GENERICS, 0x80000000},
	{WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
};
1262
1263static bool wcd_spi_is_volatile_reg(struct device *dev,
1264 unsigned int reg)
1265{
1266 switch (reg) {
1267 case WCD_SPI_SLAVE_SANITY:
1268 case WCD_SPI_SLAVE_STATUS:
1269 case WCD_SPI_SLAVE_IRQ_STATUS:
1270 case WCD_SPI_SLAVE_TX:
1271 case WCD_SPI_SLAVE_SW_RST_IRQ:
1272 case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
1273 case WCD_SPI_SLAVE_FIFO_LEVEL:
1274 case WCD_SPI_SLAVE_GENERICS:
1275 return true;
1276 }
1277
1278 return false;
1279}
1280
1281static bool wcd_spi_is_readable_reg(struct device *dev,
1282 unsigned int reg)
1283{
1284 switch (reg) {
1285 case WCD_SPI_SLAVE_SW_RESET:
1286 case WCD_SPI_SLAVE_IRQ_CLR:
1287 case WCD_SPI_SLAVE_IRQ_FORCE:
1288 return false;
1289 }
1290
1291 return true;
1292}
1293
/* 8-bit register addresses with 32-bit values, cached in an rbtree */
static struct regmap_config wcd_spi_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 32,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wcd_spi_defaults,
	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
	.max_register = WCD_SPI_MAX_REGISTER,
	.volatile_reg = wcd_spi_is_volatile_reg,
	.readable_reg = wcd_spi_is_readable_reg,
};
1304
1305static int wdsp_spi_init(struct device *dev, void *priv_data)
1306{
1307 struct spi_device *spi = to_spi_device(dev);
1308 int ret;
1309
1310 ret = wcd_spi_init(spi);
1311 if (ret < 0)
1312 dev_err(&spi->dev, "%s: Init failed, err = %d\n",
1313 __func__, ret);
1314 return ret;
1315}
1316
1317static int wdsp_spi_deinit(struct device *dev, void *priv_data)
1318{
1319 struct spi_device *spi = to_spi_device(dev);
1320 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1321
1322 /*
1323 * Deinit means the hardware is reset. Mark the cache
1324 * as dirty here, so init will sync the cache
1325 */
1326 regcache_mark_dirty(wcd_spi->regmap);
1327
1328 return 0;
1329}
1330
/* Component callbacks registered with the WDSP manager */
static struct wdsp_cmpnt_ops wdsp_spi_ops = {
	.init = wdsp_spi_init,
	.deinit = wdsp_spi_deinit,
	.event_handler = wdsp_spi_event_handler,
};
1336
1337static int wcd_spi_component_bind(struct device *dev,
1338 struct device *master,
1339 void *data)
1340{
1341 struct spi_device *spi = to_spi_device(dev);
1342 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1343 int ret = 0;
1344
1345 wcd_spi->m_dev = master;
1346 wcd_spi->m_ops = data;
1347
1348 if (wcd_spi->m_ops &&
1349 wcd_spi->m_ops->register_cmpnt_ops)
1350 ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
1351 wcd_spi,
1352 &wdsp_spi_ops);
1353 if (ret) {
1354 dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
1355 __func__, ret);
1356 goto done;
1357 }
1358
1359 wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
1360 wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
1361
1362 wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
1363 &spi->dev, &wcd_spi_regmap_cfg);
1364 if (IS_ERR(wcd_spi->regmap)) {
1365 ret = PTR_ERR(wcd_spi->regmap);
1366 dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
1367 __func__, ret);
1368 goto done;
1369 }
1370
1371 if (wcd_spi_debugfs_init(spi))
1372 dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
1373
1374 spi_message_init(&wcd_spi->msg1);
1375 spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
1376
1377 spi_message_init(&wcd_spi->msg2);
1378 spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
1379 spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
1380
1381 /* Pre-allocate the buffers */
1382 wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
1383 GFP_KERNEL | GFP_DMA);
1384 if (!wcd_spi->tx_buf) {
1385 ret = -ENOMEM;
1386 goto done;
1387 }
1388
1389 wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
1390 GFP_KERNEL | GFP_DMA);
1391 if (!wcd_spi->rx_buf) {
1392 kfree(wcd_spi->tx_buf);
1393 wcd_spi->tx_buf = NULL;
1394 ret = -ENOMEM;
1395 goto done;
1396 }
1397done:
1398 return ret;
1399}
1400
1401static void wcd_spi_component_unbind(struct device *dev,
1402 struct device *master,
1403 void *data)
1404{
1405 struct spi_device *spi = to_spi_device(dev);
1406 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
Laxminath Kasam8f7ccc22017-08-28 17:35:04 +05301407 struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
1408
1409 debugfs_remove_recursive(dbg_data->dir);
1410 dbg_data->dir = NULL;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301411
1412 wcd_spi->m_dev = NULL;
1413 wcd_spi->m_ops = NULL;
1414
1415 spi_transfer_del(&wcd_spi->xfer1);
1416 spi_transfer_del(&wcd_spi->xfer2[0]);
1417 spi_transfer_del(&wcd_spi->xfer2[1]);
1418
1419 kfree(wcd_spi->tx_buf);
1420 kfree(wcd_spi->rx_buf);
1421 wcd_spi->tx_buf = NULL;
1422 wcd_spi->rx_buf = NULL;
1423}
1424
/* Component framework bind/unbind hooks for this SPI device */
static const struct component_ops wcd_spi_component_ops = {
	.bind = wcd_spi_component_bind,
	.unbind = wcd_spi_component_unbind,
};
1429
1430static int wcd_spi_probe(struct spi_device *spi)
1431{
1432 struct wcd_spi_priv *wcd_spi;
1433 int ret = 0;
1434
1435 wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
1436 GFP_KERNEL);
1437 if (!wcd_spi)
1438 return -ENOMEM;
1439
1440 ret = of_property_read_u32(spi->dev.of_node,
1441 "qcom,mem-base-addr",
1442 &wcd_spi->mem_base_addr);
1443 if (ret < 0) {
1444 dev_err(&spi->dev, "%s: Missing %s DT entry",
1445 __func__, "qcom,mem-base-addr");
1446 goto err_ret;
1447 }
1448
1449 dev_dbg(&spi->dev,
1450 "%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);
1451
1452 mutex_init(&wcd_spi->clk_mutex);
1453 mutex_init(&wcd_spi->xfer_mutex);
1454 INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
1455 init_completion(&wcd_spi->resume_comp);
1456
1457 wcd_spi->spi = spi;
1458 spi_set_drvdata(spi, wcd_spi);
1459
1460 ret = component_add(&spi->dev, &wcd_spi_component_ops);
1461 if (ret) {
1462 dev_err(&spi->dev, "%s: component_add failed err = %d\n",
1463 __func__, ret);
1464 goto err_component_add;
1465 }
1466
1467 return ret;
1468
1469err_component_add:
1470 mutex_destroy(&wcd_spi->clk_mutex);
1471 mutex_destroy(&wcd_spi->xfer_mutex);
1472err_ret:
1473 devm_kfree(&spi->dev, wcd_spi);
1474 spi_set_drvdata(spi, NULL);
1475 return ret;
1476}
1477
1478static int wcd_spi_remove(struct spi_device *spi)
1479{
1480 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1481
1482 component_del(&spi->dev, &wcd_spi_component_ops);
1483
1484 mutex_destroy(&wcd_spi->clk_mutex);
1485 mutex_destroy(&wcd_spi->xfer_mutex);
1486
1487 devm_kfree(&spi->dev, wcd_spi);
1488 spi_set_drvdata(spi, NULL);
1489
1490 return 0;
1491}
1492
1493#ifdef CONFIG_PM
/*
 * System suspend callback: refuse suspend while the SPI clock is busy,
 * otherwise notify the WDSP master so all sibling components can
 * suspend too, and mark this device suspended on success.
 */
static int wcd_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {
		rc = -EBUSY;
		goto done;
	}

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But, still need to notify
	 * the master to make sure all other components can suspend
	 * as well.
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		/*
		 * clk_mutex is dropped around the master callback;
		 * NOTE(review): presumably to avoid a lock-order issue
		 * with the master's own suspend path -- confirm that a
		 * new clock user cannot race in while unlocked.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	}

	if (rc == 0)
		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	else
		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
			__func__, rc);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
1528
/*
 * System resume callback: clear the suspended flag and signal the
 * resume completion so any waiter blocked on resume can proceed.
 */
static int wcd_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	complete(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return 0;
}
1541
/* System PM hooks (compiled only under CONFIG_PM) */
static const struct dev_pm_ops wcd_spi_pm_ops = {
	.suspend = wcd_spi_suspend,
	.resume = wcd_spi_resume,
};
1546#endif
1547
/* Device-tree match table for this driver */
static const struct of_device_id wcd_spi_of_match[] = {
	{ .compatible = "qcom,wcd-spi-v2", },
	{ }
};
MODULE_DEVICE_TABLE(of, wcd_spi_of_match);
1553
/* SPI driver binding for the "qcom,wcd-spi-v2" device */
static struct spi_driver wcd_spi_driver = {
	.driver = {
		.name = "wcd-spi-v2",
		.of_match_table = wcd_spi_of_match,
#ifdef CONFIG_PM
		.pm = &wcd_spi_pm_ops,
#endif
	},
	.probe = wcd_spi_probe,
	.remove = wcd_spi_remove,
};
1565
/* Register the driver with the SPI core at module init/exit */
module_spi_driver(wcd_spi_driver);

MODULE_DESCRIPTION("WCD SPI driver");
MODULE_LICENSE("GPL v2");