blob: 4c7a85338ed48e1ed42154fc8af264f891f541fd [file] [log] [blame]
Meng Wang43bbb872018-12-10 12:32:05 +08001// SPDX-License-Identifier: GPL-2.0-only
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05302/*
Xiaoyu Ye89cc8892018-05-29 17:03:55 -07003 * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05304 */
5
6#include <linux/init.h>
7#include <linux/module.h>
8#include <linux/of.h>
9#include <linux/debugfs.h>
10#include <linux/delay.h>
Xiaoyu Ye89cc8892018-05-29 17:03:55 -070011#include <linux/dma-mapping.h>
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053012#include <linux/bitops.h>
13#include <linux/spi/spi.h>
14#include <linux/regmap.h>
15#include <linux/component.h>
16#include <linux/ratelimit.h>
Bhalchandra Gajarec5f43a12018-03-19 14:35:42 -070017#include <linux/platform_device.h>
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053018#include <sound/wcd-dsp-mgr.h>
19#include <sound/wcd-spi.h>
Bhalchandra Gajarec5f43a12018-03-19 14:35:42 -070020#include <soc/wcd-spi-ac.h>
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +053021#include "wcd-spi-registers.h"
22
23/* Byte manipulations */
24#define SHIFT_1_BYTES (8)
25#define SHIFT_2_BYTES (16)
26#define SHIFT_3_BYTES (24)
27
28/* Command opcodes */
29#define WCD_SPI_CMD_NOP (0x00)
30#define WCD_SPI_CMD_WREN (0x06)
31#define WCD_SPI_CMD_CLKREQ (0xDA)
32#define WCD_SPI_CMD_RDSR (0x05)
33#define WCD_SPI_CMD_IRR (0x81)
34#define WCD_SPI_CMD_IRW (0x82)
35#define WCD_SPI_CMD_MIOR (0x83)
36#define WCD_SPI_CMD_FREAD (0x0B)
37#define WCD_SPI_CMD_MIOW (0x02)
38#define WCD_SPI_WRITE_FRAME_OPCODE \
39 (WCD_SPI_CMD_MIOW << SHIFT_3_BYTES)
40#define WCD_SPI_READ_FRAME_OPCODE \
41 (WCD_SPI_CMD_MIOR << SHIFT_3_BYTES)
42#define WCD_SPI_FREAD_FRAME_OPCODE \
43 (WCD_SPI_CMD_FREAD << SHIFT_3_BYTES)
44
45/* Command lengths */
46#define WCD_SPI_OPCODE_LEN (0x01)
47#define WCD_SPI_CMD_NOP_LEN (0x01)
48#define WCD_SPI_CMD_WREN_LEN (0x01)
49#define WCD_SPI_CMD_CLKREQ_LEN (0x04)
50#define WCD_SPI_CMD_IRR_LEN (0x04)
51#define WCD_SPI_CMD_IRW_LEN (0x06)
52#define WCD_SPI_WRITE_SINGLE_LEN (0x08)
53#define WCD_SPI_READ_SINGLE_LEN (0x13)
54#define WCD_SPI_CMD_FREAD_LEN (0x13)
55
56/* Command delays */
57#define WCD_SPI_CLKREQ_DELAY_USECS (500)
58#define WCD_SPI_CLK_OFF_TIMER_MS (500)
59#define WCD_SPI_RESUME_TIMEOUT_MS 100
60
61/* Command masks */
62#define WCD_CMD_ADDR_MASK \
63 (0xFF | \
64 (0xFF << SHIFT_1_BYTES) | \
65 (0xFF << SHIFT_2_BYTES))
66
67/* Clock ctrl request related */
68#define WCD_SPI_CLK_ENABLE true
69#define WCD_SPI_CLK_DISABLE false
70#define WCD_SPI_CLK_FLAG_DELAYED (1 << 0)
71#define WCD_SPI_CLK_FLAG_IMMEDIATE (1 << 1)
72
73/* Internal addresses */
74#define WCD_SPI_ADDR_IPC_CTL_HOST (0x012014)
75
76/* Word sizes and min/max lengths */
77#define WCD_SPI_WORD_BYTE_CNT (4)
78#define WCD_SPI_RW_MULTI_MIN_LEN (16)
79
80/* Max size is 32 bytes less than 64Kbytes */
81#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)
82
83/*
84 * Max size for the pre-allocated buffers is the max
85 * possible read/write length + 32 bytes for the SPI
86 * read/write command header itself.
87 */
88#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)
89
90/* Alignment requirements */
91#define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
92#define WCD_SPI_RW_MULTI_ALIGN (16)
93
94/* Status mask bits */
95#define WCD_SPI_CLK_STATE_ENABLED BIT(0)
96#define WCD_SPI_IS_SUSPENDED BIT(1)
97
98/* Locking related */
/*
 * Mutex helpers that emit a verbose-debug trace before taking or
 * releasing the lock.  Wrapped in do { } while (0) so each macro
 * expands as exactly one statement and stays safe in an unbraced
 * if/else arm (bare { } blocks break there).
 */
#define WCD_SPI_MUTEX_LOCK(spi, lock)                          \
do {                                                           \
	dev_vdbg(&spi->dev, "%s: mutex_lock(%s)\n",            \
		 __func__, __stringify_1(lock));               \
	mutex_lock(&lock);                                     \
} while (0)

#define WCD_SPI_MUTEX_UNLOCK(spi, lock)                        \
do {                                                           \
	dev_vdbg(&spi->dev, "%s: mutex_unlock(%s)\n",          \
		 __func__, __stringify_1(lock));               \
	mutex_unlock(&lock);                                   \
} while (0)
112
/* State backing the driver's debugfs interface. */
struct wcd_spi_debug_data {
	struct dentry *dir;	/* debugfs directory for this device */
	u32 addr;		/* presumably remote address targeted by debugfs ops — handlers not in this chunk */
	u32 size;		/* presumably transfer size for debugfs ops — verify against handlers */
};
118
/*
 * Per-device driver state, stored as spi drvdata.  tx_buf/rx_buf are
 * pre-allocated scratch buffers reused for every transfer (serialized
 * by xfer_mutex); clk_users is a vote count managed under clk_mutex.
 */
struct wcd_spi_priv {
	struct spi_device *spi;
	u32 mem_base_addr;	/* added to section addresses in dload/read_section */

	struct regmap *regmap;

	/* Message for single transfer */
	struct spi_message msg1;
	struct spi_transfer xfer1;

	/* Message for two transfers */
	struct spi_message msg2;
	struct spi_transfer xfer2[2];

	/* Register access related */
	u32 reg_bytes;
	u32 val_bytes;

	/* Clock requests related */
	struct mutex clk_mutex;
	int clk_users;		/* enable votes; reset to 0 on imbalance */
	unsigned long status_mask;	/* WCD_SPI_CLK_STATE_ENABLED / WCD_SPI_IS_SUSPENDED bits */
	struct delayed_work clk_dwork;	/* deferred clock-off (wcd_spi_clk_work) */

	/* Transaction related */
	struct mutex xfer_mutex;

	struct device *m_dev;
	struct wdsp_mgr_ops *m_ops;

	/* Debugfs related information */
	struct wcd_spi_debug_data debug_data;

	/* Completion object to indicate system resume completion */
	struct completion resume_comp;

	/* Buffers to hold memory used for transfers */
	void *tx_buf;
	void *rx_buf;

	/* DMA handles for transfer buffers */
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
	/* Handle to child (qmi client) device */
	struct device *ac_dev;
};
165
/* Direction of a requested data transfer. */
enum xfer_request {
	WCD_SPI_XFER_WRITE,
	WCD_SPI_XFER_READ,
};
170
171
172static char *wcd_spi_xfer_req_str(enum xfer_request req)
173{
174 if (req == WCD_SPI_XFER_WRITE)
175 return "xfer_write";
176 else if (req == WCD_SPI_XFER_READ)
177 return "xfer_read";
178 else
179 return "xfer_invalid";
180}
181
182static void wcd_spi_reinit_xfer(struct spi_transfer *xfer)
183{
184 xfer->tx_buf = NULL;
185 xfer->rx_buf = NULL;
186 xfer->delay_usecs = 0;
187 xfer->len = 0;
188}
189
190static bool wcd_spi_is_suspended(struct wcd_spi_priv *wcd_spi)
191{
192 return test_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
193}
194
195static bool wcd_spi_can_suspend(struct wcd_spi_priv *wcd_spi)
196{
197 struct spi_device *spi = wcd_spi->spi;
198
199 if (wcd_spi->clk_users > 0 ||
200 test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask)) {
201 dev_err(&spi->dev, "%s: cannot suspend, clk_users = %d\n",
202 __func__, wcd_spi->clk_users);
203 return false;
204 }
205
206 return true;
207}
208
/*
 * Block until the bus has resumed or WCD_SPI_RESUME_TIMEOUT_MS expires.
 * Returns 0 when resumed, -EIO on timeout.  clk_mutex is dropped while
 * waiting so the (out-of-chunk) resume path can make progress;
 * resume_comp is presumably completed by that path — verify in the
 * suspend/resume handlers.
 */
static int wcd_spi_wait_for_resume(struct wcd_spi_priv *wcd_spi)
{
	struct spi_device *spi = wcd_spi->spi;
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	/* If the system is already in resumed state, return right away */
	if (!wcd_spi_is_suspended(wcd_spi))
		goto done;

	/* If suspended then wait for resume to happen */
	reinit_completion(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	/* wait_for_completion_timeout() returns 0 on timeout */
	rc = wait_for_completion_timeout(&wcd_spi->resume_comp,
		msecs_to_jiffies(WCD_SPI_RESUME_TIMEOUT_MS));
	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (rc == 0) {
		dev_err(&spi->dev, "%s: failed to resume in %u msec\n",
			__func__, WCD_SPI_RESUME_TIMEOUT_MS);
		rc = -EIO;
		goto done;
	}

	dev_dbg(&spi->dev, "%s: resume successful\n", __func__);
	rc = 0;
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
238
239static int wcd_spi_read_single(struct spi_device *spi,
240 u32 remote_addr, u32 *val)
241{
242 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
243 struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
244 struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
245 u8 *tx_buf = wcd_spi->tx_buf;
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700246 u8 *rx_buf = wcd_spi->rx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530247 u32 frame = 0;
248 int ret;
249
250 dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
251 __func__, remote_addr);
252
253 if (!tx_buf) {
254 dev_err(&spi->dev, "%s: tx_buf not allocated\n",
255 __func__);
256 return -ENOMEM;
257 }
258
259 frame |= WCD_SPI_READ_FRAME_OPCODE;
260 frame |= remote_addr & WCD_CMD_ADDR_MASK;
261
262 wcd_spi_reinit_xfer(tx_xfer);
263 frame = cpu_to_be32(frame);
264 memcpy(tx_buf, &frame, sizeof(frame));
265 tx_xfer->tx_buf = tx_buf;
266 tx_xfer->len = WCD_SPI_READ_SINGLE_LEN;
267
268 wcd_spi_reinit_xfer(rx_xfer);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700269 rx_xfer->rx_buf = rx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530270 rx_xfer->len = sizeof(*val);
271
272 ret = spi_sync(spi, &wcd_spi->msg2);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700273 if (ret)
274 dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
275 __func__, ret);
276 else
277 memcpy((u8*) val, rx_buf, sizeof(*val));
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530278
279 return ret;
280}
281
282static int wcd_spi_read_multi(struct spi_device *spi,
283 u32 remote_addr, u8 *data,
284 size_t len)
285{
286 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
287 struct spi_transfer *xfer = &wcd_spi->xfer1;
288 u8 *tx_buf = wcd_spi->tx_buf;
289 u8 *rx_buf = wcd_spi->rx_buf;
290 u32 frame = 0;
291 int ret;
292
293 dev_dbg(&spi->dev, "%s: addr 0x%x, len = %zd\n",
294 __func__, remote_addr, len);
295
296 frame |= WCD_SPI_FREAD_FRAME_OPCODE;
297 frame |= remote_addr & WCD_CMD_ADDR_MASK;
298
299 if (!tx_buf || !rx_buf) {
300 dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
301 (!tx_buf) ? "tx_buf" : "rx_buf");
302 return -ENOMEM;
303 }
304
305 wcd_spi_reinit_xfer(xfer);
306 frame = cpu_to_be32(frame);
307 memcpy(tx_buf, &frame, sizeof(frame));
308 xfer->tx_buf = tx_buf;
309 xfer->rx_buf = rx_buf;
310 xfer->len = WCD_SPI_CMD_FREAD_LEN + len;
311
312 ret = spi_sync(spi, &wcd_spi->msg1);
313 if (ret) {
314 dev_err(&spi->dev, "%s: failed, err = %d\n",
315 __func__, ret);
316 goto done;
317 }
318
319 memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
320done:
321 return ret;
322}
323
324static int wcd_spi_write_single(struct spi_device *spi,
325 u32 remote_addr, u32 val)
326{
327 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
328 struct spi_transfer *xfer = &wcd_spi->xfer1;
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700329 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530330 u32 frame = 0;
331
332 dev_dbg(&spi->dev, "%s: remote_addr = 0x%x, val = 0x%x\n",
333 __func__, remote_addr, val);
334
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700335 memset(tx_buf, 0, WCD_SPI_WRITE_SINGLE_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530336 frame |= WCD_SPI_WRITE_FRAME_OPCODE;
337 frame |= (remote_addr & WCD_CMD_ADDR_MASK);
338
339 frame = cpu_to_be32(frame);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700340 memcpy(tx_buf, &frame, sizeof(frame));
341 memcpy(tx_buf + sizeof(frame), &val, sizeof(val));
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530342
343 wcd_spi_reinit_xfer(xfer);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700344 xfer->tx_buf = tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530345 xfer->len = WCD_SPI_WRITE_SINGLE_LEN;
346
347 return spi_sync(spi, &wcd_spi->msg1);
348}
349
350static int wcd_spi_write_multi(struct spi_device *spi,
351 u32 remote_addr, u8 *data,
352 size_t len)
353{
354 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
355 struct spi_transfer *xfer = &wcd_spi->xfer1;
356 u32 frame = 0;
357 u8 *tx_buf = wcd_spi->tx_buf;
358 int xfer_len, ret;
359
360 dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
361 __func__, remote_addr, len);
362
363 frame |= WCD_SPI_WRITE_FRAME_OPCODE;
364 frame |= (remote_addr & WCD_CMD_ADDR_MASK);
365
366 frame = cpu_to_be32(frame);
367 xfer_len = len + sizeof(frame);
368
369 if (!tx_buf) {
370 dev_err(&spi->dev, "%s: tx_buf not allocated\n",
371 __func__);
372 return -ENOMEM;
373 }
374
375 memcpy(tx_buf, &frame, sizeof(frame));
376 memcpy(tx_buf + sizeof(frame), data, len);
377
378 wcd_spi_reinit_xfer(xfer);
379 xfer->tx_buf = tx_buf;
380 xfer->len = xfer_len;
381
382 ret = spi_sync(spi, &wcd_spi->msg1);
383 if (ret < 0)
384 dev_err(&spi->dev,
385 "%s: Failed, addr = 0x%x, len = %zd\n",
386 __func__, remote_addr, len);
387 return ret;
388}
389
/*
 * Split one read/write request into bus-legal transfers:
 *   1. single-word xfers until @addr reaches WCD_SPI_RW_MULTI_ALIGN,
 *   2. full WCD_SPI_RW_MULTI_MAX_LEN multi xfers,
 *   3. one multi xfer of the largest remaining multiple of
 *      WCD_SPI_RW_MULTI_MIN_LEN,
 *   4. single-word xfers for any tail.
 * Stops and returns the error of the first failing transfer; 0 on
 * success.  Caller (__wcd_spi_data_xfer) has already verified len is a
 * multiple of WCD_SPI_WORD_BYTE_CNT and addr is word-aligned.
 */
static int wcd_spi_transfer_split(struct spi_device *spi,
				  struct wcd_spi_msg *data_msg,
				  enum xfer_request xfer_req)
{
	u32 addr = data_msg->remote_addr;
	u8 *data = data_msg->data;
	int remain_size = data_msg->len;
	int to_xfer, loop_cnt, ret = 0;

	/* Perform single writes until multi word alignment is met */
	loop_cnt = 1;
	while (remain_size &&
	       !IS_ALIGNED(addr, WCD_SPI_RW_MULTI_ALIGN)) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr,
						   (*(u32 *)data));
		else
			ret = wcd_spi_read_single(spi, addr,
						  (u32 *)data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) start-word addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

	/* Perform multi writes for max allowed multi writes */
	loop_cnt = 1;
	while (remain_size >= WCD_SPI_RW_MULTI_MAX_LEN) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data,
						  WCD_SPI_RW_MULTI_MAX_LEN);
		else
			ret = wcd_spi_read_multi(spi, addr, data,
						 WCD_SPI_RW_MULTI_MAX_LEN);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) max-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_RW_MULTI_MAX_LEN;
		data += WCD_SPI_RW_MULTI_MAX_LEN;
		remain_size -= WCD_SPI_RW_MULTI_MAX_LEN;
		loop_cnt++;
	}

	/*
	 * Perform write for max possible data that is multiple
	 * of the minimum size for multi-write commands.
	 */
	to_xfer = remain_size - (remain_size % WCD_SPI_RW_MULTI_MIN_LEN);
	if (remain_size >= WCD_SPI_RW_MULTI_MIN_LEN &&
	    to_xfer > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_multi(spi, addr, data, to_xfer);
		else
			ret = wcd_spi_read_multi(spi, addr, data, to_xfer);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail write addr (0x%x), size (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				addr, to_xfer);
			goto done;
		}

		addr += to_xfer;
		data += to_xfer;
		remain_size -= to_xfer;
	}

	/* Perform single writes for the last remaining data */
	loop_cnt = 1;
	while (remain_size > 0) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, addr, (*((u32 *)data)));
		else
			ret = wcd_spi_read_single(spi, addr, (u32 *) data);
		if (ret < 0) {
			dev_err(&spi->dev,
				"%s: %s fail iter(%d) end-write addr (0x%x)\n",
				__func__, wcd_spi_xfer_req_str(xfer_req),
				loop_cnt, addr);
			goto done;
		}

		addr += WCD_SPI_WORD_BYTE_CNT;
		data += WCD_SPI_WORD_BYTE_CNT;
		remain_size -= WCD_SPI_WORD_BYTE_CNT;
		loop_cnt++;
	}

done:
	return ret;
}
494
495static int wcd_spi_cmd_nop(struct spi_device *spi)
496{
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700497 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
498 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530499
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700500 tx_buf[0] = WCD_SPI_CMD_NOP;
501
502 return spi_write(spi, tx_buf, WCD_SPI_CMD_NOP_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530503}
504
505static int wcd_spi_cmd_clkreq(struct spi_device *spi)
506{
507 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
508 struct spi_transfer *xfer = &wcd_spi->xfer1;
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700509 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530510 u8 cmd[WCD_SPI_CMD_CLKREQ_LEN] = {
511 WCD_SPI_CMD_CLKREQ,
512 0xBA, 0x80, 0x00};
513
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700514 memcpy(tx_buf, cmd, WCD_SPI_CMD_CLKREQ_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530515 wcd_spi_reinit_xfer(xfer);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700516 xfer->tx_buf = tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530517 xfer->len = WCD_SPI_CMD_CLKREQ_LEN;
518 xfer->delay_usecs = WCD_SPI_CLKREQ_DELAY_USECS;
519
520 return spi_sync(spi, &wcd_spi->msg1);
521}
522
523static int wcd_spi_cmd_wr_en(struct spi_device *spi)
524{
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700525 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
526 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530527
Xiaoyu Ye19a5e412018-04-16 18:44:38 -0700528 tx_buf[0] = WCD_SPI_CMD_WREN;
529
530 return spi_write(spi, tx_buf, WCD_SPI_CMD_WREN_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +0530531}
532
/*
 * Issue the RDSR (read status register) command and return the 32-bit
 * status via @rdsr_status, byte-swapped from bus (big-endian) order.
 * Returns 0 on success or the spi_sync() error; @rdsr_status is only
 * valid on success.
 */
static int wcd_spi_cmd_rdsr(struct spi_device *spi,
			    u32 *rdsr_status)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
	u8 *tx_buf = wcd_spi->tx_buf;
	u8 *rx_buf = wcd_spi->rx_buf;
	int ret;

	/* First transfer clocks out the one-byte RDSR opcode */
	tx_buf[0] = WCD_SPI_CMD_RDSR;
	wcd_spi_reinit_xfer(tx_xfer);
	tx_xfer->tx_buf = tx_buf;
	tx_xfer->len = WCD_SPI_OPCODE_LEN;

	/* Second transfer clocks the 4-byte status into rx_buf */
	memset(rx_buf, 0, sizeof(*rdsr_status));
	wcd_spi_reinit_xfer(rx_xfer);
	rx_xfer->rx_buf = rx_buf;
	rx_xfer->len = sizeof(*rdsr_status);

	ret = spi_sync(spi, &wcd_spi->msg2);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: RDSR failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* rx_buf is the DMA-safe driver buffer, assumed 4-byte aligned */
	*rdsr_status = be32_to_cpu(*((u32*)rx_buf));

	dev_dbg(&spi->dev, "%s: RDSR success, value = 0x%x\n",
		 __func__, *rdsr_status);
done:
	return ret;
}
567
/*
 * Enable the SPI clock: acquire SPI access from the access-control
 * child device (if present), then issue NOP / CLKREQ / NOP, and verify
 * via RDSR that the slave is actually responding.  Sets
 * WCD_SPI_CLK_STATE_ENABLED on success; returns 0 or a negative error.
 *
 * NOTE(review): on the error paths below (goto done with ret < 0) the
 * access grant taken at the top is NOT released here — release only
 * happens in wcd_spi_clk_disable().  Confirm this imbalance is
 * intended for failed enables.
 */
static int wcd_spi_clk_enable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;
	u32 rd_status = 0;

	/* Get the SPI access first */
	if (wcd_spi->ac_dev) {
		ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
					 WCD_SPI_ACCESS_REQUEST,
					 WCD_SPI_AC_DATA_TRANSFER);
		if (ret) {
			dev_err(&spi->dev,
				"%s: Can't get spi access, err = %d\n",
				__func__, ret);
			return ret;
		}
	}

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP1 failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_clkreq(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: CLK_REQ failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	ret = wcd_spi_cmd_nop(spi);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: NOP2 failed, err = %d\n",
			__func__, ret);
		goto done;
	}
	/* RDSR result is the real success signal; its own rc is ignored */
	wcd_spi_cmd_rdsr(spi, &rd_status);
	/*
	 * Read status zero means reads are not
	 * happenning on the bus, possibly because
	 * clock request failed.
	 */
	if (rd_status) {
		set_bit(WCD_SPI_CLK_STATE_ENABLED,
			&wcd_spi->status_mask);
	} else {
		dev_err(&spi->dev, "%s: RDSR status is zero\n",
			__func__);
		ret = -EIO;
	}
done:
	return ret;
}
624
/*
 * Disable the SPI clock by writing the IPC control register, clear the
 * clock-enabled status bit, then release SPI access back to the
 * access-control child device (if present).
 *
 * NOTE(review): when ac_dev is set, `ret` from the IPC write is
 * overwritten by the access-release result, so a failed write can be
 * masked (or a successful write reported as failed) — confirm this is
 * the intended return semantics.
 */
static int wcd_spi_clk_disable(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_write_single(spi, WCD_SPI_ADDR_IPC_CTL_HOST, 0x01);
	if (ret < 0)
		dev_err(&spi->dev, "%s: Failed, err = %d\n",
			__func__, ret);
	/*
	 * clear this bit even if clock disable failed
	 * as the source clocks might get turned off.
	 */
	clear_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask);

	/* once the clock is released, SPI access can be released as well */
	if (wcd_spi->ac_dev) {
		ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
					 WCD_SPI_ACCESS_RELEASE,
					 WCD_SPI_AC_DATA_TRANSFER);
		if (ret)
			dev_err(&spi->dev,
				"%s: SPI access release failed, err = %d\n",
				__func__, ret);
	}

	return ret;
}
653
/*
 * Reference-counted clock vote.  @request is WCD_SPI_CLK_ENABLE or
 * WCD_SPI_CLK_DISABLE; @flags selects immediate vs delayed disable
 * (delayed schedules clk_dwork after WCD_SPI_CLK_OFF_TIMER_MS).
 * All clk_users accounting happens under clk_mutex.
 */
static int wcd_spi_clk_ctrl(struct spi_device *spi,
			    bool request, u32 flags)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;
	const char *delay_str;

	delay_str = (flags == WCD_SPI_CLK_FLAG_DELAYED) ?
		    "delayed" : "immediate";

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

	/* Reject any unbalanced disable request */
	if (wcd_spi->clk_users < 0 ||
	    (!request && wcd_spi->clk_users == 0)) {
		dev_err(&spi->dev, "%s: Unbalanced clk_users %d for %s\n",
			__func__, wcd_spi->clk_users,
			request ? "enable" : "disable");
		ret = -EINVAL;

		/* Reset the clk_users to 0 */
		wcd_spi->clk_users = 0;

		goto done;
	}

	if (request == WCD_SPI_CLK_ENABLE) {
		/*
		 * If the SPI bus is suspended, then return error
		 * as the transaction cannot be completed.
		 */
		if (wcd_spi_is_suspended(wcd_spi)) {
			dev_err(&spi->dev,
				"%s: SPI suspended, cannot enable clk\n",
				__func__);
			ret = -EIO;
			goto done;
		}

		/*
		 * Cancel the disable clk work.  clk_mutex must be
		 * dropped here because clk_dwork takes it too and
		 * cancel_..._sync() would otherwise deadlock; state
		 * may change while the lock is released.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);

		wcd_spi->clk_users++;

		/*
		 * If clk state is already set,
		 * then clk wasnt really disabled
		 */
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			goto done;
		else if (wcd_spi->clk_users == 1)
			ret = wcd_spi_clk_enable(spi);

	} else {
		wcd_spi->clk_users--;

		/* Clock is still voted for */
		if (wcd_spi->clk_users > 0)
			goto done;

		/*
		 * If we are here, clk_users must be 0 and needs
		 * to be disabled. Call the disable based on the
		 * flags.
		 */
		if (flags == WCD_SPI_CLK_FLAG_DELAYED) {
			schedule_delayed_work(&wcd_spi->clk_dwork,
				msecs_to_jiffies(WCD_SPI_CLK_OFF_TIMER_MS));
		} else {
			ret = wcd_spi_clk_disable(spi);
			if (ret < 0)
				dev_err(&spi->dev,
					"%s: Failed to disable clk err = %d\n",
					__func__, ret);
		}
	}

done:
	dev_dbg(&spi->dev, "%s: updated clk_users = %d, request_%s %s\n",
		__func__, wcd_spi->clk_users, request ? "enable" : "disable",
		request ? "" : delay_str);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return ret;
}
741
/*
 * One-time slave initialization: vote the clock on, send write-enable,
 * resync the regcache and program slave config / max MTU.  Returns 0
 * on success or the first error.
 *
 * Note: the err_wr_en label is reached on BOTH the success and the
 * wr_en-failure paths — it simply drops the clock vote taken above,
 * while `ret` carries the outcome.
 */
static int wcd_spi_init(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
			       WCD_SPI_CLK_FLAG_IMMEDIATE);
	if (ret < 0)
		goto done;

	ret = wcd_spi_cmd_wr_en(spi);
	if (ret < 0)
		goto err_wr_en;

	/*
	 * In case spi_init is called after component deinit,
	 * it is possible hardware register state is also reset.
	 * Sync the regcache here so hardware state is updated
	 * to reflect the cache.
	 */
	regcache_sync(wcd_spi->regmap);

	regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
		     0x0F3D0800);

	/* Write the MTU to max allowed size */
	regmap_update_bits(wcd_spi->regmap,
			   WCD_SPI_SLAVE_TRNS_LEN,
			   0xFFFF0000, 0xFFFF0000);
err_wr_en:
	wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
			 WCD_SPI_CLK_FLAG_IMMEDIATE);
done:
	return ret;
}
777
778static void wcd_spi_clk_work(struct work_struct *work)
779{
780 struct delayed_work *dwork;
781 struct wcd_spi_priv *wcd_spi;
782 struct spi_device *spi;
783 int ret;
784
785 dwork = to_delayed_work(work);
786 wcd_spi = container_of(dwork, struct wcd_spi_priv, clk_dwork);
787 spi = wcd_spi->spi;
788
789 WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
790 ret = wcd_spi_clk_disable(spi);
791 if (ret < 0)
792 dev_err(&spi->dev,
793 "%s: Failed to disable clk, err = %d\n",
794 __func__, ret);
795 WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
796}
797
/*
 * Core transfer routine (caller must already hold a clock vote).
 * Validates alignment (addr word-aligned, len a word multiple), fences
 * against suspend by holding clk_mutex across the whole transfer, and
 * serializes buffer use with xfer_mutex nested inside clk_mutex —
 * keep that lock order consistent everywhere.
 */
static int __wcd_spi_data_xfer(struct spi_device *spi,
			       struct wcd_spi_msg *msg,
			       enum xfer_request xfer_req)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret;

	/* Check for minimum alignment requirements */
	if (!IS_ALIGNED(msg->remote_addr, WCD_SPI_RW_MIN_ALIGN)) {
		dev_err(&spi->dev,
			"%s addr 0x%x is not aligned to 0x%x\n",
			__func__, msg->remote_addr, WCD_SPI_RW_MIN_ALIGN);
		return -EINVAL;
	} else if (msg->len % WCD_SPI_WORD_BYTE_CNT) {
		dev_err(&spi->dev,
			"%s len 0x%zx is not multiple of %d\n",
			__func__, msg->len, WCD_SPI_WORD_BYTE_CNT);
		return -EINVAL;
	}

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (wcd_spi_is_suspended(wcd_spi)) {
		dev_dbg(&spi->dev,
			"%s: SPI suspended, cannot perform transfer\n",
			__func__);
		ret = -EIO;
		goto done;
	}

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->xfer_mutex);
	/* Exactly one word goes through the cheaper single-word path */
	if (msg->len == WCD_SPI_WORD_BYTE_CNT) {
		if (xfer_req == WCD_SPI_XFER_WRITE)
			ret = wcd_spi_write_single(spi, msg->remote_addr,
						   (*((u32 *)msg->data)));
		else
			ret = wcd_spi_read_single(spi, msg->remote_addr,
						  (u32 *) msg->data);
	} else {
		ret = wcd_spi_transfer_split(spi, msg, xfer_req);
	}
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->xfer_mutex);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return ret;
}
843
844static int wcd_spi_data_xfer(struct spi_device *spi,
845 struct wcd_spi_msg *msg,
846 enum xfer_request req)
847{
848 int ret, ret1;
849
850 if (msg->len <= 0) {
851 dev_err(&spi->dev, "%s: Invalid size %zd\n",
852 __func__, msg->len);
853 return -EINVAL;
854 }
855
856 /* Request for clock */
857 ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
858 WCD_SPI_CLK_FLAG_IMMEDIATE);
859 if (ret < 0) {
860 dev_err(&spi->dev, "%s: clk enable failed %d\n",
861 __func__, ret);
862 goto done;
863 }
864
865 /* Perform the transaction */
866 ret = __wcd_spi_data_xfer(spi, msg, req);
867 if (ret < 0)
868 dev_err(&spi->dev,
869 "%s: Failed %s, addr = 0x%x, size = 0x%zx, err = %d\n",
870 __func__, wcd_spi_xfer_req_str(req),
871 msg->remote_addr, msg->len, ret);
872
873 /* Release the clock even if xfer failed */
874 ret1 = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
875 WCD_SPI_CLK_FLAG_DELAYED);
876 if (ret1 < 0)
877 dev_err(&spi->dev, "%s: clk disable failed %d\n",
878 __func__, ret1);
879done:
880 return ret;
881}
882
883/*
884 * wcd_spi_data_write: Write data to WCD SPI
885 * @spi: spi_device struct
886 * @msg: msg that needs to be written to WCD
887 *
888 * This API writes length of data to address specified. These details
889 * about the write are encapsulated in @msg. Write size should be multiple
890 * of 4 bytes and write address should be 4-byte aligned.
891 */
892static int wcd_spi_data_write(struct spi_device *spi,
893 struct wcd_spi_msg *msg)
894{
895 if (!spi || !msg) {
896 pr_err("%s: Invalid %s\n", __func__,
897 (!spi) ? "spi device" : "msg");
898 return -EINVAL;
899 }
900
901 dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x, len = %zu\n",
902 __func__, msg->remote_addr, msg->len);
903 return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE);
904}
905
906/*
907 * wcd_spi_data_read: Read data from WCD SPI
908 * @spi: spi_device struct
909 * @msg: msg that needs to be read from WCD
910 *
911 * This API reads length of data from address specified. These details
912 * about the read are encapsulated in @msg. Read size should be multiple
913 * of 4 bytes and read address should be 4-byte aligned.
914 */
915static int wcd_spi_data_read(struct spi_device *spi,
916 struct wcd_spi_msg *msg)
917{
918 if (!spi || !msg) {
919 pr_err("%s: Invalid %s\n", __func__,
920 (!spi) ? "spi device" : "msg");
921 return -EINVAL;
922 }
923
924 dev_dbg_ratelimited(&spi->dev, "%s: addr = 0x%x,len = %zu\n",
925 __func__, msg->remote_addr, msg->len);
926 return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ);
927}
928
929static int wdsp_spi_dload_section(struct spi_device *spi,
930 void *data)
931{
932 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
933 struct wdsp_img_section *sec = data;
934 struct wcd_spi_msg msg;
935 int ret;
936
937 dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
938 __func__, sec->addr, sec->size);
939
940 msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
941 msg.data = sec->data;
942 msg.len = sec->size;
943
944 ret = __wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_WRITE);
945 if (ret < 0)
946 dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
947 __func__, msg.remote_addr, msg.len);
948 return ret;
949}
950
951static int wdsp_spi_read_section(struct spi_device *spi, void *data)
952{
953 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
954 struct wdsp_img_section *sec = data;
955 struct wcd_spi_msg msg;
956 int ret;
957
958 msg.remote_addr = sec->addr + wcd_spi->mem_base_addr;
959 msg.data = sec->data;
960 msg.len = sec->size;
961
962 dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n",
963 __func__, msg.remote_addr, msg.len);
964
965 ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ);
966 if (ret < 0)
967 dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n",
968 __func__, msg.remote_addr, msg.len);
969 return ret;
970}
971
/*
 * Event callback registered with the WDSP manager.  Dispatches on the
 * manager's lifecycle events: SPI access hand-off around shutdown /
 * bootup, clock votes around firmware download, section download /
 * readback, suspend/resume gating, and handing out the dev-ops table.
 * Returns 0 or a negative error for the handled event.
 */
static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
				  enum wdsp_event_type event,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_ops *spi_ops;
	int ret = 0;

	dev_dbg(&spi->dev, "%s: event type %d\n",
		__func__, event);

	switch (event) {
	/* Hold SPI access while the remote goes down */
	case WDSP_EVENT_PRE_SHUTDOWN:
		if (wcd_spi->ac_dev) {
			ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
						 WCD_SPI_ACCESS_REQUEST,
						 WCD_SPI_AC_REMOTE_DOWN);
			if (ret)
				dev_err(&spi->dev,
					"%s: request access failed %d\n",
					__func__, ret);
		}
		break;

	/* Remote is down: force the clock off and zero all votes */
	case WDSP_EVENT_POST_SHUTDOWN:
		cancel_delayed_work_sync(&wcd_spi->clk_dwork);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
			wcd_spi_clk_disable(spi);
		wcd_spi->clk_users = 0;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	/* Remote is back up: drop the access taken at PRE_SHUTDOWN */
	case WDSP_EVENT_POST_BOOTUP:
		if (wcd_spi->ac_dev) {
			ret = wcd_spi_access_ctl(wcd_spi->ac_dev,
						 WCD_SPI_ACCESS_RELEASE,
						 WCD_SPI_AC_REMOTE_DOWN);
			if (ret)
				dev_err(&spi->dev,
					"%s: release access failed %d\n",
					__func__, ret);
		}
		break;

	/* Keep the clock voted on for the whole download */
	case WDSP_EVENT_PRE_DLOAD_CODE:
	case WDSP_EVENT_PRE_DLOAD_DATA:
		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk_req failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_POST_DLOAD_CODE:
	case WDSP_EVENT_POST_DLOAD_DATA:
	case WDSP_EVENT_DLOAD_FAILED:

		ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
				       WCD_SPI_CLK_FLAG_IMMEDIATE);
		if (ret < 0)
			dev_err(&spi->dev, "%s: clk unvote failed %d\n",
				__func__, ret);
		break;

	case WDSP_EVENT_DLOAD_SECTION:
		ret = wdsp_spi_dload_section(spi, data);
		break;

	case WDSP_EVENT_READ_SECTION:
		ret = wdsp_spi_read_section(spi, data);
		break;

	/* Veto suspend while clock votes are outstanding */
	case WDSP_EVENT_SUSPEND:
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
		if (!wcd_spi_can_suspend(wcd_spi))
			ret = -EBUSY;
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		break;

	case WDSP_EVENT_RESUME:
		ret = wcd_spi_wait_for_resume(wcd_spi);
		break;

	/* Hand the manager our read/write entry points */
	case WDSP_EVENT_GET_DEVOPS:
		if (!data) {
			dev_err(&spi->dev, "%s: invalid data\n",
				__func__);
			ret = -EINVAL;
			break;
		}

		spi_ops = (struct wcd_spi_ops *) data;
		spi_ops->spi_dev = spi;
		spi_ops->read_dev = wcd_spi_data_read;
		spi_ops->write_dev = wcd_spi_data_write;
		break;

	default:
		dev_dbg(&spi->dev, "%s: Unhandled event %d\n",
			__func__, event);
		break;
	}

	return ret;
}
1079
1080static int wcd_spi_bus_gwrite(void *context, const void *reg,
1081 size_t reg_len, const void *val,
1082 size_t val_len)
1083{
1084 struct device *dev = context;
1085 struct spi_device *spi = to_spi_device(dev);
1086 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001087 u8 *tx_buf = wcd_spi->tx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301088
1089 if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
1090 val_len != wcd_spi->val_bytes) {
1091 dev_err(&spi->dev,
1092 "%s: Invalid input, reg_len = %zd, val_len = %zd",
1093 __func__, reg_len, val_len);
1094 return -EINVAL;
1095 }
1096
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001097 memset(tx_buf, 0, WCD_SPI_CMD_IRW_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301098 tx_buf[0] = WCD_SPI_CMD_IRW;
1099 tx_buf[1] = *((u8 *)reg);
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001100 memcpy(tx_buf + WCD_SPI_OPCODE_LEN + reg_len,
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301101 val, val_len);
1102
1103 return spi_write(spi, tx_buf, WCD_SPI_CMD_IRW_LEN);
1104}
1105
1106static int wcd_spi_bus_write(void *context, const void *data,
1107 size_t count)
1108{
1109 struct device *dev = context;
1110 struct spi_device *spi = to_spi_device(dev);
1111 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1112
1113 if (count < (wcd_spi->reg_bytes + wcd_spi->val_bytes)) {
1114 dev_err(&spi->dev, "%s: Invalid size %zd\n",
1115 __func__, count);
1116 WARN_ON(1);
1117 return -EINVAL;
1118 }
1119
1120 return wcd_spi_bus_gwrite(context, data, wcd_spi->reg_bytes,
1121 data + wcd_spi->reg_bytes,
1122 count - wcd_spi->reg_bytes);
1123}
1124
1125static int wcd_spi_bus_read(void *context, const void *reg,
1126 size_t reg_len, void *val,
1127 size_t val_len)
1128{
1129 struct device *dev = context;
1130 struct spi_device *spi = to_spi_device(dev);
1131 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1132 struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
1133 struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001134 u8 *tx_buf = wcd_spi->tx_buf;
1135 u8 *rx_buf = wcd_spi->rx_buf;
1136 int ret = 0;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301137
1138 if (!reg || !val || reg_len != wcd_spi->reg_bytes ||
1139 val_len != wcd_spi->val_bytes) {
1140 dev_err(&spi->dev,
1141 "%s: Invalid input, reg_len = %zd, val_len = %zd",
1142 __func__, reg_len, val_len);
1143 return -EINVAL;
1144 }
1145
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001146 memset(tx_buf, 0, WCD_SPI_CMD_IRR_LEN);
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301147 tx_buf[0] = WCD_SPI_CMD_IRR;
1148 tx_buf[1] = *((u8 *)reg);
1149
1150 wcd_spi_reinit_xfer(tx_xfer);
1151 tx_xfer->tx_buf = tx_buf;
1152 tx_xfer->rx_buf = NULL;
1153 tx_xfer->len = WCD_SPI_CMD_IRR_LEN;
1154
1155 wcd_spi_reinit_xfer(rx_xfer);
1156 rx_xfer->tx_buf = NULL;
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001157 rx_xfer->rx_buf = rx_buf;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301158 rx_xfer->len = val_len;
1159
Xiaoyu Ye19a5e412018-04-16 18:44:38 -07001160 ret = spi_sync(spi, &wcd_spi->msg2);
1161 if (ret) {
1162 dev_err(&spi->dev, "%s: spi_sync failed, err %d\n",
1163 __func__, ret);
1164 goto done;
1165 }
1166
1167 memcpy(val, rx_buf, val_len);
1168
1169done:
1170 return ret;
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301171}
1172
/*
 * Custom regmap bus: register access goes over SPI using the IRR/IRW
 * command frames above. Values are big-endian on the wire.
 */
static struct regmap_bus wcd_spi_regmap_bus = {
	.write = wcd_spi_bus_write,
	.gather_write = wcd_spi_bus_gwrite,
	.read = wcd_spi_bus_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
1180
1181static int wcd_spi_state_show(struct seq_file *f, void *ptr)
1182{
1183 struct spi_device *spi = f->private;
1184 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1185 const char *clk_state, *clk_mutex, *xfer_mutex;
1186
1187 if (test_bit(WCD_SPI_CLK_STATE_ENABLED, &wcd_spi->status_mask))
1188 clk_state = "enabled";
1189 else
1190 clk_state = "disabled";
1191
1192 clk_mutex = mutex_is_locked(&wcd_spi->clk_mutex) ?
1193 "locked" : "unlocked";
1194
1195 xfer_mutex = mutex_is_locked(&wcd_spi->xfer_mutex) ?
1196 "locked" : "unlocked";
1197
1198 seq_printf(f, "clk_state = %s\nclk_users = %d\n"
1199 "clk_mutex = %s\nxfer_mutex = %s\n",
1200 clk_state, wcd_spi->clk_users, clk_mutex,
1201 xfer_mutex);
1202 return 0;
1203}
1204
/* Open hook for the "state" debugfs file; wires up the seq_file show */
static int wcd_spi_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, wcd_spi_state_show, inode->i_private);
}
1209
/* File operations for the read-only "state" debugfs entry */
static const struct file_operations state_fops = {
	.open = wcd_spi_state_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1216
1217static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
1218 size_t count, loff_t *ppos)
1219{
1220 struct spi_device *spi = file->private_data;
1221 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1222 struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
1223 struct wcd_spi_msg msg;
1224 ssize_t buf_size, read_count = 0;
1225 char *buf;
1226 int ret;
1227
1228 if (*ppos < 0 || !count)
1229 return -EINVAL;
1230
1231 if (dbg_data->size == 0 || dbg_data->addr == 0) {
1232 dev_err(&spi->dev,
1233 "%s: Invalid request, size = %u, addr = 0x%x\n",
1234 __func__, dbg_data->size, dbg_data->addr);
1235 return 0;
1236 }
1237
1238 buf_size = count < dbg_data->size ? count : dbg_data->size;
1239 buf = kzalloc(buf_size, GFP_KERNEL);
1240 if (!buf)
1241 return -ENOMEM;
1242
1243 msg.data = buf;
1244 msg.remote_addr = dbg_data->addr;
1245 msg.len = buf_size;
1246 msg.flags = 0;
1247
1248 ret = wcd_spi_data_read(spi, &msg);
1249 if (ret < 0) {
1250 dev_err(&spi->dev,
1251 "%s: Failed to read %zu bytes from addr 0x%x\n",
1252 __func__, buf_size, msg.remote_addr);
1253 goto done;
1254 }
1255
1256 read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);
1257
1258done:
1259 kfree(buf);
1260 if (ret < 0)
1261 return ret;
1262 else
1263 return read_count;
1264}
1265
/* File operations for the read-only "mem_read" debugfs entry */
static const struct file_operations mem_read_fops = {
	.open = simple_open,
	.read = wcd_spi_debugfs_mem_read,
};
1270
1271static int wcd_spi_debugfs_init(struct spi_device *spi)
1272{
1273 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1274 struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
1275 int rc = 0;
1276
1277 dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
1278 if (IS_ERR_OR_NULL(dbg_data->dir)) {
1279 dbg_data->dir = NULL;
1280 rc = -ENODEV;
1281 goto done;
1282 }
1283
1284 debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
1285 debugfs_create_u32("addr", 0644, dbg_data->dir,
1286 &dbg_data->addr);
1287 debugfs_create_u32("size", 0644, dbg_data->dir,
1288 &dbg_data->size);
1289
1290 debugfs_create_file("mem_read", 0444, dbg_data->dir,
1291 spi, &mem_read_fops);
1292done:
1293 return rc;
1294}
1295
1296
/*
 * Default (reset) values of the SPI slave registers, used to seed the
 * regmap register cache.
 */
static const struct reg_default wcd_spi_defaults[] = {
	{WCD_SPI_SLAVE_SANITY, 0xDEADBEEF},
	{WCD_SPI_SLAVE_DEVICE_ID, 0x00500000},
	{WCD_SPI_SLAVE_STATUS, 0x80100000},
	{WCD_SPI_SLAVE_CONFIG, 0x0F200808},
	{WCD_SPI_SLAVE_SW_RESET, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_STATUS, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_EN, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_CLR, 0x00000000},
	{WCD_SPI_SLAVE_IRQ_FORCE, 0x00000000},
	{WCD_SPI_SLAVE_TX, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_DATA, 0x00000000},
	{WCD_SPI_SLAVE_TEST_BUS_CTRL, 0x00000000},
	{WCD_SPI_SLAVE_SW_RST_IRQ, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_CFG, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MOSI, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_CS_N, 0x00000000},
	{WCD_SPI_SLAVE_CHAR_DATA_MISO, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_BYTE_CNT, 0x00000000},
	{WCD_SPI_SLAVE_TRNS_LEN, 0x00000000},
	{WCD_SPI_SLAVE_FIFO_LEVEL, 0x00000000},
	{WCD_SPI_SLAVE_GENERICS, 0x80000000},
	{WCD_SPI_SLAVE_EXT_BASE_ADDR, 0x00000000},
};
1321
1322static bool wcd_spi_is_volatile_reg(struct device *dev,
1323 unsigned int reg)
1324{
1325 switch (reg) {
1326 case WCD_SPI_SLAVE_SANITY:
1327 case WCD_SPI_SLAVE_STATUS:
1328 case WCD_SPI_SLAVE_IRQ_STATUS:
1329 case WCD_SPI_SLAVE_TX:
1330 case WCD_SPI_SLAVE_SW_RST_IRQ:
1331 case WCD_SPI_SLAVE_TRNS_BYTE_CNT:
1332 case WCD_SPI_SLAVE_FIFO_LEVEL:
1333 case WCD_SPI_SLAVE_GENERICS:
1334 return true;
1335 }
1336
1337 return false;
1338}
1339
1340static bool wcd_spi_is_readable_reg(struct device *dev,
1341 unsigned int reg)
1342{
1343 switch (reg) {
1344 case WCD_SPI_SLAVE_SW_RESET:
1345 case WCD_SPI_SLAVE_IRQ_CLR:
1346 case WCD_SPI_SLAVE_IRQ_FORCE:
1347 return false;
1348 }
1349
1350 return true;
1351}
1352
/* Regmap configuration: 8-bit register addresses, 32-bit values */
static struct regmap_config wcd_spi_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 32,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults = wcd_spi_defaults,
	.num_reg_defaults = ARRAY_SIZE(wcd_spi_defaults),
	.max_register = WCD_SPI_MAX_REGISTER,
	.volatile_reg = wcd_spi_is_volatile_reg,
	.readable_reg = wcd_spi_is_readable_reg,
};
1363
Bhalchandra Gajarec5f43a12018-03-19 14:35:42 -07001364static int wcd_spi_add_ac_dev(struct device *dev,
1365 struct device_node *node)
1366{
1367 struct spi_device *spi = to_spi_device(dev);
1368 struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
1369 struct platform_device *pdev;
1370 int ret = 0;
1371
1372 pdev = platform_device_alloc("wcd-spi-ac", -1);
1373 if (IS_ERR_OR_NULL(pdev)) {
1374 ret = PTR_ERR(pdev);
1375 dev_err(dev, "%s: pdev alloc failed, ret = %d\n",
1376 __func__, ret);
1377 return ret;
1378 }
1379
1380 pdev->dev.parent = dev;
1381 pdev->dev.of_node = node;
1382
1383 ret = platform_device_add(pdev);
1384 if (ret) {
1385 dev_err(dev, "%s: pdev add failed, ret = %d\n",
1386 __func__, ret);
1387 goto dealloc_pdev;
1388 }
1389
1390 wcd_spi->ac_dev = &pdev->dev;
1391 return 0;
1392
1393dealloc_pdev:
1394 platform_device_put(pdev);
1395 return ret;
1396}
1397
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301398static int wdsp_spi_init(struct device *dev, void *priv_data)
1399{
1400 struct spi_device *spi = to_spi_device(dev);
1401 int ret;
Bhalchandra Gajarec5f43a12018-03-19 14:35:42 -07001402 struct device_node *node;
1403
1404 for_each_child_of_node(dev->of_node, node) {
1405 if (!strcmp(node->name, "wcd_spi_ac"))
1406 wcd_spi_add_ac_dev(dev, node);
1407 }
Asish Bhattacharya8e2277f2017-07-20 18:31:55 +05301408
1409 ret = wcd_spi_init(spi);
1410 if (ret < 0)
1411 dev_err(&spi->dev, "%s: Init failed, err = %d\n",
1412 __func__, ret);
1413 return ret;
1414}
1415
/*
 * WDSP component-ops deinit callback.
 *
 * Returns: always 0.
 */
static int wdsp_spi_deinit(struct device *dev, void *priv_data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	/*
	 * Deinit means the hardware is reset. Mark the cache
	 * as dirty here, so init will sync the cache
	 */
	regcache_mark_dirty(wcd_spi->regmap);

	return 0;
}
1429
/* Component callbacks registered with the WDSP manager */
static struct wdsp_cmpnt_ops wdsp_spi_ops = {
	.init = wdsp_spi_init,
	.deinit = wdsp_spi_deinit,
	.event_handler = wdsp_spi_event_handler,
};
1435
/*
 * Component bind callback: registers this device's ops with the WDSP
 * master, creates the regmap over the custom SPI bus, sets up the
 * reusable SPI messages and pre-allocates the DMA-coherent TX/RX
 * buffers used by all transfers.
 *
 * Returns: 0 on success, negative errno on failure. The regmap is
 * devm-managed; the DMA buffers are freed in unbind.
 */
static int wcd_spi_component_bind(struct device *dev,
				  struct device *master,
				  void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int ret = 0;

	wcd_spi->m_dev = master;
	wcd_spi->m_ops = data;

	if (wcd_spi->m_ops &&
	    wcd_spi->m_ops->register_cmpnt_ops)
		ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
							 wcd_spi,
							 &wdsp_spi_ops);
	if (ret) {
		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* Derived from the regmap config: 8-bit reg -> 1, 32-bit val -> 4 */
	wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
	wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);

	wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
					   &spi->dev, &wcd_spi_regmap_cfg);
	if (IS_ERR(wcd_spi->regmap)) {
		ret = PTR_ERR(wcd_spi->regmap);
		dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
			__func__, ret);
		goto done;
	}

	/* debugfs failure is non-fatal; only log it */
	if (wcd_spi_debugfs_init(spi))
		dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);

	/* msg1: single-transfer message; msg2: command + data transfers */
	spi_message_init(&wcd_spi->msg1);
	spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);

	spi_message_init(&wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
	spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);

	/* Pre-allocate the buffers */
	wcd_spi->tx_buf = dma_zalloc_coherent(&spi->dev,
					      WCD_SPI_RW_MAX_BUF_SIZE,
					      &wcd_spi->tx_dma, GFP_KERNEL);
	if (!wcd_spi->tx_buf) {
		ret = -ENOMEM;
		goto done;
	}

	wcd_spi->rx_buf = dma_zalloc_coherent(&spi->dev,
					      WCD_SPI_RW_MAX_BUF_SIZE,
					      &wcd_spi->rx_dma, GFP_KERNEL);
	if (!wcd_spi->rx_buf) {
		/* Unwind the TX allocation so bind leaves no half state */
		dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
				  wcd_spi->tx_buf, wcd_spi->tx_dma);
		wcd_spi->tx_buf = NULL;
		ret = -ENOMEM;
		goto done;
	}
done:
	return ret;
}
1502
/*
 * Component unbind callback: undoes bind — removes debugfs entries,
 * drops the master references, unlinks the SPI transfers and frees the
 * DMA-coherent TX/RX buffers.
 */
static void wcd_spi_component_unbind(struct device *dev,
				     struct device *master,
				     void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;

	debugfs_remove_recursive(dbg_data->dir);
	dbg_data->dir = NULL;

	wcd_spi->m_dev = NULL;
	wcd_spi->m_ops = NULL;

	/* Detach the transfers queued on msg1/msg2 during bind */
	spi_transfer_del(&wcd_spi->xfer1);
	spi_transfer_del(&wcd_spi->xfer2[0]);
	spi_transfer_del(&wcd_spi->xfer2[1]);

	dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
			  wcd_spi->tx_buf, wcd_spi->tx_dma);
	dma_free_coherent(&spi->dev, WCD_SPI_RW_MAX_BUF_SIZE,
			  wcd_spi->rx_buf, wcd_spi->rx_dma);
	/* NULL the pointers so a later bind/use cannot touch freed memory */
	wcd_spi->tx_buf = NULL;
	wcd_spi->rx_buf = NULL;
}
1528
/* Bind/unbind callbacks for the component framework */
static const struct component_ops wcd_spi_component_ops = {
	.bind = wcd_spi_component_bind,
	.unbind = wcd_spi_component_unbind,
};
1533
/*
 * SPI probe: allocates the driver state, reads the mandatory
 * "qcom,mem-base-addr" DT property, initializes locks/work/completion
 * and registers with the component framework. The heavy setup (regmap,
 * DMA buffers) happens later in the component bind callback.
 *
 * Returns: 0 on success, negative errno on failure.
 */
static int wcd_spi_probe(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi;
	int ret = 0;

	wcd_spi = devm_kzalloc(&spi->dev, sizeof(*wcd_spi),
			       GFP_KERNEL);
	if (!wcd_spi)
		return -ENOMEM;

	/* Base address of the remote (WDSP) memory window — required */
	ret = of_property_read_u32(spi->dev.of_node,
				   "qcom,mem-base-addr",
				   &wcd_spi->mem_base_addr);
	if (ret < 0) {
		dev_err(&spi->dev, "%s: Missing %s DT entry",
			__func__, "qcom,mem-base-addr");
		goto err_ret;
	}

	dev_dbg(&spi->dev,
		"%s: mem_base_addr 0x%x\n", __func__, wcd_spi->mem_base_addr);

	mutex_init(&wcd_spi->clk_mutex);
	mutex_init(&wcd_spi->xfer_mutex);
	INIT_DELAYED_WORK(&wcd_spi->clk_dwork, wcd_spi_clk_work);
	init_completion(&wcd_spi->resume_comp);
	/* Set up DMA ops so the coherent buffer allocations in bind work */
	arch_setup_dma_ops(&spi->dev, 0, 0, NULL, true);

	wcd_spi->spi = spi;
	spi_set_drvdata(spi, wcd_spi);

	ret = component_add(&spi->dev, &wcd_spi_component_ops);
	if (ret) {
		dev_err(&spi->dev, "%s: component_add failed err = %d\n",
			__func__, ret);
		goto err_component_add;
	}

	return ret;

err_component_add:
	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);
err_ret:
	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);
	return ret;
}
1582
/*
 * SPI remove: unregisters from the component framework (which triggers
 * unbind and its cleanup), then destroys the locks and releases the
 * driver state.
 */
static int wcd_spi_remove(struct spi_device *spi)
{
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	component_del(&spi->dev, &wcd_spi_component_ops);

	mutex_destroy(&wcd_spi->clk_mutex);
	mutex_destroy(&wcd_spi->xfer_mutex);

	devm_kfree(&spi->dev, wcd_spi);
	spi_set_drvdata(spi, NULL);

	return 0;
}
1597
1598#ifdef CONFIG_PM
/*
 * System suspend callback: refuses suspend (-EBUSY) while the SPI
 * clock is still in use, otherwise asks the WDSP master to suspend the
 * other components and, on success, marks this device suspended.
 */
static int wcd_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
	int rc = 0;

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	if (!wcd_spi_can_suspend(wcd_spi)) {
		rc = -EBUSY;
		goto done;
	}

	/*
	 * If we are here, it is okay to let the suspend go
	 * through for this driver. But, still need to notify
	 * the master to make sure all other components can suspend
	 * as well.
	 */
	if (wcd_spi->m_dev && wcd_spi->m_ops &&
	    wcd_spi->m_ops->suspend) {
		/*
		 * Drop clk_mutex across the master callback to avoid
		 * holding it while other components run their own
		 * suspend paths; reacquire before touching status_mask.
		 */
		WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
		rc = wcd_spi->m_ops->suspend(wcd_spi->m_dev);
		WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	}

	if (rc == 0)
		set_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	else
		dev_dbg(&spi->dev, "%s: cannot suspend, err = %d\n",
			__func__, rc);
done:
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);
	return rc;
}
1633
/*
 * System resume callback: clears the suspended flag under clk_mutex
 * and wakes any waiter blocked in wcd_spi_wait_for_resume().
 */
static int wcd_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);

	WCD_SPI_MUTEX_LOCK(spi, wcd_spi->clk_mutex);
	clear_bit(WCD_SPI_IS_SUSPENDED, &wcd_spi->status_mask);
	complete(&wcd_spi->resume_comp);
	WCD_SPI_MUTEX_UNLOCK(spi, wcd_spi->clk_mutex);

	return 0;
}
1646
/* System PM callbacks (compiled only when CONFIG_PM is enabled) */
static const struct dev_pm_ops wcd_spi_pm_ops = {
	.suspend = wcd_spi_suspend,
	.resume = wcd_spi_resume,
};
1651#endif
1652
/* Device-tree match table for this driver */
static const struct of_device_id wcd_spi_of_match[] = {
	{ .compatible = "qcom,wcd-spi-v2", },
	{ }
};
MODULE_DEVICE_TABLE(of, wcd_spi_of_match);
1658
/* SPI driver registration; bound via the DT match table above */
static struct spi_driver wcd_spi_driver = {
	.driver = {
		.name = "wcd-spi-v2",
		.of_match_table = wcd_spi_of_match,
#ifdef CONFIG_PM
		.pm = &wcd_spi_pm_ops,
#endif
	},
	.probe = wcd_spi_probe,
	.remove = wcd_spi_remove,
};

module_spi_driver(wcd_spi_driver);

MODULE_DESCRIPTION("WCD SPI driver");
MODULE_LICENSE("GPL v2");