// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/proc_fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/wcd-spi-ac-params.h>
#include <soc/wcd-spi-ac.h>
#include <soc/qcom/msm_qmi_interface.h>

#include "wcd_spi_ctl_v01.h"

#define WCD_SPI_AC_PFS_ENTRY_MAX_LEN 16
#define WCD_SPI_AC_WRITE_CMD_MIN_SIZE \
	(sizeof(struct wcd_spi_ac_write_cmd))
#define WCD_SPI_AC_WRITE_CMD_MAX_SIZE \
	(WCD_SPI_AC_WRITE_CMD_MIN_SIZE + \
	 (WCD_SPI_AC_MAX_BUFFERS * \
	  sizeof(struct wcd_spi_ac_buf_data)))

#define WCD_SPI_AC_MUTEX_LOCK(dev, lock) \
{ \
	dev_dbg(dev, "%s: mutex_lock(%s)\n", \
		__func__, __stringify_1(lock)); \
	mutex_lock(&lock); \
}

#define WCD_SPI_AC_MUTEX_UNLOCK(dev, lock) \
{ \
	dev_dbg(dev, "%s: mutex_unlock(%s)\n", \
		__func__, __stringify_1(lock)); \
	mutex_unlock(&lock); \
}

/*
 * All bits of status should be cleared for SPI access
 * to be released.
 */
#define WCD_SPI_AC_STATUS_RELEASE_ACCESS 0x00
#define WCD_SPI_AC_LOCAL_ACCESS 0x00
#define WCD_SPI_AC_REMOTE_ACCESS 0x01
#define WCD_SPI_CTL_INS_ID 0
#define WCD_SPI_AC_QMI_TIMEOUT_MS 100

struct wcd_spi_ac_priv {

	/* Pointer to device for this driver */
	struct device *dev;

	/* Pointer to parent's device */
	struct device *parent;

	/* char dev related */
	struct class *cls;
	struct device *chardev;
	struct cdev cdev;
	dev_t cdev_num;

	/* proc entry related */
	struct proc_dir_entry *pfs_root;
	struct proc_dir_entry *pfs_status;

	/* service status related */
	u8 svc_offline;
	u8 svc_offline_change;
	wait_queue_head_t svc_poll_wait;
	struct mutex status_lock;

	/* state maintenance related */
	u32 state;
	struct mutex state_lock;
	u8 current_access;

	/* qmi related */
	struct qmi_handle *qmi_hdl;
	struct work_struct svc_arr_work;
	struct work_struct svc_exit_work;
	struct notifier_block nb;
	struct mutex svc_lock;
	struct workqueue_struct *qmi_wq;
	struct work_struct recv_msg_work;
};


static void wcd_spi_ac_status_change(struct wcd_spi_ac_priv *ac,
				     u8 online)
{
	WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->status_lock);
	ac->svc_offline = !online;
	/* Make sure the write is complete */
	wmb();
	xchg(&ac->svc_offline_change, 1);
	wake_up_interruptible(&ac->svc_poll_wait);
	dev_dbg(ac->dev,
		"%s request %u offline %u off_change %u\n",
		__func__, online, ac->svc_offline,
		ac->svc_offline_change);
	WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->status_lock);
}

static int wcd_spi_ac_status_open(struct inode *inode,
				  struct file *file)
{
	struct wcd_spi_ac_priv *ac = PDE_DATA(inode);

	file->private_data = ac;

	return 0;
}

static ssize_t wcd_spi_ac_status_read(struct file *file,
				      char __user *buffer,
				      size_t count, loff_t *offset)
{
	struct wcd_spi_ac_priv *ac;
	char buf[WCD_SPI_AC_PFS_ENTRY_MAX_LEN];
	int len, ret;
	u8 offline;

	ac = (struct wcd_spi_ac_priv *) file->private_data;
	if (!ac) {
		pr_err("%s: Invalid private data for status\n",
		       __func__);
		return -EINVAL;
	}

	WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->status_lock);
	offline = ac->svc_offline;
	/* Make sure the read is complete */
	rmb();
	dev_dbg(ac->dev, "%s: offline = %sline\n",
		__func__, offline ? "off" : "on");
	len = snprintf(buf, sizeof(buf), "%s\n",
		       offline ? "OFFLINE" : "ONLINE");
	ret = simple_read_from_buffer(buffer, count, offset, buf, len);
	WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->status_lock);

	return ret;
}

static unsigned int wcd_spi_ac_status_poll(struct file *file,
					   poll_table *wait)
{
	struct wcd_spi_ac_priv *ac;
	unsigned int ret = 0;

	ac = (struct wcd_spi_ac_priv *) file->private_data;
	if (!ac) {
		pr_err("%s: Invalid private data for status\n",
		       __func__);
		return -EINVAL;
	}

	dev_dbg(ac->dev, "%s: Poll wait, svc = %s\n",
		__func__, ac->svc_offline ? "offline" : "online");
	poll_wait(file, &ac->svc_poll_wait, wait);
	dev_dbg(ac->dev, "%s: Woken up Poll wait, svc = %s\n",
		__func__, ac->svc_offline ? "offline" : "online");

	WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->status_lock);
	if (xchg(&ac->svc_offline_change, 0))
		ret = POLLIN | POLLPRI | POLLRDNORM;
	dev_dbg(ac->dev, "%s: ret (%d) from poll_wait\n",
		__func__, ret);
	WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->status_lock);

	return ret;
}

static const struct file_operations wcd_spi_ac_status_ops = {
	.owner = THIS_MODULE,
	.open = wcd_spi_ac_status_open,
	.read = wcd_spi_ac_status_read,
	.poll = wcd_spi_ac_status_poll,
};
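
/*
 * Userspace view of the status node above (illustrative sketch, not part of
 * the driver): the node is created under proc from WCD_SPI_AC_PROCFS_DIR_NAME
 * and WCD_SPI_AC_PROCFS_STATE_NAME in the uapi header, so STATUS_PATH below
 * is an assumption; only the read/poll behaviour is what this driver
 * implements. read() returns "ONLINE" or "OFFLINE", and poll() wakes up
 * whenever wcd_spi_ac_status_change() flips svc_offline_change.
 *
 *	struct pollfd pfd = {
 *		.fd = open(STATUS_PATH, O_RDONLY),	// hypothetical path
 *		.events = POLLIN | POLLPRI,
 *	};
 *	char state[WCD_SPI_AC_PFS_ENTRY_MAX_LEN];
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		lseek(pfd.fd, 0, SEEK_SET);
 *		ssize_t n = read(pfd.fd, state, sizeof(state) - 1);
 *		if (n > 0)
 *			state[n] = '\0';	// "ONLINE\n" or "OFFLINE\n"
 *	}
 */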

static int wcd_spi_ac_procfs_init(struct wcd_spi_ac_priv *ac)
{
	int ret = 0;

	ac->pfs_root = proc_mkdir(WCD_SPI_AC_PROCFS_DIR_NAME, NULL);
	if (!ac->pfs_root) {
		dev_err(ac->dev, "%s: proc_mkdir failed\n", __func__);
		return -EINVAL;
	}

	ac->pfs_status = proc_create_data(WCD_SPI_AC_PROCFS_STATE_NAME,
					  0444, ac->pfs_root,
					  &wcd_spi_ac_status_ops,
					  ac);
	if (!ac->pfs_status) {
		dev_err(ac->dev, "%s: proc_create_data failed\n",
			__func__);
		ret = -EINVAL;
		goto rmdir_root;
	}

	proc_set_size(ac->pfs_status, WCD_SPI_AC_PFS_ENTRY_MAX_LEN);

	return 0;

rmdir_root:
	proc_remove(ac->pfs_root);
	return ret;
}

static void wcd_spi_ac_procfs_deinit(struct wcd_spi_ac_priv *ac)
{
	proc_remove(ac->pfs_status);
	proc_remove(ac->pfs_root);
}

static int wcd_spi_ac_request_access(struct wcd_spi_ac_priv *ac,
				     bool is_svc_locked)
{
	struct wcd_spi_req_access_msg_v01 req;
	struct wcd_spi_req_access_resp_v01 rsp;
	struct msg_desc req_desc, rsp_desc;
	int ret = 0;

	dev_dbg(ac->dev, "%s: is_svc_locked = %s\n",
		__func__, is_svc_locked ? "true" : "false");

	memset(&req, 0, sizeof(req));
	memset(&rsp, 0, sizeof(rsp));

	req.reason_valid = 1;
	req.reason = ac->state & 0x03;

	req_desc.max_msg_len = WCD_SPI_REQ_ACCESS_MSG_V01_MAX_MSG_LEN;
	req_desc.msg_id = WCD_SPI_REQ_ACCESS_MSG_V01;
	req_desc.ei_array = wcd_spi_req_access_msg_v01_ei;

	rsp_desc.max_msg_len = WCD_SPI_REQ_ACCESS_RESP_V01_MAX_MSG_LEN;
	rsp_desc.msg_id = WCD_SPI_REQ_ACCESS_RESP_V01;
	rsp_desc.ei_array = wcd_spi_req_access_resp_v01_ei;

	if (!is_svc_locked)
		WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->svc_lock);

	ret = qmi_send_req_wait(ac->qmi_hdl,
				&req_desc, &req, sizeof(req),
				&rsp_desc, &rsp, sizeof(rsp),
				WCD_SPI_AC_QMI_TIMEOUT_MS);
	if (ret) {
		dev_err(ac->dev, "%s: msg send failed %d\n",
			__func__, ret);
		goto done;
	}

	if (rsp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = -EIO;
		dev_err(ac->dev, "%s: qmi resp error %d\n",
			__func__, rsp.resp.result);
	}
done:
	if (!is_svc_locked)
		WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->svc_lock);

	return ret;
}

static int wcd_spi_ac_release_access(struct wcd_spi_ac_priv *ac,
				     bool is_svc_locked)
{
	struct wcd_spi_rel_access_msg_v01 req;
	struct wcd_spi_rel_access_resp_v01 rsp;
	struct msg_desc req_desc, rsp_desc;
	int ret = 0;

	dev_dbg(ac->dev, "%s: is_svc_locked = %s\n",
		__func__, is_svc_locked ? "true" : "false");

	memset(&req, 0, sizeof(req));
	memset(&rsp, 0, sizeof(rsp));

	req_desc.max_msg_len = WCD_SPI_REL_ACCESS_MSG_V01_MAX_MSG_LEN;
	req_desc.msg_id = WCD_SPI_REL_ACCESS_MSG_V01;
	req_desc.ei_array = wcd_spi_rel_access_msg_v01_ei;

	rsp_desc.max_msg_len = WCD_SPI_REL_ACCESS_RESP_V01_MAX_MSG_LEN;
	rsp_desc.msg_id = WCD_SPI_REL_ACCESS_RESP_V01;
	rsp_desc.ei_array = wcd_spi_rel_access_resp_v01_ei;

	if (!is_svc_locked)
		WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->svc_lock);

	ret = qmi_send_req_wait(ac->qmi_hdl,
				&req_desc, &req, sizeof(req),
				&rsp_desc, &rsp, sizeof(rsp),
				WCD_SPI_AC_QMI_TIMEOUT_MS);
	if (ret) {
		dev_err(ac->dev, "%s: msg send failed %d\n",
			__func__, ret);
		goto done;
	}

	if (rsp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = -EIO;
		dev_err(ac->dev, "%s: qmi resp error %d\n",
			__func__, rsp.resp.result);
	}
done:
	if (!is_svc_locked)
		WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->svc_lock);
	return ret;
}

static int wcd_spi_ac_buf_msg(
	struct wcd_spi_ac_priv *ac,
	u8 *data, int data_sz)
{
	struct wcd_spi_ac_buf_data *buf_data;
	struct wcd_spi_buff_msg_v01 req;
	struct wcd_spi_buff_resp_v01 rsp;
	struct msg_desc req_desc, rsp_desc;
	int ret = 0;

	memset(&req, 0, sizeof(req));
	memset(&rsp, 0, sizeof(rsp));

	buf_data = (struct wcd_spi_ac_buf_data *) data;
	memcpy(req.buff_addr_1, buf_data,
	       sizeof(*buf_data));

	if (data_sz - sizeof(*buf_data) != 0) {
		req.buff_addr_2_valid = 1;
		buf_data++;
		memcpy(req.buff_addr_2, buf_data,
		       sizeof(*buf_data));
	}

	req_desc.max_msg_len = WCD_SPI_BUFF_MSG_V01_MAX_MSG_LEN;
	req_desc.msg_id = WCD_SPI_BUFF_MSG_V01;
	req_desc.ei_array = wcd_spi_buff_msg_v01_ei;

	rsp_desc.max_msg_len = WCD_SPI_BUFF_RESP_V01_MAX_MSG_LEN;
	rsp_desc.msg_id = WCD_SPI_BUFF_RESP_V01;
	rsp_desc.ei_array = wcd_spi_buff_resp_v01_ei;

	WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->svc_lock);
	ret = qmi_send_req_wait(ac->qmi_hdl,
				&req_desc, &req, sizeof(req),
				&rsp_desc, &rsp, sizeof(rsp),
				WCD_SPI_AC_QMI_TIMEOUT_MS);

	if (ret) {
		dev_err(ac->dev, "%s: msg send failed %d\n",
			__func__, ret);
		goto done;
	}

	if (rsp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ret = -EIO;
		dev_err(ac->dev, "%s: qmi resp error %d\n",
			__func__, rsp.resp.result);
	}
done:
	WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->svc_lock);
	return ret;

}

/*
 * wcd_spi_ac_set_sync: Sets the given bits in the SPI status mask
 *			and requests SPI bus access if it is not
 *			already accessible locally.
 * @ac: pointer to the driver's private data
 * @value: value to be set in the status mask
 * @is_svc_locked: flag to indicate if svc_lock is already held by the caller
 */
static int wcd_spi_ac_set_sync(struct wcd_spi_ac_priv *ac,
			       u32 value, bool is_svc_locked)
{
	int ret = 0;

	WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->state_lock);
	ac->state |= value;
	/* any non-zero state means SPI access must be requested */
	wmb();
	dev_dbg(ac->dev, "%s: current state = 0x%x, current access 0x%x\n",
		__func__, ac->state, ac->current_access);
	if (ac->current_access == WCD_SPI_AC_REMOTE_ACCESS) {
		dev_dbg(ac->dev,
			"%s: requesting access, state = 0x%x\n",
			__func__, ac->state);
		ret = wcd_spi_ac_request_access(ac, is_svc_locked);
		if (!ret)
			ac->current_access = WCD_SPI_AC_LOCAL_ACCESS;
	}
	WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->state_lock);

	return ret;
}

/*
 * wcd_spi_ac_clear_sync: Clears the given bits in the SPI status mask
 *			  and releases SPI bus access if no bits remain set.
 * @ac: pointer to the driver's private data
 * @value: value to be cleared in the status mask
 * @is_svc_locked: flag to indicate if svc_lock is already held by the caller
 */
static int wcd_spi_ac_clear_sync(struct wcd_spi_ac_priv *ac,
				 u32 value, bool is_svc_locked)
{
	int ret = 0;

	WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->state_lock);
	ac->state &= ~(value);
	/* make sure value is written before read */
	wmb();
	dev_dbg(ac->dev, "%s: current state = 0x%x, current access 0x%x\n",
		__func__, ac->state, ac->current_access);
	/* state should be zero to release SPI access */
	if (!ac->state &&
	    ac->current_access == WCD_SPI_AC_LOCAL_ACCESS) {
		dev_dbg(ac->dev,
			"%s: releasing access, state = 0x%x\n",
			__func__, ac->state);
		ret = wcd_spi_ac_release_access(ac, is_svc_locked);
		if (!ret)
			ac->current_access = WCD_SPI_AC_REMOTE_ACCESS;
	}
	WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->state_lock);

	return ret;

}
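
/*
 * Worked example of the arbitration implemented by set_sync/clear_sync
 * (the sequence is derived from this file and is illustrative only):
 *
 *	probe():                 state = SVC_OFFLINE | UNINITIALIZED, access = LOCAL
 *	svc_arrive():            clear_sync(SVC_OFFLINE)   -> state = UNINITIALIZED
 *	cdev write(BUF_DATA):    wcd_spi_ac_buf_msg() ok,
 *	                         clear_sync(UNINITIALIZED) -> state = 0,
 *	                         QMI release, access -> REMOTE
 *	access_ctl(REQUEST, DATA_TRANSFER):
 *	                         set_sync() -> QMI request, access -> LOCAL
 *	access_ctl(RELEASE, DATA_TRANSFER):
 *	                         clear_sync() -> state = 0, QMI release,
 *	                         access -> REMOTE
 */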

/*
 * wcd_spi_access_ctl: API to request/release access to the
 *		       wcd-spi bus.
 * @dev: handle to the wcd-spi-ac device
 * @request: enum to indicate access request or access release
 * @reason: reason for the request/release. Must be one of the valid
 *	    reasons (WCD_SPI_AC_DATA_TRANSFER or WCD_SPI_AC_REMOTE_DOWN).
 * Returns 0 if the access handover was successful,
 * negative error code otherwise.
 */
int wcd_spi_access_ctl(struct device *dev,
		       enum wcd_spi_acc_req request,
		       u32 reason)
{
	struct wcd_spi_ac_priv *ac;
	int ret = 0;

	if (!dev) {
		pr_err("%s: invalid device\n", __func__);
		return -EINVAL;
	}

	/* only data_transfer and remote_down are valid reasons */
	if (reason != WCD_SPI_AC_DATA_TRANSFER &&
	    reason != WCD_SPI_AC_REMOTE_DOWN) {
		pr_err("%s: Invalid reason 0x%x\n",
		       __func__, reason);
		return -EINVAL;
	}

	ac = (struct wcd_spi_ac_priv *) dev_get_drvdata(dev);
	if (!ac) {
		dev_err(dev, "%s: invalid driver data\n", __func__);
		return -EINVAL;
	}

	dev_dbg(dev, "%s: request = 0x%x, reason = 0x%x\n",
		__func__, request, reason);

	switch (request) {
	case WCD_SPI_ACCESS_REQUEST:
		ret = wcd_spi_ac_set_sync(ac, reason, false);
		if (ret)
			dev_err(dev, "%s: set_sync(0x%x) failed %d\n",
				__func__, reason, ret);
		break;
	case WCD_SPI_ACCESS_RELEASE:
		ret = wcd_spi_ac_clear_sync(ac, reason, false);
		if (ret)
			dev_err(dev, "%s: clear_sync(0x%x) failed %d\n",
				__func__, reason, ret);
		break;
	default:
		dev_err(dev, "%s: invalid request 0x%x\n",
			__func__, request);
		break;
	}

	return ret;
}
EXPORT_SYMBOL(wcd_spi_access_ctl);
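
/*
 * Minimal usage sketch for a kernel client of wcd_spi_access_ctl() (for
 * illustration only; the helper below is hypothetical, only the exported
 * function and its arguments come from this driver). @ac_dev must be the
 * wcd-spi-ac platform device whose drvdata was set in wcd_spi_ac_probe().
 *
 *	int do_spi_transfer(struct device *ac_dev)
 *	{
 *		int ret;
 *
 *		ret = wcd_spi_access_ctl(ac_dev, WCD_SPI_ACCESS_REQUEST,
 *					 WCD_SPI_AC_DATA_TRANSFER);
 *		if (ret)
 *			return ret;
 *
 *		// ... perform the SPI data transfer here ...
 *
 *		return wcd_spi_access_ctl(ac_dev, WCD_SPI_ACCESS_RELEASE,
 *					  WCD_SPI_AC_DATA_TRANSFER);
 *	}
 */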

static int wcd_spi_ac_cdev_open(struct inode *inode,
				struct file *file)
{
	struct wcd_spi_ac_priv *ac;
	int ret = 0;

	ac = container_of(inode->i_cdev, struct wcd_spi_ac_priv, cdev);
	if (!ac) {
		pr_err("%s: Invalid private data\n", __func__);
		return -EINVAL;
	}

	WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->status_lock);
	if (ac->svc_offline) {
		dev_err(ac->dev, "%s: SVC is not online, cannot open driver\n",
			__func__);
		ret = -ENODEV;
		goto done;
	}

	file->private_data = ac;

done:
	WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->status_lock);
	return ret;
}

static ssize_t wcd_spi_ac_cdev_write(struct file *file,
				     const char __user *buf,
				     size_t count,
				     loff_t *ppos)
{
	struct wcd_spi_ac_priv *ac;
	struct wcd_spi_ac_write_cmd *cmd_buf;
	int ret = 0;
	int data_sz;

	ac = (struct wcd_spi_ac_priv *) file->private_data;
	if (!ac) {
		pr_err("%s: Invalid private data\n", __func__);
		return -EINVAL;
	}

	if (count < WCD_SPI_AC_WRITE_CMD_MIN_SIZE ||
	    count > WCD_SPI_AC_WRITE_CMD_MAX_SIZE) {
		dev_err(ac->dev, "%s: Invalid write count %zd\n",
			__func__, count);
		return -EINVAL;
	}

	cmd_buf = kzalloc(count, GFP_KERNEL);
	if (!cmd_buf)
		return -ENOMEM;

	if (get_user(cmd_buf->cmd_type, buf)) {
		dev_err(ac->dev, "%s: get_user failed\n", __func__);
		ret = -EFAULT;
		goto free_cmd_buf;
	}

	dev_dbg(ac->dev, "%s: write cmd type 0x%x\n",
		__func__, cmd_buf->cmd_type);

	switch (cmd_buf->cmd_type) {

	case WCD_SPI_AC_CMD_CONC_BEGIN:
		ret = wcd_spi_ac_set_sync(ac, WCD_SPI_AC_CONCURRENCY, false);
		if (ret) {
			dev_err(ac->dev, "%s: set_sync(CONC) fail %d\n",
				__func__, ret);
			goto free_cmd_buf;
		}

		break;

	case WCD_SPI_AC_CMD_CONC_END:
		ret = wcd_spi_ac_clear_sync(ac, WCD_SPI_AC_CONCURRENCY, false);
		if (ret) {
			dev_err(ac->dev, "%s: clear_sync(CONC) fail %d\n",
				__func__, ret);
			goto free_cmd_buf;
		}

		break;

	case WCD_SPI_AC_CMD_BUF_DATA:

		/* Read the buffer details and send to service */
		data_sz = count - sizeof(cmd_buf->cmd_type);

		if (!data_sz ||
		    (data_sz % sizeof(struct wcd_spi_ac_buf_data))) {
			dev_err(ac->dev, "%s: size %d not multiple of %zu\n",
				__func__, data_sz,
				sizeof(struct wcd_spi_ac_buf_data));
			ret = -EINVAL;
			goto free_cmd_buf;
		}

		if (data_sz / sizeof(struct wcd_spi_ac_buf_data) >
		    WCD_SPI_AC_MAX_BUFFERS) {
			dev_err(ac->dev, "%s: invalid size %d\n",
				__func__, data_sz);
			ret = -EINVAL;
			goto free_cmd_buf;
		}

		if (copy_from_user(cmd_buf->payload,
				   buf + sizeof(cmd_buf->cmd_type),
				   data_sz)) {
			dev_err(ac->dev, "%s: copy_from_user failed\n",
				__func__);
			ret = -EFAULT;
			goto free_cmd_buf;
		}

		ret = wcd_spi_ac_buf_msg(ac, cmd_buf->payload, data_sz);
		if (ret) {
			dev_err(ac->dev, "%s: _buf_msg failed %d\n",
				__func__, ret);
			goto free_cmd_buf;
		}

		ret = wcd_spi_ac_clear_sync(ac, WCD_SPI_AC_UNINITIALIZED,
					    false);
		if (ret) {
			dev_err(ac->dev, "%s: clear_sync 0x%lx failed %d\n",
				__func__, WCD_SPI_AC_UNINITIALIZED, ret);
			goto free_cmd_buf;
		}
		break;
	default:
		dev_err(ac->dev, "%s: Invalid cmd_type 0x%x\n",
			__func__, cmd_buf->cmd_type);
		ret = -EINVAL;
		goto free_cmd_buf;
	}

free_cmd_buf:

	kfree(cmd_buf);
	if (!ret)
		ret = count;

	return ret;
}

static int wcd_spi_ac_cdev_release(struct inode *inode,
				   struct file *file)
{
	struct wcd_spi_ac_priv *ac;
	int ret = 0;

	ac = (struct wcd_spi_ac_priv *) file->private_data;
	if (!ac) {
		pr_err("%s: Invalid private data\n", __func__);
		return -EINVAL;
	}

	ret = wcd_spi_ac_set_sync(ac, WCD_SPI_AC_UNINITIALIZED, false);
	if (ret)
		dev_err(ac->dev, "%s: set_sync(UNINITIALIZED) failed %d\n",
			__func__, ret);
	return ret;
}

static const struct file_operations wcd_spi_ac_cdev_fops = {
	.owner = THIS_MODULE,
	.open = wcd_spi_ac_cdev_open,
	.write = wcd_spi_ac_cdev_write,
	.release = wcd_spi_ac_cdev_release,
};
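
/*
 * Userspace sketch of driving the char device above (illustrative only;
 * the device path is an assumption -- the node is created from
 * WCD_SPI_AC_CLIENT_CDEV_NAME -- and the exact layouts of
 * struct wcd_spi_ac_write_cmd and struct wcd_spi_ac_buf_data come from
 * the uapi header linux/wcd-spi-ac-params.h, not from this file):
 *
 *	int fd = open(DEV_PATH, O_WRONLY);	// hypothetical /dev node path
 *	struct wcd_spi_ac_write_cmd *cmd;
 *	size_t sz = sizeof(*cmd) + sizeof(struct wcd_spi_ac_buf_data);
 *
 *	cmd = calloc(1, sz);
 *	cmd->cmd_type = WCD_SPI_AC_CMD_CONC_BEGIN;
 *	write(fd, cmd, sizeof(*cmd));		// begin concurrency window
 *
 *	cmd->cmd_type = WCD_SPI_AC_CMD_BUF_DATA;
 *	// fill cmd->payload with 1..WCD_SPI_AC_MAX_BUFFERS buf_data entries
 *	write(fd, cmd, sz);			// hand buffers to the service
 *
 *	cmd->cmd_type = WCD_SPI_AC_CMD_CONC_END;
 *	write(fd, cmd, sizeof(*cmd));		// end concurrency window
 */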

static int wcd_spi_ac_reg_chardev(struct wcd_spi_ac_priv *ac)
{
	int ret;

	ret = alloc_chrdev_region(&ac->cdev_num, 0, 1,
				  WCD_SPI_AC_CLIENT_CDEV_NAME);
	if (ret) {
		dev_err(ac->dev, "%s: alloc_chrdev_region failed %d\n",
			__func__, ret);
		return ret;
	}

	ac->cls = class_create(THIS_MODULE, WCD_SPI_AC_CLIENT_CDEV_NAME);
	if (IS_ERR(ac->cls)) {
		ret = PTR_ERR(ac->cls);
		dev_err(ac->dev, "%s: class_create failed %d\n",
			__func__, ret);
		goto unregister_chrdev;
	}

	ac->chardev = device_create(ac->cls, NULL, ac->cdev_num,
				    NULL, WCD_SPI_AC_CLIENT_CDEV_NAME);
	if (IS_ERR(ac->chardev)) {
		ret = PTR_ERR(ac->chardev);
		dev_err(ac->dev, "%s: device_create failed %d\n",
			__func__, ret);
		goto destroy_class;
	}

	cdev_init(&ac->cdev, &wcd_spi_ac_cdev_fops);
	ret = cdev_add(&ac->cdev, ac->cdev_num, 1);
	if (ret) {
		dev_err(ac->dev, "%s: cdev_add failed %d\n",
			__func__, ret);
		goto destroy_device;
	}

	return 0;

destroy_device:
	device_destroy(ac->cls, ac->cdev_num);

destroy_class:
	class_destroy(ac->cls);

unregister_chrdev:
	unregister_chrdev_region(ac->cdev_num, 1);
	return ret;
}

static int wcd_spi_ac_unreg_chardev(struct wcd_spi_ac_priv *ac)
{
	cdev_del(&ac->cdev);
	device_destroy(ac->cls, ac->cdev_num);
	class_destroy(ac->cls);
	unregister_chrdev_region(ac->cdev_num, 1);

	return 0;
}

static void wcd_spi_ac_recv_msg(struct work_struct *work)
{
	struct wcd_spi_ac_priv *ac;
	int rc = 0;

	ac = container_of(work, struct wcd_spi_ac_priv,
			  recv_msg_work);
	if (!ac) {
		pr_err("%s: Invalid private data\n", __func__);
		return;
	}

	do {
		dev_dbg(ac->dev, "%s: msg received, rc = %d\n",
			__func__, rc);
	} while ((rc = qmi_recv_msg(ac->qmi_hdl)) == 0);

	if (rc != -ENOMSG)
		dev_err(ac->dev, "%s: qmi_recv_msg failed %d\n",
			__func__, rc);
}

static void wcd_spi_ac_clnt_notify(struct qmi_handle *hdl,
				   enum qmi_event_type event, void *priv_data)
{
	struct wcd_spi_ac_priv *ac;

	if (!priv_data) {
		pr_err("%s: Invalid private data\n", __func__);
		return;
	}

	ac = (struct wcd_spi_ac_priv *) priv_data;

	switch (event) {
	case QMI_RECV_MSG:
		queue_work(ac->qmi_wq, &ac->recv_msg_work);
		break;
	default:
		break;
	}
}

static void wcd_spi_ac_svc_arrive(struct work_struct *work)
{
	struct wcd_spi_ac_priv *ac;
	int ret;

	ac = container_of(work, struct wcd_spi_ac_priv,
			  svc_arr_work);
	if (!ac) {
		pr_err("%s: Invalid private data\n",
		       __func__);
		return;
	}

	WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->svc_lock);
	ac->qmi_hdl = qmi_handle_create(wcd_spi_ac_clnt_notify,
					ac);
	if (!ac->qmi_hdl) {
		dev_err(ac->dev, "%s: qmi_handle_create failed\n",
			__func__);
		goto done;
	}

	ret = qmi_connect_to_service(ac->qmi_hdl,
				     WCD_SPI_CTL_SERVICE_ID_V01,
				     WCD_SPI_CTL_SERVICE_VERS_V01,
				     WCD_SPI_CTL_INS_ID);
	if (ret) {
		dev_err(ac->dev, "%s: can't connect to service, error %d\n",
			__func__, ret);
		qmi_handle_destroy(ac->qmi_hdl);
		ac->qmi_hdl = NULL;
		goto done;
	}

	/* Mark service as online */
	wcd_spi_ac_status_change(ac, 1);

	/*
	 * update the state and clear the WCD_SPI_AC_SVC_OFFLINE
	 * bit to indicate that the service is now online.
	 */
	ret = wcd_spi_ac_clear_sync(ac, WCD_SPI_AC_SVC_OFFLINE, true);
	if (ret)
		dev_err(ac->dev, "%s: clear_sync(SVC_OFFLINE) failed %d\n",
			__func__, ret);
done:
	WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->svc_lock);

}

static void wcd_spi_ac_svc_exit(struct work_struct *work)
{
	struct wcd_spi_ac_priv *ac;
	int ret = 0;

	ac = container_of(work, struct wcd_spi_ac_priv,
			  svc_exit_work);
	if (!ac) {
		pr_err("%s: Invalid private data\n",
		       __func__);
		return;
	}

	WCD_SPI_AC_MUTEX_LOCK(ac->dev, ac->svc_lock);
	ret = wcd_spi_ac_set_sync(ac, WCD_SPI_AC_SVC_OFFLINE, true);
	if (ret)
		dev_err(ac->dev, "%s: set_sync(SVC_OFFLINE) failed %d\n",
			__func__, ret);
	qmi_handle_destroy(ac->qmi_hdl);
	ac->qmi_hdl = NULL;
	wcd_spi_ac_status_change(ac, 0);
	WCD_SPI_AC_MUTEX_UNLOCK(ac->dev, ac->svc_lock);
}

static int wcd_spi_ac_svc_event(struct notifier_block *this,
				unsigned long event,
				void *data)
{
	struct wcd_spi_ac_priv *ac;

	ac = container_of(this, struct wcd_spi_ac_priv, nb);
	if (!ac) {
		pr_err("%s: Invalid private data\n", __func__);
		return -EINVAL;
	}

	dev_dbg(ac->dev, "%s: event = 0x%lx", __func__, event);

	switch (event) {
	case QMI_SERVER_ARRIVE:
		schedule_work(&ac->svc_arr_work);
		break;
	case QMI_SERVER_EXIT:
		schedule_work(&ac->svc_exit_work);
		break;
	default:
		dev_err(ac->dev, "%s unhandled event %ld\n",
			__func__, event);
		break;
	}

	return 0;
}

static int wcd_spi_ac_probe(struct platform_device *pdev)
{
	struct wcd_spi_ac_priv *ac;
	struct device *parent = pdev->dev.parent;
	int ret = 0;

	ac = devm_kzalloc(&pdev->dev, sizeof(*ac),
			  GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	ac->dev = &pdev->dev;
	ac->parent = parent;

	ret = wcd_spi_ac_reg_chardev(ac);
	if (ret)
		return ret;

	ret = wcd_spi_ac_procfs_init(ac);
	if (ret)
		goto unreg_chardev;

	mutex_init(&ac->status_lock);
	mutex_init(&ac->state_lock);
	mutex_init(&ac->svc_lock);
	init_waitqueue_head(&ac->svc_poll_wait);
	ac->svc_offline = 1;
	ac->state = (WCD_SPI_AC_SVC_OFFLINE |
		     WCD_SPI_AC_UNINITIALIZED);
	ac->current_access = WCD_SPI_AC_LOCAL_ACCESS;

	ac->nb.notifier_call = wcd_spi_ac_svc_event;
	INIT_WORK(&ac->svc_arr_work, wcd_spi_ac_svc_arrive);
	INIT_WORK(&ac->svc_exit_work, wcd_spi_ac_svc_exit);
	INIT_WORK(&ac->recv_msg_work, wcd_spi_ac_recv_msg);

	ac->qmi_wq = create_singlethread_workqueue("qmi_wq");
	if (!ac->qmi_wq) {
		dev_err(&pdev->dev,
			"%s: create_singlethread_workqueue failed\n",
			__func__);
		ret = -ENOMEM;
		goto deinit_procfs;
	}

	dev_set_drvdata(&pdev->dev, ac);

	ret = qmi_svc_event_notifier_register(
				WCD_SPI_CTL_SERVICE_ID_V01,
				WCD_SPI_CTL_SERVICE_VERS_V01,
				WCD_SPI_CTL_INS_ID,
				&ac->nb);
	if (ret) {
		dev_err(&pdev->dev,
			"%s: qmi_svc_event_notifier_register failed %d\n",
			__func__, ret);
		goto destroy_wq;
	}

	return 0;

destroy_wq:
	destroy_workqueue(ac->qmi_wq);
	dev_set_drvdata(&pdev->dev, NULL);
deinit_procfs:
	wcd_spi_ac_procfs_deinit(ac);
	mutex_destroy(&ac->status_lock);
	mutex_destroy(&ac->state_lock);
	mutex_destroy(&ac->svc_lock);
unreg_chardev:
	wcd_spi_ac_unreg_chardev(ac);
	return ret;
}

static int wcd_spi_ac_remove(struct platform_device *pdev)
{
	struct wcd_spi_ac_priv *ac;

	ac = dev_get_drvdata(&pdev->dev);
	qmi_svc_event_notifier_unregister(
				WCD_SPI_CTL_SERVICE_ID_V01,
				WCD_SPI_CTL_SERVICE_VERS_V01,
				WCD_SPI_CTL_INS_ID,
				&ac->nb);
	if (ac->qmi_wq)
		destroy_workqueue(ac->qmi_wq);
	wcd_spi_ac_unreg_chardev(ac);
	wcd_spi_ac_procfs_deinit(ac);
	mutex_destroy(&ac->status_lock);
	mutex_destroy(&ac->state_lock);
	mutex_destroy(&ac->svc_lock);

	return 0;
}

static const struct of_device_id wcd_spi_ac_of_match[] = {
	{ .compatible = "qcom,wcd-spi-ac" },
	{ },
};

MODULE_DEVICE_TABLE(of, wcd_spi_ac_of_match);

static struct platform_driver wcd_spi_ac_driver = {
	.driver = {
		.name = "qcom,wcd-spi-ac",
		.of_match_table = wcd_spi_ac_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = wcd_spi_ac_probe,
	.remove = wcd_spi_ac_remove,
};

module_platform_driver(wcd_spi_ac_driver);

MODULE_DESCRIPTION("WCD SPI access control driver");
MODULE_LICENSE("GPL v2");