/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/socket.h>
#include <linux/gfp.h>
#include <linux/qmi_encdec.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

#include <mach/msm_qmi_interface.h>
#include <mach/msm_ipc_router.h>

#include "msm_qmi_interface_priv.h"

static LIST_HEAD(svc_event_nb_list);
static DEFINE_MUTEX(svc_event_nb_list_lock);
static DEFINE_MUTEX(msm_qmi_init_lock);
static struct workqueue_struct *msm_qmi_pending_workqueue;

struct elem_info qmi_response_type_v01_ei[] = {
	{
		.data_type = QMI_SIGNED_2_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(uint16_t),
		.is_array = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct qmi_response_type_v01,
				   result),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_2_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(uint16_t),
		.is_array = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct qmi_response_type_v01,
				   error),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = 0,
		.ei_array = NULL,
	},
};
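
/*
 * Illustrative sketch (not part of this driver): a client encoding table for
 * a hypothetical response message typically embeds the common result TLV by
 * pointing .ei_array at qmi_response_type_v01_ei. The structure name and the
 * 0x02 TLV type below are assumptions for illustration only.
 *
 *	struct example_resp_msg_v01 {
 *		struct qmi_response_type_v01 resp;
 *	};
 *
 *	static struct elem_info example_resp_msg_v01_ei[] = {
 *		{
 *			.data_type = QMI_STRUCT,
 *			.elem_len = 1,
 *			.elem_size = sizeof(struct qmi_response_type_v01),
 *			.is_array = NO_ARRAY,
 *			.tlv_type = 0x02,
 *			.offset = offsetof(struct example_resp_msg_v01, resp),
 *			.ei_array = qmi_response_type_v01_ei,
 *		},
 *		{
 *			.data_type = QMI_EOTI,
 *			.is_array = NO_ARRAY,
 *			.tlv_type = QMI_COMMON_TLV_TYPE,
 *		},
 *	};
 */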

static void qmi_event_notify(unsigned event, void *priv)
{
	struct qmi_handle *handle = (struct qmi_handle *)priv;
	unsigned long flags;

	if (!handle)
		return;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return;
	}

	switch (event) {
	case MSM_IPC_ROUTER_READ_CB:
		spin_lock_irqsave(&handle->notify_lock, flags);
		handle->notify(handle, QMI_RECV_MSG, handle->notify_priv);
		spin_unlock_irqrestore(&handle->notify_lock, flags);
		break;

	case MSM_IPC_ROUTER_RESUME_TX:
		queue_delayed_work(msm_qmi_pending_workqueue,
				   &handle->resume_tx_work,
				   msecs_to_jiffies(0));
		break;

	default:
		break;
	}
	mutex_unlock(&handle->handle_lock);
}

/**
 * init_msm_qmi() - Init function for kernel space QMI
 *
 * Initialize the QMI resources that are shared by all kernel space QMI
 * users. This does not need to run at module_init time, so it is invoked
 * lazily when the first kernel space QMI handle is created.
 */
static void init_msm_qmi(void)
{
	static bool msm_qmi_inited;

	if (likely(msm_qmi_inited))
		return;

	mutex_lock(&msm_qmi_init_lock);
	if (likely(msm_qmi_inited && msm_qmi_pending_workqueue)) {
		mutex_unlock(&msm_qmi_init_lock);
		return;
	}
	msm_qmi_inited = true;
	msm_qmi_pending_workqueue =
		create_singlethread_workqueue("msm_qmi_rtx_q");
	mutex_unlock(&msm_qmi_init_lock);
}

/**
 * handle_resume_tx() - Handle the Resume_Tx event
 * @work : Pointer to the work structure.
 *
 * This function handles the resume_tx event for any QMI client that
 * exists in the kernel space. It walks the pending_txn_list of the
 * handle and attempts a send for each transaction in that list.
 */
static void handle_resume_tx(struct work_struct *work)
{
	struct delayed_work *rtx_work = to_delayed_work(work);
	struct qmi_handle *handle =
		container_of(rtx_work, struct qmi_handle, resume_tx_work);
	struct qmi_txn *pend_txn, *temp_txn;
	int ret;
	uint16_t msg_id;

	mutex_lock(&handle->handle_lock);
	list_for_each_entry_safe(pend_txn, temp_txn,
				 &handle->pending_txn_list, list) {
		ret = msm_ipc_router_send_msg(
			(struct msm_ipc_port *)handle->src_port,
			(struct msm_ipc_addr *)handle->dest_info,
			pend_txn->enc_data, pend_txn->enc_data_len);

		if (ret == -EAGAIN) {
			mutex_unlock(&handle->handle_lock);
			return;
		}
		msg_id = ((struct qmi_header *)pend_txn->enc_data)->msg_id;
		kfree(pend_txn->enc_data);
		if (ret < 0) {
			pr_err("%s: Sending transaction %d from port %d failed\n",
				__func__, pend_txn->txn_id,
				((struct msm_ipc_port *)handle->src_port)->
						this_port.port_id);
			if (pend_txn->type == QMI_ASYNC_TXN) {
				pend_txn->resp_cb(pend_txn->handle,
						msg_id, pend_txn->resp,
						pend_txn->resp_cb_data,
						ret);
				list_del(&pend_txn->list);
				kfree(pend_txn);
			} else if (pend_txn->type == QMI_SYNC_TXN) {
				pend_txn->send_stat = ret;
				wake_up(&pend_txn->wait_q);
			}
		} else {
			list_del(&pend_txn->list);
			list_add_tail(&pend_txn->list, &handle->txn_list);
		}
	}
	mutex_unlock(&handle->handle_lock);
}

struct qmi_handle *qmi_handle_create(
	void (*notify)(struct qmi_handle *handle,
		       enum qmi_event_type event, void *notify_priv),
	void *notify_priv)
{
	struct qmi_handle *temp_handle;
	struct msm_ipc_port *port_ptr;

	temp_handle = kzalloc(sizeof(struct qmi_handle), GFP_KERNEL);
	if (!temp_handle) {
		pr_err("%s: Failure allocating client handle\n", __func__);
		return NULL;
	}

	port_ptr = msm_ipc_router_create_port(qmi_event_notify,
					      (void *)temp_handle);
	if (!port_ptr) {
		pr_err("%s: IPC router port creation failed\n", __func__);
		kfree(temp_handle);
		return NULL;
	}

	temp_handle->src_port = port_ptr;
	temp_handle->next_txn_id = 1;
	INIT_LIST_HEAD(&temp_handle->txn_list);
	INIT_LIST_HEAD(&temp_handle->pending_txn_list);
	mutex_init(&temp_handle->handle_lock);
	spin_lock_init(&temp_handle->notify_lock);
	temp_handle->notify = notify;
	temp_handle->notify_priv = notify_priv;
	temp_handle->handle_reset = 0;
	init_waitqueue_head(&temp_handle->reset_waitq);
	INIT_DELAYED_WORK(&temp_handle->resume_tx_work, handle_resume_tx);
	init_msm_qmi();
	return temp_handle;
}
EXPORT_SYMBOL(qmi_handle_create);
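
/*
 * Illustrative sketch (not part of this driver): a typical kernel space
 * client creates a handle, connects it to a service and tears it down as
 * below. The notify callback runs in atomic context, so it only defers
 * work. The callback, work item, service ID and instance ID names are
 * hypothetical.
 *
 *	static void example_notify(struct qmi_handle *handle,
 *				   enum qmi_event_type event,
 *				   void *notify_priv)
 *	{
 *		if (event == QMI_RECV_MSG)
 *			schedule_work(&example_recv_work);
 *	}
 *
 *	handle = qmi_handle_create(example_notify, NULL);
 *	if (!handle)
 *		return -ENOMEM;
 *	rc = qmi_connect_to_service(handle, EXAMPLE_SERVICE_ID,
 *				    EXAMPLE_SERVICE_INSTANCE);
 *	if (rc < 0) {
 *		qmi_handle_destroy(handle);
 *		return rc;
 *	}
 *	...
 *	qmi_handle_destroy(handle);
 */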

static void clean_txn_info(struct qmi_handle *handle)
{
	struct qmi_txn *txn_handle, *temp_txn_handle, *pend_txn;

	list_for_each_entry_safe(pend_txn, temp_txn_handle,
				 &handle->pending_txn_list, list) {
		if (pend_txn->type == QMI_ASYNC_TXN) {
			list_del(&pend_txn->list);
			pend_txn->resp_cb(pend_txn->handle,
					((struct qmi_header *)
					pend_txn->enc_data)->msg_id,
					pend_txn->resp, pend_txn->resp_cb_data,
					-ENETRESET);
			kfree(pend_txn->enc_data);
			kfree(pend_txn);
		} else if (pend_txn->type == QMI_SYNC_TXN) {
			kfree(pend_txn->enc_data);
			wake_up(&pend_txn->wait_q);
		}
	}
	list_for_each_entry_safe(txn_handle, temp_txn_handle,
				 &handle->txn_list, list) {
		if (txn_handle->type == QMI_ASYNC_TXN) {
			list_del(&txn_handle->list);
			kfree(txn_handle);
		} else if (txn_handle->type == QMI_SYNC_TXN) {
			wake_up(&txn_handle->wait_q);
		}
	}
}

int qmi_handle_destroy(struct qmi_handle *handle)
{
	int rc;

	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	handle->handle_reset = 1;
	clean_txn_info(handle);
	mutex_unlock(&handle->handle_lock);
	flush_delayed_work(&handle->resume_tx_work);
	rc = wait_event_interruptible(handle->reset_waitq,
				      list_empty(&handle->txn_list));

	/* TODO: Destroy client owned transaction */
	msm_ipc_router_close_port((struct msm_ipc_port *)(handle->src_port));
	kfree(handle->dest_info);
	kfree(handle);
	return 0;
}
EXPORT_SYMBOL(qmi_handle_destroy);

int qmi_register_ind_cb(struct qmi_handle *handle,
	void (*ind_cb)(struct qmi_handle *handle,
		       unsigned int msg_id, void *msg,
		       unsigned int msg_len, void *ind_cb_priv),
	void *ind_cb_priv)
{
	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return -ENETRESET;
	}

	handle->ind_cb = ind_cb;
	handle->ind_cb_priv = ind_cb_priv;
	mutex_unlock(&handle->handle_lock);
	return 0;
}
EXPORT_SYMBOL(qmi_register_ind_cb);
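
/*
 * Illustrative sketch (not part of this driver): an indication callback
 * registered with qmi_register_ind_cb() receives the raw buffer, still
 * carrying the QMI header (mirroring handle_qmi_indication() below), and
 * decodes the payload itself with qmi_kernel_decode(). The message ID,
 * descriptor and structure names are hypothetical.
 *
 *	static void example_ind_cb(struct qmi_handle *handle,
 *				   unsigned int msg_id, void *msg,
 *				   unsigned int msg_len, void *ind_cb_priv)
 *	{
 *		struct example_ind_msg_v01 ind;
 *
 *		if (msg_id != EXAMPLE_IND_MSG_ID)
 *			return;
 *		if (qmi_kernel_decode(&example_ind_msg_desc, &ind,
 *				      (char *)msg + QMI_HEADER_SIZE,
 *				      msg_len) < 0)
 *			pr_err("example: indication decode failed\n");
 *	}
 *
 *	rc = qmi_register_ind_cb(handle, example_ind_cb, NULL);
 */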

static int qmi_encode_and_send_req(struct qmi_txn **ret_txn_handle,
	struct qmi_handle *handle, enum txn_type type,
	struct msg_desc *req_desc, void *req, unsigned int req_len,
	struct msg_desc *resp_desc, void *resp, unsigned int resp_len,
	void (*resp_cb)(struct qmi_handle *handle,
			unsigned int msg_id, void *msg,
			void *resp_cb_data, int stat),
	void *resp_cb_data)
{
	struct qmi_txn *txn_handle;
	int rc, encoded_req_len;
	void *encoded_req;

	if (!handle || !handle->dest_info ||
	    !req_desc || !req || !resp_desc || !resp)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return -ENETRESET;
	}

	/* Allocate Transaction Info */
	txn_handle = kzalloc(sizeof(struct qmi_txn), GFP_KERNEL);
	if (!txn_handle) {
		pr_err("%s: Failed to allocate txn handle\n", __func__);
		mutex_unlock(&handle->handle_lock);
		return -ENOMEM;
	}
	txn_handle->type = type;
	INIT_LIST_HEAD(&txn_handle->list);
	init_waitqueue_head(&txn_handle->wait_q);

	/* Cache the parameters passed with this transaction */
	txn_handle->handle = handle;
	txn_handle->resp_desc = resp_desc;
	txn_handle->resp = resp;
	txn_handle->resp_len = resp_len;
	txn_handle->resp_received = 0;
	txn_handle->resp_cb = resp_cb;
	txn_handle->resp_cb_data = resp_cb_data;
	txn_handle->enc_data = NULL;
	txn_handle->enc_data_len = 0;

	/* Encode the request msg */
	encoded_req_len = req_desc->max_msg_len + QMI_HEADER_SIZE;
	encoded_req = kmalloc(encoded_req_len, GFP_KERNEL);
	if (!encoded_req) {
		pr_err("%s: Failed to allocate req_msg_buf\n", __func__);
		rc = -ENOMEM;
		goto encode_and_send_req_err1;
	}
	rc = qmi_kernel_encode(req_desc,
		(void *)(encoded_req + QMI_HEADER_SIZE),
		req_desc->max_msg_len, req);
	if (rc < 0) {
		pr_err("%s: Encode Failure %d\n", __func__, rc);
		goto encode_and_send_req_err2;
	}
	encoded_req_len = rc;

	/* Encode the header & Add to the txn_list */
	if (!handle->next_txn_id)
		handle->next_txn_id++;
	txn_handle->txn_id = handle->next_txn_id++;
	encode_qmi_header(encoded_req, QMI_REQUEST_CONTROL_FLAG,
			  txn_handle->txn_id, req_desc->msg_id,
			  encoded_req_len);
	encoded_req_len += QMI_HEADER_SIZE;

	/*
	 * Check if this port has transactions queued to its pending list
	 * and if there are any pending transactions then add the current
	 * transaction to the pending list rather than sending it. This avoids
	 * out-of-order message transfers.
	 */
	if (!list_empty(&handle->pending_txn_list)) {
		rc = -EAGAIN;
		goto append_pend_txn;
	}

	list_add_tail(&txn_handle->list, &handle->txn_list);
	/* Send the request */
	rc = msm_ipc_router_send_msg((struct msm_ipc_port *)(handle->src_port),
		(struct msm_ipc_addr *)handle->dest_info,
		encoded_req, encoded_req_len);
append_pend_txn:
	if (rc == -EAGAIN) {
		txn_handle->enc_data = encoded_req;
		txn_handle->enc_data_len = encoded_req_len;
		if (list_empty(&handle->pending_txn_list))
			list_del(&txn_handle->list);
		list_add_tail(&txn_handle->list, &handle->pending_txn_list);
		if (ret_txn_handle)
			*ret_txn_handle = txn_handle;
		mutex_unlock(&handle->handle_lock);
		return 0;
	}
	if (rc < 0) {
		pr_err("%s: send_msg failed %d\n", __func__, rc);
		goto encode_and_send_req_err3;
	}
	mutex_unlock(&handle->handle_lock);

	kfree(encoded_req);
	if (ret_txn_handle)
		*ret_txn_handle = txn_handle;
	return 0;

encode_and_send_req_err3:
	list_del(&txn_handle->list);
encode_and_send_req_err2:
	kfree(encoded_req);
encode_and_send_req_err1:
	kfree(txn_handle);
	mutex_unlock(&handle->handle_lock);
	return rc;
}

int qmi_send_req_wait(struct qmi_handle *handle,
		      struct msg_desc *req_desc,
		      void *req, unsigned int req_len,
		      struct msg_desc *resp_desc,
		      void *resp, unsigned int resp_len,
		      unsigned long timeout_ms)
{
	struct qmi_txn *txn_handle = NULL;
	int rc;

	/* Encode and send the request */
	rc = qmi_encode_and_send_req(&txn_handle, handle, QMI_SYNC_TXN,
				     req_desc, req, req_len,
				     resp_desc, resp, resp_len,
				     NULL, NULL);
	if (rc < 0) {
		pr_err("%s: Error encode & send req: %d\n", __func__, rc);
		return rc;
	}

	/* Wait for the response */
	if (!timeout_ms) {
		wait_event(txn_handle->wait_q,
			   (txn_handle->resp_received ||
			    handle->handle_reset ||
			    (txn_handle->send_stat < 0)));
	} else {
		rc = wait_event_timeout(txn_handle->wait_q,
				(txn_handle->resp_received ||
				 handle->handle_reset ||
				 (txn_handle->send_stat < 0)),
				msecs_to_jiffies(timeout_ms));
		if (rc == 0)
			rc = -ETIMEDOUT;
	}

	mutex_lock(&handle->handle_lock);
	if (!txn_handle->resp_received) {
		pr_err("%s: Response Wait Error %d\n", __func__, rc);
		if (handle->handle_reset)
			rc = -ENETRESET;
		if (rc >= 0)
			rc = -EFAULT;
		if (txn_handle->send_stat < 0)
			rc = txn_handle->send_stat;
		goto send_req_wait_err;
	}
	rc = 0;

send_req_wait_err:
	list_del(&txn_handle->list);
	kfree(txn_handle);
	mutex_unlock(&handle->handle_lock);
	wake_up(&handle->reset_waitq);
	return rc;
}
EXPORT_SYMBOL(qmi_send_req_wait);
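
/*
 * Illustrative sketch (not part of this driver): a synchronous request on a
 * connected handle. The message structures, descriptors, message ID and
 * length macros are hypothetical; real clients derive them from their
 * service IDL.
 *
 *	struct example_req_msg_v01 req = { .param = 1 };
 *	struct example_resp_msg_v01 resp;
 *	struct msg_desc req_desc = {
 *		.max_msg_len = EXAMPLE_REQ_MAX_LEN,
 *		.msg_id = EXAMPLE_REQ_MSG_ID,
 *		.ei_array = example_req_msg_v01_ei,
 *	};
 *	struct msg_desc resp_desc = {
 *		.max_msg_len = EXAMPLE_RESP_MAX_LEN,
 *		.msg_id = EXAMPLE_REQ_MSG_ID,
 *		.ei_array = example_resp_msg_v01_ei,
 *	};
 *
 *	rc = qmi_send_req_wait(handle, &req_desc, &req, sizeof(req),
 *			       &resp_desc, &resp, sizeof(resp), 5000);
 *	if (rc < 0)
 *		pr_err("example: sync request failed %d\n", rc);
 */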

int qmi_send_req_nowait(struct qmi_handle *handle,
			struct msg_desc *req_desc,
			void *req, unsigned int req_len,
			struct msg_desc *resp_desc,
			void *resp, unsigned int resp_len,
			void (*resp_cb)(struct qmi_handle *handle,
					unsigned int msg_id, void *msg,
					void *resp_cb_data, int stat),
			void *resp_cb_data)
{
	return qmi_encode_and_send_req(NULL, handle, QMI_ASYNC_TXN,
				       req_desc, req, req_len,
				       resp_desc, resp, resp_len,
				       resp_cb, resp_cb_data);
}
EXPORT_SYMBOL(qmi_send_req_nowait);
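
/*
 * Illustrative sketch (not part of this driver): the asynchronous variant
 * hands the decoded response to a callback, so the response buffer must stay
 * valid until that callback runs. The callback and message names below are
 * hypothetical.
 *
 *	static void example_resp_cb(struct qmi_handle *handle,
 *				    unsigned int msg_id, void *msg,
 *				    void *resp_cb_data, int stat)
 *	{
 *		struct example_resp_msg_v01 *resp = msg;
 *
 *		if (stat < 0)
 *			pr_err("example: async request failed %d\n", stat);
 *	}
 *
 *	rc = qmi_send_req_nowait(handle, &req_desc, &req, sizeof(req),
 *				 &resp_desc, &resp, sizeof(resp),
 *				 example_resp_cb, NULL);
 */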

static struct qmi_txn *find_txn_handle(struct qmi_handle *handle,
				       uint16_t txn_id)
{
	struct qmi_txn *txn_handle;

	list_for_each_entry(txn_handle, &handle->txn_list, list) {
		if (txn_handle->txn_id == txn_id)
			return txn_handle;
	}
	return NULL;
}

static int handle_qmi_response(struct qmi_handle *handle,
			       unsigned char *resp_msg, uint16_t txn_id,
			       uint16_t msg_id, uint16_t msg_len)
{
	struct qmi_txn *txn_handle;
	int rc;

	/* Find the transaction handle */
	txn_handle = find_txn_handle(handle, txn_id);
	if (!txn_handle) {
		pr_err("%s: Response received for non-existent txn_id %d\n",
			__func__, txn_id);
		return -EINVAL;
	}

	/* Decode the message */
	rc = qmi_kernel_decode(txn_handle->resp_desc, txn_handle->resp,
			       (void *)(resp_msg + QMI_HEADER_SIZE), msg_len);
	if (rc < 0) {
		pr_err("%s: Response Decode Failure <%d: %d: %d> rc: %d\n",
			__func__, txn_id, msg_id, msg_len, rc);
		wake_up(&txn_handle->wait_q);
		if (txn_handle->type == QMI_ASYNC_TXN) {
			list_del(&txn_handle->list);
			kfree(txn_handle);
		}
		return rc;
	}

	/* Handle async or sync resp */
	switch (txn_handle->type) {
	case QMI_SYNC_TXN:
		txn_handle->resp_received = 1;
		wake_up(&txn_handle->wait_q);
		rc = 0;
		break;

	case QMI_ASYNC_TXN:
		if (txn_handle->resp_cb)
			txn_handle->resp_cb(txn_handle->handle, msg_id,
					    txn_handle->resp,
					    txn_handle->resp_cb_data, 0);
		list_del(&txn_handle->list);
		kfree(txn_handle);
		rc = 0;
		break;

	default:
		pr_err("%s: Unrecognized transaction type\n", __func__);
		return -EFAULT;
	}
	return rc;
}

static int handle_qmi_indication(struct qmi_handle *handle, void *msg,
				 unsigned int msg_id, unsigned int msg_len)
{
	if (handle->ind_cb)
		handle->ind_cb(handle, msg_id, msg,
				msg_len, handle->ind_cb_priv);
	return 0;
}

int qmi_recv_msg(struct qmi_handle *handle)
{
	unsigned int recv_msg_len;
	unsigned char *recv_msg = NULL;
	struct msm_ipc_addr src_addr;
	unsigned char cntl_flag;
	uint16_t txn_id, msg_id, msg_len;
	int rc;

	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return -ENETRESET;
	}

	/* Read the messages */
	rc = msm_ipc_router_read_msg((struct msm_ipc_port *)(handle->src_port),
				     &src_addr, &recv_msg, &recv_msg_len);
	if (rc == -ENOMSG) {
		mutex_unlock(&handle->handle_lock);
		return rc;
	}

	if (rc < 0) {
		pr_err("%s: Read failed %d\n", __func__, rc);
		mutex_unlock(&handle->handle_lock);
		return rc;
	}

	/* Decode the header & Handle the req, resp, indication message */
	decode_qmi_header(recv_msg, &cntl_flag, &txn_id, &msg_id, &msg_len);

	switch (cntl_flag) {
	case QMI_RESPONSE_CONTROL_FLAG:
		rc = handle_qmi_response(handle, recv_msg,
					 txn_id, msg_id, msg_len);
		break;

	case QMI_INDICATION_CONTROL_FLAG:
		rc = handle_qmi_indication(handle, recv_msg, msg_id, msg_len);
		break;

	default:
		rc = -EFAULT;
		pr_err("%s: Unsupported message type %d\n",
			__func__, cntl_flag);
		break;
	}
	kfree(recv_msg);
	mutex_unlock(&handle->handle_lock);
	return rc;
}
EXPORT_SYMBOL(qmi_recv_msg);
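
/*
 * Illustrative sketch (not part of this driver): since the notify callback
 * passed to qmi_handle_create() runs in atomic context and qmi_recv_msg()
 * takes a mutex, clients usually defer the read to process context and may
 * drain the port there until -ENOMSG. The handle and work names are
 * hypothetical, and the drain loop is one possible client policy.
 *
 *	static void example_recv_work_fn(struct work_struct *work)
 *	{
 *		int rc;
 *
 *		do {
 *			rc = qmi_recv_msg(example_handle);
 *		} while (rc == 0);
 *		if (rc != -ENOMSG)
 *			pr_err("example: qmi_recv_msg failed %d\n", rc);
 *	}
 */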

int qmi_connect_to_service(struct qmi_handle *handle,
			   uint32_t service_id, uint32_t instance_id)
{
	struct msm_ipc_port_name svc_name;
	struct msm_ipc_server_info svc_info;
	struct msm_ipc_addr *svc_dest_addr;
	int rc;

	if (!handle)
		return -EINVAL;

	svc_dest_addr = kzalloc(sizeof(struct msm_ipc_addr),
				GFP_KERNEL);
	if (!svc_dest_addr) {
		pr_err("%s: Failure allocating memory\n", __func__);
		return -ENOMEM;
	}

	svc_name.service = service_id;
	svc_name.instance = instance_id;

	rc = msm_ipc_router_lookup_server_name(&svc_name, &svc_info, 1, 0xFF);
	if (rc <= 0) {
		pr_err("%s: Server not found\n", __func__);
		kfree(svc_dest_addr);
		return -ENODEV;
	}
	svc_dest_addr->addrtype = MSM_IPC_ADDR_ID;
	svc_dest_addr->addr.port_addr.node_id = svc_info.node_id;
	svc_dest_addr->addr.port_addr.port_id = svc_info.port_id;
	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		kfree(svc_dest_addr);
		return -ENETRESET;
	}
	handle->dest_info = svc_dest_addr;
	mutex_unlock(&handle->handle_lock);

	return 0;
}
EXPORT_SYMBOL(qmi_connect_to_service);

static struct svc_event_nb *find_svc_event_nb_by_name(const char *name)
{
	struct svc_event_nb *temp;

	list_for_each_entry(temp, &svc_event_nb_list, list) {
		if (!strncmp(name, temp->pdriver_name,
			     sizeof(temp->pdriver_name)))
			return temp;
	}
	return NULL;
}

static int qmi_svc_event_probe(struct platform_device *pdev)
{
	struct svc_event_nb *temp;
	unsigned long flags;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb_by_name(pdev->name);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return -EINVAL;
	}

	spin_lock_irqsave(&temp->nb_lock, flags);
	temp->svc_avail = 1;
	raw_notifier_call_chain(&temp->svc_event_rcvr_list,
				QMI_SERVER_ARRIVE, NULL);
	spin_unlock_irqrestore(&temp->nb_lock, flags);
	mutex_unlock(&svc_event_nb_list_lock);
	return 0;
}

static int qmi_svc_event_remove(struct platform_device *pdev)
{
	struct svc_event_nb *temp;
	unsigned long flags;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb_by_name(pdev->name);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return -EINVAL;
	}

	spin_lock_irqsave(&temp->nb_lock, flags);
	temp->svc_avail = 0;
	raw_notifier_call_chain(&temp->svc_event_rcvr_list,
				QMI_SERVER_EXIT, NULL);
	spin_unlock_irqrestore(&temp->nb_lock, flags);
	mutex_unlock(&svc_event_nb_list_lock);
	return 0;
}

static struct svc_event_nb *find_svc_event_nb(uint32_t service_id,
					      uint32_t instance_id)
{
	struct svc_event_nb *temp;

	list_for_each_entry(temp, &svc_event_nb_list, list) {
		if (temp->service_id == service_id &&
		    temp->instance_id == instance_id)
			return temp;
	}
	return NULL;
}

static struct svc_event_nb *find_and_add_svc_event_nb(uint32_t service_id,
						      uint32_t instance_id)
{
	struct svc_event_nb *temp;
	int ret;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb(service_id, instance_id);
	if (temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return temp;
	}

	temp = kzalloc(sizeof(struct svc_event_nb), GFP_KERNEL);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		pr_err("%s: Failed to alloc notifier block\n", __func__);
		return temp;
	}

	spin_lock_init(&temp->nb_lock);
	temp->service_id = service_id;
	temp->instance_id = instance_id;
	INIT_LIST_HEAD(&temp->list);
	temp->svc_driver.probe = qmi_svc_event_probe;
	temp->svc_driver.remove = qmi_svc_event_remove;
	scnprintf(temp->pdriver_name, sizeof(temp->pdriver_name),
		  "QMI%08x:%08x", service_id, instance_id);
	temp->svc_driver.driver.name = temp->pdriver_name;
	RAW_INIT_NOTIFIER_HEAD(&temp->svc_event_rcvr_list);

	list_add_tail(&temp->list, &svc_event_nb_list);
	mutex_unlock(&svc_event_nb_list_lock);

	ret = platform_driver_register(&temp->svc_driver);
	if (ret < 0) {
		pr_err("%s: Failed pdriver register\n", __func__);
		mutex_lock(&svc_event_nb_list_lock);
		list_del(&temp->list);
		mutex_unlock(&svc_event_nb_list_lock);
		kfree(temp);
		temp = NULL;
	}

	return temp;
}

int qmi_svc_event_notifier_register(uint32_t service_id,
				    uint32_t instance_id,
				    struct notifier_block *nb)
{
	struct svc_event_nb *temp;
	unsigned long flags;
	int ret;

	temp = find_and_add_svc_event_nb(service_id, instance_id);
	if (!temp)
		return -EFAULT;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb(service_id, instance_id);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return -EFAULT;
	}
	spin_lock_irqsave(&temp->nb_lock, flags);
	if (temp->svc_avail)
		nb->notifier_call(nb, QMI_SERVER_ARRIVE, NULL);

	ret = raw_notifier_chain_register(&temp->svc_event_rcvr_list, nb);
	spin_unlock_irqrestore(&temp->nb_lock, flags);
	mutex_unlock(&svc_event_nb_list_lock);

	return ret;
}
EXPORT_SYMBOL(qmi_svc_event_notifier_register);
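
/*
 * Illustrative sketch (not part of this driver): clients track service
 * arrival and exit with a notifier block. The notifier may be invoked with
 * the nb_lock spinlock held, so it must not sleep; deferring to work is a
 * common pattern. The service ID, instance ID and work item names are
 * hypothetical.
 *
 *	static int example_svc_event(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		if (event == QMI_SERVER_ARRIVE)
 *			schedule_work(&example_connect_work);
 *		else if (event == QMI_SERVER_EXIT)
 *			schedule_work(&example_disconnect_work);
 *		return 0;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_svc_event,
 *	};
 *
 *	rc = qmi_svc_event_notifier_register(EXAMPLE_SERVICE_ID,
 *					     EXAMPLE_SERVICE_INSTANCE,
 *					     &example_nb);
 */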

int qmi_svc_event_notifier_unregister(uint32_t service_id,
				      uint32_t instance_id,
				      struct notifier_block *nb)
{
	int ret;
	struct svc_event_nb *temp;
	unsigned long flags;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb(service_id, instance_id);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return -EINVAL;
	}

	spin_lock_irqsave(&temp->nb_lock, flags);
	ret = raw_notifier_chain_unregister(&temp->svc_event_rcvr_list, nb);
	spin_unlock_irqrestore(&temp->nb_lock, flags);
	mutex_unlock(&svc_event_nb_list_lock);

	return ret;
}
EXPORT_SYMBOL(qmi_svc_event_notifier_unregister);

MODULE_DESCRIPTION("MSM QMI Interface");
MODULE_LICENSE("GPL v2");