/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/socket.h>
#include <linux/gfp.h>
#include <linux/qmi_encdec.h>

#include <mach/msm_qmi_interface.h>
#include <mach/msm_ipc_router.h>

#include "msm_qmi_interface_priv.h"

static LIST_HEAD(svc_event_nb_list);
static DEFINE_MUTEX(svc_event_nb_list_lock);

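/*
 * qmi_event_notify() - IPC Router event callback for a QMI client port
 *
 * Invoked by the IPC Router when activity occurs on the client port.
 * On a read event, the client's notify() callback is invoked under
 * notify_lock with QMI_RECV_MSG so the client can call qmi_recv_msg()
 * from its own context.
 */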
static void qmi_event_notify(unsigned event, void *priv)
{
	struct qmi_handle *handle = (struct qmi_handle *)priv;
	unsigned long flags;

	if (!handle)
		return;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return;
	}

	switch (event) {
	case MSM_IPC_ROUTER_READ_CB:
		spin_lock_irqsave(&handle->notify_lock, flags);
		handle->notify(handle, QMI_RECV_MSG, handle->notify_priv);
		spin_unlock_irqrestore(&handle->notify_lock, flags);
		break;

	default:
		break;
	}
	mutex_unlock(&handle->handle_lock);
}

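/*
 * qmi_handle_create() - Create a QMI client handle
 * @notify: Callback invoked on events (e.g. QMI_RECV_MSG) for this handle.
 * @notify_priv: Private data passed back through @notify.
 *
 * Allocates a handle, opens an IPC Router port for it and initializes
 * the transaction list, locks and reset wait queue.
 *
 * Return: Valid handle on success, NULL on failure.
 */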
struct qmi_handle *qmi_handle_create(
	void (*notify)(struct qmi_handle *handle,
		       enum qmi_event_type event, void *notify_priv),
	void *notify_priv)
{
	struct qmi_handle *temp_handle;
	struct msm_ipc_port *port_ptr;

	temp_handle = kzalloc(sizeof(struct qmi_handle), GFP_KERNEL);
	if (!temp_handle) {
		pr_err("%s: Failure allocating client handle\n", __func__);
		return NULL;
	}

	port_ptr = msm_ipc_router_create_port(qmi_event_notify,
					      (void *)temp_handle);
	if (!port_ptr) {
		pr_err("%s: IPC router port creation failed\n", __func__);
		kfree(temp_handle);
		return NULL;
	}

	temp_handle->src_port = port_ptr;
	temp_handle->next_txn_id = 1;
	INIT_LIST_HEAD(&temp_handle->txn_list);
	mutex_init(&temp_handle->handle_lock);
	spin_lock_init(&temp_handle->notify_lock);
	temp_handle->notify = notify;
	temp_handle->notify_priv = notify_priv;
	temp_handle->handle_reset = 0;
	init_waitqueue_head(&temp_handle->reset_waitq);
	return temp_handle;
}
EXPORT_SYMBOL(qmi_handle_create);

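/*
 * clean_txn_info() - Flush pending transactions on a handle
 *
 * Frees queued asynchronous transactions and wakes up waiters of
 * synchronous transactions so they can observe the handle reset.
 * Must be called with handle_lock held.
 */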
static void clean_txn_info(struct qmi_handle *handle)
{
	struct qmi_txn *txn_handle, *temp_txn_handle;

	list_for_each_entry_safe(txn_handle, temp_txn_handle,
				 &handle->txn_list, list) {
		if (txn_handle->type == QMI_ASYNC_TXN) {
			list_del(&txn_handle->list);
			kfree(txn_handle);
		} else if (txn_handle->type == QMI_SYNC_TXN) {
			wake_up(&txn_handle->wait_q);
		}
	}
}

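/*
 * qmi_handle_destroy() - Destroy a QMI client handle
 * @handle: Handle created by qmi_handle_create().
 *
 * Marks the handle as reset, flushes pending transactions, waits for
 * the transaction list to drain, then closes the IPC Router port and
 * frees the handle.
 *
 * Return: 0 on success, -EINVAL if @handle is NULL.
 */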
int qmi_handle_destroy(struct qmi_handle *handle)
{
	int rc;

	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	handle->handle_reset = 1;
	clean_txn_info(handle);
	mutex_unlock(&handle->handle_lock);

	rc = wait_event_interruptible(handle->reset_waitq,
				      list_empty(&handle->txn_list));

	/* TODO: Destroy client owned transaction */
	msm_ipc_router_close_port((struct msm_ipc_port *)(handle->src_port));
	kfree(handle->dest_info);
	kfree(handle);
	return 0;
}
EXPORT_SYMBOL(qmi_handle_destroy);

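/*
 * qmi_register_ind_cb() - Register an indication callback
 * @handle: QMI client handle.
 * @ind_cb: Callback invoked for each received indication message.
 * @ind_cb_priv: Private data passed back through @ind_cb.
 *
 * Return: 0 on success, -EINVAL on invalid handle, -ENETRESET if the
 * handle has been reset.
 */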
int qmi_register_ind_cb(struct qmi_handle *handle,
	void (*ind_cb)(struct qmi_handle *handle,
		       unsigned int msg_id, void *msg,
		       unsigned int msg_len, void *ind_cb_priv),
	void *ind_cb_priv)
{
	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return -ENETRESET;
	}

	handle->ind_cb = ind_cb;
	handle->ind_cb_priv = ind_cb_priv;
	mutex_unlock(&handle->handle_lock);
	return 0;
}
EXPORT_SYMBOL(qmi_register_ind_cb);

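/*
 * qmi_encode_and_send_req() - Common send path for sync and async requests
 *
 * Allocates a transaction, encodes the request with qmi_kernel_encode(),
 * prepends the QMI header with a fresh transaction ID, queues the
 * transaction on the handle's txn_list and sends the message to the
 * service over the IPC Router. On success the transaction is returned
 * through @ret_txn_handle (if non-NULL) so the caller can wait on it.
 */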
static int qmi_encode_and_send_req(struct qmi_txn **ret_txn_handle,
	struct qmi_handle *handle, enum txn_type type,
	struct msg_desc *req_desc, void *req, unsigned int req_len,
	struct msg_desc *resp_desc, void *resp, unsigned int resp_len,
	void (*resp_cb)(struct qmi_handle *handle,
			unsigned int msg_id, void *msg,
			void *resp_cb_data),
	void *resp_cb_data)
{
	struct qmi_txn *txn_handle;
	int rc, encoded_req_len;
	void *encoded_req;

	if (!handle || !handle->dest_info ||
	    !req_desc || !req || !resp_desc || !resp)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return -ENETRESET;
	}

	/* Allocate Transaction Info */
	txn_handle = kzalloc(sizeof(struct qmi_txn), GFP_KERNEL);
	if (!txn_handle) {
		pr_err("%s: Failed to allocate txn handle\n", __func__);
		mutex_unlock(&handle->handle_lock);
		return -ENOMEM;
	}
	txn_handle->type = type;
	INIT_LIST_HEAD(&txn_handle->list);
	init_waitqueue_head(&txn_handle->wait_q);

	/* Cache the parameters passed by the caller */
	txn_handle->handle = handle;
	txn_handle->resp_desc = resp_desc;
	txn_handle->resp = resp;
	txn_handle->resp_len = resp_len;
	txn_handle->resp_received = 0;
	txn_handle->resp_cb = resp_cb;
	txn_handle->resp_cb_data = resp_cb_data;

	/* Encode the request msg */
	encoded_req_len = req_desc->max_msg_len + QMI_HEADER_SIZE;
	encoded_req = kmalloc(encoded_req_len, GFP_KERNEL);
	if (!encoded_req) {
		pr_err("%s: Failed to allocate req_msg_buf\n", __func__);
		rc = -ENOMEM;
		goto encode_and_send_req_err1;
	}
	rc = qmi_kernel_encode(req_desc,
		(void *)(encoded_req + QMI_HEADER_SIZE),
		req_desc->max_msg_len, req);
	if (rc < 0) {
		pr_err("%s: Encode Failure %d\n", __func__, rc);
		goto encode_and_send_req_err2;
	}
	encoded_req_len = rc;

	/* Encode the header & Add to the txn_list */
	if (!handle->next_txn_id)
		handle->next_txn_id++;
	txn_handle->txn_id = handle->next_txn_id++;
	encode_qmi_header(encoded_req, QMI_REQUEST_CONTROL_FLAG,
			  txn_handle->txn_id, req_desc->msg_id,
			  encoded_req_len);
	encoded_req_len += QMI_HEADER_SIZE;
	list_add_tail(&txn_handle->list, &handle->txn_list);

	/* Send the request */
	rc = msm_ipc_router_send_msg((struct msm_ipc_port *)(handle->src_port),
		(struct msm_ipc_addr *)handle->dest_info,
		encoded_req, encoded_req_len);
	if (rc < 0) {
		pr_err("%s: send_msg failed %d\n", __func__, rc);
		goto encode_and_send_req_err3;
	}
	mutex_unlock(&handle->handle_lock);

	kfree(encoded_req);
	if (ret_txn_handle)
		*ret_txn_handle = txn_handle;
	return 0;

encode_and_send_req_err3:
	list_del(&txn_handle->list);
encode_and_send_req_err2:
	kfree(encoded_req);
encode_and_send_req_err1:
	kfree(txn_handle);
	mutex_unlock(&handle->handle_lock);
	return rc;
}

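/*
 * qmi_send_req_wait() - Send a QMI request and wait for the response
 *
 * Encodes and sends the request as a synchronous transaction, then
 * blocks until the matching response has been decoded into @resp, the
 * optional timeout expires, or the handle is reset. A @timeout_ms of 0
 * means wait indefinitely.
 */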
int qmi_send_req_wait(struct qmi_handle *handle,
		      struct msg_desc *req_desc,
		      void *req, unsigned int req_len,
		      struct msg_desc *resp_desc,
		      void *resp, unsigned int resp_len,
		      unsigned long timeout_ms)
{
	struct qmi_txn *txn_handle = NULL;
	int rc;

	/* Encode and send the request */
	rc = qmi_encode_and_send_req(&txn_handle, handle, QMI_SYNC_TXN,
				     req_desc, req, req_len,
				     resp_desc, resp, resp_len,
				     NULL, NULL);
	if (rc < 0) {
		pr_err("%s: Error encode & send req: %d\n", __func__, rc);
		return rc;
	}

	/* Wait for the response */
	if (!timeout_ms) {
		rc = wait_event_interruptible(txn_handle->wait_q,
					      (txn_handle->resp_received ||
					       handle->handle_reset));
	} else {
		rc = wait_event_interruptible_timeout(txn_handle->wait_q,
				(txn_handle->resp_received ||
				 handle->handle_reset),
				msecs_to_jiffies(timeout_ms));
		if (rc == 0)
			rc = -ETIMEDOUT;
	}

	mutex_lock(&handle->handle_lock);
	if (!txn_handle->resp_received) {
		pr_err("%s: Response Wait Error %d\n", __func__, rc);
		if (handle->handle_reset)
			rc = -ENETRESET;
		if (rc >= 0)
			rc = -EFAULT;
		goto send_req_wait_err;
	}
	rc = 0;

send_req_wait_err:
	list_del(&txn_handle->list);
	kfree(txn_handle);
	mutex_unlock(&handle->handle_lock);
	wake_up(&handle->reset_waitq);
	return rc;
}
EXPORT_SYMBOL(qmi_send_req_wait);

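/*
 * qmi_send_req_nowait() - Send a QMI request without blocking
 *
 * Queues the request as an asynchronous transaction. When the response
 * arrives it is decoded into @resp and @resp_cb is invoked from
 * qmi_recv_msg() context with @resp_cb_data.
 */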
int qmi_send_req_nowait(struct qmi_handle *handle,
			struct msg_desc *req_desc,
			void *req, unsigned int req_len,
			struct msg_desc *resp_desc,
			void *resp, unsigned int resp_len,
			void (*resp_cb)(struct qmi_handle *handle,
					unsigned int msg_id, void *msg,
					void *resp_cb_data),
			void *resp_cb_data)
{
	return qmi_encode_and_send_req(NULL, handle, QMI_ASYNC_TXN,
				       req_desc, req, req_len,
				       resp_desc, resp, resp_len,
				       resp_cb, resp_cb_data);
}
EXPORT_SYMBOL(qmi_send_req_nowait);

static struct qmi_txn *find_txn_handle(struct qmi_handle *handle,
				       uint16_t txn_id)
{
	struct qmi_txn *txn_handle;

	list_for_each_entry(txn_handle, &handle->txn_list, list) {
		if (txn_handle->txn_id == txn_id)
			return txn_handle;
	}
	return NULL;
}

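/*
 * handle_qmi_response() - Match and decode a received response
 *
 * Looks up the pending transaction by @txn_id, decodes the payload into
 * the caller-supplied response buffer, then either wakes the waiter
 * (sync transaction) or invokes the response callback and frees the
 * transaction (async transaction).
 */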
static int handle_qmi_response(struct qmi_handle *handle,
			       unsigned char *resp_msg, uint16_t txn_id,
			       uint16_t msg_id, uint16_t msg_len)
{
	struct qmi_txn *txn_handle;
	int rc;

	/* Find the transaction handle */
	txn_handle = find_txn_handle(handle, txn_id);
	if (!txn_handle) {
		pr_err("%s Response received for non-existent txn_id %d\n",
			__func__, txn_id);
		return -EINVAL;
	}

	/* Decode the message */
	rc = qmi_kernel_decode(txn_handle->resp_desc, txn_handle->resp,
			       (void *)(resp_msg + QMI_HEADER_SIZE), msg_len);
	if (rc < 0) {
		pr_err("%s: Response Decode Failure <%d: %d: %d> rc: %d\n",
			__func__, txn_id, msg_id, msg_len, rc);
		wake_up(&txn_handle->wait_q);
		if (txn_handle->type == QMI_ASYNC_TXN) {
			list_del(&txn_handle->list);
			kfree(txn_handle);
		}
		return rc;
	}

	/* Handle async or sync resp */
	switch (txn_handle->type) {
	case QMI_SYNC_TXN:
		txn_handle->resp_received = 1;
		wake_up(&txn_handle->wait_q);
		rc = 0;
		break;

	case QMI_ASYNC_TXN:
		if (txn_handle->resp_cb)
			txn_handle->resp_cb(txn_handle->handle, msg_id,
					    txn_handle->resp,
					    txn_handle->resp_cb_data);
		list_del(&txn_handle->list);
		kfree(txn_handle);
		rc = 0;
		break;

	default:
		pr_err("%s: Unrecognized transaction type\n", __func__);
		return -EFAULT;
	}
	return rc;
}

static int handle_qmi_indication(struct qmi_handle *handle, void *msg,
				 unsigned int msg_id, unsigned int msg_len)
{
	if (handle->ind_cb)
		handle->ind_cb(handle, msg_id, msg,
				msg_len, handle->ind_cb_priv);
	return 0;
}

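/*
 * qmi_recv_msg() - Read and dispatch one message from the client port
 *
 * Intended to be called by the client, typically from its own context
 * after its notify() callback reports QMI_RECV_MSG. Reads one message
 * from the IPC Router port, decodes the QMI header and dispatches it as
 * either a response or an indication.
 */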
int qmi_recv_msg(struct qmi_handle *handle)
{
	unsigned int recv_msg_len;
	unsigned char *recv_msg = NULL;
	struct msm_ipc_addr src_addr;
	unsigned char cntl_flag;
	uint16_t txn_id, msg_id, msg_len;
	int rc;

	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return -ENETRESET;
	}

	/* Read the messages */
	rc = msm_ipc_router_read_msg((struct msm_ipc_port *)(handle->src_port),
				     &src_addr, &recv_msg, &recv_msg_len);
	if (rc < 0) {
		pr_err("%s: Read failed %d\n", __func__, rc);
		mutex_unlock(&handle->handle_lock);
		return rc;
	}

	/* Decode the header & Handle the req, resp, indication message */
	decode_qmi_header(recv_msg, &cntl_flag, &txn_id, &msg_id, &msg_len);

	switch (cntl_flag) {
	case QMI_RESPONSE_CONTROL_FLAG:
		rc = handle_qmi_response(handle, recv_msg,
					 txn_id, msg_id, msg_len);
		break;

	case QMI_INDICATION_CONTROL_FLAG:
		rc = handle_qmi_indication(handle, recv_msg, msg_id, msg_len);
		break;

	default:
		rc = -EFAULT;
		pr_err("%s: Unsupported message type %d\n",
			__func__, cntl_flag);
		break;
	}
	kfree(recv_msg);
	mutex_unlock(&handle->handle_lock);
	return rc;
}
EXPORT_SYMBOL(qmi_recv_msg);

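/*
 * qmi_connect_to_service() - Look up a service and bind the handle to it
 *
 * Queries the IPC Router name server for <service_id:instance_id> and,
 * if found, stores the service's router address in handle->dest_info so
 * subsequent requests are routed to it.
 */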
int qmi_connect_to_service(struct qmi_handle *handle,
			   uint32_t service_id, uint32_t instance_id)
{
	struct msm_ipc_port_name svc_name;
	struct msm_ipc_server_info svc_info;
	struct msm_ipc_addr *svc_dest_addr;
	int rc;

	if (!handle)
		return -EINVAL;

	svc_dest_addr = kzalloc(sizeof(struct msm_ipc_addr),
				GFP_KERNEL);
	if (!svc_dest_addr) {
		pr_err("%s: Failure allocating memory\n", __func__);
		return -ENOMEM;
	}

	svc_name.service = service_id;
	svc_name.instance = instance_id;

	rc = msm_ipc_router_lookup_server_name(&svc_name, &svc_info, 1, 0xFF);
	if (rc <= 0) {
		pr_err("%s: Server not found\n", __func__);
		kfree(svc_dest_addr);
		return -ENODEV;
	}
	svc_dest_addr->addrtype = MSM_IPC_ADDR_ID;
	svc_dest_addr->addr.port_addr.node_id = svc_info.node_id;
	svc_dest_addr->addr.port_addr.port_id = svc_info.port_id;
	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		kfree(svc_dest_addr);
		return -ENETRESET;
	}
	handle->dest_info = svc_dest_addr;
	mutex_unlock(&handle->handle_lock);

	return 0;
}
EXPORT_SYMBOL(qmi_connect_to_service);

static struct svc_event_nb *find_svc_event_nb_by_name(const char *name)
{
	struct svc_event_nb *temp;

	list_for_each_entry(temp, &svc_event_nb_list, list) {
		if (!strncmp(name, temp->pdriver_name,
			     sizeof(temp->pdriver_name)))
			return temp;
	}
	return NULL;
}

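/*
 * qmi_svc_event_probe() - Platform driver probe for a watched service
 *
 * Invoked when a platform device matching the "QMI<service>:<instance>"
 * driver name is added, i.e. when the service becomes available. Marks
 * the service available and notifies all registered listeners with
 * QMI_SERVER_ARRIVE.
 */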
static int qmi_svc_event_probe(struct platform_device *pdev)
{
	struct svc_event_nb *temp;
	unsigned long flags;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb_by_name(pdev->name);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return -EINVAL;
	}

	spin_lock_irqsave(&temp->nb_lock, flags);
	temp->svc_avail = 1;
	raw_notifier_call_chain(&temp->svc_event_rcvr_list,
				QMI_SERVER_ARRIVE, NULL);
	spin_unlock_irqrestore(&temp->nb_lock, flags);
	mutex_unlock(&svc_event_nb_list_lock);
	return 0;
}

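/*
 * qmi_svc_event_remove() - Platform driver remove for a watched service
 *
 * Invoked when the matching platform device is removed, i.e. when the
 * service goes down. Marks the service unavailable and notifies all
 * registered listeners with QMI_SERVER_EXIT.
 */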
static int qmi_svc_event_remove(struct platform_device *pdev)
{
	struct svc_event_nb *temp;
	unsigned long flags;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb_by_name(pdev->name);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return -EINVAL;
	}

	spin_lock_irqsave(&temp->nb_lock, flags);
	temp->svc_avail = 0;
	raw_notifier_call_chain(&temp->svc_event_rcvr_list,
				QMI_SERVER_EXIT, NULL);
	spin_unlock_irqrestore(&temp->nb_lock, flags);
	mutex_unlock(&svc_event_nb_list_lock);
	return 0;
}

static struct svc_event_nb *find_svc_event_nb(uint32_t service_id,
					      uint32_t instance_id)
{
	struct svc_event_nb *temp;

	list_for_each_entry(temp, &svc_event_nb_list, list) {
		if (temp->service_id == service_id &&
		    temp->instance_id == instance_id)
			return temp;
	}
	return NULL;
}

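/*
 * find_and_add_svc_event_nb() - Get or create a notifier block entry
 *
 * Returns the existing svc_event_nb for <service_id:instance_id>, or
 * allocates one, adds it to svc_event_nb_list and registers a platform
 * driver named "QMI<service>:<instance>" whose probe/remove callbacks
 * track the service's availability.
 */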
static struct svc_event_nb *find_and_add_svc_event_nb(uint32_t service_id,
						       uint32_t instance_id)
{
	struct svc_event_nb *temp;
	int ret;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb(service_id, instance_id);
	if (temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return temp;
	}

	temp = kzalloc(sizeof(struct svc_event_nb), GFP_KERNEL);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		pr_err("%s: Failed to alloc notifier block\n", __func__);
		return temp;
	}

	spin_lock_init(&temp->nb_lock);
	temp->service_id = service_id;
	temp->instance_id = instance_id;
	INIT_LIST_HEAD(&temp->list);
	temp->svc_driver.probe = qmi_svc_event_probe;
	temp->svc_driver.remove = qmi_svc_event_remove;
	scnprintf(temp->pdriver_name, sizeof(temp->pdriver_name),
		  "QMI%08x:%08x", service_id, instance_id);
	temp->svc_driver.driver.name = temp->pdriver_name;
	RAW_INIT_NOTIFIER_HEAD(&temp->svc_event_rcvr_list);

	list_add_tail(&temp->list, &svc_event_nb_list);
	mutex_unlock(&svc_event_nb_list_lock);

	ret = platform_driver_register(&temp->svc_driver);
	if (ret < 0) {
		pr_err("%s: Failed pdriver register\n", __func__);
		mutex_lock(&svc_event_nb_list_lock);
		list_del(&temp->list);
		mutex_unlock(&svc_event_nb_list_lock);
		kfree(temp);
		temp = NULL;
	}

	return temp;
}

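/*
 * qmi_svc_event_notifier_register() - Register for service availability events
 *
 * Adds @nb to the notifier chain for <service_id:instance_id>, creating
 * the tracking entry if needed. If the service is already available the
 * notifier is called immediately with QMI_SERVER_ARRIVE.
 */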
int qmi_svc_event_notifier_register(uint32_t service_id,
				    uint32_t instance_id,
				    struct notifier_block *nb)
{
	struct svc_event_nb *temp;
	unsigned long flags;
	int ret;

	temp = find_and_add_svc_event_nb(service_id, instance_id);
	if (!temp)
		return -EFAULT;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb(service_id, instance_id);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return -EFAULT;
	}
	spin_lock_irqsave(&temp->nb_lock, flags);
	if (temp->svc_avail)
		nb->notifier_call(nb, QMI_SERVER_ARRIVE, NULL);

	ret = raw_notifier_chain_register(&temp->svc_event_rcvr_list, nb);
	spin_unlock_irqrestore(&temp->nb_lock, flags);
	mutex_unlock(&svc_event_nb_list_lock);

	return ret;
}
EXPORT_SYMBOL(qmi_svc_event_notifier_register);

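/*
 * qmi_svc_event_notifier_unregister() - Stop receiving availability events
 *
 * Removes @nb from the notifier chain for <service_id:instance_id>.
 */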
int qmi_svc_event_notifier_unregister(uint32_t service_id,
				      uint32_t instance_id,
				      struct notifier_block *nb)
{
	int ret;
	struct svc_event_nb *temp;
	unsigned long flags;

	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb(service_id, instance_id);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return -EINVAL;
	}

	spin_lock_irqsave(&temp->nb_lock, flags);
	ret = raw_notifier_chain_unregister(&temp->svc_event_rcvr_list, nb);
	spin_unlock_irqrestore(&temp->nb_lock, flags);
	mutex_unlock(&svc_event_nb_list_lock);

	return ret;
}
EXPORT_SYMBOL(qmi_svc_event_notifier_unregister);

MODULE_DESCRIPTION("MSM QMI Interface");
MODULE_LICENSE("GPL v2");