/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/uaccess.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/errno.h>
18#include <linux/io.h>
19#include <linux/string.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/sched.h>
23#include <linux/mm.h>
24#include <linux/list.h>
25#include <linux/socket.h>
26#include <linux/gfp.h>
27#include <linux/qmi_encdec.h>
28
29#include <mach/msm_qmi_interface.h>
30#include <mach/msm_ipc_router.h>
31
32#include "msm_qmi_interface_priv.h"
33
34static LIST_HEAD(svc_event_nb_list);
35static DEFINE_MUTEX(svc_event_nb_list_lock);
36
/*
 * Encode/decode element table for the common QMI response TLV
 * (struct qmi_response_type_v01): two signed 2-byte enum fields,
 * "result" then "error", terminated by the QMI_EOTI sentinel entry.
 * Referenced via a msg_desc's ei_array by users of this interface.
 */
struct elem_info qmi_response_type_v01_ei[] = {
	{
		.data_type = QMI_SIGNED_2_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(uint16_t),
		.is_array = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct qmi_response_type_v01,
				   result),
		.ei_array = NULL,
	},
	{
		.data_type = QMI_SIGNED_2_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(uint16_t),
		.is_array = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = offsetof(struct qmi_response_type_v01,
				   error),
		.ei_array = NULL,
	},
	{
		/* Sentinel: marks the end of the element table */
		.data_type = QMI_EOTI,
		.elem_len = 0,
		.elem_size = 0,
		.is_array = NO_ARRAY,
		.tlv_type = QMI_COMMON_TLV_TYPE,
		.offset = 0,
		.ei_array = NULL,
	},
};
68
Karthikeyan Ramasubramanianfec77ff2012-10-26 20:03:26 -060069static void qmi_event_notify(unsigned event, void *priv)
70{
71 struct qmi_handle *handle = (struct qmi_handle *)priv;
72 unsigned long flags;
73
74 if (!handle)
75 return;
76
77 mutex_lock(&handle->handle_lock);
78 if (handle->handle_reset) {
79 mutex_unlock(&handle->handle_lock);
80 return;
81 }
82
83 switch (event) {
84 case MSM_IPC_ROUTER_READ_CB:
85 spin_lock_irqsave(&handle->notify_lock, flags);
86 handle->notify(handle, QMI_RECV_MSG, handle->notify_priv);
87 spin_unlock_irqrestore(&handle->notify_lock, flags);
88 break;
89
90 default:
91 break;
92 }
93 mutex_unlock(&handle->handle_lock);
94}
95
96struct qmi_handle *qmi_handle_create(
97 void (*notify)(struct qmi_handle *handle,
98 enum qmi_event_type event, void *notify_priv),
99 void *notify_priv)
100{
101 struct qmi_handle *temp_handle;
102 struct msm_ipc_port *port_ptr;
103
104 temp_handle = kzalloc(sizeof(struct qmi_handle), GFP_KERNEL);
105 if (!temp_handle) {
106 pr_err("%s: Failure allocating client handle\n", __func__);
107 return NULL;
108 }
109
110 port_ptr = msm_ipc_router_create_port(qmi_event_notify,
111 (void *)temp_handle);
112 if (!port_ptr) {
113 pr_err("%s: IPC router port creation failed\n", __func__);
114 kfree(temp_handle);
115 return NULL;
116 }
117
118 temp_handle->src_port = port_ptr;
119 temp_handle->next_txn_id = 1;
120 INIT_LIST_HEAD(&temp_handle->txn_list);
121 mutex_init(&temp_handle->handle_lock);
122 spin_lock_init(&temp_handle->notify_lock);
123 temp_handle->notify = notify;
124 temp_handle->notify_priv = notify_priv;
125 temp_handle->handle_reset = 0;
126 init_waitqueue_head(&temp_handle->reset_waitq);
127 return temp_handle;
128}
129EXPORT_SYMBOL(qmi_handle_create);
130
131static void clean_txn_info(struct qmi_handle *handle)
132{
133 struct qmi_txn *txn_handle, *temp_txn_handle;
134
135 list_for_each_entry_safe(txn_handle, temp_txn_handle,
136 &handle->txn_list, list) {
137 if (txn_handle->type == QMI_ASYNC_TXN) {
138 list_del(&txn_handle->list);
139 kfree(txn_handle);
140 } else if (txn_handle->type == QMI_SYNC_TXN) {
141 wake_up(&txn_handle->wait_q);
142 }
143 }
144}
145
146int qmi_handle_destroy(struct qmi_handle *handle)
147{
148 int rc;
149
150 if (!handle)
151 return -EINVAL;
152
153 mutex_lock(&handle->handle_lock);
154 handle->handle_reset = 1;
155 clean_txn_info(handle);
156 mutex_unlock(&handle->handle_lock);
157
158 rc = wait_event_interruptible(handle->reset_waitq,
159 list_empty(&handle->txn_list));
160
161 /* TODO: Destroy client owned transaction */
162 msm_ipc_router_close_port((struct msm_ipc_port *)(handle->src_port));
163 kfree(handle->dest_info);
164 kfree(handle);
165 return 0;
166}
167EXPORT_SYMBOL(qmi_handle_destroy);
168
169int qmi_register_ind_cb(struct qmi_handle *handle,
170 void (*ind_cb)(struct qmi_handle *handle,
171 unsigned int msg_id, void *msg,
172 unsigned int msg_len, void *ind_cb_priv),
173 void *ind_cb_priv)
174{
175 if (!handle)
176 return -EINVAL;
177
178 mutex_lock(&handle->handle_lock);
179 if (handle->handle_reset) {
180 mutex_unlock(&handle->handle_lock);
181 return -ENETRESET;
182 }
183
184 handle->ind_cb = ind_cb;
185 handle->ind_cb_priv = ind_cb_priv;
186 mutex_unlock(&handle->handle_lock);
187 return 0;
188}
189EXPORT_SYMBOL(qmi_register_ind_cb);
190
/*
 * qmi_encode_and_send_req() - Encode a QMI request, queue a transaction
 * for it and send it to the service this handle is connected to.
 * @ret_txn_handle: if non-NULL, receives the new transaction (the sync
 *                  path waits on it).
 * @type: QMI_SYNC_TXN or QMI_ASYNC_TXN.
 * @resp_cb/@resp_cb_data: completion callback for async transactions.
 *
 * Returns 0 on success, negative errno on failure.  On success the txn
 * stays on handle->txn_list until the response path (or a reset)
 * completes it.  Note: @req_len is currently unused; the encode buffer
 * is sized from req_desc->max_msg_len.
 */
static int qmi_encode_and_send_req(struct qmi_txn **ret_txn_handle,
	struct qmi_handle *handle, enum txn_type type,
	struct msg_desc *req_desc, void *req, unsigned int req_len,
	struct msg_desc *resp_desc, void *resp, unsigned int resp_len,
	void (*resp_cb)(struct qmi_handle *handle,
			unsigned int msg_id, void *msg,
			void *resp_cb_data),
	void *resp_cb_data)
{
	struct qmi_txn *txn_handle;
	int rc, encoded_req_len;
	void *encoded_req;

	if (!handle || !handle->dest_info ||
	    !req_desc || !req || !resp_desc || !resp)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return -ENETRESET;
	}

	/* Allocate Transaction Info */
	txn_handle = kzalloc(sizeof(struct qmi_txn), GFP_KERNEL);
	if (!txn_handle) {
		pr_err("%s: Failed to allocate txn handle\n", __func__);
		mutex_unlock(&handle->handle_lock);
		return -ENOMEM;
	}
	txn_handle->type = type;
	INIT_LIST_HEAD(&txn_handle->list);
	init_waitqueue_head(&txn_handle->wait_q);

	/* Cache the parameters the response path will need */
	txn_handle->handle = handle;
	txn_handle->resp_desc = resp_desc;
	txn_handle->resp = resp;
	txn_handle->resp_len = resp_len;
	txn_handle->resp_received = 0;
	txn_handle->resp_cb = resp_cb;
	txn_handle->resp_cb_data = resp_cb_data;

	/* Encode the request msg into a worst-case-sized buffer */
	encoded_req_len = req_desc->max_msg_len + QMI_HEADER_SIZE;
	encoded_req = kmalloc(encoded_req_len, GFP_KERNEL);
	if (!encoded_req) {
		pr_err("%s: Failed to allocate req_msg_buf\n", __func__);
		rc = -ENOMEM;
		goto encode_and_send_req_err1;
	}
	rc = qmi_kernel_encode(req_desc,
		(void *)(encoded_req + QMI_HEADER_SIZE),
		req_desc->max_msg_len, req);
	if (rc < 0) {
		pr_err("%s: Encode Failure %d\n", __func__, rc);
		goto encode_and_send_req_err2;
	}
	encoded_req_len = rc;	/* actual encoded payload length */

	/* Encode the header & Add to the txn_list.  txn_id 0 is skipped
	 * so it is never assigned to a transaction. */
	if (!handle->next_txn_id)
		handle->next_txn_id++;
	txn_handle->txn_id = handle->next_txn_id++;
	encode_qmi_header(encoded_req, QMI_REQUEST_CONTROL_FLAG,
			  txn_handle->txn_id, req_desc->msg_id,
			  encoded_req_len);
	encoded_req_len += QMI_HEADER_SIZE;
	list_add_tail(&txn_handle->list, &handle->txn_list);

	/* Send the request */
	rc = msm_ipc_router_send_msg((struct msm_ipc_port *)(handle->src_port),
		(struct msm_ipc_addr *)handle->dest_info,
		encoded_req, encoded_req_len);
	if (rc < 0) {
		pr_err("%s: send_msg failed %d\n", __func__, rc);
		goto encode_and_send_req_err3;
	}
	mutex_unlock(&handle->handle_lock);

	kfree(encoded_req);
	if (ret_txn_handle)
		*ret_txn_handle = txn_handle;
	return 0;

/* Unwind in reverse order of acquisition */
encode_and_send_req_err3:
	list_del(&txn_handle->list);
encode_and_send_req_err2:
	kfree(encoded_req);
encode_and_send_req_err1:
	kfree(txn_handle);
	mutex_unlock(&handle->handle_lock);
	return rc;
}
285
/*
 * qmi_send_req_wait() - Send a synchronous QMI request and block until
 * the response arrives or the wait fails.
 * @timeout_ms: 0 waits indefinitely; otherwise wait at most this long.
 *
 * On success, returns 0 with the decoded response written into @resp by
 * the receive path.  Returns negative errno on encode/send failure,
 * -ETIMEDOUT on timeout, -ENETRESET if the handle was reset, -EFAULT if
 * woken without a response and no other error applies.
 */
int qmi_send_req_wait(struct qmi_handle *handle,
		      struct msg_desc *req_desc,
		      void *req, unsigned int req_len,
		      struct msg_desc *resp_desc,
		      void *resp, unsigned int resp_len,
		      unsigned long timeout_ms)
{
	struct qmi_txn *txn_handle = NULL;
	int rc;

	/* Encode and send the request */
	rc = qmi_encode_and_send_req(&txn_handle, handle, QMI_SYNC_TXN,
				     req_desc, req, req_len,
				     resp_desc, resp, resp_len,
				     NULL, NULL);
	if (rc < 0) {
		pr_err("%s: Error encode & send req: %d\n", __func__, rc);
		return rc;
	}

	/* Wait for the response; also woken on handle reset */
	if (!timeout_ms) {
		rc = wait_event_interruptible(txn_handle->wait_q,
					      (txn_handle->resp_received ||
					       handle->handle_reset));
	} else {
		rc = wait_event_interruptible_timeout(txn_handle->wait_q,
					      (txn_handle->resp_received ||
					       handle->handle_reset),
					      msecs_to_jiffies(timeout_ms));
		if (rc == 0)
			rc = -ETIMEDOUT;
	}

	/* Re-check under the lock: the receive path sets resp_received */
	mutex_lock(&handle->handle_lock);
	if (!txn_handle->resp_received) {
		pr_err("%s: Response Wait Error %d\n", __func__, rc);
		if (handle->handle_reset)
			rc = -ENETRESET;
		if (rc >= 0)
			rc = -EFAULT;	/* woken without a response */
		goto send_req_wait_err;
	}
	rc = 0;

send_req_wait_err:
	/* The sync waiter always unlinks and frees its own transaction */
	list_del(&txn_handle->list);
	kfree(txn_handle);
	mutex_unlock(&handle->handle_lock);
	/* Let qmi_handle_destroy() proceed once txn_list drains */
	wake_up(&handle->reset_waitq);
	return rc;
}
EXPORT_SYMBOL(qmi_send_req_wait);
339
/*
 * qmi_send_req_nowait() - Send an asynchronous QMI request.
 *
 * The request is encoded and sent immediately; @resp_cb is invoked from
 * the receive path once the response has been decoded into @resp, after
 * which the transaction is freed internally.
 * Returns 0 on success, negative errno on failure.
 */
int qmi_send_req_nowait(struct qmi_handle *handle,
			struct msg_desc *req_desc,
			void *req, unsigned int req_len,
			struct msg_desc *resp_desc,
			void *resp, unsigned int resp_len,
			void (*resp_cb)(struct qmi_handle *handle,
					unsigned int msg_id, void *msg,
					void *resp_cb_data),
			void *resp_cb_data)
{
	return qmi_encode_and_send_req(NULL, handle, QMI_ASYNC_TXN,
				       req_desc, req, req_len,
				       resp_desc, resp, resp_len,
				       resp_cb, resp_cb_data);
}
EXPORT_SYMBOL(qmi_send_req_nowait);
356
357static struct qmi_txn *find_txn_handle(struct qmi_handle *handle,
358 uint16_t txn_id)
359{
360 struct qmi_txn *txn_handle;
361
362 list_for_each_entry(txn_handle, &handle->txn_list, list) {
363 if (txn_handle->txn_id == txn_id)
364 return txn_handle;
365 }
366 return NULL;
367}
368
/*
 * handle_qmi_response() - Match an incoming response to its pending
 * transaction, decode it and complete the transaction.
 * @resp_msg: raw message, including the QMI header.
 * @txn_id/@msg_id/@msg_len: fields already parsed from the header.
 *
 * Sync txns: resp_received is set and the waiter woken; the waiter frees
 * the txn.  Async txns: resp_cb is invoked, then the txn is unlinked and
 * freed here.  Caller must hold handle->handle_lock.
 *
 * Returns 0 on success, -EINVAL for an unknown txn_id, the decode error,
 * or -EFAULT for an unrecognized transaction type.
 */
static int handle_qmi_response(struct qmi_handle *handle,
			       unsigned char *resp_msg, uint16_t txn_id,
			       uint16_t msg_id, uint16_t msg_len)
{
	struct qmi_txn *txn_handle;
	int rc;

	/* Find the transaction handle */
	txn_handle = find_txn_handle(handle, txn_id);
	if (!txn_handle) {
		pr_err("%s Response received for non-existent txn_id %d\n",
		       __func__, txn_id);
		return -EINVAL;
	}

	/* Decode the payload past the header into the caller's resp buf */
	rc = qmi_kernel_decode(txn_handle->resp_desc, txn_handle->resp,
			       (void *)(resp_msg + QMI_HEADER_SIZE), msg_len);
	if (rc < 0) {
		pr_err("%s: Response Decode Failure <%d: %d: %d> rc: %d\n",
		       __func__, txn_id, msg_id, msg_len, rc);
		/* Wake the sync waiter; resp_received stays 0 so it sees
		 * the failure.  Async txns are freed here instead. */
		wake_up(&txn_handle->wait_q);
		if (txn_handle->type == QMI_ASYNC_TXN) {
			list_del(&txn_handle->list);
			kfree(txn_handle);
		}
		return rc;
	}

	/* Handle async or sync resp */
	switch (txn_handle->type) {
	case QMI_SYNC_TXN:
		txn_handle->resp_received = 1;
		wake_up(&txn_handle->wait_q);
		rc = 0;
		break;

	case QMI_ASYNC_TXN:
		if (txn_handle->resp_cb)
			txn_handle->resp_cb(txn_handle->handle, msg_id,
					    txn_handle->resp,
					    txn_handle->resp_cb_data);
		list_del(&txn_handle->list);
		kfree(txn_handle);
		rc = 0;
		break;

	default:
		pr_err("%s: Unrecognized transaction type\n", __func__);
		return -EFAULT;
	}
	return rc;
}
422
423static int handle_qmi_indication(struct qmi_handle *handle, void *msg,
424 unsigned int msg_id, unsigned int msg_len)
425{
426 if (handle->ind_cb)
427 handle->ind_cb(handle, msg_id, msg,
428 msg_len, handle->ind_cb_priv);
429 return 0;
430}
431
/*
 * qmi_recv_msg() - Read one message from the handle's IPC Router port
 * and dispatch it by control flag.
 *
 * Responses complete their pending transaction via handle_qmi_response();
 * indications are forwarded to the registered ind_cb.  Any other control
 * flag is rejected with -EFAULT.
 *
 * Returns 0 on success; negative errno on reset, read failure or
 * dispatch failure.
 */
int qmi_recv_msg(struct qmi_handle *handle)
{
	unsigned int recv_msg_len;
	unsigned char *recv_msg = NULL;
	struct msm_ipc_addr src_addr;
	unsigned char cntl_flag;
	uint16_t txn_id, msg_id, msg_len;
	int rc;

	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_lock);
	if (handle->handle_reset) {
		mutex_unlock(&handle->handle_lock);
		return -ENETRESET;
	}

	/* Read the messages; recv_msg is allocated for us and freed below */
	rc = msm_ipc_router_read_msg((struct msm_ipc_port *)(handle->src_port),
				     &src_addr, &recv_msg, &recv_msg_len);
	if (rc < 0) {
		pr_err("%s: Read failed %d\n", __func__, rc);
		mutex_unlock(&handle->handle_lock);
		return rc;
	}

	/* Decode the header & Handle the req, resp, indication message */
	decode_qmi_header(recv_msg, &cntl_flag, &txn_id, &msg_id, &msg_len);

	switch (cntl_flag) {
	case QMI_RESPONSE_CONTROL_FLAG:
		rc = handle_qmi_response(handle, recv_msg,
					 txn_id, msg_id, msg_len);
		break;

	case QMI_INDICATION_CONTROL_FLAG:
		rc = handle_qmi_indication(handle, recv_msg, msg_id, msg_len);
		break;

	default:
		rc = -EFAULT;
		pr_err("%s: Unsupported message type %d\n",
		       __func__, cntl_flag);
		break;
	}
	kfree(recv_msg);
	mutex_unlock(&handle->handle_lock);
	return rc;
}
EXPORT_SYMBOL(qmi_recv_msg);
483
484int qmi_connect_to_service(struct qmi_handle *handle,
485 uint32_t service_id, uint32_t instance_id)
486{
487 struct msm_ipc_port_name svc_name;
488 struct msm_ipc_server_info svc_info;
489 struct msm_ipc_addr *svc_dest_addr;
490 int rc;
491
492 if (!handle)
493 return -EINVAL;
494
495 svc_dest_addr = kzalloc(sizeof(struct msm_ipc_addr),
496 GFP_KERNEL);
497 if (!svc_dest_addr) {
498 pr_err("%s: Failure allocating memory\n", __func__);
499 return -ENOMEM;
500 }
501
502 svc_name.service = service_id;
503 svc_name.instance = instance_id;
504
505 rc = msm_ipc_router_lookup_server_name(&svc_name, &svc_info, 1, 0xFF);
506 if (rc <= 0) {
507 pr_err("%s: Server not found\n", __func__);
508 return -ENODEV;
509 }
510 svc_dest_addr->addrtype = MSM_IPC_ADDR_ID;
511 svc_dest_addr->addr.port_addr.node_id = svc_info.node_id;
512 svc_dest_addr->addr.port_addr.port_id = svc_info.port_id;
513 mutex_lock(&handle->handle_lock);
514 if (handle->handle_reset) {
515 mutex_unlock(&handle->handle_lock);
516 return -ENETRESET;
517 }
518 handle->dest_info = svc_dest_addr;
519 mutex_unlock(&handle->handle_lock);
520
521 return 0;
522}
523EXPORT_SYMBOL(qmi_connect_to_service);
524
525static struct svc_event_nb *find_svc_event_nb_by_name(const char *name)
526{
527 struct svc_event_nb *temp;
528
529 list_for_each_entry(temp, &svc_event_nb_list, list) {
530 if (!strncmp(name, temp->pdriver_name,
531 sizeof(temp->pdriver_name)))
532 return temp;
533 }
534 return NULL;
535}
536
537static int qmi_svc_event_probe(struct platform_device *pdev)
538{
539 struct svc_event_nb *temp;
540 unsigned long flags;
541
542 mutex_lock(&svc_event_nb_list_lock);
543 temp = find_svc_event_nb_by_name(pdev->name);
544 if (!temp) {
545 mutex_unlock(&svc_event_nb_list_lock);
546 return -EINVAL;
547 }
548
549 spin_lock_irqsave(&temp->nb_lock, flags);
550 temp->svc_avail = 1;
551 raw_notifier_call_chain(&temp->svc_event_rcvr_list,
552 QMI_SERVER_ARRIVE, NULL);
553 spin_unlock_irqrestore(&temp->nb_lock, flags);
554 mutex_unlock(&svc_event_nb_list_lock);
555 return 0;
556}
557
558static int qmi_svc_event_remove(struct platform_device *pdev)
559{
560 struct svc_event_nb *temp;
561 unsigned long flags;
562
563 mutex_lock(&svc_event_nb_list_lock);
564 temp = find_svc_event_nb_by_name(pdev->name);
565 if (!temp) {
566 mutex_unlock(&svc_event_nb_list_lock);
567 return -EINVAL;
568 }
569
570 spin_lock_irqsave(&temp->nb_lock, flags);
571 temp->svc_avail = 0;
572 raw_notifier_call_chain(&temp->svc_event_rcvr_list,
573 QMI_SERVER_EXIT, NULL);
574 spin_unlock_irqrestore(&temp->nb_lock, flags);
575 mutex_unlock(&svc_event_nb_list_lock);
576 return 0;
577}
578
579static struct svc_event_nb *find_svc_event_nb(uint32_t service_id,
580 uint32_t instance_id)
581{
582 struct svc_event_nb *temp;
583
584 list_for_each_entry(temp, &svc_event_nb_list, list) {
585 if (temp->service_id == service_id &&
586 temp->instance_id == instance_id)
587 return temp;
588 }
589 return NULL;
590}
591
/*
 * find_and_add_svc_event_nb() - Find the notifier block for a
 * <service_id, instance_id> pair, creating and registering it on first
 * use.
 *
 * On first use a platform driver named "QMI<svc>:<inst>" is registered;
 * its probe/remove callbacks drive service arrival/exit notifications.
 *
 * Returns the (possibly pre-existing) block, or NULL on allocation or
 * driver-registration failure.
 */
static struct svc_event_nb *find_and_add_svc_event_nb(uint32_t service_id,
						      uint32_t instance_id)
{
	struct svc_event_nb *temp;
	int ret;

	/* Fast path: entry already exists */
	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb(service_id, instance_id);
	if (temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return temp;
	}

	temp = kzalloc(sizeof(struct svc_event_nb), GFP_KERNEL);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		pr_err("%s: Failed to alloc notifier block\n", __func__);
		return temp;
	}

	spin_lock_init(&temp->nb_lock);
	temp->service_id = service_id;
	temp->instance_id = instance_id;
	INIT_LIST_HEAD(&temp->list);
	temp->svc_driver.probe = qmi_svc_event_probe;
	temp->svc_driver.remove = qmi_svc_event_remove;
	scnprintf(temp->pdriver_name, sizeof(temp->pdriver_name),
		  "QMI%08x:%08x", service_id, instance_id);
	temp->svc_driver.driver.name = temp->pdriver_name;
	RAW_INIT_NOTIFIER_HEAD(&temp->svc_event_rcvr_list);

	/* Publish before registering: probe looks the entry up by name */
	list_add_tail(&temp->list, &svc_event_nb_list);
	mutex_unlock(&svc_event_nb_list_lock);

	/* NOTE(review): the list lock is dropped here, so a concurrent
	 * caller can observe this entry before registration completes or
	 * after a failed registration unlinks it below -- confirm callers
	 * tolerate this window (see the re-lookup in
	 * qmi_svc_event_notifier_register()). */
	ret = platform_driver_register(&temp->svc_driver);
	if (ret < 0) {
		pr_err("%s: Failed pdriver register\n", __func__);
		mutex_lock(&svc_event_nb_list_lock);
		list_del(&temp->list);
		mutex_unlock(&svc_event_nb_list_lock);
		kfree(temp);
		temp = NULL;
	}

	return temp;
}
638
/*
 * qmi_svc_event_notifier_register() - Register @nb for arrival/exit
 * events of the given service instance.
 *
 * If the service is already available, @nb->notifier_call is invoked
 * immediately with QMI_SERVER_ARRIVE before @nb is added to the chain.
 * Returns the notifier-chain registration result, or -EFAULT if the
 * per-service block could not be created or found.
 */
int qmi_svc_event_notifier_register(uint32_t service_id,
				    uint32_t instance_id,
				    struct notifier_block *nb)
{
	struct svc_event_nb *temp;
	unsigned long flags;
	int ret;

	/* Create (or find) the per-service notifier block */
	temp = find_and_add_svc_event_nb(service_id, instance_id);
	if (!temp)
		return -EFAULT;

	/* Re-lookup under the lock: the entry may have been unlinked
	 * after find_and_add_svc_event_nb() dropped it */
	mutex_lock(&svc_event_nb_list_lock);
	temp = find_svc_event_nb(service_id, instance_id);
	if (!temp) {
		mutex_unlock(&svc_event_nb_list_lock);
		return -EFAULT;
	}
	spin_lock_irqsave(&temp->nb_lock, flags);
	if (temp->svc_avail)
		/* Replay the arrival event this late registrant missed */
		nb->notifier_call(nb, QMI_SERVER_ARRIVE, NULL);

	ret = raw_notifier_chain_register(&temp->svc_event_rcvr_list, nb);
	spin_unlock_irqrestore(&temp->nb_lock, flags);
	mutex_unlock(&svc_event_nb_list_lock);

	return ret;
}
EXPORT_SYMBOL(qmi_svc_event_notifier_register);
668
669int qmi_svc_event_notifier_unregister(uint32_t service_id,
670 uint32_t instance_id,
671 struct notifier_block *nb)
672{
673 int ret;
674 struct svc_event_nb *temp;
675 unsigned long flags;
676
677 mutex_lock(&svc_event_nb_list_lock);
678 temp = find_svc_event_nb(service_id, instance_id);
679 if (!temp) {
680 mutex_unlock(&svc_event_nb_list_lock);
681 return -EINVAL;
682 }
683
684 spin_lock_irqsave(&temp->nb_lock, flags);
685 ret = raw_notifier_chain_unregister(&temp->svc_event_rcvr_list, nb);
686 spin_unlock_irqrestore(&temp->nb_lock, flags);
687 mutex_unlock(&svc_event_nb_list_lock);
688
689 return ret;
690}
691EXPORT_SYMBOL(qmi_svc_event_notifier_unregister);
692
693MODULE_DESCRIPTION("MSM QMI Interface");
694MODULE_LICENSE("GPL v2");