blob: 983d0c197bb577866e777490c2f97bd4aadece0a [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/smd_rpcrouter.c
2 *
3 * Copyright (C) 2007 Google, Inc.
4 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
5 * Author: San Mehat <san@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* TODO: handle cases where smd_write() will tempfail due to full fifo */
19/* TODO: thread priority? schedule a work to bump it? */
20/* TODO: maybe make server_list_lock a mutex */
21/* TODO: pool fragments to avoid kmalloc/kfree churn */
22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/string.h>
27#include <linux/errno.h>
28#include <linux/cdev.h>
29#include <linux/init.h>
30#include <linux/device.h>
31#include <linux/types.h>
32#include <linux/delay.h>
33#include <linux/fs.h>
34#include <linux/err.h>
35#include <linux/sched.h>
36#include <linux/poll.h>
37#include <linux/wakelock.h>
38#include <asm/uaccess.h>
39#include <asm/byteorder.h>
40#include <linux/platform_device.h>
41#include <linux/uaccess.h>
42#include <linux/debugfs.h>
43
44#include <asm/byteorder.h>
45
46#include <mach/msm_smd.h>
47#include <mach/smem_log.h>
48#include <mach/subsystem_notif.h>
49
50#include "smd_rpcrouter.h"
51#include "modem_notifier.h"
52#include "smd_rpc_sym.h"
53#include "smd_private.h"
54
55enum {
56 SMEM_LOG = 1U << 0,
57 RTR_DBG = 1U << 1,
58 R2R_MSG = 1U << 2,
59 R2R_RAW = 1U << 3,
60 RPC_MSG = 1U << 4,
61 NTFY_MSG = 1U << 5,
62 RAW_PMR = 1U << 6,
63 RAW_PMW = 1U << 7,
64 R2R_RAW_HDR = 1U << 8,
65};
66static int msm_rpc_connect_timeout_ms;
67module_param_named(connect_timeout, msm_rpc_connect_timeout_ms,
68 int, S_IRUGO | S_IWUSR | S_IWGRP);
69
70static int smd_rpcrouter_debug_mask;
71module_param_named(debug_mask, smd_rpcrouter_debug_mask,
72 int, S_IRUGO | S_IWUSR | S_IWGRP);
73
74#define DIAG(x...) printk(KERN_ERR "[RR] ERROR " x)
75
#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
/* Debug logging macros, individually gated by smd_rpcrouter_debug_mask bits. */
#define D(x...) do { \
if (smd_rpcrouter_debug_mask & RTR_DBG) \
	printk(KERN_ERR x); \
} while (0)

#define RR(x...) do { \
if (smd_rpcrouter_debug_mask & R2R_MSG) \
	printk(KERN_ERR "[RR] "x); \
} while (0)

#define RAW(x...) do { \
if (smd_rpcrouter_debug_mask & R2R_RAW) \
	printk(KERN_ERR "[RAW] "x); \
} while (0)

#define RAW_HDR(x...) do { \
if (smd_rpcrouter_debug_mask & R2R_RAW_HDR) \
	printk(KERN_ERR "[HDR] "x); \
} while (0)

#define RAW_PMR(x...) do { \
if (smd_rpcrouter_debug_mask & RAW_PMR) \
	printk(KERN_ERR "[PMR] "x); \
} while (0)

#define RAW_PMR_NOMASK(x...) do { \
	printk(KERN_ERR "[PMR] "x); \
} while (0)

#define RAW_PMW(x...) do { \
if (smd_rpcrouter_debug_mask & RAW_PMW) \
	printk(KERN_ERR "[PMW] "x); \
} while (0)

#define RAW_PMW_NOMASK(x...) do { \
	printk(KERN_ERR "[PMW] "x); \
} while (0)

#define IO(x...) do { \
if (smd_rpcrouter_debug_mask & RPC_MSG) \
	printk(KERN_ERR "[RPC] "x); \
} while (0)

#define NTFY(x...) do { \
if (smd_rpcrouter_debug_mask & NTFY_MSG) \
	printk(KERN_ERR "[NOTIFY] "x); \
} while (0)
#else
/*
 * Debug support compiled out: all logging macros collapse to no-ops.
 * Fix: the no-op variants were previously named RAW_PMR_NO_MASK and
 * RAW_PMW_NO_MASK, which do not match the RAW_PMR_NOMASK/RAW_PMW_NOMASK
 * names defined in the debug branch and used by callers, so builds with
 * CONFIG_MSM_ONCRPCROUTER_DEBUG disabled failed to compile.
 */
#define D(x...) do { } while (0)
#define RR(x...) do { } while (0)
#define RAW(x...) do { } while (0)
#define RAW_HDR(x...) do { } while (0)
#define RAW_PMR(x...) do { } while (0)
#define RAW_PMR_NOMASK(x...) do { } while (0)
#define RAW_PMW(x...) do { } while (0)
#define RAW_PMW_NOMASK(x...) do { } while (0)
#define IO(x...) do { } while (0)
#define NTFY(x...) do { } while (0)
#endif
136
137
138static LIST_HEAD(local_endpoints);
139static LIST_HEAD(remote_endpoints);
140
141static LIST_HEAD(server_list);
142
143static wait_queue_head_t newserver_wait;
144static wait_queue_head_t subsystem_restart_wait;
145
146static DEFINE_SPINLOCK(local_endpoints_lock);
147static DEFINE_SPINLOCK(remote_endpoints_lock);
148static DEFINE_SPINLOCK(server_list_lock);
149
150static LIST_HEAD(rpc_board_dev_list);
151static DEFINE_SPINLOCK(rpc_board_dev_list_lock);
152
153static struct workqueue_struct *rpcrouter_workqueue;
154
155static atomic_t next_xid = ATOMIC_INIT(1);
156static atomic_t pm_mid = ATOMIC_INIT(1);
157
158static void do_read_data(struct work_struct *work);
159static void do_create_pdevs(struct work_struct *work);
160static void do_create_rpcrouter_pdev(struct work_struct *work);
161
162static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
163static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
164
165#define RR_STATE_IDLE 0
166#define RR_STATE_HEADER 1
167#define RR_STATE_BODY 2
168#define RR_STATE_ERROR 3
169
170/* State for remote ep following restart */
171#define RESTART_QUOTA_ABORT 1
172
173struct rr_context {
174 struct rr_packet *pkt;
175 uint8_t *ptr;
176 uint32_t state; /* current assembly state */
177 uint32_t count; /* bytes needed in this state */
178};
179
180struct rr_context the_rr_context;
181
182struct rpc_board_dev_info {
183 struct list_head list;
184
185 struct rpc_board_dev *dev;
186};
187
188static struct platform_device rpcrouter_pdev = {
189 .name = "oncrpc_router",
190 .id = -1,
191};
192
/* Per-transport state for one router link. One instance exists per
 * registered rpcrouter_xprt and lives on xprt_info_list. */
struct rpcrouter_xprt_info {
	struct list_head list;		/* link in xprt_info_list */

	struct rpcrouter_xprt *xprt;	/* underlying transport operations */

	int remote_pid;			/* peer processor id; -1 until the
					 * first message reveals it */
	uint32_t initialized;		/* nonzero once HELLO handshake done */
	wait_queue_head_t read_wait;	/* woken when more rx data arrives */
	struct wake_lock wakelock;	/* released in rr_read() while blocked
					 * waiting for data */
	spinlock_t lock;		/* serializes xprt read/write access */
	uint32_t need_len;		/* bytes the blocked reader is waiting
					 * for (set in rr_read()) */
	struct work_struct read_data;	/* rx work item (do_read_data) */
	struct workqueue_struct *workqueue;	/* per-transport rx workqueue */
	int abort_data_read;		/* set to force rr_read() to bail out */
	unsigned char r2r_buf[RPCROUTER_MSGSIZE_MAX];	/* scratch buffer for
							 * control messages */
};
209
210static LIST_HEAD(xprt_info_list);
211static DEFINE_MUTEX(xprt_info_list_lock);
212
213DECLARE_COMPLETION(rpc_remote_router_up);
214static atomic_t pending_close_count = ATOMIC_INIT(0);
215
216/*
217 * Search for transport (xprt) that matches the provided PID.
218 *
219 * Note: The calling function must ensure that the mutex
220 * xprt_info_list_lock is locked when this function
221 * is called.
222 *
223 * @remote_pid Remote PID for the transport
224 *
225 * @returns Pointer to transport or NULL if not found
226 */
227static struct rpcrouter_xprt_info *rpcrouter_get_xprt_info(uint32_t remote_pid)
228{
229 struct rpcrouter_xprt_info *xprt_info;
230
231 list_for_each_entry(xprt_info, &xprt_info_list, list) {
232 if (xprt_info->remote_pid == remote_pid)
233 return xprt_info;
234 }
235 return NULL;
236}
237
/*
 * Send a router-to-router control message over the given transport.
 *
 * Builds an rr_header addressed to the remote router port and writes
 * header + body atomically with respect to xprt_info->lock.  Polls
 * (msleep 250ms, lock dropped) until the transport has room for the
 * whole message.
 *
 * Returns 0 on success (including the loopback no-op case) or -EINVAL
 * if a non-HELLO message is sent before the link is initialized.
 */
static int rpcrouter_send_control_msg(struct rpcrouter_xprt_info *xprt_info,
				      union rr_control_msg *msg)
{
	struct rr_header hdr;
	unsigned long flags = 0;
	int need;

	/* Loopback transport: nothing to send. */
	if (xprt_info->remote_pid == RPCROUTER_PID_LOCAL)
		return 0;

	/* Only HELLO may be sent before the handshake completes. */
	if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) &&
	    !xprt_info->initialized) {
		printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
		       "router not initialized\n");
		return -EINVAL;
	}

	hdr.version = RPCROUTER_VERSION;
	hdr.type = msg->cmd;
	hdr.src_pid = RPCROUTER_PID_LOCAL;
	hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
	hdr.confirm_rx = 0;
	hdr.size = sizeof(*msg);
	hdr.dst_pid = xprt_info->remote_pid;
	hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;

	/* TODO: what if channel is full? */

	/* Wait for space for header + payload; the lock is dropped while
	 * sleeping so the transport can drain. */
	need = sizeof(hdr) + hdr.size;
	spin_lock_irqsave(&xprt_info->lock, flags);
	while (xprt_info->xprt->write_avail() < need) {
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		msleep(250);
		spin_lock_irqsave(&xprt_info->lock, flags);
	}
	xprt_info->xprt->write(&hdr, sizeof(hdr), HEADER);
	xprt_info->xprt->write(msg, hdr.size, PAYLOAD);
	spin_unlock_irqrestore(&xprt_info->lock, flags);

	return 0;
}
279
/*
 * Tear down router state after the remote processor behind @xprt_info
 * has restarted.
 *
 * For every local endpoint bound to the restarted processor this:
 *   - invokes the client's teardown callback and flags it for a later
 *     setup notification,
 *   - frees all pending and available replies,
 *   - marks the endpoint RESTART_PEND_NTFY_SVR,
 *   - discards all incomplete and completed-but-unread packets,
 *   - wakes any readers blocked on the endpoint.
 * It then aborts the tx-quota wait of every remote endpoint so writers
 * blocked on flow control can observe the restart.
 *
 * Lock order here (local_endpoints_lock -> per-ept locks, and
 * remote_endpoints_lock -> quota_lock) must be preserved.
 */
static void modem_reset_cleanup(struct rpcrouter_xprt_info *xprt_info)
{
	struct msm_rpc_endpoint *ept;
	struct rr_remote_endpoint *r_ept;
	struct rr_packet *pkt, *tmp_pkt;
	struct rr_fragment *frag, *next;
	struct msm_rpc_reply *reply, *reply_tmp;
	unsigned long flags;

	spin_lock_irqsave(&local_endpoints_lock, flags);
	/* remove all partial packets received */
	list_for_each_entry(ept, &local_endpoints, list) {
		RR("%s EPT DST PID %x, remote_pid:%d\n", __func__,
		   ept->dst_pid, xprt_info->remote_pid);

		/* Only endpoints talking to the restarted processor. */
		if (xprt_info->remote_pid != ept->dst_pid)
			continue;

		D("calling teardown cb %p\n", ept->cb_restart_teardown);
		if (ept->cb_restart_teardown)
			ept->cb_restart_teardown(ept->client_data);
		ept->do_setup_notif = 1;

		/* remove replies */
		spin_lock(&ept->reply_q_lock);
		list_for_each_entry_safe(reply, reply_tmp,
					 &ept->reply_pend_q, list) {
			list_del(&reply->list);
			kfree(reply);
		}
		list_for_each_entry_safe(reply, reply_tmp,
					 &ept->reply_avail_q, list) {
			list_del(&reply->list);
			kfree(reply);
		}
		ept->reply_cnt = 0;
		spin_unlock(&ept->reply_q_lock);

		/* Set restart state for local ep */
		RR("EPT:0x%p, State %d RESTART_PEND_NTFY_SVR "
		   "PROG:0x%08x VERS:0x%08x\n",
		   ept, ept->restart_state,
		   be32_to_cpu(ept->dst_prog),
		   be32_to_cpu(ept->dst_vers));
		spin_lock(&ept->restart_lock);
		ept->restart_state = RESTART_PEND_NTFY_SVR;

		/* remove incomplete packets */
		spin_lock(&ept->incomplete_lock);
		list_for_each_entry_safe(pkt, tmp_pkt,
					 &ept->incomplete, list) {
			list_del(&pkt->list);
			/* free the whole fragment chain of the packet */
			frag = pkt->first;
			while (frag != NULL) {
				next = frag->next;
				kfree(frag);
				frag = next;
			}
			kfree(pkt);
		}
		spin_unlock(&ept->incomplete_lock);

		/* remove all completed packets waiting to be read */
		spin_lock(&ept->read_q_lock);
		list_for_each_entry_safe(pkt, tmp_pkt, &ept->read_q,
					 list) {
			list_del(&pkt->list);
			frag = pkt->first;
			while (frag != NULL) {
				next = frag->next;
				kfree(frag);
				frag = next;
			}
			kfree(pkt);
		}
		spin_unlock(&ept->read_q_lock);

		spin_unlock(&ept->restart_lock);
		/* wake any reader blocked on this endpoint so it can see
		 * the restart state */
		wake_up(&ept->wait_q);
	}

	spin_unlock_irqrestore(&local_endpoints_lock, flags);

	/* Unblock endpoints waiting for quota ack*/
	spin_lock_irqsave(&remote_endpoints_lock, flags);
	list_for_each_entry(r_ept, &remote_endpoints, list) {
		spin_lock(&r_ept->quota_lock);
		r_ept->quota_restart_state = RESTART_QUOTA_ABORT;
		RR("Set STATE_PENDING PID:0x%08x CID:0x%08x \n", r_ept->pid,
		   r_ept->cid);
		spin_unlock(&r_ept->quota_lock);
		wake_up(&r_ept->quota_wait);
	}
	spin_unlock_irqrestore(&remote_endpoints_lock, flags);
}
375
376static void modem_reset_startup(struct rpcrouter_xprt_info *xprt_info)
377{
378 struct msm_rpc_endpoint *ept;
379 unsigned long flags;
380
381 spin_lock_irqsave(&local_endpoints_lock, flags);
382
383 /* notify all endpoints that we are coming back up */
384 list_for_each_entry(ept, &local_endpoints, list) {
385 RR("%s EPT DST PID %x, remote_pid:%d\n", __func__,
386 ept->dst_pid, xprt_info->remote_pid);
387
388 if (xprt_info->remote_pid != ept->dst_pid)
389 continue;
390
391 D("calling setup cb %d:%p\n", ept->do_setup_notif,
392 ept->cb_restart_setup);
393 if (ept->do_setup_notif && ept->cb_restart_setup)
394 ept->cb_restart_setup(ept->client_data);
395 ept->do_setup_notif = 0;
396 }
397
398 spin_unlock_irqrestore(&local_endpoints_lock, flags);
399}
400
401/*
402 * Blocks and waits for endpoint if a reset is in progress.
403 *
404 * @returns
405 * ENETRESET Reset is in progress and a notification needed
406 * ERESTARTSYS Signal occurred
407 * 0 Reset is not in progress
408 */
static int wait_for_restart_and_notify(struct msm_rpc_endpoint *ept)
{
	unsigned long flags;
	int ret = 0;
	DEFINE_WAIT(__wait);

	/* Classic prepare_to_wait/schedule loop: the restart_lock must be
	 * dropped before schedule() and the state re-checked after waking. */
	for (;;) {
		prepare_to_wait(&ept->restart_wait, &__wait,
				TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&ept->restart_lock, flags);
		if (ept->restart_state == RESTART_NORMAL) {
			/* no reset in progress */
			spin_unlock_irqrestore(&ept->restart_lock, flags);
			break;
		} else if (ept->restart_state & RESTART_PEND_NTFY) {
			/* reset pending and caller owes a notification;
			 * clear the flag so it is delivered only once */
			ept->restart_state &= ~RESTART_PEND_NTFY;
			spin_unlock_irqrestore(&ept->restart_lock, flags);
			ret = -ENETRESET;
			break;
		}
		/* interruptible unless the endpoint opted out via
		 * MSM_RPC_UNINTERRUPTIBLE */
		if (signal_pending(current) &&
		    ((!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))) {
			spin_unlock_irqrestore(&ept->restart_lock, flags);
			ret = -ERESTARTSYS;
			break;
		}
		spin_unlock_irqrestore(&ept->restart_lock, flags);
		schedule();
	}
	finish_wait(&ept->restart_wait, &__wait);
	return ret;
}
441
442static struct rr_server *rpcrouter_create_server(uint32_t pid,
443 uint32_t cid,
444 uint32_t prog,
445 uint32_t ver)
446{
447 struct rr_server *server;
448 unsigned long flags;
449 int rc;
450
451 server = kmalloc(sizeof(struct rr_server), GFP_KERNEL);
452 if (!server)
453 return ERR_PTR(-ENOMEM);
454
455 memset(server, 0, sizeof(struct rr_server));
456 server->pid = pid;
457 server->cid = cid;
458 server->prog = prog;
459 server->vers = ver;
460
461 spin_lock_irqsave(&server_list_lock, flags);
462 list_add_tail(&server->list, &server_list);
463 spin_unlock_irqrestore(&server_list_lock, flags);
464
465 rc = msm_rpcrouter_create_server_cdev(server);
466 if (rc < 0)
467 goto out_fail;
468
469 return server;
470out_fail:
471 spin_lock_irqsave(&server_list_lock, flags);
472 list_del(&server->list);
473 spin_unlock_irqrestore(&server_list_lock, flags);
474 kfree(server);
475 return ERR_PTR(rc);
476}
477
478static void rpcrouter_destroy_server(struct rr_server *server)
479{
480 unsigned long flags;
481
482 spin_lock_irqsave(&server_list_lock, flags);
483 list_del(&server->list);
484 spin_unlock_irqrestore(&server_list_lock, flags);
485 device_destroy(msm_rpcrouter_class, server->device_number);
486 kfree(server);
487}
488
489int msm_rpc_add_board_dev(struct rpc_board_dev *devices, int num)
490{
491 unsigned long flags;
492 struct rpc_board_dev_info *board_info;
493 int i;
494
495 for (i = 0; i < num; i++) {
496 board_info = kzalloc(sizeof(struct rpc_board_dev_info),
497 GFP_KERNEL);
498 if (!board_info)
499 return -ENOMEM;
500
501 board_info->dev = &devices[i];
502 D("%s: adding program %x\n", __func__, board_info->dev->prog);
503 spin_lock_irqsave(&rpc_board_dev_list_lock, flags);
504 list_add_tail(&board_info->list, &rpc_board_dev_list);
505 spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
506 }
507
508 return 0;
509}
510EXPORT_SYMBOL(msm_rpc_add_board_dev);
511
512static void rpcrouter_register_board_dev(struct rr_server *server)
513{
514 struct rpc_board_dev_info *board_info;
515 unsigned long flags;
516 int rc;
517
518 spin_lock_irqsave(&rpc_board_dev_list_lock, flags);
519 list_for_each_entry(board_info, &rpc_board_dev_list, list) {
520 if (server->prog == board_info->dev->prog) {
521 D("%s: registering device %x\n",
522 __func__, board_info->dev->prog);
523 list_del(&board_info->list);
524 rc = platform_device_register(&board_info->dev->pdev);
525 if (rc)
526 pr_err("%s: board dev register failed %d\n",
527 __func__, rc);
528 kfree(board_info);
529 break;
530 }
531 }
532 spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
533}
534
535static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
536{
537 struct rr_server *server;
538 unsigned long flags;
539
540 spin_lock_irqsave(&server_list_lock, flags);
541 list_for_each_entry(server, &server_list, list) {
542 if (server->prog == prog
543 && server->vers == ver) {
544 spin_unlock_irqrestore(&server_list_lock, flags);
545 return server;
546 }
547 }
548 spin_unlock_irqrestore(&server_list_lock, flags);
549 return NULL;
550}
551
552static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
553{
554 struct rr_server *server;
555 unsigned long flags;
556
557 spin_lock_irqsave(&server_list_lock, flags);
558 list_for_each_entry(server, &server_list, list) {
559 if (server->device_number == dev) {
560 spin_unlock_irqrestore(&server_list_lock, flags);
561 return server;
562 }
563 }
564 spin_unlock_irqrestore(&server_list_lock, flags);
565 return NULL;
566}
567
568struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
569{
570 struct msm_rpc_endpoint *ept;
571 unsigned long flags;
572
573 ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
574 if (!ept)
575 return NULL;
576 memset(ept, 0, sizeof(struct msm_rpc_endpoint));
577 ept->cid = (uint32_t) ept;
578 ept->pid = RPCROUTER_PID_LOCAL;
579 ept->dev = dev;
580
581 if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
582 struct rr_server *srv;
583 /*
584 * This is a userspace client which opened
585 * a program/ver devicenode. Bind the client
586 * to that destination
587 */
588 srv = rpcrouter_lookup_server_by_dev(dev);
589 /* TODO: bug? really? */
590 BUG_ON(!srv);
591
592 ept->dst_pid = srv->pid;
593 ept->dst_cid = srv->cid;
594 ept->dst_prog = cpu_to_be32(srv->prog);
595 ept->dst_vers = cpu_to_be32(srv->vers);
596 } else {
597 /* mark not connected */
598 ept->dst_pid = 0xffffffff;
599 }
600
601 init_waitqueue_head(&ept->wait_q);
602 INIT_LIST_HEAD(&ept->read_q);
603 spin_lock_init(&ept->read_q_lock);
604 INIT_LIST_HEAD(&ept->reply_avail_q);
605 INIT_LIST_HEAD(&ept->reply_pend_q);
606 spin_lock_init(&ept->reply_q_lock);
607 spin_lock_init(&ept->restart_lock);
608 init_waitqueue_head(&ept->restart_wait);
609 ept->restart_state = RESTART_NORMAL;
610 wake_lock_init(&ept->read_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_read");
611 wake_lock_init(&ept->reply_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_reply");
612 INIT_LIST_HEAD(&ept->incomplete);
613 spin_lock_init(&ept->incomplete_lock);
614
615 spin_lock_irqsave(&local_endpoints_lock, flags);
616 list_add_tail(&ept->list, &local_endpoints);
617 spin_unlock_irqrestore(&local_endpoints_lock, flags);
618 return ept;
619}
620
/*
 * Tear down a local endpoint: unlink it from local_endpoints, announce
 * REMOVE_CLIENT on every transport (unless it is the router port
 * itself), free queued replies, destroy its wake locks, and free it.
 *
 * Returns 0 on success or the error from rpcrouter_send_control_msg().
 * NOTE(review): on a send failure this returns early with the endpoint
 * already unlinked but not freed -- looks like a leak of ept and its
 * reply queues; confirm intended behavior before changing.
 */
int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
{
	int rc;
	union rr_control_msg msg;
	struct msm_rpc_reply *reply, *reply_tmp;
	unsigned long flags;
	struct rpcrouter_xprt_info *xprt_info;

	/* Endpoint with dst_pid = 0xffffffff corresponds to that of
	** router port. So don't send a REMOVE CLIENT message while
	** destroying it.*/
	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_del(&ept->list);
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
	if (ept->dst_pid != 0xffffffff) {
		msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
		msg.cli.pid = ept->pid;
		msg.cli.cid = ept->cid;

		RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
		/* broadcast the removal on every registered transport */
		mutex_lock(&xprt_info_list_lock);
		list_for_each_entry(xprt_info, &xprt_info_list, list) {
			rc = rpcrouter_send_control_msg(xprt_info, &msg);
			if (rc < 0) {
				mutex_unlock(&xprt_info_list_lock);
				return rc;
			}
		}
		mutex_unlock(&xprt_info_list_lock);
	}

	/* Free replies */
	spin_lock_irqsave(&ept->reply_q_lock, flags);
	list_for_each_entry_safe(reply, reply_tmp, &ept->reply_pend_q, list) {
		list_del(&reply->list);
		kfree(reply);
	}
	list_for_each_entry_safe(reply, reply_tmp, &ept->reply_avail_q, list) {
		list_del(&reply->list);
		kfree(reply);
	}
	spin_unlock_irqrestore(&ept->reply_q_lock, flags);

	wake_lock_destroy(&ept->read_q_wake_lock);
	wake_lock_destroy(&ept->reply_q_wake_lock);
	kfree(ept);
	return 0;
}
669
670static int rpcrouter_create_remote_endpoint(uint32_t pid, uint32_t cid)
671{
672 struct rr_remote_endpoint *new_c;
673 unsigned long flags;
674
675 new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
676 if (!new_c)
677 return -ENOMEM;
678 memset(new_c, 0, sizeof(struct rr_remote_endpoint));
679
680 new_c->cid = cid;
681 new_c->pid = pid;
682 init_waitqueue_head(&new_c->quota_wait);
683 spin_lock_init(&new_c->quota_lock);
684
685 spin_lock_irqsave(&remote_endpoints_lock, flags);
686 list_add_tail(&new_c->list, &remote_endpoints);
687 new_c->quota_restart_state = RESTART_NORMAL;
688 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
689 return 0;
690}
691
/*
 * Find the local endpoint whose cid matches @cid; NULL if none.
 *
 * Locking: takes no lock itself -- the caller must hold
 * local_endpoints_lock while calling and while using the returned
 * pointer (see the lookup in do_read_data()).
 */
static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
{
	struct msm_rpc_endpoint *ept;

	list_for_each_entry(ept, &local_endpoints, list) {
		if (ept->cid == cid)
			return ept;
	}
	return NULL;
}
702
703static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t pid,
704 uint32_t cid)
705{
706 struct rr_remote_endpoint *ept;
707 unsigned long flags;
708
709 spin_lock_irqsave(&remote_endpoints_lock, flags);
710 list_for_each_entry(ept, &remote_endpoints, list) {
711 if ((ept->pid == pid) && (ept->cid == cid)) {
712 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
713 return ept;
714 }
715 }
716 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
717 return NULL;
718}
719
/*
 * Handle re-announcement of a known server after its processor
 * restarted.
 *
 * Resets the remote endpoint's tx quota (unblocking writers waiting on
 * quota) and clears RESTART_PEND_SVR on every local endpoint bound to
 * (@prog, @vers), waking their restart and read waiters.
 */
static void handle_server_restart(struct rr_server *server,
				  uint32_t pid, uint32_t cid,
				  uint32_t prog, uint32_t vers)
{
	struct rr_remote_endpoint *r_ept;
	struct msm_rpc_endpoint *ept;
	unsigned long flags;
	r_ept = rpcrouter_lookup_remote_endpoint(pid, cid);
	if (r_ept && (r_ept->quota_restart_state !=
		      RESTART_NORMAL)) {
		/* resume flow control from a clean slate */
		spin_lock_irqsave(&r_ept->quota_lock, flags);
		r_ept->tx_quota_cntr = 0;
		r_ept->quota_restart_state =
			RESTART_NORMAL;
		spin_unlock_irqrestore(&r_ept->quota_lock, flags);
		D(KERN_INFO "rpcrouter: Remote EPT Reset %0x\n",
		  (unsigned int)r_ept);
		wake_up(&r_ept->quota_wait);
	}
	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_for_each_entry(ept, &local_endpoints, list) {
		if ((be32_to_cpu(ept->dst_prog) == prog) &&
		    (be32_to_cpu(ept->dst_vers) == vers) &&
		    (ept->restart_state & RESTART_PEND_SVR)) {
			/* server is back: clear only the server-pending bit */
			spin_lock(&ept->restart_lock);
			ept->restart_state &= ~RESTART_PEND_SVR;
			spin_unlock(&ept->restart_lock);
			D("rpcrouter: Local EPT Reset %08x:%08x \n",
			  prog, vers);
			wake_up(&ept->restart_wait);
			wake_up(&ept->wait_q);
		}
	}
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
}
755
756static int process_control_msg(struct rpcrouter_xprt_info *xprt_info,
757 union rr_control_msg *msg, int len)
758{
759 union rr_control_msg ctl;
760 struct rr_server *server;
761 struct rr_remote_endpoint *r_ept;
762 int rc = 0;
763 unsigned long flags;
764 static int first = 1;
765
766 if (len != sizeof(*msg)) {
767 RR(KERN_ERR "rpcrouter: r2r msg size %d != %d\n",
768 len, sizeof(*msg));
769 return -EINVAL;
770 }
771
772 switch (msg->cmd) {
773 case RPCROUTER_CTRL_CMD_HELLO:
774 RR("o HELLO PID %d\n", xprt_info->remote_pid);
775 memset(&ctl, 0, sizeof(ctl));
776 ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
777 rpcrouter_send_control_msg(xprt_info, &ctl);
778
779 xprt_info->initialized = 1;
780
781 /* Send list of servers one at a time */
782 ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
783
784 /* TODO: long time to hold a spinlock... */
785 spin_lock_irqsave(&server_list_lock, flags);
786 list_for_each_entry(server, &server_list, list) {
787 if (server->pid != RPCROUTER_PID_LOCAL)
788 continue;
789 ctl.srv.pid = server->pid;
790 ctl.srv.cid = server->cid;
791 ctl.srv.prog = server->prog;
792 ctl.srv.vers = server->vers;
793
794 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
795 server->pid, server->cid,
796 server->prog, server->vers);
797
798 rpcrouter_send_control_msg(xprt_info, &ctl);
799 }
800 spin_unlock_irqrestore(&server_list_lock, flags);
801
802 if (first) {
803 first = 0;
804 queue_work(rpcrouter_workqueue,
805 &work_create_rpcrouter_pdev);
806 }
807 break;
808
809 case RPCROUTER_CTRL_CMD_RESUME_TX:
810 RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
811
812 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.pid,
813 msg->cli.cid);
814 if (!r_ept) {
815 printk(KERN_ERR
816 "rpcrouter: Unable to resume client\n");
817 break;
818 }
819 spin_lock_irqsave(&r_ept->quota_lock, flags);
820 r_ept->tx_quota_cntr = 0;
821 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
822 wake_up(&r_ept->quota_wait);
823 break;
824
825 case RPCROUTER_CTRL_CMD_NEW_SERVER:
826 if (msg->srv.vers == 0) {
827 pr_err(
828 "rpcrouter: Server create rejected, version = 0, "
829 "program = %08x\n", msg->srv.prog);
830 break;
831 }
832
833 RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
834 msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);
835
836 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
837
838 if (!server) {
839 server = rpcrouter_create_server(
840 msg->srv.pid, msg->srv.cid,
841 msg->srv.prog, msg->srv.vers);
842 if (!server)
843 return -ENOMEM;
844 /*
845 * XXX: Verify that its okay to add the
846 * client to our remote client list
847 * if we get a NEW_SERVER notification
848 */
849 if (!rpcrouter_lookup_remote_endpoint(msg->srv.pid,
850 msg->srv.cid)) {
851 rc = rpcrouter_create_remote_endpoint(
852 msg->srv.pid, msg->srv.cid);
853 if (rc < 0)
854 printk(KERN_ERR
855 "rpcrouter:Client create"
856 "error (%d)\n", rc);
857 }
858 rpcrouter_register_board_dev(server);
859 schedule_work(&work_create_pdevs);
860 wake_up(&newserver_wait);
861 } else {
862 if ((server->pid == msg->srv.pid) &&
863 (server->cid == msg->srv.cid)) {
864 handle_server_restart(server,
865 msg->srv.pid,
866 msg->srv.cid,
867 msg->srv.prog,
868 msg->srv.vers);
869 } else {
870 server->pid = msg->srv.pid;
871 server->cid = msg->srv.cid;
872 }
873 }
874 break;
875
876 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
877 RR("o REMOVE_SERVER prog=%08x:%d\n",
878 msg->srv.prog, msg->srv.vers);
879 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
880 if (server)
881 rpcrouter_destroy_server(server);
882 break;
883
884 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
885 RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
886 if (msg->cli.pid == RPCROUTER_PID_LOCAL) {
887 printk(KERN_ERR
888 "rpcrouter: Denying remote removal of "
889 "local client\n");
890 break;
891 }
892 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.pid,
893 msg->cli.cid);
894 if (r_ept) {
895 spin_lock_irqsave(&remote_endpoints_lock, flags);
896 list_del(&r_ept->list);
897 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
898 kfree(r_ept);
899 }
900
901 /* Notify local clients of this event */
902 printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n");
903 rc = -ENOSYS;
904
905 break;
906 case RPCROUTER_CTRL_CMD_PING:
907 /* No action needed for ping messages received */
908 RR("o PING\n");
909 break;
910 default:
911 RR("o UNKNOWN(%08x)\n", msg->cmd);
912 rc = -ENOSYS;
913 }
914
915 return rc;
916}
917
918static void do_create_rpcrouter_pdev(struct work_struct *work)
919{
920 D("%s: modem rpc router up\n", __func__);
921 platform_device_register(&rpcrouter_pdev);
922 complete_all(&rpc_remote_router_up);
923}
924
/*
 * Work handler that creates platform devices for remote servers.
 *
 * Creates at most one pdev per invocation: server_list_lock is dropped
 * before calling msm_rpcrouter_create_server_pdev(), and the work then
 * re-queues itself to rescan the list from the start (pdev_name[0] != 0
 * marks servers already handled).  Terminates when a full scan finds
 * nothing left to create.
 */
static void do_create_pdevs(struct work_struct *work)
{
	unsigned long flags;
	struct rr_server *server;

	/* TODO: race if destroyed while being registered */
	spin_lock_irqsave(&server_list_lock, flags);
	list_for_each_entry(server, &server_list, list) {
		if (server->pid != RPCROUTER_PID_LOCAL) {
			if (server->pdev_name[0] == 0) {
				sprintf(server->pdev_name, "rs%.8x",
					server->prog);
				spin_unlock_irqrestore(&server_list_lock,
						       flags);
				msm_rpcrouter_create_server_pdev(server);
				schedule_work(&work_create_pdevs);
				return;
			}
		}
	}
	spin_unlock_irqrestore(&server_list_lock, flags);
}
947
948static void *rr_malloc(unsigned sz)
949{
950 void *ptr = kmalloc(sz, GFP_KERNEL);
951 if (ptr)
952 return ptr;
953
954 printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz);
955 do {
956 ptr = kmalloc(sz, GFP_KERNEL);
957 } while (!ptr);
958
959 return ptr;
960}
961
/*
 * Blocking read of exactly @len bytes from the transport into @data.
 *
 * Sleeps until the transport reports at least @len bytes available,
 * then performs the read under xprt_info->lock.  Before sleeping the
 * transport wakelock is released so the system may suspend while no
 * data is pending; need_len records what the sleeper is waiting for.
 *
 * Returns 0 on success, -EIO on a short read or once abort_data_read
 * is set (e.g. during transport teardown).
 */
static int rr_read(struct rpcrouter_xprt_info *xprt_info,
		   void *data, uint32_t len)
{
	int rc;
	unsigned long flags;

	while (!xprt_info->abort_data_read) {
		spin_lock_irqsave(&xprt_info->lock, flags);
		if (xprt_info->xprt->read_avail() >= len) {
			rc = xprt_info->xprt->read(data, len);
			spin_unlock_irqrestore(&xprt_info->lock, flags);
			/* re-check the abort flag: teardown may have raced */
			if (rc == len && !xprt_info->abort_data_read)
				return 0;
			else
				return -EIO;
		}
		/* not enough data yet: note how much we need, drop the
		 * wakelock, and sleep until the transport notifies us */
		xprt_info->need_len = len;
		wake_unlock(&xprt_info->wakelock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);

		wait_event(xprt_info->read_wait,
			   xprt_info->xprt->read_avail() >= len
			   || xprt_info->abort_data_read);
	}
	return -EIO;
}
988
#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
/* Map an rr_header type to a fixed-width name for RAW_HDR logging.
 * (Names are padded so log columns line up.) */
static char *type_to_str(int i)
{
	switch (i) {
	case RPCROUTER_CTRL_CMD_DATA:
		return "data    ";
	case RPCROUTER_CTRL_CMD_HELLO:
		return "hello   ";
	case RPCROUTER_CTRL_CMD_BYE:
		return "bye     ";
	case RPCROUTER_CTRL_CMD_NEW_SERVER:
		return "new_srvr";
	case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
		return "rmv_srvr";
	case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
		return "rmv_clnt";
	case RPCROUTER_CTRL_CMD_RESUME_TX:
		return "resum_tx";
	case RPCROUTER_CTRL_CMD_EXIT:
		return "cmd_exit";
	default:
		return "invalid";
	}
}
#endif
1014
1015static void do_read_data(struct work_struct *work)
1016{
1017 struct rr_header hdr;
1018 struct rr_packet *pkt;
1019 struct rr_fragment *frag;
1020 struct msm_rpc_endpoint *ept;
1021#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1022 struct rpc_request_hdr *rq;
1023#endif
1024 uint32_t pm, mid;
1025 unsigned long flags;
1026
1027 struct rpcrouter_xprt_info *xprt_info =
1028 container_of(work,
1029 struct rpcrouter_xprt_info,
1030 read_data);
1031
1032 if (rr_read(xprt_info, &hdr, sizeof(hdr)))
1033 goto fail_io;
1034
1035 RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
1036 hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
1037 hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
1038 RAW_HDR("[r rr_h] "
1039 "ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
1040 "confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
1041 hdr.version, type_to_str(hdr.type), hdr.src_pid, hdr.src_cid,
1042 hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
1043
1044 if (hdr.version != RPCROUTER_VERSION) {
1045 DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
1046 goto fail_data;
1047 }
1048 if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
1049 DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
1050 goto fail_data;
1051 }
1052
1053 if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
1054 if (xprt_info->remote_pid == -1) {
1055 xprt_info->remote_pid = hdr.src_pid;
1056
1057 /* do restart notification */
1058 modem_reset_startup(xprt_info);
1059 }
1060
1061 if (rr_read(xprt_info, xprt_info->r2r_buf, hdr.size))
1062 goto fail_io;
1063 process_control_msg(xprt_info,
1064 (void *) xprt_info->r2r_buf, hdr.size);
1065 goto done;
1066 }
1067
1068 if (hdr.size < sizeof(pm)) {
1069 DIAG("runt packet (no pacmark)\n");
1070 goto fail_data;
1071 }
1072 if (rr_read(xprt_info, &pm, sizeof(pm)))
1073 goto fail_io;
1074
1075 hdr.size -= sizeof(pm);
1076
1077 frag = rr_malloc(sizeof(*frag));
1078 frag->next = NULL;
1079 frag->length = hdr.size;
1080 if (rr_read(xprt_info, frag->data, hdr.size)) {
1081 kfree(frag);
1082 goto fail_io;
1083 }
1084
1085#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1086 if ((smd_rpcrouter_debug_mask & RAW_PMR) &&
1087 ((pm >> 30 & 0x1) || (pm >> 31 & 0x1))) {
1088 uint32_t xid = 0;
1089 if (pm >> 30 & 0x1) {
1090 rq = (struct rpc_request_hdr *) frag->data;
1091 xid = ntohl(rq->xid);
1092 }
1093 if ((pm >> 31 & 0x1) || (pm >> 30 & 0x1))
1094 RAW_PMR_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
1095 "len=%3i,dst_cid=%08x\n",
1096 xid,
1097 pm >> 30 & 0x1,
1098 pm >> 31 & 0x1,
1099 pm >> 16 & 0xFF,
1100 pm & 0xFFFF, hdr.dst_cid);
1101 }
1102
1103 if (smd_rpcrouter_debug_mask & SMEM_LOG) {
1104 rq = (struct rpc_request_hdr *) frag->data;
1105 if (rq->xid == 0)
1106 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1107 RPC_ROUTER_LOG_EVENT_MID_READ,
1108 PACMARK_MID(pm),
1109 hdr.dst_cid,
1110 hdr.src_cid);
1111 else
1112 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1113 RPC_ROUTER_LOG_EVENT_MSG_READ,
1114 ntohl(rq->xid),
1115 hdr.dst_cid,
1116 hdr.src_cid);
1117 }
1118#endif
1119
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001120 spin_lock_irqsave(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001121 ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
1122 if (!ept) {
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001123 spin_unlock_irqrestore(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001124 DIAG("no local ept for cid %08x\n", hdr.dst_cid);
1125 kfree(frag);
1126 goto done;
1127 }
1128
1129 /* See if there is already a partial packet that matches our mid
1130 * and if so, append this fragment to that packet.
1131 */
1132 mid = PACMARK_MID(pm);
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001133 spin_lock(&ept->incomplete_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001134 list_for_each_entry(pkt, &ept->incomplete, list) {
1135 if (pkt->mid == mid) {
1136 pkt->last->next = frag;
1137 pkt->last = frag;
1138 pkt->length += frag->length;
1139 if (PACMARK_LAST(pm)) {
1140 list_del(&pkt->list);
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001141 spin_unlock(&ept->incomplete_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001142 goto packet_complete;
1143 }
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001144 spin_unlock(&ept->incomplete_lock);
1145 spin_unlock_irqrestore(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001146 goto done;
1147 }
1148 }
Karthikeyan Ramasubramanian9dea7212011-09-07 18:37:16 -06001149 spin_unlock(&ept->incomplete_lock);
1150 spin_unlock_irqrestore(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001151 /* This mid is new -- create a packet for it, and put it on
1152 * the incomplete list if this fragment is not a last fragment,
1153 * otherwise put it on the read queue.
1154 */
1155 pkt = rr_malloc(sizeof(struct rr_packet));
1156 pkt->first = frag;
1157 pkt->last = frag;
1158 memcpy(&pkt->hdr, &hdr, sizeof(hdr));
1159 pkt->mid = mid;
1160 pkt->length = frag->length;
Karthikeyan Ramasubramanian9dea7212011-09-07 18:37:16 -06001161
1162 spin_lock_irqsave(&local_endpoints_lock, flags);
1163 ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
1164 if (!ept) {
1165 spin_unlock_irqrestore(&local_endpoints_lock, flags);
1166 DIAG("no local ept for cid %08x\n", hdr.dst_cid);
1167 kfree(frag);
1168 kfree(pkt);
1169 goto done;
1170 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001171 if (!PACMARK_LAST(pm)) {
Karthikeyan Ramasubramanian9dea7212011-09-07 18:37:16 -06001172 spin_lock(&ept->incomplete_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001173 list_add_tail(&pkt->list, &ept->incomplete);
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001174 spin_unlock(&ept->incomplete_lock);
1175 spin_unlock_irqrestore(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001176 goto done;
1177 }
1178
1179packet_complete:
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001180 spin_lock(&ept->read_q_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001181 D("%s: take read lock on ept %p\n", __func__, ept);
1182 wake_lock(&ept->read_q_wake_lock);
1183 list_add_tail(&pkt->list, &ept->read_q);
1184 wake_up(&ept->wait_q);
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001185 spin_unlock(&ept->read_q_lock);
1186 spin_unlock_irqrestore(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001187done:
1188
1189 if (hdr.confirm_rx) {
1190 union rr_control_msg msg;
1191
1192 msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
1193 msg.cli.pid = hdr.dst_pid;
1194 msg.cli.cid = hdr.dst_cid;
1195
1196 RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
1197 rpcrouter_send_control_msg(xprt_info, &msg);
1198
1199#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1200 if (smd_rpcrouter_debug_mask & SMEM_LOG)
1201 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1202 RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT,
1203 RPCROUTER_PID_LOCAL,
1204 hdr.dst_cid,
1205 hdr.src_cid);
1206#endif
1207
1208 }
1209
1210 /* don't requeue if we should be shutting down */
1211 if (!xprt_info->abort_data_read) {
1212 queue_work(xprt_info->workqueue, &xprt_info->read_data);
1213 return;
1214 }
1215
1216 D("rpc_router terminating for '%s'\n",
1217 xprt_info->xprt->name);
1218
1219fail_io:
1220fail_data:
1221 D(KERN_ERR "rpc_router has died for '%s'\n",
1222 xprt_info->xprt->name);
1223}
1224
1225void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
1226 uint32_t vers, uint32_t proc)
1227{
1228 memset(hdr, 0, sizeof(struct rpc_request_hdr));
1229 hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
1230 hdr->rpc_vers = cpu_to_be32(2);
1231 hdr->prog = cpu_to_be32(prog);
1232 hdr->vers = cpu_to_be32(vers);
1233 hdr->procedure = cpu_to_be32(proc);
1234}
1235EXPORT_SYMBOL(msm_rpc_setup_req);
1236
1237struct msm_rpc_endpoint *msm_rpc_open(void)
1238{
1239 struct msm_rpc_endpoint *ept;
1240
1241 ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
1242 if (ept == NULL)
1243 return ERR_PTR(-ENOMEM);
1244
1245 return ept;
1246}
1247
/*
 * msm_rpc_read_wakeup() - force a blocked reader off this endpoint.
 *
 * Sets the forced_wakeup flag *before* waking wait_q so that
 * __msm_rpc_read() observes the flag when it re-evaluates its wait
 * condition; the reader then clears the flag and returns 0 (no data).
 */
void msm_rpc_read_wakeup(struct msm_rpc_endpoint *ept)
{
	ept->forced_wakeup = 1;
	wake_up(&ept->wait_q);
}
1253
1254int msm_rpc_close(struct msm_rpc_endpoint *ept)
1255{
1256 if (!ept)
1257 return -EINVAL;
1258 return msm_rpcrouter_destroy_local_endpoint(ept);
1259}
1260EXPORT_SYMBOL(msm_rpc_close);
1261
/*
 * msm_rpc_write_pkt() - transmit one pacmark-framed fragment.
 *
 * Fills in the routing header @hdr (dst_pid/dst_cid must already be
 * set by the caller), waits for tx quota on the remote endpoint
 * @r_ept (if any), then writes header + pacmark word + payload to the
 * transport for hdr->dst_pid.
 *
 * @buffer/@count: fragment payload (at most one pacmark fragment).
 * @first/@last:   pacmark first/last-fragment flags.
 * @mid:           pacmark message id tying fragments together.
 *
 * Returns the number of bytes consumed on the transport
 * (header + pacmark + payload) on success, -ENETRESET if the remote
 * side restarted or the transport vanished, -ERESTARTSYS on signal
 * (unless the endpoint is MSM_RPC_UNINTERRUPTIBLE).
 *
 * Locking: takes r_ept->quota_lock while accounting quota, then
 * xprt_info->lock nested with ept->restart_lock around the actual
 * writes.  xprt_info_list_lock is only held while looking the
 * transport up (and is re-taken after every sleep because the
 * transport may be torn down while we sleep).
 */
static int msm_rpc_write_pkt(
	struct rr_header *hdr,
	struct msm_rpc_endpoint *ept,
	struct rr_remote_endpoint *r_ept,
	void *buffer,
	int count,
	int first,
	int last,
	uint32_t mid
	)
{
#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	struct rpc_request_hdr *rq = buffer;
	uint32_t event_id;
#endif
	uint32_t pacmark;
	unsigned long flags = 0;
	int rc;
	struct rpcrouter_xprt_info *xprt_info;
	int needed;

	DEFINE_WAIT(__wait);

	/* Create routing header */
	hdr->type = RPCROUTER_CTRL_CMD_DATA;
	hdr->version = RPCROUTER_VERSION;
	hdr->src_pid = ept->pid;
	hdr->src_cid = ept->cid;
	hdr->confirm_rx = 0;
	/* payload plus the 32-bit pacmark word written below */
	hdr->size = count + sizeof(uint32_t);

	rc = wait_for_restart_and_notify(ept);
	if (rc)
		return rc;

	if (r_ept) {
		/* Block until the remote endpoint has tx quota left, a
		 * restart is flagged, or a signal arrives.  Note: both
		 * "quota available" break paths below exit the loop with
		 * quota_lock still held. */
		for (;;) {
			prepare_to_wait(&r_ept->quota_wait, &__wait,
					TASK_INTERRUPTIBLE);
			spin_lock_irqsave(&r_ept->quota_lock, flags);
			if ((r_ept->tx_quota_cntr <
			     RPCROUTER_DEFAULT_RX_QUOTA) ||
			    (r_ept->quota_restart_state != RESTART_NORMAL))
				break;
			if (signal_pending(current) &&
			    (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
				break;
			spin_unlock_irqrestore(&r_ept->quota_lock, flags);
			schedule();
		}
		finish_wait(&r_ept->quota_wait, &__wait);

		/* quota_lock is held from the break above */
		if (r_ept->quota_restart_state != RESTART_NORMAL) {
			spin_lock(&ept->restart_lock);
			ept->restart_state &= ~RESTART_PEND_NTFY;
			spin_unlock(&ept->restart_lock);
			spin_unlock_irqrestore(&r_ept->quota_lock, flags);
			return -ENETRESET;
		}

		if (signal_pending(current) &&
		    (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
			spin_unlock_irqrestore(&r_ept->quota_lock, flags);
			return -ERESTARTSYS;
		}
		r_ept->tx_quota_cntr++;
		/* When the quota window fills, ask the receiver to
		 * confirm so it can grant us a new window (it answers
		 * with RESUME_TX, which resets tx_quota_cntr). */
		if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA) {
			hdr->confirm_rx = 1;

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
			if (smd_rpcrouter_debug_mask & SMEM_LOG) {
				event_id = (rq->xid == 0) ?
					RPC_ROUTER_LOG_EVENT_MID_CFM_REQ :
					RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ;

				smem_log_event(SMEM_LOG_PROC_ID_APPS | event_id,
					       hdr->dst_pid,
					       hdr->dst_cid,
					       hdr->src_cid);
			}
#endif

		}
	}
	pacmark = PACMARK(count, mid, first, last);

	if (r_ept)
		spin_unlock_irqrestore(&r_ept->quota_lock, flags);

	mutex_lock(&xprt_info_list_lock);
	xprt_info = rpcrouter_get_xprt_info(hdr->dst_pid);
	if (!xprt_info) {
		mutex_unlock(&xprt_info_list_lock);
		return -ENETRESET;
	}
	/* take xprt_info->lock before dropping the list mutex so the
	 * transport cannot be destroyed under us */
	spin_lock_irqsave(&xprt_info->lock, flags);
	mutex_unlock(&xprt_info_list_lock);
	spin_lock(&ept->restart_lock);
	if (ept->restart_state != RESTART_NORMAL) {
		ept->restart_state &= ~RESTART_PEND_NTFY;
		spin_unlock(&ept->restart_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		return -ENETRESET;
	}

	/* Poll (250 ms period) until the transport fifo has room for
	 * the whole header+pacmark+payload, dropping all locks across
	 * each sleep. */
	needed = sizeof(*hdr) + hdr->size;
	while ((ept->restart_state == RESTART_NORMAL) &&
	       (xprt_info->xprt->write_avail() < needed)) {
		spin_unlock(&ept->restart_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		msleep(250);

		/* refresh xprt pointer to ensure that it hasn't
		 * been deleted since our last retrieval */
		mutex_lock(&xprt_info_list_lock);
		xprt_info = rpcrouter_get_xprt_info(hdr->dst_pid);
		if (!xprt_info) {
			mutex_unlock(&xprt_info_list_lock);
			return -ENETRESET;
		}
		spin_lock_irqsave(&xprt_info->lock, flags);
		mutex_unlock(&xprt_info_list_lock);
		spin_lock(&ept->restart_lock);
	}
	if (ept->restart_state != RESTART_NORMAL) {
		ept->restart_state &= ~RESTART_PEND_NTFY;
		spin_unlock(&ept->restart_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		return -ENETRESET;
	}

	/* TODO: deal with full fifo */
	xprt_info->xprt->write(hdr, sizeof(*hdr), HEADER);
	RAW_HDR("[w rr_h] "
		"ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
		"confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
		hdr->version, type_to_str(hdr->type),
		hdr->src_pid, hdr->src_cid,
		hdr->confirm_rx, hdr->size, hdr->dst_pid, hdr->dst_cid);
	xprt_info->xprt->write(&pacmark, sizeof(pacmark), PACKMARK);

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	/* pacmark bit 30 = first fragment, bit 31 = last fragment */
	if ((smd_rpcrouter_debug_mask & RAW_PMW) &&
	    ((pacmark >> 30 & 0x1) || (pacmark >> 31 & 0x1))) {
		uint32_t xid = 0;
		if (pacmark >> 30 & 0x1)
			xid = ntohl(rq->xid);
		if ((pacmark >> 31 & 0x1) || (pacmark >> 30 & 0x1))
			RAW_PMW_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
				       "len=%3i,src_cid=%x\n",
				       xid,
				       pacmark >> 30 & 0x1,
				       pacmark >> 31 & 0x1,
				       pacmark >> 16 & 0xFF,
				       pacmark & 0xFFFF, hdr->src_cid);
	}
#endif

	xprt_info->xprt->write(buffer, count, PAYLOAD);
	spin_unlock(&ept->restart_lock);
	spin_unlock_irqrestore(&xprt_info->lock, flags);

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	if (smd_rpcrouter_debug_mask & SMEM_LOG) {
		if (rq->xid == 0)
			smem_log_event(SMEM_LOG_PROC_ID_APPS |
				       RPC_ROUTER_LOG_EVENT_MID_WRITTEN,
				       PACMARK_MID(pacmark),
				       hdr->dst_cid,
				       hdr->src_cid);
		else
			smem_log_event(SMEM_LOG_PROC_ID_APPS |
				       RPC_ROUTER_LOG_EVENT_MSG_WRITTEN,
				       ntohl(rq->xid),
				       hdr->dst_cid,
				       hdr->src_cid);
	}
#endif

	return needed;
}
1443
1444static struct msm_rpc_reply *get_pend_reply(struct msm_rpc_endpoint *ept,
1445 uint32_t xid)
1446{
1447 unsigned long flags;
1448 struct msm_rpc_reply *reply;
1449 spin_lock_irqsave(&ept->reply_q_lock, flags);
1450 list_for_each_entry(reply, &ept->reply_pend_q, list) {
1451 if (reply->xid == xid) {
1452 list_del(&reply->list);
1453 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1454 return reply;
1455 }
1456 }
1457 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1458 return NULL;
1459}
1460
1461void get_requesting_client(struct msm_rpc_endpoint *ept, uint32_t xid,
1462 struct msm_rpc_client_info *clnt_info)
1463{
1464 unsigned long flags;
1465 struct msm_rpc_reply *reply;
1466
1467 if (!clnt_info)
1468 return;
1469
1470 spin_lock_irqsave(&ept->reply_q_lock, flags);
1471 list_for_each_entry(reply, &ept->reply_pend_q, list) {
1472 if (reply->xid == xid) {
1473 clnt_info->pid = reply->pid;
1474 clnt_info->cid = reply->cid;
1475 clnt_info->prog = reply->prog;
1476 clnt_info->vers = reply->vers;
1477 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1478 return;
1479 }
1480 }
1481 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1482 return;
1483}
1484
1485static void set_avail_reply(struct msm_rpc_endpoint *ept,
1486 struct msm_rpc_reply *reply)
1487{
1488 unsigned long flags;
1489 spin_lock_irqsave(&ept->reply_q_lock, flags);
1490 list_add_tail(&reply->list, &ept->reply_avail_q);
1491 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1492}
1493
1494static struct msm_rpc_reply *get_avail_reply(struct msm_rpc_endpoint *ept)
1495{
1496 struct msm_rpc_reply *reply;
1497 unsigned long flags;
1498 if (list_empty(&ept->reply_avail_q)) {
1499 if (ept->reply_cnt >= RPCROUTER_PEND_REPLIES_MAX) {
1500 printk(KERN_ERR
1501 "exceeding max replies of %d \n",
1502 RPCROUTER_PEND_REPLIES_MAX);
1503 return 0;
1504 }
1505 reply = kmalloc(sizeof(struct msm_rpc_reply), GFP_KERNEL);
1506 if (!reply)
1507 return 0;
1508 D("Adding reply 0x%08x \n", (unsigned int)reply);
1509 memset(reply, 0, sizeof(struct msm_rpc_reply));
1510 spin_lock_irqsave(&ept->reply_q_lock, flags);
1511 ept->reply_cnt++;
1512 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1513 } else {
1514 spin_lock_irqsave(&ept->reply_q_lock, flags);
1515 reply = list_first_entry(&ept->reply_avail_q,
1516 struct msm_rpc_reply,
1517 list);
1518 list_del(&reply->list);
1519 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1520 }
1521 return reply;
1522}
1523
1524static void set_pend_reply(struct msm_rpc_endpoint *ept,
1525 struct msm_rpc_reply *reply)
1526{
1527 unsigned long flags;
1528 spin_lock_irqsave(&ept->reply_q_lock, flags);
1529 D("%s: take reply lock on ept %p\n", __func__, ept);
1530 wake_lock(&ept->reply_q_wake_lock);
1531 list_add_tail(&reply->list, &ept->reply_pend_q);
1532 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1533}
1534
/*
 * msm_rpc_write() - send a complete RPC message from @buffer.
 *
 * Inspects the ONCRPC header to decide whether this is a CALL (routed
 * to the endpoint's bound server) or a REPLY (routed back to the
 * client recorded by __msm_rpc_read()), then splits the message into
 * pacmark fragments and transmits each via msm_rpc_write_pkt().
 *
 * Returns @count on success or a negative errno (-EINVAL runt/
 * mismatched packet, -ENOTCONN unbound endpoint, -EHOSTUNREACH no
 * route, or any error from msm_rpc_write_pkt()).
 */
int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
{
	struct rr_header hdr;
	struct rpc_request_hdr *rq = buffer;
	struct rr_remote_endpoint *r_ept;
	struct msm_rpc_reply *reply = NULL;
	int max_tx;
	int tx_cnt;
	char *tx_buf;
	int rc;
	int first_pkt = 1;
	uint32_t mid;
	unsigned long flags;

	/* snoop the RPC packet and enforce permissions */

	/* has to have at least the xid and type fields */
	if (count < (sizeof(uint32_t) * 2)) {
		printk(KERN_ERR "rr_write: rejecting runt packet\n");
		return -EINVAL;
	}

	if (rq->type == 0) {
		/* RPC CALL */
		if (count < (sizeof(uint32_t) * 6)) {
			printk(KERN_ERR
			       "rr_write: rejecting runt call packet\n");
			return -EINVAL;
		}
		if (ept->dst_pid == 0xffffffff) {
			printk(KERN_ERR "rr_write: not connected\n");
			return -ENOTCONN;
		}
		/* program must match exactly; only bits 16-27 of the
		 * version (the major, in this versioning scheme) are
		 * required to match */
		if ((ept->dst_prog != rq->prog) ||
		    ((be32_to_cpu(ept->dst_vers) & 0x0fff0000) !=
		     (be32_to_cpu(rq->vers) & 0x0fff0000))) {
			printk(KERN_ERR
			       "rr_write: cannot write to %08x:%08x "
			       "(bound to %08x:%08x)\n",
			       be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
			       be32_to_cpu(ept->dst_prog),
			       be32_to_cpu(ept->dst_vers));
			return -EINVAL;
		}
		hdr.dst_pid = ept->dst_pid;
		hdr.dst_cid = ept->dst_cid;
		IO("CALL to %08x:%d @ %d:%08x (%d bytes)\n",
		   be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
		   ept->dst_pid, ept->dst_cid, count);
	} else {
		/* RPC REPLY: route back to whoever made the call; the
		 * pending tag was queued by __msm_rpc_read() */
		reply = get_pend_reply(ept, rq->xid);
		if (!reply) {
			printk(KERN_ERR
			       "rr_write: rejecting, reply not found \n");
			return -EINVAL;
		}
		hdr.dst_pid = reply->pid;
		hdr.dst_cid = reply->cid;
		IO("REPLY to xid=%d @ %d:%08x (%d bytes)\n",
		   be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
	}

	r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_pid, hdr.dst_cid);

	if ((!r_ept) && (hdr.dst_pid != RPCROUTER_PID_LOCAL)) {
		printk(KERN_ERR
			"msm_rpc_write(): No route to ept "
			"[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
		count = -EHOSTUNREACH;
		goto write_release_lock;
	}

	tx_cnt = count;
	tx_buf = buffer;
	/* one message id ties all fragments of this message together */
	mid = atomic_add_return(1, &pm_mid) & 0xFF;
	/* The modem's router can only take 500 bytes of data. The
	   first 8 bytes it uses on the modem side for addressing,
	   the next 4 bytes are for the pacmark header. */
	max_tx = RPCROUTER_MSGSIZE_MAX - 8 - sizeof(uint32_t);
	IO("Writing %d bytes, max pkt size is %d\n",
	   tx_cnt, max_tx);
	while (tx_cnt > 0) {
		if (tx_cnt > max_tx) {
			/* middle fragment: last=0 */
			rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
					       tx_buf, max_tx,
					       first_pkt, 0, mid);
			if (rc < 0) {
				count = rc;
				goto write_release_lock;
			}
			IO("Wrote %d bytes First %d, Last 0 mid %d\n",
			   rc, first_pkt, mid);
			tx_cnt -= max_tx;
			tx_buf += max_tx;
		} else {
			/* final fragment: last=1 */
			rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
					       tx_buf, tx_cnt,
					       first_pkt, 1, mid);
			if (rc < 0) {
				count = rc;
				goto write_release_lock;
			}
			IO("Wrote %d bytes First %d Last 1 mid %d\n",
			   rc, first_pkt, mid);
			break;
		}
		first_pkt = 0;
	}

 write_release_lock:
	/* if reply, release wakelock after writing to the transport */
	if (rq->type != 0) {
		/* Upon failure, add reply tag to the pending list.
		** Else add reply tag to the avail/free list. */
		if (count < 0)
			set_pend_reply(ept, reply);
		else
			set_avail_reply(ept, reply);

		spin_lock_irqsave(&ept->reply_q_lock, flags);
		if (list_empty(&ept->reply_pend_q)) {
			D("%s: release reply lock on ept %p\n", __func__, ept);
			wake_unlock(&ept->reply_q_wake_lock);
		}
		spin_unlock_irqrestore(&ept->reply_q_lock, flags);
	}

	return count;
}
EXPORT_SYMBOL(msm_rpc_write);
1666
1667/*
1668 * NOTE: It is the responsibility of the caller to kfree buffer
1669 */
1670int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
1671 unsigned user_len, long timeout)
1672{
1673 struct rr_fragment *frag, *next;
1674 char *buf;
1675 int rc;
1676
1677 rc = __msm_rpc_read(ept, &frag, user_len, timeout);
1678 if (rc <= 0)
1679 return rc;
1680
1681 /* single-fragment messages conveniently can be
1682 * returned as-is (the buffer is at the front)
1683 */
1684 if (frag->next == 0) {
1685 *buffer = (void*) frag;
1686 return rc;
1687 }
1688
1689 /* multi-fragment messages, we have to do it the
1690 * hard way, which is rather disgusting right now
1691 */
1692 buf = rr_malloc(rc);
1693 *buffer = buf;
1694
1695 while (frag != NULL) {
1696 memcpy(buf, frag->data, frag->length);
1697 next = frag->next;
1698 buf += frag->length;
1699 kfree(frag);
1700 frag = next;
1701 }
1702
1703 return rc;
1704}
1705EXPORT_SYMBOL(msm_rpc_read);
1706
1707int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
1708 void *_request, int request_size,
1709 long timeout)
1710{
1711 return msm_rpc_call_reply(ept, proc,
1712 _request, request_size,
1713 NULL, 0, timeout);
1714}
1715EXPORT_SYMBOL(msm_rpc_call);
1716
1717int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
1718 void *_request, int request_size,
1719 void *_reply, int reply_size,
1720 long timeout)
1721{
1722 struct rpc_request_hdr *req = _request;
1723 struct rpc_reply_hdr *reply;
1724 int rc;
1725
1726 if (request_size < sizeof(*req))
1727 return -ETOOSMALL;
1728
1729 if (ept->dst_pid == 0xffffffff)
1730 return -ENOTCONN;
1731
1732 memset(req, 0, sizeof(*req));
1733 req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
1734 req->rpc_vers = cpu_to_be32(2);
1735 req->prog = ept->dst_prog;
1736 req->vers = ept->dst_vers;
1737 req->procedure = cpu_to_be32(proc);
1738
1739 rc = msm_rpc_write(ept, req, request_size);
1740 if (rc < 0)
1741 return rc;
1742
1743 for (;;) {
1744 rc = msm_rpc_read(ept, (void*) &reply, -1, timeout);
1745 if (rc < 0)
1746 return rc;
1747 if (rc < (3 * sizeof(uint32_t))) {
1748 rc = -EIO;
1749 break;
1750 }
1751 /* we should not get CALL packets -- ignore them */
1752 if (reply->type == 0) {
1753 kfree(reply);
1754 continue;
1755 }
1756 /* If an earlier call timed out, we could get the (no
1757 * longer wanted) reply for it. Ignore replies that
1758 * we don't expect
1759 */
1760 if (reply->xid != req->xid) {
1761 kfree(reply);
1762 continue;
1763 }
1764 if (reply->reply_stat != 0) {
1765 rc = -EPERM;
1766 break;
1767 }
1768 if (reply->data.acc_hdr.accept_stat != 0) {
1769 rc = -EINVAL;
1770 break;
1771 }
1772 if (_reply == NULL) {
1773 rc = 0;
1774 break;
1775 }
1776 if (rc > reply_size) {
1777 rc = -ENOMEM;
1778 } else {
1779 memcpy(_reply, reply, rc);
1780 }
1781 break;
1782 }
1783 kfree(reply);
1784 return rc;
1785}
1786EXPORT_SYMBOL(msm_rpc_call_reply);
1787
1788
1789static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
1790{
1791 unsigned long flags;
1792 int ret;
1793 spin_lock_irqsave(&ept->read_q_lock, flags);
1794 ret = !list_empty(&ept->read_q);
1795 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1796 return ret;
1797}
1798
/*
 * __msm_rpc_read() - dequeue one message as a fragment chain.
 *
 * Blocks (interruptibly unless MSM_RPC_UNINTERRUPTIBLE) until a
 * packet arrives, a forced wakeup is requested, a restart is flagged,
 * or @timeout (jiffies; negative = infinite) expires.  On success the
 * packet's fragment list is returned via @frag_ret (ownership passes
 * to the caller) and the packet length is returned.  If the message
 * is an incoming RPC CALL, a reply tag recording the caller's
 * identity is queued so msm_rpc_write() can route the reply back.
 *
 * @len: maximum acceptable packet length (callers pass (unsigned)-1
 *       for "unlimited").
 *
 * Returns >0 length, 0 on forced wakeup, -ENETRESET on restart,
 * -ETIMEDOUT, -ERESTARTSYS, -EAGAIN (spurious wake, queue empty),
 * -ETOOSMALL, or -ENOMEM if no reply tag could be allocated.
 */
int __msm_rpc_read(struct msm_rpc_endpoint *ept,
		   struct rr_fragment **frag_ret,
		   unsigned len, long timeout)
{
	struct rr_packet *pkt;
	struct rpc_request_hdr *rq;
	struct msm_rpc_reply *reply;
	unsigned long flags;
	int rc;

	rc = wait_for_restart_and_notify(ept);
	if (rc)
		return rc;

	IO("READ on ept %p\n", ept);
	/* four wait variants: {uninterruptible, interruptible} x
	 * {infinite, bounded}; each re-checks restart state on wake */
	if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
		if (timeout < 0) {
			wait_event(ept->wait_q, (ept_packet_available(ept) ||
						 ept->forced_wakeup ||
						 ept->restart_state));
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
		} else {
			rc = wait_event_timeout(
				ept->wait_q,
				(ept_packet_available(ept) ||
				 ept->forced_wakeup ||
				 ept->restart_state),
				timeout);
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
			if (rc == 0)
				return -ETIMEDOUT;
		}
	} else {
		if (timeout < 0) {
			rc = wait_event_interruptible(
				ept->wait_q, (ept_packet_available(ept) ||
					      ept->forced_wakeup ||
					      ept->restart_state));
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
			if (rc < 0)
				return rc;
		} else {
			rc = wait_event_interruptible_timeout(
				ept->wait_q,
				(ept_packet_available(ept) ||
				 ept->forced_wakeup ||
				 ept->restart_state),
				timeout);
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
			if (rc == 0)
				return -ETIMEDOUT;
		}
	}

	/* woken by msm_rpc_read_wakeup(): report "no data" */
	if (ept->forced_wakeup) {
		ept->forced_wakeup = 0;
		return 0;
	}

	spin_lock_irqsave(&ept->read_q_lock, flags);
	if (list_empty(&ept->read_q)) {
		spin_unlock_irqrestore(&ept->read_q_lock, flags);
		return -EAGAIN;
	}
	pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
	if (pkt->length > len) {
		/* packet stays queued for a retry with a bigger buffer */
		spin_unlock_irqrestore(&ept->read_q_lock, flags);
		return -ETOOSMALL;
	}
	list_del(&pkt->list);
	spin_unlock_irqrestore(&ept->read_q_lock, flags);

	rc = pkt->length;

	*frag_ret = pkt->first;
	rq = (void*) pkt->first->data;
	if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
		/* RPC CALL: remember who called so the reply written
		 * later via msm_rpc_write() can be routed back */
		reply = get_avail_reply(ept);
		if (!reply) {
			rc = -ENOMEM;
			/* NOTE(review): this path skips kfree(pkt) below,
			 * apparently leaking the rr_packet (and its
			 * fragments, which the caller will not free on
			 * error) -- confirm and fix separately. */
			goto read_release_lock;
		}
		reply->cid = pkt->hdr.src_cid;
		reply->pid = pkt->hdr.src_pid;
		reply->xid = rq->xid;
		reply->prog = rq->prog;
		reply->vers = rq->vers;
		set_pend_reply(ept, reply);
	}

	kfree(pkt);

	IO("READ on ept %p (%d bytes)\n", ept, rc);

 read_release_lock:

	/* release read wakelock after taking reply wakelock */
	spin_lock_irqsave(&ept->read_q_lock, flags);
	if (list_empty(&ept->read_q)) {
		D("%s: release read lock on ept %p\n", __func__, ept);
		wake_unlock(&ept->read_q_wake_lock);
	}
	spin_unlock_irqrestore(&ept->read_q_lock, flags);

	return rc;
}
1910
1911int msm_rpc_is_compatible_version(uint32_t server_version,
1912 uint32_t client_version)
1913{
1914
1915 if ((server_version & RPC_VERSION_MODE_MASK) !=
1916 (client_version & RPC_VERSION_MODE_MASK))
1917 return 0;
1918
1919 if (server_version & RPC_VERSION_MODE_MASK)
1920 return server_version == client_version;
1921
1922 return ((server_version & RPC_VERSION_MAJOR_MASK) ==
1923 (client_version & RPC_VERSION_MAJOR_MASK)) &&
1924 ((server_version & RPC_VERSION_MINOR_MASK) >=
1925 (client_version & RPC_VERSION_MINOR_MASK));
1926}
1927EXPORT_SYMBOL(msm_rpc_is_compatible_version);
1928
1929static struct rr_server *msm_rpc_get_server(uint32_t prog, uint32_t vers,
1930 uint32_t accept_compatible,
1931 uint32_t *found_prog)
1932{
1933 struct rr_server *server;
1934 unsigned long flags;
1935
1936 if (found_prog == NULL)
1937 return NULL;
1938
1939 *found_prog = 0;
1940 spin_lock_irqsave(&server_list_lock, flags);
1941 list_for_each_entry(server, &server_list, list) {
1942 if (server->prog == prog) {
1943 *found_prog = 1;
1944 spin_unlock_irqrestore(&server_list_lock, flags);
1945 if (accept_compatible) {
1946 if (msm_rpc_is_compatible_version(server->vers,
1947 vers)) {
1948 return server;
1949 } else {
1950 return NULL;
1951 }
1952 } else if (server->vers == vers) {
1953 return server;
1954 } else
1955 return NULL;
1956 }
1957 }
1958 spin_unlock_irqrestore(&server_list_lock, flags);
1959 return NULL;
1960}
1961
/*
 * __msm_rpc_connect() - open an endpoint bound to a server program.
 *
 * Waits (on newserver_wait, re-checking after every wake) for a
 * server matching @prog/@vers to be registered.  If a server for the
 * program exists but its version is unusable, fails immediately with
 * -EHOSTUNREACH.  Otherwise waits up to msm_rpc_connect_timeout_ms
 * per iteration (0 means do not wait at all), honouring signals.
 *
 * On success returns a new endpoint with dst_* bound to the server;
 * on failure returns ERR_PTR(-EHOSTUNREACH/-ERESTARTSYS/-ETIMEDOUT)
 * or the ERR_PTR from msm_rpc_open().
 */
static struct msm_rpc_endpoint *__msm_rpc_connect(uint32_t prog, uint32_t vers,
						  uint32_t accept_compatible,
						  unsigned flags)
{
	struct msm_rpc_endpoint *ept;
	struct rr_server *server;
	uint32_t found_prog;
	int rc = 0;

	DEFINE_WAIT(__wait);

	/* classic prepare_to_wait/finish_wait loop: register on the
	 * wait queue BEFORE checking the condition so a NEW_SERVER
	 * arriving in between cannot be missed */
	for (;;) {
		prepare_to_wait(&newserver_wait, &__wait,
				TASK_INTERRUPTIBLE);

		server = msm_rpc_get_server(prog, vers, accept_compatible,
					    &found_prog);
		if (server)
			break;

		/* program registered but version incompatible: no
		 * point waiting, it will not change */
		if (found_prog) {
			pr_info("%s: server not found %x:%x\n",
				__func__, prog, vers);
			rc = -EHOSTUNREACH;
			break;
		}

		if (msm_rpc_connect_timeout_ms == 0) {
			rc = -EHOSTUNREACH;
			break;
		}

		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		rc = schedule_timeout(
			msecs_to_jiffies(msm_rpc_connect_timeout_ms));
		if (!rc) {
			rc = -ETIMEDOUT;
			break;
		}
	}
	finish_wait(&newserver_wait, &__wait);

	if (!server)
		return ERR_PTR(rc);

	if (accept_compatible && (server->vers != vers)) {
		D("RPC Using new version 0x%08x(0x%08x) prog 0x%08x",
		  vers, server->vers, prog);
		D(" ... Continuing\n");
	}

	ept = msm_rpc_open();
	if (IS_ERR(ept))
		return ept;

	/* bind the endpoint to the server we found; note dst_vers is
	 * the server's (possibly newer compatible) version */
	ept->flags = flags;
	ept->dst_pid = server->pid;
	ept->dst_cid = server->cid;
	ept->dst_prog = cpu_to_be32(prog);
	ept->dst_vers = cpu_to_be32(server->vers);

	return ept;
}
2029
2030struct msm_rpc_endpoint *msm_rpc_connect_compatible(uint32_t prog,
2031 uint32_t vers, unsigned flags)
2032{
2033 return __msm_rpc_connect(prog, vers, 1, flags);
2034}
2035EXPORT_SYMBOL(msm_rpc_connect_compatible);
2036
2037struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog,
2038 uint32_t vers, unsigned flags)
2039{
2040 return __msm_rpc_connect(prog, vers, 0, flags);
2041}
2042EXPORT_SYMBOL(msm_rpc_connect);
2043
2044/* TODO: permission check? */
2045int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
2046 uint32_t prog, uint32_t vers)
2047{
2048 int rc;
2049 union rr_control_msg msg;
2050 struct rr_server *server;
2051 struct rpcrouter_xprt_info *xprt_info;
2052
2053 server = rpcrouter_create_server(ept->pid, ept->cid,
2054 prog, vers);
2055 if (!server)
2056 return -ENODEV;
2057
2058 msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
2059 msg.srv.pid = ept->pid;
2060 msg.srv.cid = ept->cid;
2061 msg.srv.prog = prog;
2062 msg.srv.vers = vers;
2063
2064 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
2065 ept->pid, ept->cid, prog, vers);
2066
2067 mutex_lock(&xprt_info_list_lock);
2068 list_for_each_entry(xprt_info, &xprt_info_list, list) {
2069 rc = rpcrouter_send_control_msg(xprt_info, &msg);
2070 if (rc < 0) {
2071 mutex_unlock(&xprt_info_list_lock);
2072 return rc;
2073 }
2074 }
2075 mutex_unlock(&xprt_info_list_lock);
2076 return 0;
2077}
2078
2079int msm_rpc_clear_netreset(struct msm_rpc_endpoint *ept)
2080{
2081 unsigned long flags;
2082 int rc = 1;
2083 spin_lock_irqsave(&ept->restart_lock, flags);
2084 if (ept->restart_state != RESTART_NORMAL) {
2085 ept->restart_state &= ~RESTART_PEND_NTFY;
2086 rc = 0;
2087 }
2088 spin_unlock_irqrestore(&ept->restart_lock, flags);
2089 return rc;
2090}
2091
2092/* TODO: permission check -- disallow unreg of somebody else's server */
2093int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
2094 uint32_t prog, uint32_t vers)
2095{
2096 struct rr_server *server;
2097 server = rpcrouter_lookup_server(prog, vers);
2098
2099 if (!server)
2100 return -ENOENT;
2101 rpcrouter_destroy_server(server);
2102 return 0;
2103}
2104
2105int msm_rpc_get_curr_pkt_size(struct msm_rpc_endpoint *ept)
2106{
2107 unsigned long flags;
2108 struct rr_packet *pkt;
2109 int rc = 0;
2110
2111 if (!ept)
2112 return -EINVAL;
2113
2114 if (!msm_rpc_clear_netreset(ept))
2115 return -ENETRESET;
2116
2117 spin_lock_irqsave(&ept->read_q_lock, flags);
2118 if (!list_empty(&ept->read_q)) {
2119 pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
2120 rc = pkt->length;
2121 }
2122 spin_unlock_irqrestore(&ept->read_q_lock, flags);
2123
2124 return rc;
2125}
2126
/*
 * msm_rpcrouter_close() - shut down every registered transport.
 *
 * For each transport: stop its reader, send a BYE control message,
 * close the underlying channel, unlink it from the list, and only
 * then -- with xprt_info_list_lock dropped, since flushing the
 * workqueue may block and the read worker takes other locks -- drain
 * and destroy its workqueue and wakelock before freeing it.  The
 * mutex is re-taken for each list iteration.
 */
int msm_rpcrouter_close(void)
{
	struct rpcrouter_xprt_info *xprt_info;
	union rr_control_msg ctl;

	ctl.cmd = RPCROUTER_CTRL_CMD_BYE;
	mutex_lock(&xprt_info_list_lock);
	while (!list_empty(&xprt_info_list)) {
		xprt_info = list_first_entry(&xprt_info_list,
					struct rpcrouter_xprt_info, list);
		/* tell the read worker not to requeue itself */
		xprt_info->abort_data_read = 1;
		wake_up(&xprt_info->read_wait);
		rpcrouter_send_control_msg(xprt_info, &ctl);
		xprt_info->xprt->close();
		list_del(&xprt_info->list);
		/* drop the mutex before flushing: the worker being
		 * flushed must not deadlock against us */
		mutex_unlock(&xprt_info_list_lock);

		flush_workqueue(xprt_info->workqueue);
		destroy_workqueue(xprt_info->workqueue);
		wake_lock_destroy(&xprt_info->wakelock);
		kfree(xprt_info);

		mutex_lock(&xprt_info_list_lock);
	}
	mutex_unlock(&xprt_info_list_lock);
	return 0;
}
2154
2155#if defined(CONFIG_DEBUG_FS)
2156static int dump_servers(char *buf, int max)
2157{
2158 int i = 0;
2159 unsigned long flags;
2160 struct rr_server *svr;
2161 const char *sym;
2162
2163 spin_lock_irqsave(&server_list_lock, flags);
2164 list_for_each_entry(svr, &server_list, list) {
2165 i += scnprintf(buf + i, max - i, "pdev_name: %s\n",
2166 svr->pdev_name);
2167 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", svr->pid);
2168 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", svr->cid);
2169 i += scnprintf(buf + i, max - i, "prog: 0x%08x", svr->prog);
2170 sym = smd_rpc_get_sym(svr->prog);
2171 if (sym)
2172 i += scnprintf(buf + i, max - i, " (%s)\n", sym);
2173 else
2174 i += scnprintf(buf + i, max - i, "\n");
2175 i += scnprintf(buf + i, max - i, "vers: 0x%08x\n", svr->vers);
2176 i += scnprintf(buf + i, max - i, "\n");
2177 }
2178 spin_unlock_irqrestore(&server_list_lock, flags);
2179
2180 return i;
2181}
2182
2183static int dump_remote_endpoints(char *buf, int max)
2184{
2185 int i = 0;
2186 unsigned long flags;
2187 struct rr_remote_endpoint *ept;
2188
2189 spin_lock_irqsave(&remote_endpoints_lock, flags);
2190 list_for_each_entry(ept, &remote_endpoints, list) {
2191 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
2192 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
2193 i += scnprintf(buf + i, max - i, "tx_quota_cntr: %i\n",
2194 ept->tx_quota_cntr);
2195 i += scnprintf(buf + i, max - i, "quota_restart_state: %i\n",
2196 ept->quota_restart_state);
2197 i += scnprintf(buf + i, max - i, "\n");
2198 }
2199 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
2200
2201 return i;
2202}
2203
2204static int dump_msm_rpc_endpoint(char *buf, int max)
2205{
2206 int i = 0;
2207 unsigned long flags;
2208 struct msm_rpc_reply *reply;
2209 struct msm_rpc_endpoint *ept;
2210 struct rr_packet *pkt;
2211 const char *sym;
2212
2213 spin_lock_irqsave(&local_endpoints_lock, flags);
2214 list_for_each_entry(ept, &local_endpoints, list) {
2215 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
2216 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
2217 i += scnprintf(buf + i, max - i, "dst_pid: 0x%08x\n",
2218 ept->dst_pid);
2219 i += scnprintf(buf + i, max - i, "dst_cid: 0x%08x\n",
2220 ept->dst_cid);
2221 i += scnprintf(buf + i, max - i, "dst_prog: 0x%08x",
2222 be32_to_cpu(ept->dst_prog));
2223 sym = smd_rpc_get_sym(be32_to_cpu(ept->dst_prog));
2224 if (sym)
2225 i += scnprintf(buf + i, max - i, " (%s)\n", sym);
2226 else
2227 i += scnprintf(buf + i, max - i, "\n");
2228 i += scnprintf(buf + i, max - i, "dst_vers: 0x%08x\n",
2229 be32_to_cpu(ept->dst_vers));
2230 i += scnprintf(buf + i, max - i, "reply_cnt: %i\n",
2231 ept->reply_cnt);
2232 i += scnprintf(buf + i, max - i, "restart_state: %i\n",
2233 ept->restart_state);
2234
2235 i += scnprintf(buf + i, max - i, "outstanding xids:\n");
2236 spin_lock(&ept->reply_q_lock);
2237 list_for_each_entry(reply, &ept->reply_pend_q, list)
2238 i += scnprintf(buf + i, max - i, " xid = %u\n",
2239 ntohl(reply->xid));
2240 spin_unlock(&ept->reply_q_lock);
2241
2242 i += scnprintf(buf + i, max - i, "complete unread packets:\n");
2243 spin_lock(&ept->read_q_lock);
2244 list_for_each_entry(pkt, &ept->read_q, list) {
2245 i += scnprintf(buf + i, max - i, " mid = %i\n",
2246 pkt->mid);
2247 i += scnprintf(buf + i, max - i, " length = %i\n",
2248 pkt->length);
2249 }
2250 spin_unlock(&ept->read_q_lock);
2251 i += scnprintf(buf + i, max - i, "\n");
2252 }
2253 spin_unlock_irqrestore(&local_endpoints_lock, flags);
2254
2255 return i;
2256}
2257
/* Scratch buffer shared by all debugfs dump files; debug_read() fills
 * it on every read.  NOTE(review): access is not serialized, so two
 * concurrent readers may interleave output — confirm this is an
 * acceptable limitation for a debug-only interface. */
#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];
2260
2261static ssize_t debug_read(struct file *file, char __user *buf,
2262 size_t count, loff_t *ppos)
2263{
2264 int (*fill)(char *buf, int max) = file->private_data;
2265 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
2266 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
2267}
2268
/* Stash the fill callback (passed as i_private by debug_create()) so
 * debug_read() can retrieve it from file->private_data. */
static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2274
/* One read-only fops shared by every dump file; the file-specific
 * behavior comes from the fill callback carried in inode->i_private. */
static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};
2279
/* Create one read-only debugfs file under @dent whose contents are
 * produced by @fill.  The callback is passed as the file's private
 * data and recovered in debug_open(). */
static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}
2286
2287static void debugfs_init(void)
2288{
2289 struct dentry *dent;
2290
2291 dent = debugfs_create_dir("smd_rpcrouter", 0);
2292 if (IS_ERR(dent))
2293 return;
2294
2295 debug_create("dump_msm_rpc_endpoints", 0444, dent,
2296 dump_msm_rpc_endpoint);
2297 debug_create("dump_remote_endpoints", 0444, dent,
2298 dump_remote_endpoints);
2299 debug_create("dump_servers", 0444, dent,
2300 dump_servers);
2301
2302}
2303
2304#else
2305static void debugfs_init(void) {}
2306#endif
2307
2308static int msm_rpcrouter_add_xprt(struct rpcrouter_xprt *xprt)
2309{
2310 struct rpcrouter_xprt_info *xprt_info;
2311
2312 D("Registering xprt %s to RPC Router\n", xprt->name);
2313
2314 xprt_info = kmalloc(sizeof(struct rpcrouter_xprt_info), GFP_KERNEL);
2315 if (!xprt_info)
2316 return -ENOMEM;
2317
2318 xprt_info->xprt = xprt;
2319 xprt_info->initialized = 0;
2320 xprt_info->remote_pid = -1;
2321 init_waitqueue_head(&xprt_info->read_wait);
2322 spin_lock_init(&xprt_info->lock);
2323 wake_lock_init(&xprt_info->wakelock,
2324 WAKE_LOCK_SUSPEND, xprt->name);
2325 xprt_info->need_len = 0;
2326 xprt_info->abort_data_read = 0;
2327 INIT_WORK(&xprt_info->read_data, do_read_data);
2328 INIT_LIST_HEAD(&xprt_info->list);
2329
2330 xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
2331 if (!xprt_info->workqueue) {
2332 kfree(xprt_info);
2333 return -ENOMEM;
2334 }
2335
2336 if (!strcmp(xprt->name, "rpcrouter_loopback_xprt")) {
2337 xprt_info->remote_pid = RPCROUTER_PID_LOCAL;
2338 xprt_info->initialized = 1;
2339 } else {
2340 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT);
2341 }
2342
2343 mutex_lock(&xprt_info_list_lock);
2344 list_add_tail(&xprt_info->list, &xprt_info_list);
2345 mutex_unlock(&xprt_info_list_lock);
2346
2347 queue_work(xprt_info->workqueue, &xprt_info->read_data);
2348
2349 xprt->priv = xprt_info;
2350
2351 return 0;
2352}
2353
/*
 * msm_rpcrouter_remove_xprt() - Unregister a transport from the router.
 *
 * Aborts the transport's read worker, unlinks it from xprt_info_list,
 * then tears down its workqueue, wakelock and memory.  The exact
 * mutex/spinlock release order below is deliberate — see the in-body
 * comment about the race with rpcrouter_get_xprt_info(); do not
 * reorder the unlock calls.
 */
static void msm_rpcrouter_remove_xprt(struct rpcrouter_xprt *xprt)
{
	struct rpcrouter_xprt_info *xprt_info;
	unsigned long flags;

	if (xprt && xprt->priv) {
		xprt_info = xprt->priv;

		/* abort rr_read thread */
		xprt_info->abort_data_read = 1;
		wake_up(&xprt_info->read_wait);

		/* remove xprt from available xprts */
		mutex_lock(&xprt_info_list_lock);
		spin_lock_irqsave(&xprt_info->lock, flags);
		list_del(&xprt_info->list);

		/* unlock the spinlock last to avoid a race
		 * condition with rpcrouter_get_xprt_info
		 * in msm_rpc_write_pkt in which the
		 * xprt is returned from rpcrouter_get_xprt_info
		 * and then deleted here. */
		mutex_unlock(&xprt_info_list_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);

		/* cleanup workqueues and wakelocks */
		flush_workqueue(xprt_info->workqueue);
		destroy_workqueue(xprt_info->workqueue);
		wake_lock_destroy(&xprt_info->wakelock);

		/* free memory */
		xprt->priv = 0;
		kfree(xprt_info);
	}
}
2390
/* Deferred transport open/close request.  Allocated in
 * msm_rpcrouter_xprt_notify(), queued on rpcrouter_workqueue, and
 * freed by the worker (xprt_open_worker/xprt_close_worker) that
 * consumes it. */
struct rpcrouter_xprt_work {
	struct rpcrouter_xprt *xprt;
	struct work_struct work;
};
2395
2396static void xprt_open_worker(struct work_struct *work)
2397{
2398 struct rpcrouter_xprt_work *xprt_work =
2399 container_of(work, struct rpcrouter_xprt_work, work);
2400
2401 msm_rpcrouter_add_xprt(xprt_work->xprt);
2402
2403 kfree(xprt_work);
2404}
2405
2406static void xprt_close_worker(struct work_struct *work)
2407{
2408 struct rpcrouter_xprt_work *xprt_work =
2409 container_of(work, struct rpcrouter_xprt_work, work);
2410
2411 modem_reset_cleanup(xprt_work->xprt->priv);
2412 msm_rpcrouter_remove_xprt(xprt_work->xprt);
2413
2414 if (atomic_dec_return(&pending_close_count) == 0)
2415 wake_up(&subsystem_restart_wait);
2416
2417 kfree(xprt_work);
2418}
2419
2420void msm_rpcrouter_xprt_notify(struct rpcrouter_xprt *xprt, unsigned event)
2421{
2422 struct rpcrouter_xprt_info *xprt_info;
2423 struct rpcrouter_xprt_work *xprt_work;
2424
2425 /* Workqueue is created in init function which works for all existing
2426 * clients. If this fails in the future, then it will need to be
2427 * created earlier. */
2428 BUG_ON(!rpcrouter_workqueue);
2429
2430 switch (event) {
2431 case RPCROUTER_XPRT_EVENT_OPEN:
2432 D("open event for '%s'\n", xprt->name);
2433 xprt_work = kmalloc(sizeof(struct rpcrouter_xprt_work),
2434 GFP_ATOMIC);
2435 xprt_work->xprt = xprt;
2436 INIT_WORK(&xprt_work->work, xprt_open_worker);
2437 queue_work(rpcrouter_workqueue, &xprt_work->work);
2438 break;
2439
2440 case RPCROUTER_XPRT_EVENT_CLOSE:
2441 D("close event for '%s'\n", xprt->name);
2442
2443 atomic_inc(&pending_close_count);
2444
2445 xprt_work = kmalloc(sizeof(struct rpcrouter_xprt_work),
2446 GFP_ATOMIC);
2447 xprt_work->xprt = xprt;
2448 INIT_WORK(&xprt_work->work, xprt_close_worker);
2449 queue_work(rpcrouter_workqueue, &xprt_work->work);
2450 break;
2451 }
2452
2453 xprt_info = xprt->priv;
2454 if (xprt_info) {
2455 /* Check read_avail even for OPEN event to handle missed
2456 DATA events while processing the OPEN event*/
2457 if (xprt->read_avail() >= xprt_info->need_len)
2458 wake_lock(&xprt_info->wakelock);
2459 wake_up(&xprt_info->read_wait);
2460 }
2461}
2462
/* Subsystem-restart notifier: used to hold off modem power-up until
 * all pending RPC transport close work has drained (see
 * modem_restart_notifier_cb below). */
static int modem_restart_notifier_cb(struct notifier_block *this,
				     unsigned long code,
				     void *data);
static struct notifier_block nb = {
	.notifier_call = modem_restart_notifier_cb,
};
2469
2470static int modem_restart_notifier_cb(struct notifier_block *this,
2471 unsigned long code,
2472 void *data)
2473{
2474 switch (code) {
2475 case SUBSYS_BEFORE_SHUTDOWN:
2476 D("%s: SUBSYS_BEFORE_SHUTDOWN\n", __func__);
2477 break;
2478
2479 case SUBSYS_BEFORE_POWERUP:
2480 D("%s: waiting for RPC restart to complete\n", __func__);
2481 wait_event(subsystem_restart_wait,
2482 atomic_read(&pending_close_count) == 0);
2483 D("%s: finished restart wait\n", __func__);
2484 break;
2485
2486 default:
2487 break;
2488 }
2489
2490 return NOTIFY_DONE;
2491}
2492
/* Handle from subsys_notif_register_notifier(); kept but never
 * unregistered (the router is not unloadable in practice).
 * NOTE(review): the return value is stored unchecked — if it can be
 * an ERR_PTR on failure, confirm nothing ever dereferences it. */
static void *restart_notifier_handle;
static __init int modem_restart_late_init(void)
{
	restart_notifier_handle = subsys_notif_register_notifier("modem", &nb);
	return 0;
}
late_initcall(modem_restart_late_init);
2500
2501static int __init rpcrouter_init(void)
2502{
2503 int ret;
2504
2505 msm_rpc_connect_timeout_ms = 0;
2506 smd_rpcrouter_debug_mask |= SMEM_LOG;
2507 debugfs_init();
2508
2509
2510 /* Initialize what we need to start processing */
2511 rpcrouter_workqueue =
2512 create_singlethread_workqueue("rpcrouter");
2513 if (!rpcrouter_workqueue) {
2514 msm_rpcrouter_exit_devices();
2515 return -ENOMEM;
2516 }
2517
2518 init_waitqueue_head(&newserver_wait);
2519 init_waitqueue_head(&subsystem_restart_wait);
2520
2521 ret = msm_rpcrouter_init_devices();
2522 if (ret < 0)
2523 return ret;
2524
2525 return ret;
2526}
2527
2528module_init(rpcrouter_init);
2529MODULE_DESCRIPTION("MSM RPC Router");
2530MODULE_AUTHOR("San Mehat <san@android.com>");
2531MODULE_LICENSE("GPL");