blob: ff68d813d3dc3c84c997d955f567310a1fcc9b00 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/smd_rpcrouter.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Arun Kumar Neelakantam0975c602012-10-16 23:13:09 +05304 * Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 * Author: San Mehat <san@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* TODO: handle cases where smd_write() will tempfail due to full fifo */
19/* TODO: thread priority? schedule a work to bump it? */
20/* TODO: maybe make server_list_lock a mutex */
21/* TODO: pool fragments to avoid kmalloc/kfree churn */
22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/string.h>
27#include <linux/errno.h>
28#include <linux/cdev.h>
29#include <linux/init.h>
30#include <linux/device.h>
31#include <linux/types.h>
32#include <linux/delay.h>
33#include <linux/fs.h>
34#include <linux/err.h>
35#include <linux/sched.h>
36#include <linux/poll.h>
37#include <linux/wakelock.h>
38#include <asm/uaccess.h>
39#include <asm/byteorder.h>
40#include <linux/platform_device.h>
41#include <linux/uaccess.h>
42#include <linux/debugfs.h>
Arun Kumar Neelakantam0975c602012-10-16 23:13:09 +053043#include <linux/reboot.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070044
45#include <asm/byteorder.h>
46
47#include <mach/msm_smd.h>
48#include <mach/smem_log.h>
49#include <mach/subsystem_notif.h>
50
51#include "smd_rpcrouter.h"
52#include "modem_notifier.h"
53#include "smd_rpc_sym.h"
54#include "smd_private.h"
55
56enum {
57 SMEM_LOG = 1U << 0,
58 RTR_DBG = 1U << 1,
59 R2R_MSG = 1U << 2,
60 R2R_RAW = 1U << 3,
61 RPC_MSG = 1U << 4,
62 NTFY_MSG = 1U << 5,
63 RAW_PMR = 1U << 6,
64 RAW_PMW = 1U << 7,
65 R2R_RAW_HDR = 1U << 8,
66};
67static int msm_rpc_connect_timeout_ms;
68module_param_named(connect_timeout, msm_rpc_connect_timeout_ms,
69 int, S_IRUGO | S_IWUSR | S_IWGRP);
70
71static int smd_rpcrouter_debug_mask;
72module_param_named(debug_mask, smd_rpcrouter_debug_mask,
73 int, S_IRUGO | S_IWUSR | S_IWGRP);
74
75#define DIAG(x...) printk(KERN_ERR "[RR] ERROR " x)
76
77#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
78#define D(x...) do { \
79if (smd_rpcrouter_debug_mask & RTR_DBG) \
80 printk(KERN_ERR x); \
81} while (0)
82
83#define RR(x...) do { \
84if (smd_rpcrouter_debug_mask & R2R_MSG) \
85 printk(KERN_ERR "[RR] "x); \
86} while (0)
87
88#define RAW(x...) do { \
89if (smd_rpcrouter_debug_mask & R2R_RAW) \
90 printk(KERN_ERR "[RAW] "x); \
91} while (0)
92
93#define RAW_HDR(x...) do { \
94if (smd_rpcrouter_debug_mask & R2R_RAW_HDR) \
95 printk(KERN_ERR "[HDR] "x); \
96} while (0)
97
98#define RAW_PMR(x...) do { \
99if (smd_rpcrouter_debug_mask & RAW_PMR) \
100 printk(KERN_ERR "[PMR] "x); \
101} while (0)
102
103#define RAW_PMR_NOMASK(x...) do { \
104 printk(KERN_ERR "[PMR] "x); \
105} while (0)
106
107#define RAW_PMW(x...) do { \
108if (smd_rpcrouter_debug_mask & RAW_PMW) \
109 printk(KERN_ERR "[PMW] "x); \
110} while (0)
111
112#define RAW_PMW_NOMASK(x...) do { \
113 printk(KERN_ERR "[PMW] "x); \
114} while (0)
115
116#define IO(x...) do { \
117if (smd_rpcrouter_debug_mask & RPC_MSG) \
118 printk(KERN_ERR "[RPC] "x); \
119} while (0)
120
121#define NTFY(x...) do { \
122if (smd_rpcrouter_debug_mask & NTFY_MSG) \
123 printk(KERN_ERR "[NOTIFY] "x); \
124} while (0)
125#else
126#define D(x...) do { } while (0)
127#define RR(x...) do { } while (0)
128#define RAW(x...) do { } while (0)
129#define RAW_HDR(x...) do { } while (0)
130#define RAW_PMR(x...) do { } while (0)
131#define RAW_PMR_NO_MASK(x...) do { } while (0)
132#define RAW_PMW(x...) do { } while (0)
133#define RAW_PMW_NO_MASK(x...) do { } while (0)
134#define IO(x...) do { } while (0)
135#define NTFY(x...) do { } while (0)
136#endif
137
138
139static LIST_HEAD(local_endpoints);
140static LIST_HEAD(remote_endpoints);
141
142static LIST_HEAD(server_list);
143
144static wait_queue_head_t newserver_wait;
145static wait_queue_head_t subsystem_restart_wait;
146
147static DEFINE_SPINLOCK(local_endpoints_lock);
148static DEFINE_SPINLOCK(remote_endpoints_lock);
149static DEFINE_SPINLOCK(server_list_lock);
150
151static LIST_HEAD(rpc_board_dev_list);
152static DEFINE_SPINLOCK(rpc_board_dev_list_lock);
153
154static struct workqueue_struct *rpcrouter_workqueue;
155
156static atomic_t next_xid = ATOMIC_INIT(1);
157static atomic_t pm_mid = ATOMIC_INIT(1);
158
159static void do_read_data(struct work_struct *work);
160static void do_create_pdevs(struct work_struct *work);
161static void do_create_rpcrouter_pdev(struct work_struct *work);
Arun Kumar Neelakantam0975c602012-10-16 23:13:09 +0530162static int msm_rpcrouter_close(void);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700163
164static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
165static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
166
167#define RR_STATE_IDLE 0
168#define RR_STATE_HEADER 1
169#define RR_STATE_BODY 2
170#define RR_STATE_ERROR 3
171
172/* State for remote ep following restart */
173#define RESTART_QUOTA_ABORT 1
174
/*
 * Incremental reassembly state for one incoming router message.
 * NOTE(review): 'pkt' appears to be the packet under assembly and
 * 'ptr' the current write cursor -- confirm against the rx path,
 * which is not fully visible here.
 */
struct rr_context {
	struct rr_packet *pkt;
	uint8_t *ptr;
	uint32_t state; /* current assembly state (RR_STATE_*) */
	uint32_t count; /* bytes needed in this state */
};
181
182struct rr_context the_rr_context;
183
/*
 * Node on rpc_board_dev_list: a board-declared RPC device waiting for
 * its server program to appear (see rpcrouter_register_board_dev()).
 */
struct rpc_board_dev_info {
	struct list_head list;	/* node in rpc_board_dev_list */

	struct rpc_board_dev *dev;	/* board device to register */
};
189
/* Platform device registered once the remote router says HELLO. */
static struct platform_device rpcrouter_pdev = {
	.name		= "oncrpc_router",
	.id		= -1,	/* only one instance */
};
194
/* Per-transport state for one link to a remote processor. */
struct rpcrouter_xprt_info {
	struct list_head list;		/* node in xprt_info_list */

	struct rpcrouter_xprt *xprt;	/* underlying transport ops */

	int remote_pid;			/* peer processor id; -1 until learned
					 * from the first routed header */
	uint32_t initialized;		/* set once the HELLO handshake done */
	wait_queue_head_t read_wait;	/* rr_read() blocks here for data */
	struct wake_lock wakelock;	/* released in rr_read() while waiting */
	spinlock_t lock;		/* serializes xprt read/write calls */
	uint32_t need_len;		/* bytes a blocked reader still needs */
	struct work_struct read_data;	/* rx work item (do_read_data) */
	struct workqueue_struct *workqueue;
	int abort_data_read;		/* set to abort a blocked rr_read() */
	unsigned char r2r_buf[RPCROUTER_MSGSIZE_MAX]; /* control-msg payload */
};
211
212static LIST_HEAD(xprt_info_list);
213static DEFINE_MUTEX(xprt_info_list_lock);
214
215DECLARE_COMPLETION(rpc_remote_router_up);
216static atomic_t pending_close_count = ATOMIC_INIT(0);
217
Arun Kumar Neelakantam0975c602012-10-16 23:13:09 +0530218static int msm_rpc_reboot_call(struct notifier_block *this,
219 unsigned long code, void *_cmd)
220{
221 switch (code) {
222 case SYS_RESTART:
223 case SYS_HALT:
224 case SYS_POWER_OFF:
225 msm_rpcrouter_close();
226 break;
227 }
228 return NOTIFY_DONE;
229}
230
/* High priority (100) so the router closes before default (0) handlers. */
static struct notifier_block msm_rpc_reboot_notifier = {
	.notifier_call = msm_rpc_reboot_call,
	.priority = 100
};
235
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236/*
237 * Search for transport (xprt) that matches the provided PID.
238 *
239 * Note: The calling function must ensure that the mutex
240 * xprt_info_list_lock is locked when this function
241 * is called.
242 *
243 * @remote_pid Remote PID for the transport
244 *
245 * @returns Pointer to transport or NULL if not found
246 */
247static struct rpcrouter_xprt_info *rpcrouter_get_xprt_info(uint32_t remote_pid)
248{
249 struct rpcrouter_xprt_info *xprt_info;
250
251 list_for_each_entry(xprt_info, &xprt_info_list, list) {
252 if (xprt_info->remote_pid == remote_pid)
253 return xprt_info;
254 }
255 return NULL;
256}
257
/*
 * Send a router-to-router control message over a transport.
 *
 * A local-loopback transport is a no-op.  Only HELLO may be sent
 * before the transport handshake completes; anything else on an
 * uninitialized transport returns -EINVAL.  If the transport fifo
 * cannot hold header + payload, polls every 250ms (dropping the lock
 * while sleeping) until it can -- so this may block; do not call from
 * atomic context.
 *
 * Returns 0 on success, -EINVAL if sent before initialization.
 */
static int rpcrouter_send_control_msg(struct rpcrouter_xprt_info *xprt_info,
				      union rr_control_msg *msg)
{
	struct rr_header hdr;
	unsigned long flags = 0;
	int need;

	if (xprt_info->remote_pid == RPCROUTER_PID_LOCAL)
		return 0;

	if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) &&
	    !xprt_info->initialized) {
		printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
		       "router not initialized\n");
		return -EINVAL;
	}

	/* control messages are addressed to the remote router itself */
	hdr.version = RPCROUTER_VERSION;
	hdr.type = msg->cmd;
	hdr.src_pid = RPCROUTER_PID_LOCAL;
	hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
	hdr.confirm_rx = 0;
	hdr.size = sizeof(*msg);
	hdr.dst_pid = xprt_info->remote_pid;
	hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;

	/* TODO: what if channel is full? */

	need = sizeof(hdr) + hdr.size;
	spin_lock_irqsave(&xprt_info->lock, flags);
	while (xprt_info->xprt->write_avail() < need) {
		/* drop the lock so the fifo can drain while we sleep */
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		msleep(250);
		spin_lock_irqsave(&xprt_info->lock, flags);
	}
	/* header and payload written under one lock hold so they stay
	 * contiguous in the fifo */
	xprt_info->xprt->write(&hdr, sizeof(hdr), HEADER);
	xprt_info->xprt->write(msg, hdr.size, PAYLOAD);
	spin_unlock_irqrestore(&xprt_info->lock, flags);

	return 0;
}
299
/*
 * Flush all router state tied to a transport whose remote processor
 * has reset.
 *
 * For every local endpoint bound to the dead remote pid:
 *  - invoke the client's restart-teardown callback,
 *  - free all pending and available replies,
 *  - mark the endpoint RESTART_PEND_NTFY_SVR,
 *  - free partially-assembled and fully-read packets,
 *  - wake any reader blocked on the endpoint.
 * Then abort quota waits on every remote endpoint so writers blocked
 * on flow control can bail out.
 *
 * Lock order: local_endpoints_lock (irqsave), then per-endpoint
 * reply_q_lock / restart_lock / incomplete_lock / read_q_lock nested
 * inside it.
 */
static void modem_reset_cleanup(struct rpcrouter_xprt_info *xprt_info)
{
	struct msm_rpc_endpoint *ept;
	struct rr_remote_endpoint *r_ept;
	struct rr_packet *pkt, *tmp_pkt;
	struct rr_fragment *frag, *next;
	struct msm_rpc_reply *reply, *reply_tmp;
	unsigned long flags;

	if (!xprt_info) {
		pr_err("%s: Invalid xprt_info\n", __func__);
		return;
	}
	spin_lock_irqsave(&local_endpoints_lock, flags);
	/* remove all partial packets received */
	list_for_each_entry(ept, &local_endpoints, list) {
		RR("%s EPT DST PID %x, remote_pid:%d\n", __func__,
			ept->dst_pid, xprt_info->remote_pid);

		/* only endpoints talking to the restarted processor */
		if (xprt_info->remote_pid != ept->dst_pid)
			continue;

		D("calling teardown cb %p\n", ept->cb_restart_teardown);
		if (ept->cb_restart_teardown)
			ept->cb_restart_teardown(ept->client_data);
		/* remember to issue a setup notification when the
		 * remote side comes back (modem_reset_startup) */
		ept->do_setup_notif = 1;

		/* remove replies */
		spin_lock(&ept->reply_q_lock);
		list_for_each_entry_safe(reply, reply_tmp,
						&ept->reply_pend_q, list) {
			list_del(&reply->list);
			kfree(reply);
		}
		list_for_each_entry_safe(reply, reply_tmp,
						&ept->reply_avail_q, list) {
			list_del(&reply->list);
			kfree(reply);
		}
		ept->reply_cnt = 0;
		spin_unlock(&ept->reply_q_lock);

		/* Set restart state for local ep */
		RR("EPT:0x%p, State %d RESTART_PEND_NTFY_SVR "
			"PROG:0x%08x VERS:0x%08x\n",
			ept, ept->restart_state,
			be32_to_cpu(ept->dst_prog),
			be32_to_cpu(ept->dst_vers));
		spin_lock(&ept->restart_lock);
		ept->restart_state = RESTART_PEND_NTFY_SVR;

		/* remove incomplete packets */
		spin_lock(&ept->incomplete_lock);
		list_for_each_entry_safe(pkt, tmp_pkt,
						&ept->incomplete, list) {
			list_del(&pkt->list);
			frag = pkt->first;
			/* free the fragment chain of each packet */
			while (frag != NULL) {
				next = frag->next;
				kfree(frag);
				frag = next;
			}
			kfree(pkt);
		}
		spin_unlock(&ept->incomplete_lock);

		/* remove all completed packets waiting to be read */
		spin_lock(&ept->read_q_lock);
		list_for_each_entry_safe(pkt, tmp_pkt, &ept->read_q,
						list) {
			list_del(&pkt->list);
			frag = pkt->first;
			while (frag != NULL) {
				next = frag->next;
				kfree(frag);
				frag = next;
			}
			kfree(pkt);
		}
		spin_unlock(&ept->read_q_lock);

		spin_unlock(&ept->restart_lock);
		/* unblock any reader sleeping on this endpoint */
		wake_up(&ept->wait_q);
	}

	spin_unlock_irqrestore(&local_endpoints_lock, flags);

	/* Unblock endpoints waiting for quota ack*/
	spin_lock_irqsave(&remote_endpoints_lock, flags);
	list_for_each_entry(r_ept, &remote_endpoints, list) {
		spin_lock(&r_ept->quota_lock);
		r_ept->quota_restart_state = RESTART_QUOTA_ABORT;
		RR("Set STATE_PENDING PID:0x%08x CID:0x%08x \n", r_ept->pid,
			r_ept->cid);
		spin_unlock(&r_ept->quota_lock);
		wake_up(&r_ept->quota_wait);
	}
	spin_unlock_irqrestore(&remote_endpoints_lock, flags);
}
399
400static void modem_reset_startup(struct rpcrouter_xprt_info *xprt_info)
401{
402 struct msm_rpc_endpoint *ept;
403 unsigned long flags;
404
405 spin_lock_irqsave(&local_endpoints_lock, flags);
406
407 /* notify all endpoints that we are coming back up */
408 list_for_each_entry(ept, &local_endpoints, list) {
409 RR("%s EPT DST PID %x, remote_pid:%d\n", __func__,
410 ept->dst_pid, xprt_info->remote_pid);
411
412 if (xprt_info->remote_pid != ept->dst_pid)
413 continue;
414
415 D("calling setup cb %d:%p\n", ept->do_setup_notif,
416 ept->cb_restart_setup);
417 if (ept->do_setup_notif && ept->cb_restart_setup)
418 ept->cb_restart_setup(ept->client_data);
419 ept->do_setup_notif = 0;
420 }
421
422 spin_unlock_irqrestore(&local_endpoints_lock, flags);
423}
424
/*
 * Blocks and waits for endpoint if a reset is in progress.
 *
 * @returns
 *	ENETRESET Reset is in progress and a notification needed
 *	ERESTARTSYS Signal occurred
 *	0 Reset is not in progress
 */
static int wait_for_restart_and_notify(struct msm_rpc_endpoint *ept)
{
	unsigned long flags;
	int ret = 0;
	DEFINE_WAIT(__wait);

	for (;;) {
		/* register on the wait queue before testing state to
		 * avoid missing a wake-up between test and sleep */
		prepare_to_wait(&ept->restart_wait, &__wait,
				TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&ept->restart_lock, flags);
		if (ept->restart_state == RESTART_NORMAL) {
			/* no reset in progress */
			spin_unlock_irqrestore(&ept->restart_lock, flags);
			break;
		} else if (ept->restart_state & RESTART_PEND_NTFY) {
			/* consume the pending-notify flag exactly once
			 * so only one waiter reports -ENETRESET */
			ept->restart_state &= ~RESTART_PEND_NTFY;
			spin_unlock_irqrestore(&ept->restart_lock, flags);
			ret = -ENETRESET;
			break;
		}
		/* interruptible unless the endpoint opted out */
		if (signal_pending(current) &&
		    ((!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))) {
			spin_unlock_irqrestore(&ept->restart_lock, flags);
			ret = -ERESTARTSYS;
			break;
		}
		spin_unlock_irqrestore(&ept->restart_lock, flags);
		schedule();
	}
	finish_wait(&ept->restart_wait, &__wait);
	return ret;
}
465
466static struct rr_server *rpcrouter_create_server(uint32_t pid,
467 uint32_t cid,
468 uint32_t prog,
469 uint32_t ver)
470{
471 struct rr_server *server;
472 unsigned long flags;
473 int rc;
474
475 server = kmalloc(sizeof(struct rr_server), GFP_KERNEL);
476 if (!server)
477 return ERR_PTR(-ENOMEM);
478
479 memset(server, 0, sizeof(struct rr_server));
480 server->pid = pid;
481 server->cid = cid;
482 server->prog = prog;
483 server->vers = ver;
484
485 spin_lock_irqsave(&server_list_lock, flags);
486 list_add_tail(&server->list, &server_list);
487 spin_unlock_irqrestore(&server_list_lock, flags);
488
489 rc = msm_rpcrouter_create_server_cdev(server);
490 if (rc < 0)
491 goto out_fail;
492
493 return server;
494out_fail:
495 spin_lock_irqsave(&server_list_lock, flags);
496 list_del(&server->list);
497 spin_unlock_irqrestore(&server_list_lock, flags);
498 kfree(server);
499 return ERR_PTR(rc);
500}
501
502static void rpcrouter_destroy_server(struct rr_server *server)
503{
504 unsigned long flags;
505
506 spin_lock_irqsave(&server_list_lock, flags);
507 list_del(&server->list);
508 spin_unlock_irqrestore(&server_list_lock, flags);
509 device_destroy(msm_rpcrouter_class, server->device_number);
510 kfree(server);
511}
512
513int msm_rpc_add_board_dev(struct rpc_board_dev *devices, int num)
514{
515 unsigned long flags;
516 struct rpc_board_dev_info *board_info;
517 int i;
518
519 for (i = 0; i < num; i++) {
520 board_info = kzalloc(sizeof(struct rpc_board_dev_info),
521 GFP_KERNEL);
522 if (!board_info)
523 return -ENOMEM;
524
525 board_info->dev = &devices[i];
526 D("%s: adding program %x\n", __func__, board_info->dev->prog);
527 spin_lock_irqsave(&rpc_board_dev_list_lock, flags);
528 list_add_tail(&board_info->list, &rpc_board_dev_list);
529 spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
530 }
531
532 return 0;
533}
534EXPORT_SYMBOL(msm_rpc_add_board_dev);
535
/*
 * If a board device was queued for this server's program, unlink it
 * and register its platform device.
 *
 * The list lock is dropped before platform_device_register() (which
 * may sleep); since the matching entry was already unlinked and the
 * iteration is no longer valid, the function returns instead of
 * resuming the scan -- at most one device is registered per call.
 */
static void rpcrouter_register_board_dev(struct rr_server *server)
{
	struct rpc_board_dev_info *board_info;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&rpc_board_dev_list_lock, flags);
	list_for_each_entry(board_info, &rpc_board_dev_list, list) {
		if (server->prog == board_info->dev->prog) {
			D("%s: registering device %x\n",
			  __func__, board_info->dev->prog);
			list_del(&board_info->list);
			spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
			rc = platform_device_register(&board_info->dev->pdev);
			if (rc)
				pr_err("%s: board dev register failed %d\n",
				       __func__, rc);
			kfree(board_info);
			return;
		}
	}
	spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
}
559
560static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
561{
562 struct rr_server *server;
563 unsigned long flags;
564
565 spin_lock_irqsave(&server_list_lock, flags);
566 list_for_each_entry(server, &server_list, list) {
567 if (server->prog == prog
568 && server->vers == ver) {
569 spin_unlock_irqrestore(&server_list_lock, flags);
570 return server;
571 }
572 }
573 spin_unlock_irqrestore(&server_list_lock, flags);
574 return NULL;
575}
576
577static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
578{
579 struct rr_server *server;
580 unsigned long flags;
581
582 spin_lock_irqsave(&server_list_lock, flags);
583 list_for_each_entry(server, &server_list, list) {
584 if (server->device_number == dev) {
585 spin_unlock_irqrestore(&server_list_lock, flags);
586 return server;
587 }
588 }
589 spin_unlock_irqrestore(&server_list_lock, flags);
590 return NULL;
591}
592
593struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
594{
595 struct msm_rpc_endpoint *ept;
596 unsigned long flags;
597
598 ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
599 if (!ept)
600 return NULL;
601 memset(ept, 0, sizeof(struct msm_rpc_endpoint));
602 ept->cid = (uint32_t) ept;
603 ept->pid = RPCROUTER_PID_LOCAL;
604 ept->dev = dev;
605
606 if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
607 struct rr_server *srv;
608 /*
609 * This is a userspace client which opened
610 * a program/ver devicenode. Bind the client
611 * to that destination
612 */
613 srv = rpcrouter_lookup_server_by_dev(dev);
614 /* TODO: bug? really? */
615 BUG_ON(!srv);
616
617 ept->dst_pid = srv->pid;
618 ept->dst_cid = srv->cid;
619 ept->dst_prog = cpu_to_be32(srv->prog);
620 ept->dst_vers = cpu_to_be32(srv->vers);
621 } else {
622 /* mark not connected */
623 ept->dst_pid = 0xffffffff;
624 }
625
626 init_waitqueue_head(&ept->wait_q);
627 INIT_LIST_HEAD(&ept->read_q);
628 spin_lock_init(&ept->read_q_lock);
629 INIT_LIST_HEAD(&ept->reply_avail_q);
630 INIT_LIST_HEAD(&ept->reply_pend_q);
631 spin_lock_init(&ept->reply_q_lock);
632 spin_lock_init(&ept->restart_lock);
633 init_waitqueue_head(&ept->restart_wait);
634 ept->restart_state = RESTART_NORMAL;
635 wake_lock_init(&ept->read_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_read");
636 wake_lock_init(&ept->reply_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_reply");
637 INIT_LIST_HEAD(&ept->incomplete);
638 spin_lock_init(&ept->incomplete_lock);
639
640 spin_lock_irqsave(&local_endpoints_lock, flags);
641 list_add_tail(&ept->list, &local_endpoints);
642 spin_unlock_irqrestore(&local_endpoints_lock, flags);
643 return ept;
644}
645
/*
 * Unlink and free a local endpoint.
 *
 * Broadcasts REMOVE_CLIENT to every transport (unless the endpoint is
 * the router port itself, marked by dst_pid == 0xffffffff), frees all
 * queued replies, destroys the wake locks and releases the endpoint.
 *
 * Returns 0 on success, or the first control-message send error.
 * NOTE(review): on a send error the endpoint has already been removed
 * from local_endpoints but is NOT freed -- confirm the caller handles
 * that case before changing this.
 */
int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
{
	int rc;
	union rr_control_msg msg;
	struct msm_rpc_reply *reply, *reply_tmp;
	unsigned long flags;
	struct rpcrouter_xprt_info *xprt_info;

	/* Endpoint with dst_pid = 0xffffffff corresponds to that of
	** router port. So don't send a REMOVE CLIENT message while
	** destroying it.*/
	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_del(&ept->list);
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
	if (ept->dst_pid != 0xffffffff) {
		msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
		msg.cli.pid = ept->pid;
		msg.cli.cid = ept->cid;

		RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
		/* tell every remote router this client is gone */
		mutex_lock(&xprt_info_list_lock);
		list_for_each_entry(xprt_info, &xprt_info_list, list) {
			rc = rpcrouter_send_control_msg(xprt_info, &msg);
			if (rc < 0) {
				mutex_unlock(&xprt_info_list_lock);
				return rc;
			}
		}
		mutex_unlock(&xprt_info_list_lock);
	}

	/* Free replies */
	spin_lock_irqsave(&ept->reply_q_lock, flags);
	list_for_each_entry_safe(reply, reply_tmp, &ept->reply_pend_q, list) {
		list_del(&reply->list);
		kfree(reply);
	}
	list_for_each_entry_safe(reply, reply_tmp, &ept->reply_avail_q, list) {
		list_del(&reply->list);
		kfree(reply);
	}
	spin_unlock_irqrestore(&ept->reply_q_lock, flags);

	wake_lock_destroy(&ept->read_q_wake_lock);
	wake_lock_destroy(&ept->reply_q_wake_lock);
	kfree(ept);
	return 0;
}
694
695static int rpcrouter_create_remote_endpoint(uint32_t pid, uint32_t cid)
696{
697 struct rr_remote_endpoint *new_c;
698 unsigned long flags;
699
700 new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
701 if (!new_c)
702 return -ENOMEM;
703 memset(new_c, 0, sizeof(struct rr_remote_endpoint));
704
705 new_c->cid = cid;
706 new_c->pid = pid;
707 init_waitqueue_head(&new_c->quota_wait);
708 spin_lock_init(&new_c->quota_lock);
709
710 spin_lock_irqsave(&remote_endpoints_lock, flags);
711 list_add_tail(&new_c->list, &remote_endpoints);
712 new_c->quota_restart_state = RESTART_NORMAL;
713 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
714 return 0;
715}
716
717static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
718{
719 struct msm_rpc_endpoint *ept;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700720
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700721 list_for_each_entry(ept, &local_endpoints, list) {
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -0600722 if (ept->cid == cid)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700723 return ept;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700724 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700725 return NULL;
726}
727
728static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t pid,
729 uint32_t cid)
730{
731 struct rr_remote_endpoint *ept;
732 unsigned long flags;
733
734 spin_lock_irqsave(&remote_endpoints_lock, flags);
735 list_for_each_entry(ept, &remote_endpoints, list) {
736 if ((ept->pid == pid) && (ept->cid == cid)) {
737 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
738 return ept;
739 }
740 }
741 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
742 return NULL;
743}
744
/*
 * React to a NEW_SERVER message for a (prog, vers) that already
 * existed at the same (pid, cid): the server restarted.  Resets the
 * tx flow-control quota on the matching remote endpoint and clears
 * RESTART_PEND_SVR on every local endpoint bound to that
 * program/version, waking all waiters in both cases.
 */
static void handle_server_restart(struct rr_server *server,
				  uint32_t pid, uint32_t cid,
				  uint32_t prog, uint32_t vers)
{
	struct rr_remote_endpoint *r_ept;
	struct msm_rpc_endpoint *ept;
	unsigned long flags;
	r_ept = rpcrouter_lookup_remote_endpoint(pid, cid);
	if (r_ept && (r_ept->quota_restart_state !=
		      RESTART_NORMAL)) {
		/* restore normal flow control for the restarted peer */
		spin_lock_irqsave(&r_ept->quota_lock, flags);
		r_ept->tx_quota_cntr = 0;
		r_ept->quota_restart_state =
			RESTART_NORMAL;
		spin_unlock_irqrestore(&r_ept->quota_lock, flags);
		D(KERN_INFO "rpcrouter: Remote EPT Reset %0x\n",
		  (unsigned int)r_ept);
		wake_up(&r_ept->quota_wait);
	}
	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_for_each_entry(ept, &local_endpoints, list) {
		if ((be32_to_cpu(ept->dst_prog) == prog) &&
		    (be32_to_cpu(ept->dst_vers) == vers) &&
		    (ept->restart_state & RESTART_PEND_SVR)) {
			/* server is back: clear the pending-server bit */
			spin_lock(&ept->restart_lock);
			ept->restart_state &= ~RESTART_PEND_SVR;
			spin_unlock(&ept->restart_lock);
			D("rpcrouter: Local EPT Reset %08x:%08x \n",
			  prog, vers);
			wake_up(&ept->restart_wait);
			wake_up(&ept->wait_q);
		}
	}
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
}
780
781static int process_control_msg(struct rpcrouter_xprt_info *xprt_info,
782 union rr_control_msg *msg, int len)
783{
784 union rr_control_msg ctl;
785 struct rr_server *server;
786 struct rr_remote_endpoint *r_ept;
787 int rc = 0;
788 unsigned long flags;
789 static int first = 1;
790
791 if (len != sizeof(*msg)) {
792 RR(KERN_ERR "rpcrouter: r2r msg size %d != %d\n",
793 len, sizeof(*msg));
794 return -EINVAL;
795 }
796
797 switch (msg->cmd) {
798 case RPCROUTER_CTRL_CMD_HELLO:
799 RR("o HELLO PID %d\n", xprt_info->remote_pid);
800 memset(&ctl, 0, sizeof(ctl));
801 ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
802 rpcrouter_send_control_msg(xprt_info, &ctl);
803
804 xprt_info->initialized = 1;
805
806 /* Send list of servers one at a time */
807 ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
808
809 /* TODO: long time to hold a spinlock... */
810 spin_lock_irqsave(&server_list_lock, flags);
811 list_for_each_entry(server, &server_list, list) {
812 if (server->pid != RPCROUTER_PID_LOCAL)
813 continue;
814 ctl.srv.pid = server->pid;
815 ctl.srv.cid = server->cid;
816 ctl.srv.prog = server->prog;
817 ctl.srv.vers = server->vers;
818
819 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
820 server->pid, server->cid,
821 server->prog, server->vers);
822
823 rpcrouter_send_control_msg(xprt_info, &ctl);
824 }
825 spin_unlock_irqrestore(&server_list_lock, flags);
826
827 if (first) {
828 first = 0;
829 queue_work(rpcrouter_workqueue,
830 &work_create_rpcrouter_pdev);
831 }
832 break;
833
834 case RPCROUTER_CTRL_CMD_RESUME_TX:
835 RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
836
837 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.pid,
838 msg->cli.cid);
839 if (!r_ept) {
840 printk(KERN_ERR
841 "rpcrouter: Unable to resume client\n");
842 break;
843 }
844 spin_lock_irqsave(&r_ept->quota_lock, flags);
845 r_ept->tx_quota_cntr = 0;
846 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
847 wake_up(&r_ept->quota_wait);
848 break;
849
850 case RPCROUTER_CTRL_CMD_NEW_SERVER:
851 if (msg->srv.vers == 0) {
852 pr_err(
853 "rpcrouter: Server create rejected, version = 0, "
854 "program = %08x\n", msg->srv.prog);
855 break;
856 }
857
858 RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
859 msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);
860
861 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
862
863 if (!server) {
864 server = rpcrouter_create_server(
865 msg->srv.pid, msg->srv.cid,
866 msg->srv.prog, msg->srv.vers);
867 if (!server)
868 return -ENOMEM;
869 /*
870 * XXX: Verify that its okay to add the
871 * client to our remote client list
872 * if we get a NEW_SERVER notification
873 */
874 if (!rpcrouter_lookup_remote_endpoint(msg->srv.pid,
875 msg->srv.cid)) {
876 rc = rpcrouter_create_remote_endpoint(
877 msg->srv.pid, msg->srv.cid);
878 if (rc < 0)
879 printk(KERN_ERR
880 "rpcrouter:Client create"
881 "error (%d)\n", rc);
882 }
883 rpcrouter_register_board_dev(server);
884 schedule_work(&work_create_pdevs);
885 wake_up(&newserver_wait);
886 } else {
887 if ((server->pid == msg->srv.pid) &&
888 (server->cid == msg->srv.cid)) {
889 handle_server_restart(server,
890 msg->srv.pid,
891 msg->srv.cid,
892 msg->srv.prog,
893 msg->srv.vers);
894 } else {
895 server->pid = msg->srv.pid;
896 server->cid = msg->srv.cid;
897 }
898 }
899 break;
900
901 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
902 RR("o REMOVE_SERVER prog=%08x:%d\n",
903 msg->srv.prog, msg->srv.vers);
904 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
905 if (server)
906 rpcrouter_destroy_server(server);
907 break;
908
909 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
910 RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
911 if (msg->cli.pid == RPCROUTER_PID_LOCAL) {
912 printk(KERN_ERR
913 "rpcrouter: Denying remote removal of "
914 "local client\n");
915 break;
916 }
917 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.pid,
918 msg->cli.cid);
919 if (r_ept) {
920 spin_lock_irqsave(&remote_endpoints_lock, flags);
921 list_del(&r_ept->list);
922 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
923 kfree(r_ept);
924 }
925
926 /* Notify local clients of this event */
927 printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n");
928 rc = -ENOSYS;
929
930 break;
931 case RPCROUTER_CTRL_CMD_PING:
932 /* No action needed for ping messages received */
933 RR("o PING\n");
934 break;
935 default:
936 RR("o UNKNOWN(%08x)\n", msg->cmd);
937 rc = -ENOSYS;
938 }
939
940 return rc;
941}
942
943static void do_create_rpcrouter_pdev(struct work_struct *work)
944{
945 D("%s: modem rpc router up\n", __func__);
946 platform_device_register(&rpcrouter_pdev);
947 complete_all(&rpc_remote_router_up);
948}
949
/*
 * Work function: create a platform device for each remote server that
 * does not yet have one (pdev_name[0] == 0 marks "not created").
 *
 * At most one pdev is created per invocation: the server list lock
 * must be dropped before msm_rpcrouter_create_server_pdev() (which
 * may sleep), invalidating the iteration, so the work re-queues
 * itself and rescans until no unnamed server remains.
 */
static void do_create_pdevs(struct work_struct *work)
{
	unsigned long flags;
	struct rr_server *server;

	/* TODO: race if destroyed while being registered */
	spin_lock_irqsave(&server_list_lock, flags);
	list_for_each_entry(server, &server_list, list) {
		if (server->pid != RPCROUTER_PID_LOCAL) {
			if (server->pdev_name[0] == 0) {
				/* name it first so a rescan skips it */
				sprintf(server->pdev_name, "rs%.8x",
					server->prog);
				spin_unlock_irqrestore(&server_list_lock,
						       flags);
				msm_rpcrouter_create_server_pdev(server);
				schedule_work(&work_create_pdevs);
				return;
			}
		}
	}
	spin_unlock_irqrestore(&server_list_lock, flags);
}
972
973static void *rr_malloc(unsigned sz)
974{
975 void *ptr = kmalloc(sz, GFP_KERNEL);
976 if (ptr)
977 return ptr;
978
979 printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz);
980 do {
981 ptr = kmalloc(sz, GFP_KERNEL);
982 } while (!ptr);
983
984 return ptr;
985}
986
/*
 * Read exactly 'len' bytes from the transport into 'data'.
 *
 * Blocks (without holding the transport lock) until enough bytes are
 * available or an abort is requested; the wakelock is released while
 * waiting so the system may suspend, and need_len records how much
 * the waiter still requires.
 *
 * Returns 0 on success, -EIO on short read or abort.
 */
static int rr_read(struct rpcrouter_xprt_info *xprt_info,
		   void *data, uint32_t len)
{
	int rc;
	unsigned long flags;

	while (!xprt_info->abort_data_read) {
		spin_lock_irqsave(&xprt_info->lock, flags);
		if (xprt_info->xprt->read_avail() >= len) {
			rc = xprt_info->xprt->read(data, len);
			spin_unlock_irqrestore(&xprt_info->lock, flags);
			/* an abort raced the read: treat as failure */
			if (rc == len && !xprt_info->abort_data_read)
				return 0;
			else
				return -EIO;
		}
		/* not enough data yet: record the need and sleep */
		xprt_info->need_len = len;
		wake_unlock(&xprt_info->wakelock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);

		wait_event(xprt_info->read_wait,
			   xprt_info->xprt->read_avail() >= len
			   || xprt_info->abort_data_read);
	}
	return -EIO;
}
1013
1014#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1015static char *type_to_str(int i)
1016{
1017 switch (i) {
1018 case RPCROUTER_CTRL_CMD_DATA:
1019 return "data ";
1020 case RPCROUTER_CTRL_CMD_HELLO:
1021 return "hello ";
1022 case RPCROUTER_CTRL_CMD_BYE:
1023 return "bye ";
1024 case RPCROUTER_CTRL_CMD_NEW_SERVER:
1025 return "new_srvr";
1026 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
1027 return "rmv_srvr";
1028 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
1029 return "rmv_clnt";
1030 case RPCROUTER_CTRL_CMD_RESUME_TX:
1031 return "resum_tx";
1032 case RPCROUTER_CTRL_CMD_EXIT:
1033 return "cmd_exit";
1034 default:
1035 return "invalid";
1036 }
1037}
1038#endif
1039
/*
 * do_read_data() - worker that drains one message from a transport.
 *
 * Reads one router message (header, then either a control payload or a
 * pacmark word plus a data fragment) and dispatches it:
 *  - control messages (dst_cid == RPCROUTER_ROUTER_ADDRESS) go to
 *    process_control_msg(),
 *  - data fragments are appended to a matching incomplete packet, or
 *    start a new one, which is queued on the destination endpoint's
 *    read_q once its last fragment has arrived.
 * On success the work requeues itself; on I/O failure, bad data, or
 * abort_data_read it returns without requeueing, ending the read loop
 * for this transport.
 */
static void do_read_data(struct work_struct *work)
{
	struct rr_header hdr;
	struct rr_packet *pkt;
	struct rr_fragment *frag;
	struct msm_rpc_endpoint *ept;
#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	struct rpc_request_hdr *rq;
#endif
	uint32_t pm, mid;
	unsigned long flags;

	struct rpcrouter_xprt_info *xprt_info =
		container_of(work,
			     struct rpcrouter_xprt_info,
			     read_data);

	/* Router header first; any short read aborts the worker. */
	if (rr_read(xprt_info, &hdr, sizeof(hdr)))
		goto fail_io;

	RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
	   hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
	   hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
	RAW_HDR("[r rr_h] "
	    "ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
	    "confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
	    hdr.version, type_to_str(hdr.type), hdr.src_pid, hdr.src_cid,
	    hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);

	/* Validate the header before trusting hdr.size. */
	if (hdr.version != RPCROUTER_VERSION) {
		DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
		goto fail_data;
	}
	if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
		DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
		goto fail_data;
	}

	/* Messages addressed to the router itself are control messages. */
	if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
		/* First message from this peer reveals its pid; run the
		 * restart/startup notification exactly once per xprt. */
		if (xprt_info->remote_pid == -1) {
			xprt_info->remote_pid = hdr.src_pid;

			/* do restart notification */
			modem_reset_startup(xprt_info);
		}

		if (rr_read(xprt_info, xprt_info->r2r_buf, hdr.size))
			goto fail_io;
		process_control_msg(xprt_info,
				    (void *) xprt_info->r2r_buf, hdr.size);
		goto done;
	}

	/* Data messages carry a 32-bit pacmark word before the payload. */
	if (hdr.size < sizeof(pm)) {
		DIAG("runt packet (no pacmark)\n");
		goto fail_data;
	}
	if (rr_read(xprt_info, &pm, sizeof(pm)))
		goto fail_io;

	hdr.size -= sizeof(pm);

	frag = rr_malloc(sizeof(*frag));
	frag->next = NULL;
	frag->length = hdr.size;
	if (rr_read(xprt_info, frag->data, hdr.size)) {
		kfree(frag);
		goto fail_io;
	}

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	if ((smd_rpcrouter_debug_mask & RAW_PMR) &&
	    ((pm >> 30 & 0x1) || (pm >> 31 & 0x1))) {
		uint32_t xid = 0;
		if (pm >> 30 & 0x1) {
			rq = (struct rpc_request_hdr *) frag->data;
			xid = ntohl(rq->xid);
		}
		if ((pm >> 31 & 0x1) || (pm >> 30 & 0x1))
			RAW_PMR_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
				       "len=%3i,dst_cid=%08x\n",
				       xid,
				       pm >> 30 & 0x1,
				       pm >> 31 & 0x1,
				       pm >> 16 & 0xFF,
				       pm & 0xFFFF, hdr.dst_cid);
	}

	if (smd_rpcrouter_debug_mask & SMEM_LOG) {
		rq = (struct rpc_request_hdr *) frag->data;
		if (rq->xid == 0)
			smem_log_event(SMEM_LOG_PROC_ID_APPS |
				       RPC_ROUTER_LOG_EVENT_MID_READ,
				       PACMARK_MID(pm),
				       hdr.dst_cid,
				       hdr.src_cid);
		else
			smem_log_event(SMEM_LOG_PROC_ID_APPS |
				       RPC_ROUTER_LOG_EVENT_MSG_READ,
				       ntohl(rq->xid),
				       hdr.dst_cid,
				       hdr.src_cid);
	}
#endif

	spin_lock_irqsave(&local_endpoints_lock, flags);
	ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
	if (!ept) {
		spin_unlock_irqrestore(&local_endpoints_lock, flags);
		DIAG("no local ept for cid %08x\n", hdr.dst_cid);
		kfree(frag);
		goto done;
	}

	/* See if there is already a partial packet that matches our mid
	 * and if so, append this fragment to that packet.
	 */
	mid = PACMARK_MID(pm);
	spin_lock(&ept->incomplete_lock);
	list_for_each_entry(pkt, &ept->incomplete, list) {
		if (pkt->mid == mid) {
			pkt->last->next = frag;
			pkt->last = frag;
			pkt->length += frag->length;
			if (PACMARK_LAST(pm)) {
				/* message complete: hand it to read_q
				 * (local_endpoints_lock still held) */
				list_del(&pkt->list);
				spin_unlock(&ept->incomplete_lock);
				goto packet_complete;
			}
			spin_unlock(&ept->incomplete_lock);
			spin_unlock_irqrestore(&local_endpoints_lock, flags);
			goto done;
		}
	}
	spin_unlock(&ept->incomplete_lock);
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
	/* This mid is new -- create a packet for it, and put it on
	 * the incomplete list if this fragment is not a last fragment,
	 * otherwise put it on the read queue.
	 */
	pkt = rr_malloc(sizeof(struct rr_packet));
	pkt->first = frag;
	pkt->last = frag;
	memcpy(&pkt->hdr, &hdr, sizeof(hdr));
	pkt->mid = mid;
	pkt->length = frag->length;

	/* local_endpoints_lock was dropped across the allocation above,
	 * so look the endpoint up again - it may have been removed. */
	spin_lock_irqsave(&local_endpoints_lock, flags);
	ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
	if (!ept) {
		spin_unlock_irqrestore(&local_endpoints_lock, flags);
		DIAG("no local ept for cid %08x\n", hdr.dst_cid);
		kfree(frag);
		kfree(pkt);
		goto done;
	}
	if (!PACMARK_LAST(pm)) {
		spin_lock(&ept->incomplete_lock);
		list_add_tail(&pkt->list, &ept->incomplete);
		spin_unlock(&ept->incomplete_lock);
		spin_unlock_irqrestore(&local_endpoints_lock, flags);
		goto done;
	}

packet_complete:
	spin_lock(&ept->read_q_lock);
	D("%s: take read lock on ept %p\n", __func__, ept);
	/* Wakelock held while packets sit on read_q; the reader drops it
	 * in __msm_rpc_read() once the queue drains. */
	wake_lock(&ept->read_q_wake_lock);
	list_add_tail(&pkt->list, &ept->read_q);
	wake_up(&ept->wait_q);
	spin_unlock(&ept->read_q_lock);
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
done:

	/* Flow-control ack: the sender asked for confirmation. */
	if (hdr.confirm_rx) {
		union rr_control_msg msg;

		msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
		msg.cli.pid = hdr.dst_pid;
		msg.cli.cid = hdr.dst_cid;

		RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
		rpcrouter_send_control_msg(xprt_info, &msg);

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
		if (smd_rpcrouter_debug_mask & SMEM_LOG)
			smem_log_event(SMEM_LOG_PROC_ID_APPS |
				       RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT,
				       RPCROUTER_PID_LOCAL,
				       hdr.dst_cid,
				       hdr.src_cid);
#endif

	}

	/* don't requeue if we should be shutting down */
	if (!xprt_info->abort_data_read) {
		queue_work(xprt_info->workqueue, &xprt_info->read_data);
		return;
	}

	D("rpc_router terminating for '%s'\n",
		xprt_info->xprt->name);

fail_io:
fail_data:
	D(KERN_ERR "rpc_router has died for '%s'\n",
		xprt_info->xprt->name);
}
1249
1250void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
1251 uint32_t vers, uint32_t proc)
1252{
1253 memset(hdr, 0, sizeof(struct rpc_request_hdr));
1254 hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
1255 hdr->rpc_vers = cpu_to_be32(2);
1256 hdr->prog = cpu_to_be32(prog);
1257 hdr->vers = cpu_to_be32(vers);
1258 hdr->procedure = cpu_to_be32(proc);
1259}
1260EXPORT_SYMBOL(msm_rpc_setup_req);
1261
1262struct msm_rpc_endpoint *msm_rpc_open(void)
1263{
1264 struct msm_rpc_endpoint *ept;
1265
1266 ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
1267 if (ept == NULL)
1268 return ERR_PTR(-ENOMEM);
1269
1270 return ept;
1271}
1272
/*
 * Force a reader of @ept out of its wait; __msm_rpc_read() observes
 * forced_wakeup and returns 0 bytes.  The flag is set before the
 * wake_up so the waiter's condition check sees it.
 */
void msm_rpc_read_wakeup(struct msm_rpc_endpoint *ept)
{
	ept->forced_wakeup = 1;
	wake_up(&ept->wait_q);
}
1278
1279int msm_rpc_close(struct msm_rpc_endpoint *ept)
1280{
1281 if (!ept)
1282 return -EINVAL;
1283 return msm_rpcrouter_destroy_local_endpoint(ept);
1284}
1285EXPORT_SYMBOL(msm_rpc_close);
1286
/*
 * msm_rpc_write_pkt() - send one (possibly partial) RPC message fragment.
 * @hdr:    routing header; the caller fills dst_pid/dst_cid, everything
 *          else (type, version, src, confirm_rx, size) is set here.
 * @ept:    local source endpoint.
 * @r_ept:  remote destination endpoint for flow-control accounting, or
 *          NULL when none exists for the destination.
 * @buffer: payload bytes for this fragment.
 * @count:  payload length.
 * @first:  pacmark "first fragment" flag.
 * @last:   pacmark "last fragment" flag.
 * @mid:    pacmark message id shared by all fragments of one message.
 *
 * May sleep: waits for the remote rx quota and for transport write
 * space.  Returns the number of bytes put on the wire (header + pacmark
 * + payload) or a negative errno (-ENETRESET on subsystem restart,
 * -ERESTARTSYS on signal for interruptible endpoints).
 */
static int msm_rpc_write_pkt(
	struct rr_header *hdr,
	struct msm_rpc_endpoint *ept,
	struct rr_remote_endpoint *r_ept,
	void *buffer,
	int count,
	int first,
	int last,
	uint32_t mid
	)
{
#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	struct rpc_request_hdr *rq = buffer;
	uint32_t event_id;
#endif
	uint32_t pacmark;
	unsigned long flags = 0;
	int rc;
	struct rpcrouter_xprt_info *xprt_info;
	int needed;

	DEFINE_WAIT(__wait);

	/* Create routing header */
	hdr->type = RPCROUTER_CTRL_CMD_DATA;
	hdr->version = RPCROUTER_VERSION;
	hdr->src_pid = ept->pid;
	hdr->src_cid = ept->cid;
	hdr->confirm_rx = 0;
	hdr->size = count + sizeof(uint32_t);	/* payload + pacmark word */

	rc = wait_for_restart_and_notify(ept);
	if (rc)
		return rc;

	if (r_ept) {
		/* Flow control: sleep until the destination has rx quota
		 * left, a restart is pending, or a signal arrives.  Note
		 * every break leaves this loop holding quota_lock. */
		for (;;) {
			prepare_to_wait(&r_ept->quota_wait, &__wait,
					TASK_INTERRUPTIBLE);
			spin_lock_irqsave(&r_ept->quota_lock, flags);
			if ((r_ept->tx_quota_cntr <
			     RPCROUTER_DEFAULT_RX_QUOTA) ||
			    (r_ept->quota_restart_state != RESTART_NORMAL))
				break;
			if (signal_pending(current) &&
			    (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
				break;
			spin_unlock_irqrestore(&r_ept->quota_lock, flags);
			schedule();
		}
		finish_wait(&r_ept->quota_wait, &__wait);

		if (r_ept->quota_restart_state != RESTART_NORMAL) {
			spin_lock(&ept->restart_lock);
			ept->restart_state &= ~RESTART_PEND_NTFY;
			spin_unlock(&ept->restart_lock);
			spin_unlock_irqrestore(&r_ept->quota_lock, flags);
			return -ENETRESET;
		}

		if (signal_pending(current) &&
		    (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
			spin_unlock_irqrestore(&r_ept->quota_lock, flags);
			return -ERESTARTSYS;
		}
		r_ept->tx_quota_cntr++;
		/* On the quota boundary ask the peer to confirm receipt
		 * (it answers with RESUME_TX, resetting the quota). */
		if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA) {
			hdr->confirm_rx = 1;

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
			if (smd_rpcrouter_debug_mask & SMEM_LOG) {
				event_id = (rq->xid == 0) ?
					RPC_ROUTER_LOG_EVENT_MID_CFM_REQ :
					RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ;

				smem_log_event(SMEM_LOG_PROC_ID_APPS | event_id,
					       hdr->dst_pid,
					       hdr->dst_cid,
					       hdr->src_cid);
			}
#endif

		}
	}
	pacmark = PACMARK(count, mid, first, last);

	if (r_ept)
		spin_unlock_irqrestore(&r_ept->quota_lock, flags);

	/* Look up the transport under the list mutex, then pin it via
	 * its spinlock before dropping the mutex. */
	mutex_lock(&xprt_info_list_lock);
	xprt_info = rpcrouter_get_xprt_info(hdr->dst_pid);
	if (!xprt_info) {
		mutex_unlock(&xprt_info_list_lock);
		return -ENETRESET;
	}
	spin_lock_irqsave(&xprt_info->lock, flags);
	mutex_unlock(&xprt_info_list_lock);
	spin_lock(&ept->restart_lock);
	if (ept->restart_state != RESTART_NORMAL) {
		ept->restart_state &= ~RESTART_PEND_NTFY;
		spin_unlock(&ept->restart_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		return -ENETRESET;
	}

	needed = sizeof(*hdr) + hdr->size;
	/* Poll for transport write space.  Both locks are dropped while
	 * sleeping, so the xprt must be re-fetched each iteration. */
	while ((ept->restart_state == RESTART_NORMAL) &&
	       (xprt_info->xprt->write_avail() < needed)) {
		spin_unlock(&ept->restart_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		msleep(250);

		/* refresh xprt pointer to ensure that it hasn't
		 * been deleted since our last retrieval */
		mutex_lock(&xprt_info_list_lock);
		xprt_info = rpcrouter_get_xprt_info(hdr->dst_pid);
		if (!xprt_info) {
			mutex_unlock(&xprt_info_list_lock);
			return -ENETRESET;
		}
		spin_lock_irqsave(&xprt_info->lock, flags);
		mutex_unlock(&xprt_info_list_lock);
		spin_lock(&ept->restart_lock);
	}
	if (ept->restart_state != RESTART_NORMAL) {
		ept->restart_state &= ~RESTART_PEND_NTFY;
		spin_unlock(&ept->restart_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		return -ENETRESET;
	}

	/* TODO: deal with full fifo */
	/* Header, pacmark and payload go out back-to-back while holding
	 * xprt_info->lock so writes from different callers cannot
	 * interleave on the wire. */
	xprt_info->xprt->write(hdr, sizeof(*hdr), HEADER);
	RAW_HDR("[w rr_h] "
	    "ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
	    "confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
	    hdr->version, type_to_str(hdr->type),
	    hdr->src_pid, hdr->src_cid,
	    hdr->confirm_rx, hdr->size, hdr->dst_pid, hdr->dst_cid);
	xprt_info->xprt->write(&pacmark, sizeof(pacmark), PACKMARK);

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	if ((smd_rpcrouter_debug_mask & RAW_PMW) &&
	    ((pacmark >> 30 & 0x1) || (pacmark >> 31 & 0x1))) {
		uint32_t xid = 0;
		if (pacmark >> 30 & 0x1)
			xid = ntohl(rq->xid);
		if ((pacmark >> 31 & 0x1) || (pacmark >> 30 & 0x1))
			RAW_PMW_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
				       "len=%3i,src_cid=%x\n",
				       xid,
				       pacmark >> 30 & 0x1,
				       pacmark >> 31 & 0x1,
				       pacmark >> 16 & 0xFF,
				       pacmark & 0xFFFF, hdr->src_cid);
	}
#endif

	xprt_info->xprt->write(buffer, count, PAYLOAD);
	spin_unlock(&ept->restart_lock);
	spin_unlock_irqrestore(&xprt_info->lock, flags);

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	if (smd_rpcrouter_debug_mask & SMEM_LOG) {
		if (rq->xid == 0)
			smem_log_event(SMEM_LOG_PROC_ID_APPS |
				       RPC_ROUTER_LOG_EVENT_MID_WRITTEN,
				       PACMARK_MID(pacmark),
				       hdr->dst_cid,
				       hdr->src_cid);
		else
			smem_log_event(SMEM_LOG_PROC_ID_APPS |
				       RPC_ROUTER_LOG_EVENT_MSG_WRITTEN,
				       ntohl(rq->xid),
				       hdr->dst_cid,
				       hdr->src_cid);
	}
#endif

	return needed;
}
1468
1469static struct msm_rpc_reply *get_pend_reply(struct msm_rpc_endpoint *ept,
1470 uint32_t xid)
1471{
1472 unsigned long flags;
1473 struct msm_rpc_reply *reply;
1474 spin_lock_irqsave(&ept->reply_q_lock, flags);
1475 list_for_each_entry(reply, &ept->reply_pend_q, list) {
1476 if (reply->xid == xid) {
1477 list_del(&reply->list);
1478 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1479 return reply;
1480 }
1481 }
1482 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1483 return NULL;
1484}
1485
1486void get_requesting_client(struct msm_rpc_endpoint *ept, uint32_t xid,
1487 struct msm_rpc_client_info *clnt_info)
1488{
1489 unsigned long flags;
1490 struct msm_rpc_reply *reply;
1491
1492 if (!clnt_info)
1493 return;
1494
1495 spin_lock_irqsave(&ept->reply_q_lock, flags);
1496 list_for_each_entry(reply, &ept->reply_pend_q, list) {
1497 if (reply->xid == xid) {
1498 clnt_info->pid = reply->pid;
1499 clnt_info->cid = reply->cid;
1500 clnt_info->prog = reply->prog;
1501 clnt_info->vers = reply->vers;
1502 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1503 return;
1504 }
1505 }
1506 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1507 return;
1508}
1509
/* Return a finished reply tag to @ept's free (avail) list for reuse. */
static void set_avail_reply(struct msm_rpc_endpoint *ept,
			    struct msm_rpc_reply *reply)
{
	unsigned long flags;
	spin_lock_irqsave(&ept->reply_q_lock, flags);
	list_add_tail(&reply->list, &ept->reply_avail_q);
	spin_unlock_irqrestore(&ept->reply_q_lock, flags);
}
1518
1519static struct msm_rpc_reply *get_avail_reply(struct msm_rpc_endpoint *ept)
1520{
1521 struct msm_rpc_reply *reply;
1522 unsigned long flags;
1523 if (list_empty(&ept->reply_avail_q)) {
1524 if (ept->reply_cnt >= RPCROUTER_PEND_REPLIES_MAX) {
1525 printk(KERN_ERR
1526 "exceeding max replies of %d \n",
1527 RPCROUTER_PEND_REPLIES_MAX);
1528 return 0;
1529 }
1530 reply = kmalloc(sizeof(struct msm_rpc_reply), GFP_KERNEL);
1531 if (!reply)
1532 return 0;
1533 D("Adding reply 0x%08x \n", (unsigned int)reply);
1534 memset(reply, 0, sizeof(struct msm_rpc_reply));
1535 spin_lock_irqsave(&ept->reply_q_lock, flags);
1536 ept->reply_cnt++;
1537 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1538 } else {
1539 spin_lock_irqsave(&ept->reply_q_lock, flags);
1540 reply = list_first_entry(&ept->reply_avail_q,
1541 struct msm_rpc_reply,
1542 list);
1543 list_del(&reply->list);
1544 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1545 }
1546 return reply;
1547}
1548
/*
 * Queue a reply tag for an RPC call that still awaits its reply.  Takes
 * the reply_q wakelock, which msm_rpc_write() releases once
 * reply_pend_q drains after the reply has been written.
 */
static void set_pend_reply(struct msm_rpc_endpoint *ept,
			   struct msm_rpc_reply *reply)
{
	unsigned long flags;
	spin_lock_irqsave(&ept->reply_q_lock, flags);
	D("%s: take reply lock on ept %p\n", __func__, ept);
	wake_lock(&ept->reply_q_wake_lock);
	list_add_tail(&reply->list, &ept->reply_pend_q);
	spin_unlock_irqrestore(&ept->reply_q_lock, flags);
}
1559
/*
 * msm_rpc_write() - send a complete RPC call or reply from @ept.
 * @buffer: marshalled RPC message beginning with struct rpc_request_hdr.
 * @count:  message length in bytes.
 *
 * Snoops the message header to pick the destination: calls (type 0) go
 * to the endpoint's bound server; replies are routed back to the client
 * recorded by __msm_rpc_read() when the request arrived.  Messages
 * larger than max_tx are split into pacmark fragments sharing one mid.
 * Returns @count on success or a negative errno.
 */
int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
{
	struct rr_header hdr;
	struct rpc_request_hdr *rq = buffer;
	struct rr_remote_endpoint *r_ept;
	struct msm_rpc_reply *reply = NULL;
	int max_tx;
	int tx_cnt;
	char *tx_buf;
	int rc;
	int first_pkt = 1;
	uint32_t mid;
	unsigned long flags;

	/* snoop the RPC packet and enforce permissions */

	/* has to have at least the xid and type fields */
	if (count < (sizeof(uint32_t) * 2)) {
		printk(KERN_ERR "rr_write: rejecting runt packet\n");
		return -EINVAL;
	}

	if (rq->type == 0) {
		/* RPC CALL */
		if (count < (sizeof(uint32_t) * 6)) {
			printk(KERN_ERR
			       "rr_write: rejecting runt call packet\n");
			return -EINVAL;
		}
		if (ept->dst_pid == 0xffffffff) {
			printk(KERN_ERR "rr_write: not connected\n");
			return -ENOTCONN;
		}
		/* Program must match exactly; only bits 0x0fff0000 of
		 * the version are compared. */
		if ((ept->dst_prog != rq->prog) ||
		    ((be32_to_cpu(ept->dst_vers) & 0x0fff0000) !=
		     (be32_to_cpu(rq->vers) & 0x0fff0000))) {
			printk(KERN_ERR
			       "rr_write: cannot write to %08x:%08x "
			       "(bound to %08x:%08x)\n",
			       be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
			       be32_to_cpu(ept->dst_prog),
			       be32_to_cpu(ept->dst_vers));
			return -EINVAL;
		}
		hdr.dst_pid = ept->dst_pid;
		hdr.dst_cid = ept->dst_cid;
		IO("CALL to %08x:%d @ %d:%08x (%d bytes)\n",
		   be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
		   ept->dst_pid, ept->dst_cid, count);
	} else {
		/* RPC REPLY */
		/* Route via the reply tag saved when the request was
		 * read; on failure it is re-queued below so the reply
		 * can be retried. */
		reply = get_pend_reply(ept, rq->xid);
		if (!reply) {
			printk(KERN_ERR
			       "rr_write: rejecting, reply not found \n");
			return -EINVAL;
		}
		hdr.dst_pid = reply->pid;
		hdr.dst_cid = reply->cid;
		IO("REPLY to xid=%d @ %d:%08x (%d bytes)\n",
		   be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
	}

	r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_pid, hdr.dst_cid);

	if ((!r_ept) && (hdr.dst_pid != RPCROUTER_PID_LOCAL)) {
		printk(KERN_ERR
			"msm_rpc_write(): No route to ept "
			"[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
		count = -EHOSTUNREACH;
		goto write_release_lock;
	}

	tx_cnt = count;
	tx_buf = buffer;
	mid = atomic_add_return(1, &pm_mid) & 0xFF;
	/* The modem's router can only take 500 bytes of data. The
	   first 8 bytes it uses on the modem side for addressing,
	   the next 4 bytes are for the pacmark header. */
	max_tx = RPCROUTER_MSGSIZE_MAX - 8 - sizeof(uint32_t);
	IO("Writing %d bytes, max pkt size is %d\n",
	   tx_cnt, max_tx);
	while (tx_cnt > 0) {
		if (tx_cnt > max_tx) {
			/* intermediate fragment (last = 0) */
			rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
					       tx_buf, max_tx,
					       first_pkt, 0, mid);
			if (rc < 0) {
				count = rc;
				goto write_release_lock;
			}
			IO("Wrote %d bytes First %d, Last 0 mid %d\n",
			   rc, first_pkt, mid);
			tx_cnt -= max_tx;
			tx_buf += max_tx;
		} else {
			/* final fragment (last = 1) */
			rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
					       tx_buf, tx_cnt,
					       first_pkt, 1, mid);
			if (rc < 0) {
				count = rc;
				goto write_release_lock;
			}
			IO("Wrote %d bytes First %d Last 1 mid %d\n",
			   rc, first_pkt, mid);
			break;
		}
		first_pkt = 0;
	}

 write_release_lock:
	/* if reply, release wakelock after writing to the transport */
	if (rq->type != 0) {
		/* Upon failure, add reply tag to the pending list.
		** Else add reply tag to the avail/free list. */
		if (count < 0)
			set_pend_reply(ept, reply);
		else
			set_avail_reply(ept, reply);

		spin_lock_irqsave(&ept->reply_q_lock, flags);
		if (list_empty(&ept->reply_pend_q)) {
			D("%s: release reply lock on ept %p\n", __func__, ept);
			wake_unlock(&ept->reply_q_wake_lock);
		}
		spin_unlock_irqrestore(&ept->reply_q_lock, flags);
	}

	return count;
}
EXPORT_SYMBOL(msm_rpc_write);
1691
1692/*
1693 * NOTE: It is the responsibility of the caller to kfree buffer
1694 */
1695int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
1696 unsigned user_len, long timeout)
1697{
1698 struct rr_fragment *frag, *next;
1699 char *buf;
1700 int rc;
1701
1702 rc = __msm_rpc_read(ept, &frag, user_len, timeout);
1703 if (rc <= 0)
1704 return rc;
1705
1706 /* single-fragment messages conveniently can be
1707 * returned as-is (the buffer is at the front)
1708 */
1709 if (frag->next == 0) {
1710 *buffer = (void*) frag;
1711 return rc;
1712 }
1713
1714 /* multi-fragment messages, we have to do it the
1715 * hard way, which is rather disgusting right now
1716 */
1717 buf = rr_malloc(rc);
1718 *buffer = buf;
1719
1720 while (frag != NULL) {
1721 memcpy(buf, frag->data, frag->length);
1722 next = frag->next;
1723 buf += frag->length;
1724 kfree(frag);
1725 frag = next;
1726 }
1727
1728 return rc;
1729}
1730EXPORT_SYMBOL(msm_rpc_read);
1731
/*
 * msm_rpc_call() - issue an RPC call and discard the reply payload.
 * Thin wrapper over msm_rpc_call_reply() with a NULL reply buffer; it
 * still waits for, and status-checks, the matching reply.
 */
int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
		 void *_request, int request_size,
		 long timeout)
{
	return msm_rpc_call_reply(ept, proc,
				  _request, request_size,
				  NULL, 0, timeout);
}
EXPORT_SYMBOL(msm_rpc_call);
1741
/*
 * msm_rpc_call_reply() - issue an RPC call and (optionally) collect the
 * matching reply.
 * @_request: marshalled request; its rpc_request_hdr is overwritten here
 *            with a fresh xid and the endpoint's bound prog/vers.
 * @_reply:   buffer for the raw reply, or NULL to discard the payload.
 *
 * Loops over incoming messages, discarding stray calls and replies with
 * stale xids, until the reply for this call's xid arrives or the read
 * fails/times out.  Returns the reply length (0 when @_reply is NULL)
 * or a negative errno: -EPERM for a rejected reply_stat, -EINVAL for a
 * non-zero accept_stat, -ENOMEM when @reply_size is too small.
 */
int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
		       void *_request, int request_size,
		       void *_reply, int reply_size,
		       long timeout)
{
	struct rpc_request_hdr *req = _request;
	struct rpc_reply_hdr *reply;
	int rc;

	if (request_size < sizeof(*req))
		return -ETOOSMALL;

	if (ept->dst_pid == 0xffffffff)
		return -ENOTCONN;

	/* Build the request header in place. */
	memset(req, 0, sizeof(*req));
	req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
	req->rpc_vers = cpu_to_be32(2);
	req->prog = ept->dst_prog;
	req->vers = ept->dst_vers;
	req->procedure = cpu_to_be32(proc);

	rc = msm_rpc_write(ept, req, request_size);
	if (rc < 0)
		return rc;

	for (;;) {
		/* len of (unsigned)-1: accept a reply of any size */
		rc = msm_rpc_read(ept, (void*) &reply, -1, timeout);
		if (rc < 0)
			return rc;
		if (rc < (3 * sizeof(uint32_t))) {
			rc = -EIO;
			break;
		}
		/* we should not get CALL packets -- ignore them */
		if (reply->type == 0) {
			kfree(reply);
			continue;
		}
		/* If an earlier call timed out, we could get the (no
		 * longer wanted) reply for it. Ignore replies that
		 * we don't expect
		 */
		if (reply->xid != req->xid) {
			kfree(reply);
			continue;
		}
		if (reply->reply_stat != 0) {
			rc = -EPERM;
			break;
		}
		if (reply->data.acc_hdr.accept_stat != 0) {
			rc = -EINVAL;
			break;
		}
		if (_reply == NULL) {
			rc = 0;
			break;
		}
		if (rc > reply_size) {
			rc = -ENOMEM;
		} else {
			memcpy(_reply, reply, rc);
		}
		break;
	}
	/* every break path still owns the reply buffer */
	kfree(reply);
	return rc;
}
EXPORT_SYMBOL(msm_rpc_call_reply);
1812
1813
/* Return nonzero if @ept has at least one complete packet on read_q. */
static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
{
	unsigned long flags;
	int ret;
	spin_lock_irqsave(&ept->read_q_lock, flags);
	ret = !list_empty(&ept->read_q);
	spin_unlock_irqrestore(&ept->read_q_lock, flags);
	return ret;
}
1823
/*
 * __msm_rpc_read() - wait for and dequeue one complete packet from @ept.
 * @frag_ret: on success receives the head of the packet's fragment
 *            chain; ownership passes to the caller.
 * @len:      maximum acceptable packet length (unsigned, so callers pass
 *            (unsigned)-1 to accept any size).
 * @timeout:  jiffies to wait, or negative to wait indefinitely.
 *
 * Returns the packet length, 0 after a forced wakeup
 * (msm_rpc_read_wakeup), or a negative errno (-ENETRESET, -ETIMEDOUT,
 * -ERESTARTSYS, -EAGAIN, -ETOOSMALL, -ENOMEM).  When the packet is an
 * RPC call, the caller's identity is saved on the reply pending queue so
 * msm_rpc_write() can route the reply later.
 */
int __msm_rpc_read(struct msm_rpc_endpoint *ept,
		   struct rr_fragment **frag_ret,
		   unsigned len, long timeout)
{
	struct rr_packet *pkt;
	struct rpc_request_hdr *rq;
	struct msm_rpc_reply *reply;
	unsigned long flags;
	int rc;

	rc = wait_for_restart_and_notify(ept);
	if (rc)
		return rc;

	IO("READ on ept %p\n", ept);
	/* Four wait variants: {uninterruptible, interruptible} x
	 * {no timeout, timeout}.  Every variant re-checks for a network
	 * reset before interpreting its own result. */
	if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
		if (timeout < 0) {
			wait_event(ept->wait_q, (ept_packet_available(ept) ||
						 ept->forced_wakeup ||
						 ept->restart_state));
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
		} else {
			rc = wait_event_timeout(
				ept->wait_q,
				(ept_packet_available(ept) ||
				 ept->forced_wakeup ||
				 ept->restart_state),
				timeout);
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
			if (rc == 0)
				return -ETIMEDOUT;
		}
	} else {
		if (timeout < 0) {
			rc = wait_event_interruptible(
				ept->wait_q, (ept_packet_available(ept) ||
					      ept->forced_wakeup ||
					      ept->restart_state));
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
			if (rc < 0)
				return rc;
		} else {
			rc = wait_event_interruptible_timeout(
				ept->wait_q,
				(ept_packet_available(ept) ||
				 ept->forced_wakeup ||
				 ept->restart_state),
				timeout);
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
			if (rc == 0)
				return -ETIMEDOUT;
		}
	}

	/* Forced wakeup carries no data: clear the flag, report 0. */
	if (ept->forced_wakeup) {
		ept->forced_wakeup = 0;
		return 0;
	}

	spin_lock_irqsave(&ept->read_q_lock, flags);
	if (list_empty(&ept->read_q)) {
		/* woken, but the queue is already empty */
		spin_unlock_irqrestore(&ept->read_q_lock, flags);
		return -EAGAIN;
	}
	pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
	if (pkt->length > len) {
		/* caller's buffer limit too small; leave pkt queued */
		spin_unlock_irqrestore(&ept->read_q_lock, flags);
		return -ETOOSMALL;
	}
	list_del(&pkt->list);
	spin_unlock_irqrestore(&ept->read_q_lock, flags);

	rc = pkt->length;

	*frag_ret = pkt->first;
	rq = (void*) pkt->first->data;
	if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
		/* RPC CALL */
		/* Remember who asked, so the reply can be routed back. */
		reply = get_avail_reply(ept);
		if (!reply) {
			rc = -ENOMEM;
			goto read_release_lock;
		}
		reply->cid = pkt->hdr.src_cid;
		reply->pid = pkt->hdr.src_pid;
		reply->xid = rq->xid;
		reply->prog = rq->prog;
		reply->vers = rq->vers;
		set_pend_reply(ept, reply);
	}

	kfree(pkt);

	IO("READ on ept %p (%d bytes)\n", ept, rc);

 read_release_lock:

	/* release read wakelock after taking reply wakelock */
	spin_lock_irqsave(&ept->read_q_lock, flags);
	if (list_empty(&ept->read_q)) {
		D("%s: release read lock on ept %p\n", __func__, ept);
		wake_unlock(&ept->read_q_wake_lock);
	}
	spin_unlock_irqrestore(&ept->read_q_lock, flags);

	return rc;
}
1935
1936int msm_rpc_is_compatible_version(uint32_t server_version,
1937 uint32_t client_version)
1938{
1939
1940 if ((server_version & RPC_VERSION_MODE_MASK) !=
1941 (client_version & RPC_VERSION_MODE_MASK))
1942 return 0;
1943
1944 if (server_version & RPC_VERSION_MODE_MASK)
1945 return server_version == client_version;
1946
1947 return ((server_version & RPC_VERSION_MAJOR_MASK) ==
1948 (client_version & RPC_VERSION_MAJOR_MASK)) &&
1949 ((server_version & RPC_VERSION_MINOR_MASK) >=
1950 (client_version & RPC_VERSION_MINOR_MASK));
1951}
1952EXPORT_SYMBOL(msm_rpc_is_compatible_version);
1953
/*
 * msm_rpc_get_server() - find a registered server for @prog.
 * @accept_compatible: when set, accept any version that
 *                     msm_rpc_is_compatible_version() approves;
 *                     otherwise require an exact version match.
 * @found_prog: set to 1 if ANY server for @prog exists (regardless of
 *              version), else 0.  Must be non-NULL.
 *
 * Returns the matching server or NULL.  Only the first list entry with
 * @prog is considered, and server_list_lock is dropped before the
 * returned pointer is used by callers - NOTE(review): this relies on
 * server entries not being freed concurrently; confirm lifetime rules.
 */
static struct rr_server *msm_rpc_get_server(uint32_t prog, uint32_t vers,
					    uint32_t accept_compatible,
					    uint32_t *found_prog)
{
	struct rr_server *server;
	unsigned long flags;

	if (found_prog == NULL)
		return NULL;

	*found_prog = 0;
	spin_lock_irqsave(&server_list_lock, flags);
	list_for_each_entry(server, &server_list, list) {
		if (server->prog == prog) {
			*found_prog = 1;
			spin_unlock_irqrestore(&server_list_lock, flags);
			if (accept_compatible) {
				if (msm_rpc_is_compatible_version(server->vers,
								  vers)) {
					return server;
				} else {
					return NULL;
				}
			} else if (server->vers == vers) {
				return server;
			} else
				return NULL;
		}
	}
	spin_unlock_irqrestore(&server_list_lock, flags);
	return NULL;
}
1986
/*
 * __msm_rpc_connect() - open a local endpoint bound to server
 * @prog:@vers.
 *
 * Waits on newserver_wait for the server to register.  A server entry
 * with the right program but an unacceptable version fails immediately
 * with -EHOSTUNREACH; an entirely missing program is waited for up to
 * msm_rpc_connect_timeout_ms (0 means fail at once).  Returns the
 * connected endpoint or an ERR_PTR (-EHOSTUNREACH, -ERESTARTSYS,
 * -ETIMEDOUT, -ENOMEM).
 */
static struct msm_rpc_endpoint *__msm_rpc_connect(uint32_t prog, uint32_t vers,
						  uint32_t accept_compatible,
						  unsigned flags)
{
	struct msm_rpc_endpoint *ept;
	struct rr_server *server;
	uint32_t found_prog;
	int rc = 0;

	DEFINE_WAIT(__wait);

	for (;;) {
		prepare_to_wait(&newserver_wait, &__wait,
				TASK_INTERRUPTIBLE);

		server = msm_rpc_get_server(prog, vers, accept_compatible,
					    &found_prog);
		if (server)
			break;

		/* program exists, but the version was rejected */
		if (found_prog) {
			pr_info("%s: server not found %x:%x\n",
				__func__, prog, vers);
			rc = -EHOSTUNREACH;
			break;
		}

		if (msm_rpc_connect_timeout_ms == 0) {
			rc = -EHOSTUNREACH;
			break;
		}

		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		rc = schedule_timeout(
			msecs_to_jiffies(msm_rpc_connect_timeout_ms));
		if (!rc) {
			rc = -ETIMEDOUT;
			break;
		}
	}
	finish_wait(&newserver_wait, &__wait);

	if (!server)
		return ERR_PTR(rc);

	if (accept_compatible && (server->vers != vers)) {
		D("RPC Using new version 0x%08x(0x%08x) prog 0x%08x",
		  vers, server->vers, prog);
		D(" ... Continuing\n");
	}

	/* Bind a fresh local endpoint to the discovered server. */
	ept = msm_rpc_open();
	if (IS_ERR(ept))
		return ept;

	ept->flags = flags;
	ept->dst_pid = server->pid;
	ept->dst_cid = server->cid;
	ept->dst_prog = cpu_to_be32(prog);
	ept->dst_vers = cpu_to_be32(server->vers);

	return ept;
}
2054
/*
 * msm_rpc_connect_compatible() - connect to @prog, accepting any server
 * version msm_rpc_is_compatible_version() approves.
 */
struct msm_rpc_endpoint *msm_rpc_connect_compatible(uint32_t prog,
			uint32_t vers, unsigned flags)
{
	return __msm_rpc_connect(prog, vers, 1, flags);
}
EXPORT_SYMBOL(msm_rpc_connect_compatible);
2061
/*
 * msm_rpc_connect() - connect to @prog, requiring an exact @vers match.
 */
struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog,
			uint32_t vers, unsigned flags)
{
	return __msm_rpc_connect(prog, vers, 0, flags);
}
EXPORT_SYMBOL(msm_rpc_connect);
2068
2069/* TODO: permission check? */
2070int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
2071 uint32_t prog, uint32_t vers)
2072{
2073 int rc;
2074 union rr_control_msg msg;
2075 struct rr_server *server;
2076 struct rpcrouter_xprt_info *xprt_info;
2077
2078 server = rpcrouter_create_server(ept->pid, ept->cid,
2079 prog, vers);
2080 if (!server)
2081 return -ENODEV;
2082
2083 msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
2084 msg.srv.pid = ept->pid;
2085 msg.srv.cid = ept->cid;
2086 msg.srv.prog = prog;
2087 msg.srv.vers = vers;
2088
2089 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
2090 ept->pid, ept->cid, prog, vers);
2091
2092 mutex_lock(&xprt_info_list_lock);
2093 list_for_each_entry(xprt_info, &xprt_info_list, list) {
2094 rc = rpcrouter_send_control_msg(xprt_info, &msg);
2095 if (rc < 0) {
2096 mutex_unlock(&xprt_info_list_lock);
2097 return rc;
2098 }
2099 }
2100 mutex_unlock(&xprt_info_list_lock);
2101 return 0;
2102}
2103
2104int msm_rpc_clear_netreset(struct msm_rpc_endpoint *ept)
2105{
2106 unsigned long flags;
2107 int rc = 1;
2108 spin_lock_irqsave(&ept->restart_lock, flags);
2109 if (ept->restart_state != RESTART_NORMAL) {
2110 ept->restart_state &= ~RESTART_PEND_NTFY;
2111 rc = 0;
2112 }
2113 spin_unlock_irqrestore(&ept->restart_lock, flags);
2114 return rc;
2115}
2116
2117/* TODO: permission check -- disallow unreg of somebody else's server */
2118int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
2119 uint32_t prog, uint32_t vers)
2120{
2121 struct rr_server *server;
2122 server = rpcrouter_lookup_server(prog, vers);
2123
2124 if (!server)
2125 return -ENOENT;
2126 rpcrouter_destroy_server(server);
2127 return 0;
2128}
2129
2130int msm_rpc_get_curr_pkt_size(struct msm_rpc_endpoint *ept)
2131{
2132 unsigned long flags;
2133 struct rr_packet *pkt;
2134 int rc = 0;
2135
2136 if (!ept)
2137 return -EINVAL;
2138
2139 if (!msm_rpc_clear_netreset(ept))
2140 return -ENETRESET;
2141
2142 spin_lock_irqsave(&ept->read_q_lock, flags);
2143 if (!list_empty(&ept->read_q)) {
2144 pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
2145 rc = pkt->length;
2146 }
2147 spin_unlock_irqrestore(&ept->read_q_lock, flags);
2148
2149 return rc;
2150}
2151
Arun Kumar Neelakantam0975c602012-10-16 23:13:09 +05302152static int msm_rpcrouter_close(void)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002153{
Karthikeyan Ramasubramanianc74ab972011-10-17 12:17:03 -06002154 struct rpcrouter_xprt_info *xprt_info;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002155 union rr_control_msg ctl;
2156
2157 ctl.cmd = RPCROUTER_CTRL_CMD_BYE;
2158 mutex_lock(&xprt_info_list_lock);
Karthikeyan Ramasubramanianc74ab972011-10-17 12:17:03 -06002159 while (!list_empty(&xprt_info_list)) {
2160 xprt_info = list_first_entry(&xprt_info_list,
2161 struct rpcrouter_xprt_info, list);
Arun Kumar Neelakantame201a312012-10-06 12:51:13 +05302162 modem_reset_cleanup(xprt_info);
Karthikeyan Ramasubramanianc74ab972011-10-17 12:17:03 -06002163 xprt_info->abort_data_read = 1;
2164 wake_up(&xprt_info->read_wait);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002165 rpcrouter_send_control_msg(xprt_info, &ctl);
2166 xprt_info->xprt->close();
2167 list_del(&xprt_info->list);
Karthikeyan Ramasubramanianc74ab972011-10-17 12:17:03 -06002168 mutex_unlock(&xprt_info_list_lock);
2169
2170 flush_workqueue(xprt_info->workqueue);
2171 destroy_workqueue(xprt_info->workqueue);
2172 wake_lock_destroy(&xprt_info->wakelock);
Arun Kumar Neelakantame201a312012-10-06 12:51:13 +05302173 /*free memory*/
2174 xprt_info->xprt->priv = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002175 kfree(xprt_info);
Karthikeyan Ramasubramanianc74ab972011-10-17 12:17:03 -06002176
2177 mutex_lock(&xprt_info_list_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002178 }
2179 mutex_unlock(&xprt_info_list_lock);
2180 return 0;
2181}
2182
2183#if defined(CONFIG_DEBUG_FS)
2184static int dump_servers(char *buf, int max)
2185{
2186 int i = 0;
2187 unsigned long flags;
2188 struct rr_server *svr;
2189 const char *sym;
2190
2191 spin_lock_irqsave(&server_list_lock, flags);
2192 list_for_each_entry(svr, &server_list, list) {
2193 i += scnprintf(buf + i, max - i, "pdev_name: %s\n",
2194 svr->pdev_name);
2195 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", svr->pid);
2196 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", svr->cid);
2197 i += scnprintf(buf + i, max - i, "prog: 0x%08x", svr->prog);
2198 sym = smd_rpc_get_sym(svr->prog);
2199 if (sym)
2200 i += scnprintf(buf + i, max - i, " (%s)\n", sym);
2201 else
2202 i += scnprintf(buf + i, max - i, "\n");
2203 i += scnprintf(buf + i, max - i, "vers: 0x%08x\n", svr->vers);
2204 i += scnprintf(buf + i, max - i, "\n");
2205 }
2206 spin_unlock_irqrestore(&server_list_lock, flags);
2207
2208 return i;
2209}
2210
2211static int dump_remote_endpoints(char *buf, int max)
2212{
2213 int i = 0;
2214 unsigned long flags;
2215 struct rr_remote_endpoint *ept;
2216
2217 spin_lock_irqsave(&remote_endpoints_lock, flags);
2218 list_for_each_entry(ept, &remote_endpoints, list) {
2219 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
2220 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
2221 i += scnprintf(buf + i, max - i, "tx_quota_cntr: %i\n",
2222 ept->tx_quota_cntr);
2223 i += scnprintf(buf + i, max - i, "quota_restart_state: %i\n",
2224 ept->quota_restart_state);
2225 i += scnprintf(buf + i, max - i, "\n");
2226 }
2227 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
2228
2229 return i;
2230}
2231
/*
 * Format the local-endpoint list (plus each endpoint's pending reply xids
 * and complete unread packets) into buf; returns bytes written.
 * The inner reply_q/read_q spinlocks are taken without irqsave because
 * interrupts are already disabled by the outer local_endpoints_lock
 * irqsave acquisition.
 */
static int dump_msm_rpc_endpoint(char *buf, int max)
{
	int i = 0;
	unsigned long flags;
	struct msm_rpc_reply *reply;
	struct msm_rpc_endpoint *ept;
	struct rr_packet *pkt;
	const char *sym;

	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_for_each_entry(ept, &local_endpoints, list) {
		i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
		i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
		i += scnprintf(buf + i, max - i, "dst_pid: 0x%08x\n",
			       ept->dst_pid);
		i += scnprintf(buf + i, max - i, "dst_cid: 0x%08x\n",
			       ept->dst_cid);
		i += scnprintf(buf + i, max - i, "dst_prog: 0x%08x",
			       be32_to_cpu(ept->dst_prog));
		sym = smd_rpc_get_sym(be32_to_cpu(ept->dst_prog));
		if (sym)
			i += scnprintf(buf + i, max - i, " (%s)\n", sym);
		else
			i += scnprintf(buf + i, max - i, "\n");
		i += scnprintf(buf + i, max - i, "dst_vers: 0x%08x\n",
			       be32_to_cpu(ept->dst_vers));
		i += scnprintf(buf + i, max - i, "reply_cnt: %i\n",
			       ept->reply_cnt);
		i += scnprintf(buf + i, max - i, "restart_state: %i\n",
			       ept->restart_state);

		i += scnprintf(buf + i, max - i, "outstanding xids:\n");
		spin_lock(&ept->reply_q_lock);
		list_for_each_entry(reply, &ept->reply_pend_q, list)
			i += scnprintf(buf + i, max - i, " xid = %u\n",
				       ntohl(reply->xid));
		spin_unlock(&ept->reply_q_lock);

		i += scnprintf(buf + i, max - i, "complete unread packets:\n");
		spin_lock(&ept->read_q_lock);
		list_for_each_entry(pkt, &ept->read_q, list) {
			i += scnprintf(buf + i, max - i, " mid = %i\n",
				       pkt->mid);
			i += scnprintf(buf + i, max - i, " length = %i\n",
				       pkt->length);
		}
		spin_unlock(&ept->read_q_lock);
		i += scnprintf(buf + i, max - i, "\n");
	}
	spin_unlock_irqrestore(&local_endpoints_lock, flags);

	return i;
}
2285
#define DEBUG_BUFMAX 4096
/* Scratch buffer shared by all debugfs dump files.  It is not protected
 * by any lock, so concurrent readers can interleave output; tolerated
 * because this is debug-only code. */
static char debug_buffer[DEBUG_BUFMAX];

/*
 * Generic debugfs read handler: the per-file fill callback (stashed in
 * file->private_data by debug_open) regenerates the full dump on every
 * read, then the window requested by (count, *ppos) is copied out.
 */
static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}
2296
/* Stash the fill callback (passed as debugfs i_private by debug_create)
 * where debug_read can find it. */
static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2302
/* File operations shared by every smd_rpcrouter debugfs dump file. */
static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};
2307
/*
 * Create one debugfs file whose contents are produced by @fill.
 * The callback is smuggled through the file's private data slot and
 * retrieved in debug_open/debug_read.
 */
static void debug_create(const char *name, mode_t mode,
			 struct dentry *dent,
			 int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}
2314
2315static void debugfs_init(void)
2316{
2317 struct dentry *dent;
2318
2319 dent = debugfs_create_dir("smd_rpcrouter", 0);
2320 if (IS_ERR(dent))
2321 return;
2322
2323 debug_create("dump_msm_rpc_endpoints", 0444, dent,
2324 dump_msm_rpc_endpoint);
2325 debug_create("dump_remote_endpoints", 0444, dent,
2326 dump_remote_endpoints);
2327 debug_create("dump_servers", 0444, dent,
2328 dump_servers);
2329
2330}
2331
2332#else
2333static void debugfs_init(void) {}
2334#endif
2335
2336static int msm_rpcrouter_add_xprt(struct rpcrouter_xprt *xprt)
2337{
2338 struct rpcrouter_xprt_info *xprt_info;
2339
2340 D("Registering xprt %s to RPC Router\n", xprt->name);
2341
2342 xprt_info = kmalloc(sizeof(struct rpcrouter_xprt_info), GFP_KERNEL);
2343 if (!xprt_info)
2344 return -ENOMEM;
2345
2346 xprt_info->xprt = xprt;
2347 xprt_info->initialized = 0;
2348 xprt_info->remote_pid = -1;
2349 init_waitqueue_head(&xprt_info->read_wait);
2350 spin_lock_init(&xprt_info->lock);
2351 wake_lock_init(&xprt_info->wakelock,
2352 WAKE_LOCK_SUSPEND, xprt->name);
2353 xprt_info->need_len = 0;
2354 xprt_info->abort_data_read = 0;
2355 INIT_WORK(&xprt_info->read_data, do_read_data);
2356 INIT_LIST_HEAD(&xprt_info->list);
2357
2358 xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
2359 if (!xprt_info->workqueue) {
2360 kfree(xprt_info);
2361 return -ENOMEM;
2362 }
2363
2364 if (!strcmp(xprt->name, "rpcrouter_loopback_xprt")) {
2365 xprt_info->remote_pid = RPCROUTER_PID_LOCAL;
2366 xprt_info->initialized = 1;
2367 } else {
2368 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT);
2369 }
2370
2371 mutex_lock(&xprt_info_list_lock);
2372 list_add_tail(&xprt_info->list, &xprt_info_list);
2373 mutex_unlock(&xprt_info_list_lock);
2374
2375 queue_work(xprt_info->workqueue, &xprt_info->read_data);
2376
2377 xprt->priv = xprt_info;
2378
2379 return 0;
2380}
2381
/*
 * msm_rpcrouter_remove_xprt() - unregister a transport from the RPC router
 * @xprt: transport to remove; NULL or one with no priv is silently ignored
 *
 * Aborts the reader worker, unlinks the transport's bookkeeping from
 * xprt_info_list, tears down its workqueue and wakelock, then frees it.
 */
static void msm_rpcrouter_remove_xprt(struct rpcrouter_xprt *xprt)
{
	struct rpcrouter_xprt_info *xprt_info;
	unsigned long flags;

	if (xprt && xprt->priv) {
		xprt_info = xprt->priv;

		/* abort rr_read thread */
		xprt_info->abort_data_read = 1;
		wake_up(&xprt_info->read_wait);

		/* remove xprt from available xprts */
		mutex_lock(&xprt_info_list_lock);
		spin_lock_irqsave(&xprt_info->lock, flags);
		list_del(&xprt_info->list);

		/* unlock the spinlock last to avoid a race
		 * condition with rpcrouter_get_xprt_info
		 * in msm_rpc_write_pkt in which the
		 * xprt is returned from rpcrouter_get_xprt_info
		 * and then deleted here. */
		mutex_unlock(&xprt_info_list_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);

		/* cleanup workqueues and wakelocks */
		flush_workqueue(xprt_info->workqueue);
		destroy_workqueue(xprt_info->workqueue);
		wake_lock_destroy(&xprt_info->wakelock);


		/* free memory */
		xprt->priv = 0;
		kfree(xprt_info);
	}
}
2418
/* Deferred open/close request for a transport, queued on
 * rpcrouter_workqueue and handled by xprt_open_worker()/
 * xprt_close_worker(); freed by the worker. */
struct rpcrouter_xprt_work {
	struct rpcrouter_xprt *xprt;
	struct work_struct work;
};
2423
2424static void xprt_open_worker(struct work_struct *work)
2425{
2426 struct rpcrouter_xprt_work *xprt_work =
2427 container_of(work, struct rpcrouter_xprt_work, work);
2428
2429 msm_rpcrouter_add_xprt(xprt_work->xprt);
2430
2431 kfree(xprt_work);
2432}
2433
2434static void xprt_close_worker(struct work_struct *work)
2435{
2436 struct rpcrouter_xprt_work *xprt_work =
2437 container_of(work, struct rpcrouter_xprt_work, work);
2438
2439 modem_reset_cleanup(xprt_work->xprt->priv);
2440 msm_rpcrouter_remove_xprt(xprt_work->xprt);
2441
2442 if (atomic_dec_return(&pending_close_count) == 0)
2443 wake_up(&subsystem_restart_wait);
2444
2445 kfree(xprt_work);
2446}
2447
2448void msm_rpcrouter_xprt_notify(struct rpcrouter_xprt *xprt, unsigned event)
2449{
2450 struct rpcrouter_xprt_info *xprt_info;
2451 struct rpcrouter_xprt_work *xprt_work;
Angshuman Sarkar902de082012-09-06 13:51:21 +05302452 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002453
2454 /* Workqueue is created in init function which works for all existing
2455 * clients. If this fails in the future, then it will need to be
2456 * created earlier. */
2457 BUG_ON(!rpcrouter_workqueue);
2458
2459 switch (event) {
2460 case RPCROUTER_XPRT_EVENT_OPEN:
2461 D("open event for '%s'\n", xprt->name);
2462 xprt_work = kmalloc(sizeof(struct rpcrouter_xprt_work),
2463 GFP_ATOMIC);
2464 xprt_work->xprt = xprt;
2465 INIT_WORK(&xprt_work->work, xprt_open_worker);
2466 queue_work(rpcrouter_workqueue, &xprt_work->work);
2467 break;
2468
2469 case RPCROUTER_XPRT_EVENT_CLOSE:
2470 D("close event for '%s'\n", xprt->name);
2471
2472 atomic_inc(&pending_close_count);
2473
2474 xprt_work = kmalloc(sizeof(struct rpcrouter_xprt_work),
2475 GFP_ATOMIC);
2476 xprt_work->xprt = xprt;
2477 INIT_WORK(&xprt_work->work, xprt_close_worker);
2478 queue_work(rpcrouter_workqueue, &xprt_work->work);
2479 break;
2480 }
2481
2482 xprt_info = xprt->priv;
2483 if (xprt_info) {
Angshuman Sarkar902de082012-09-06 13:51:21 +05302484 spin_lock_irqsave(&xprt_info->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002485 /* Check read_avail even for OPEN event to handle missed
2486 DATA events while processing the OPEN event*/
2487 if (xprt->read_avail() >= xprt_info->need_len)
2488 wake_lock(&xprt_info->wakelock);
2489 wake_up(&xprt_info->read_wait);
Angshuman Sarkar902de082012-09-06 13:51:21 +05302490 spin_unlock_irqrestore(&xprt_info->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002491 }
2492}
2493
/* Notifier invoked around modem subsystem restart; registered with the
 * subsystem-notifier framework in modem_restart_late_init(). */
static int modem_restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static struct notifier_block nb = {
	.notifier_call = modem_restart_notifier_cb,
};
2500
2501static int modem_restart_notifier_cb(struct notifier_block *this,
2502 unsigned long code,
2503 void *data)
2504{
2505 switch (code) {
2506 case SUBSYS_BEFORE_SHUTDOWN:
2507 D("%s: SUBSYS_BEFORE_SHUTDOWN\n", __func__);
2508 break;
2509
2510 case SUBSYS_BEFORE_POWERUP:
2511 D("%s: waiting for RPC restart to complete\n", __func__);
2512 wait_event(subsystem_restart_wait,
2513 atomic_read(&pending_close_count) == 0);
2514 D("%s: finished restart wait\n", __func__);
2515 break;
2516
2517 default:
2518 break;
2519 }
2520
2521 return NOTIFY_DONE;
2522}
2523
/* Handle returned by subsys_notif_register_notifier(); retained but
 * never used to unregister. */
static void *restart_notifier_handle;
/* Register for modem subsystem-restart notifications.  Runs as a
 * late_initcall so the subsystem-notifier framework is already up. */
static __init int modem_restart_late_init(void)
{
	restart_notifier_handle = subsys_notif_register_notifier("modem", &nb);
	return 0;
}
late_initcall(modem_restart_late_init);
2531
2532static int __init rpcrouter_init(void)
2533{
2534 int ret;
2535
2536 msm_rpc_connect_timeout_ms = 0;
2537 smd_rpcrouter_debug_mask |= SMEM_LOG;
2538 debugfs_init();
Arun Kumar Neelakantam0975c602012-10-16 23:13:09 +05302539 ret = register_reboot_notifier(&msm_rpc_reboot_notifier);
2540 if (ret)
2541 pr_err("%s: Failed to register reboot notifier", __func__);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002542
2543 /* Initialize what we need to start processing */
2544 rpcrouter_workqueue =
2545 create_singlethread_workqueue("rpcrouter");
2546 if (!rpcrouter_workqueue) {
2547 msm_rpcrouter_exit_devices();
2548 return -ENOMEM;
2549 }
2550
2551 init_waitqueue_head(&newserver_wait);
2552 init_waitqueue_head(&subsystem_restart_wait);
2553
2554 ret = msm_rpcrouter_init_devices();
2555 if (ret < 0)
2556 return ret;
2557
2558 return ret;
2559}
2560
2561module_init(rpcrouter_init);
2562MODULE_DESCRIPTION("MSM RPC Router");
2563MODULE_AUTHOR("San Mehat <san@android.com>");
2564MODULE_LICENSE("GPL");