1/* arch/arm/mach-msm/smd_rpcrouter.c
2 *
3 * Copyright (C) 2007 Google, Inc.
4 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
5 * Author: San Mehat <san@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* TODO: handle cases where smd_write() will tempfail due to full fifo */
19/* TODO: thread priority? schedule a work to bump it? */
20/* TODO: maybe make server_list_lock a mutex */
21/* TODO: pool fragments to avoid kmalloc/kfree churn */
22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/string.h>
27#include <linux/errno.h>
28#include <linux/cdev.h>
29#include <linux/init.h>
30#include <linux/device.h>
31#include <linux/types.h>
32#include <linux/delay.h>
33#include <linux/fs.h>
34#include <linux/err.h>
35#include <linux/sched.h>
36#include <linux/poll.h>
37#include <linux/wakelock.h>
38#include <asm/uaccess.h>
39#include <asm/byteorder.h>
40#include <linux/platform_device.h>
41#include <linux/uaccess.h>
42#include <linux/debugfs.h>
43
44#include <asm/byteorder.h>
45
46#include <mach/msm_smd.h>
47#include <mach/smem_log.h>
48#include <mach/subsystem_notif.h>
49
50#include "smd_rpcrouter.h"
51#include "modem_notifier.h"
52#include "smd_rpc_sym.h"
53#include "smd_private.h"
54
55enum {
56 SMEM_LOG = 1U << 0,
57 RTR_DBG = 1U << 1,
58 R2R_MSG = 1U << 2,
59 R2R_RAW = 1U << 3,
60 RPC_MSG = 1U << 4,
61 NTFY_MSG = 1U << 5,
62 RAW_PMR = 1U << 6,
63 RAW_PMW = 1U << 7,
64 R2R_RAW_HDR = 1U << 8,
65};
66static int msm_rpc_connect_timeout_ms;
67module_param_named(connect_timeout, msm_rpc_connect_timeout_ms,
68 int, S_IRUGO | S_IWUSR | S_IWGRP);
69
70static int smd_rpcrouter_debug_mask;
71module_param_named(debug_mask, smd_rpcrouter_debug_mask,
72 int, S_IRUGO | S_IWUSR | S_IWGRP);
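/*
 * Illustrative only: the two module parameters above are writable at
 * runtime via sysfs. Assuming the usual built-in module-param path
 * (module name taken from this file, an assumption), something like
 *
 *	echo 0x2 > /sys/module/smd_rpcrouter/parameters/debug_mask
 *	echo 5000 > /sys/module/smd_rpcrouter/parameters/connect_timeout
 *
 * would enable RTR_DBG logging and set a 5 second connect timeout.
 */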
73
74#define DIAG(x...) printk(KERN_ERR "[RR] ERROR " x)
75
76#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
77#define D(x...) do { \
78if (smd_rpcrouter_debug_mask & RTR_DBG) \
79 printk(KERN_ERR x); \
80} while (0)
81
82#define RR(x...) do { \
83if (smd_rpcrouter_debug_mask & R2R_MSG) \
84 printk(KERN_ERR "[RR] "x); \
85} while (0)
86
87#define RAW(x...) do { \
88if (smd_rpcrouter_debug_mask & R2R_RAW) \
89 printk(KERN_ERR "[RAW] "x); \
90} while (0)
91
92#define RAW_HDR(x...) do { \
93if (smd_rpcrouter_debug_mask & R2R_RAW_HDR) \
94 printk(KERN_ERR "[HDR] "x); \
95} while (0)
96
97#define RAW_PMR(x...) do { \
98if (smd_rpcrouter_debug_mask & RAW_PMR) \
99 printk(KERN_ERR "[PMR] "x); \
100} while (0)
101
102#define RAW_PMR_NOMASK(x...) do { \
103 printk(KERN_ERR "[PMR] "x); \
104} while (0)
105
106#define RAW_PMW(x...) do { \
107if (smd_rpcrouter_debug_mask & RAW_PMW) \
108 printk(KERN_ERR "[PMW] "x); \
109} while (0)
110
111#define RAW_PMW_NOMASK(x...) do { \
112 printk(KERN_ERR "[PMW] "x); \
113} while (0)
114
115#define IO(x...) do { \
116if (smd_rpcrouter_debug_mask & RPC_MSG) \
117 printk(KERN_ERR "[RPC] "x); \
118} while (0)
119
120#define NTFY(x...) do { \
121if (smd_rpcrouter_debug_mask & NTFY_MSG) \
122 printk(KERN_ERR "[NOTIFY] "x); \
123} while (0)
124#else
125#define D(x...) do { } while (0)
126#define RR(x...) do { } while (0)
127#define RAW(x...) do { } while (0)
128#define RAW_HDR(x...) do { } while (0)
129#define RAW_PMR(x...) do { } while (0)
130#define RAW_PMR_NOMASK(x...) do { } while (0)
131#define RAW_PMW(x...) do { } while (0)
132#define RAW_PMW_NOMASK(x...) do { } while (0)
133#define IO(x...) do { } while (0)
134#define NTFY(x...) do { } while (0)
135#endif
136
137
138static LIST_HEAD(local_endpoints);
139static LIST_HEAD(remote_endpoints);
140
141static LIST_HEAD(server_list);
142
143static wait_queue_head_t newserver_wait;
144static wait_queue_head_t subsystem_restart_wait;
145
146static DEFINE_SPINLOCK(local_endpoints_lock);
147static DEFINE_SPINLOCK(remote_endpoints_lock);
148static DEFINE_SPINLOCK(server_list_lock);
149
150static LIST_HEAD(rpc_board_dev_list);
151static DEFINE_SPINLOCK(rpc_board_dev_list_lock);
152
153static struct workqueue_struct *rpcrouter_workqueue;
154
155static atomic_t next_xid = ATOMIC_INIT(1);
156static atomic_t pm_mid = ATOMIC_INIT(1);
157
158static void do_read_data(struct work_struct *work);
159static void do_create_pdevs(struct work_struct *work);
160static void do_create_rpcrouter_pdev(struct work_struct *work);
161
162static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
163static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
164
165#define RR_STATE_IDLE 0
166#define RR_STATE_HEADER 1
167#define RR_STATE_BODY 2
168#define RR_STATE_ERROR 3
169
170/* State for remote ep following restart */
171#define RESTART_QUOTA_ABORT 1
172
173struct rr_context {
174 struct rr_packet *pkt;
175 uint8_t *ptr;
176 uint32_t state; /* current assembly state */
177 uint32_t count; /* bytes needed in this state */
178};
179
180struct rr_context the_rr_context;
181
182struct rpc_board_dev_info {
183 struct list_head list;
184
185 struct rpc_board_dev *dev;
186};
187
188static struct platform_device rpcrouter_pdev = {
189 .name = "oncrpc_router",
190 .id = -1,
191};
192
193struct rpcrouter_xprt_info {
194 struct list_head list;
195
196 struct rpcrouter_xprt *xprt;
197
198 int remote_pid;
199 uint32_t initialized;
200 wait_queue_head_t read_wait;
201 struct wake_lock wakelock;
202 spinlock_t lock;
203 uint32_t need_len;
204 struct work_struct read_data;
205 struct workqueue_struct *workqueue;
206 int abort_data_read;
207 unsigned char r2r_buf[RPCROUTER_MSGSIZE_MAX];
208};
209
210static LIST_HEAD(xprt_info_list);
211static DEFINE_MUTEX(xprt_info_list_lock);
212
213DECLARE_COMPLETION(rpc_remote_router_up);
214static atomic_t pending_close_count = ATOMIC_INIT(0);
215
216/*
217 * Search for transport (xprt) that matches the provided PID.
218 *
219 * Note: The calling function must ensure that the mutex
220 * xprt_info_list_lock is locked when this function
221 * is called.
222 *
223 * @remote_pid Remote PID for the transport
224 *
225 * @returns Pointer to transport or NULL if not found
226 */
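/*
 * Usage sketch (illustrative, mirroring the pattern in
 * msm_rpc_write_pkt() below):
 *
 *	mutex_lock(&xprt_info_list_lock);
 *	xprt_info = rpcrouter_get_xprt_info(hdr->dst_pid);
 *	if (!xprt_info) {
 *		mutex_unlock(&xprt_info_list_lock);
 *		return -ENETRESET;
 *	}
 *	...
 *	mutex_unlock(&xprt_info_list_lock);
 */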
227static struct rpcrouter_xprt_info *rpcrouter_get_xprt_info(uint32_t remote_pid)
228{
229 struct rpcrouter_xprt_info *xprt_info;
230
231 list_for_each_entry(xprt_info, &xprt_info_list, list) {
232 if (xprt_info->remote_pid == remote_pid)
233 return xprt_info;
234 }
235 return NULL;
236}
237
238static int rpcrouter_send_control_msg(struct rpcrouter_xprt_info *xprt_info,
239 union rr_control_msg *msg)
240{
241 struct rr_header hdr;
242 unsigned long flags = 0;
243 int need;
244
245 if (xprt_info->remote_pid == RPCROUTER_PID_LOCAL)
246 return 0;
247
248 if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) &&
249 !xprt_info->initialized) {
250 printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
251 "router not initialized\n");
252 return -EINVAL;
253 }
254
255 hdr.version = RPCROUTER_VERSION;
256 hdr.type = msg->cmd;
257 hdr.src_pid = RPCROUTER_PID_LOCAL;
258 hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
259 hdr.confirm_rx = 0;
260 hdr.size = sizeof(*msg);
261 hdr.dst_pid = xprt_info->remote_pid;
262 hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;
263
264 /* TODO: what if channel is full? */
265
266 need = sizeof(hdr) + hdr.size;
267 spin_lock_irqsave(&xprt_info->lock, flags);
268 while (xprt_info->xprt->write_avail() < need) {
269 spin_unlock_irqrestore(&xprt_info->lock, flags);
270 msleep(250);
271 spin_lock_irqsave(&xprt_info->lock, flags);
272 }
273 xprt_info->xprt->write(&hdr, sizeof(hdr), HEADER);
274 xprt_info->xprt->write(msg, hdr.size, PAYLOAD);
275 spin_unlock_irqrestore(&xprt_info->lock, flags);
276
277 return 0;
278}
279
280static void modem_reset_cleanup(struct rpcrouter_xprt_info *xprt_info)
281{
282 struct msm_rpc_endpoint *ept;
283 struct rr_remote_endpoint *r_ept;
284 struct rr_packet *pkt, *tmp_pkt;
285 struct rr_fragment *frag, *next;
286 struct msm_rpc_reply *reply, *reply_tmp;
287 unsigned long flags;
288
289 spin_lock_irqsave(&local_endpoints_lock, flags);
290 /* remove all partial packets received */
291 list_for_each_entry(ept, &local_endpoints, list) {
292 RR("%s EPT DST PID %x, remote_pid:%d\n", __func__,
293 ept->dst_pid, xprt_info->remote_pid);
294
295 if (xprt_info->remote_pid != ept->dst_pid)
296 continue;
297
298 D("calling teardown cb %p\n", ept->cb_restart_teardown);
299 if (ept->cb_restart_teardown)
300 ept->cb_restart_teardown(ept->client_data);
301 ept->do_setup_notif = 1;
302
303 /* remove replies */
304 spin_lock(&ept->reply_q_lock);
305 list_for_each_entry_safe(reply, reply_tmp,
306 &ept->reply_pend_q, list) {
307 list_del(&reply->list);
308 kfree(reply);
309 }
310 list_for_each_entry_safe(reply, reply_tmp,
311 &ept->reply_avail_q, list) {
312 list_del(&reply->list);
313 kfree(reply);
314 }
315 spin_unlock(&ept->reply_q_lock);
316
317 /* Set restart state for local ep */
318 RR("EPT:0x%p, State %d RESTART_PEND_NTFY_SVR "
319 "PROG:0x%08x VERS:0x%08x\n",
320 ept, ept->restart_state,
321 be32_to_cpu(ept->dst_prog),
322 be32_to_cpu(ept->dst_vers));
323 spin_lock(&ept->restart_lock);
324 ept->restart_state = RESTART_PEND_NTFY_SVR;
325
326 /* remove incomplete packets */
327 spin_lock(&ept->incomplete_lock);
328 list_for_each_entry_safe(pkt, tmp_pkt,
329 &ept->incomplete, list) {
330 list_del(&pkt->list);
331 frag = pkt->first;
332 while (frag != NULL) {
333 next = frag->next;
334 kfree(frag);
335 frag = next;
336 }
337 kfree(pkt);
338 }
339 spin_unlock(&ept->incomplete_lock);
340
341 /* remove all completed packets waiting to be read */
342 spin_lock(&ept->read_q_lock);
343 list_for_each_entry_safe(pkt, tmp_pkt, &ept->read_q,
344 list) {
345 list_del(&pkt->list);
346 frag = pkt->first;
347 while (frag != NULL) {
348 next = frag->next;
349 kfree(frag);
350 frag = next;
351 }
352 kfree(pkt);
353 }
354 spin_unlock(&ept->read_q_lock);
355
356 spin_unlock(&ept->restart_lock);
357 wake_up(&ept->wait_q);
358 }
359
360 spin_unlock_irqrestore(&local_endpoints_lock, flags);
361
362 /* Unblock endpoints waiting for quota ack */
363 spin_lock_irqsave(&remote_endpoints_lock, flags);
364 list_for_each_entry(r_ept, &remote_endpoints, list) {
365 spin_lock(&r_ept->quota_lock);
366 r_ept->quota_restart_state = RESTART_QUOTA_ABORT;
367 RR("Set STATE_PENDING PID:0x%08x CID:0x%08x \n", r_ept->pid,
368 r_ept->cid);
369 spin_unlock(&r_ept->quota_lock);
370 wake_up(&r_ept->quota_wait);
371 }
372 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
373}
374
375static void modem_reset_startup(struct rpcrouter_xprt_info *xprt_info)
376{
377 struct msm_rpc_endpoint *ept;
378 unsigned long flags;
379
380 spin_lock_irqsave(&local_endpoints_lock, flags);
381
382 /* notify all endpoints that we are coming back up */
383 list_for_each_entry(ept, &local_endpoints, list) {
384 RR("%s EPT DST PID %x, remote_pid:%d\n", __func__,
385 ept->dst_pid, xprt_info->remote_pid);
386
387 if (xprt_info->remote_pid != ept->dst_pid)
388 continue;
389
390 D("calling setup cb %d:%p\n", ept->do_setup_notif,
391 ept->cb_restart_setup);
392 if (ept->do_setup_notif && ept->cb_restart_setup)
393 ept->cb_restart_setup(ept->client_data);
394 ept->do_setup_notif = 0;
395 }
396
397 spin_unlock_irqrestore(&local_endpoints_lock, flags);
398}
399
400/*
401 * Blocks and waits on the endpoint if a reset is in progress.
402 *
403 * @returns
404 * ENETRESET Reset is in progress and a notification is needed
405 * ERESTARTSYS Signal occurred
406 * 0 Reset is not in progress
407 */
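/*
 * Typical caller pattern (illustrative), as used by msm_rpc_write_pkt()
 * and __msm_rpc_read() below:
 *
 *	rc = wait_for_restart_and_notify(ept);
 *	if (rc)
 *		return rc;
 *
 * where rc is -ENETRESET or -ERESTARTSYS on failure and 0 otherwise.
 */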
408static int wait_for_restart_and_notify(struct msm_rpc_endpoint *ept)
409{
410 unsigned long flags;
411 int ret = 0;
412 DEFINE_WAIT(__wait);
413
414 for (;;) {
415 prepare_to_wait(&ept->restart_wait, &__wait,
416 TASK_INTERRUPTIBLE);
417
418 spin_lock_irqsave(&ept->restart_lock, flags);
419 if (ept->restart_state == RESTART_NORMAL) {
420 spin_unlock_irqrestore(&ept->restart_lock, flags);
421 break;
422 } else if (ept->restart_state & RESTART_PEND_NTFY) {
423 ept->restart_state &= ~RESTART_PEND_NTFY;
424 spin_unlock_irqrestore(&ept->restart_lock, flags);
425 ret = -ENETRESET;
426 break;
427 }
428 if (signal_pending(current) &&
429 ((!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))) {
430 spin_unlock_irqrestore(&ept->restart_lock, flags);
431 ret = -ERESTARTSYS;
432 break;
433 }
434 spin_unlock_irqrestore(&ept->restart_lock, flags);
435 schedule();
436 }
437 finish_wait(&ept->restart_wait, &__wait);
438 return ret;
439}
440
441static struct rr_server *rpcrouter_create_server(uint32_t pid,
442 uint32_t cid,
443 uint32_t prog,
444 uint32_t ver)
445{
446 struct rr_server *server;
447 unsigned long flags;
448 int rc;
449
450 server = kmalloc(sizeof(struct rr_server), GFP_KERNEL);
451 if (!server)
452 return ERR_PTR(-ENOMEM);
453
454 memset(server, 0, sizeof(struct rr_server));
455 server->pid = pid;
456 server->cid = cid;
457 server->prog = prog;
458 server->vers = ver;
459
460 spin_lock_irqsave(&server_list_lock, flags);
461 list_add_tail(&server->list, &server_list);
462 spin_unlock_irqrestore(&server_list_lock, flags);
463
464 rc = msm_rpcrouter_create_server_cdev(server);
465 if (rc < 0)
466 goto out_fail;
467
468 return server;
469out_fail:
470 spin_lock_irqsave(&server_list_lock, flags);
471 list_del(&server->list);
472 spin_unlock_irqrestore(&server_list_lock, flags);
473 kfree(server);
474 return ERR_PTR(rc);
475}
476
477static void rpcrouter_destroy_server(struct rr_server *server)
478{
479 unsigned long flags;
480
481 spin_lock_irqsave(&server_list_lock, flags);
482 list_del(&server->list);
483 spin_unlock_irqrestore(&server_list_lock, flags);
484 device_destroy(msm_rpcrouter_class, server->device_number);
485 kfree(server);
486}
487
488int msm_rpc_add_board_dev(struct rpc_board_dev *devices, int num)
489{
490 unsigned long flags;
491 struct rpc_board_dev_info *board_info;
492 int i;
493
494 for (i = 0; i < num; i++) {
495 board_info = kzalloc(sizeof(struct rpc_board_dev_info),
496 GFP_KERNEL);
497 if (!board_info)
498 return -ENOMEM;
499
500 board_info->dev = &devices[i];
501 D("%s: adding program %x\n", __func__, board_info->dev->prog);
502 spin_lock_irqsave(&rpc_board_dev_list_lock, flags);
503 list_add_tail(&board_info->list, &rpc_board_dev_list);
504 spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
505 }
506
507 return 0;
508}
509EXPORT_SYMBOL(msm_rpc_add_board_dev);
510
511static void rpcrouter_register_board_dev(struct rr_server *server)
512{
513 struct rpc_board_dev_info *board_info;
514 unsigned long flags;
515 int rc;
516
517 spin_lock_irqsave(&rpc_board_dev_list_lock, flags);
518 list_for_each_entry(board_info, &rpc_board_dev_list, list) {
519 if (server->prog == board_info->dev->prog) {
520 D("%s: registering device %x\n",
521 __func__, board_info->dev->prog);
522 list_del(&board_info->list);
523 rc = platform_device_register(&board_info->dev->pdev);
524 if (rc)
525 pr_err("%s: board dev register failed %d\n",
526 __func__, rc);
527 kfree(board_info);
528 break;
529 }
530 }
531 spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
532}
533
534static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
535{
536 struct rr_server *server;
537 unsigned long flags;
538
539 spin_lock_irqsave(&server_list_lock, flags);
540 list_for_each_entry(server, &server_list, list) {
541 if (server->prog == prog
542 && server->vers == ver) {
543 spin_unlock_irqrestore(&server_list_lock, flags);
544 return server;
545 }
546 }
547 spin_unlock_irqrestore(&server_list_lock, flags);
548 return NULL;
549}
550
551static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
552{
553 struct rr_server *server;
554 unsigned long flags;
555
556 spin_lock_irqsave(&server_list_lock, flags);
557 list_for_each_entry(server, &server_list, list) {
558 if (server->device_number == dev) {
559 spin_unlock_irqrestore(&server_list_lock, flags);
560 return server;
561 }
562 }
563 spin_unlock_irqrestore(&server_list_lock, flags);
564 return NULL;
565}
566
567struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
568{
569 struct msm_rpc_endpoint *ept;
570 unsigned long flags;
571
572 ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
573 if (!ept)
574 return NULL;
575 memset(ept, 0, sizeof(struct msm_rpc_endpoint));
576 ept->cid = (uint32_t) ept;
577 ept->pid = RPCROUTER_PID_LOCAL;
578 ept->dev = dev;
579
580 if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
581 struct rr_server *srv;
582 /*
583 * This is a userspace client which opened
584 * a program/ver device node. Bind the client
585 * to that destination
586 */
587 srv = rpcrouter_lookup_server_by_dev(dev);
588 /* TODO: bug? really? */
589 BUG_ON(!srv);
590
591 ept->dst_pid = srv->pid;
592 ept->dst_cid = srv->cid;
593 ept->dst_prog = cpu_to_be32(srv->prog);
594 ept->dst_vers = cpu_to_be32(srv->vers);
595 } else {
596 /* mark not connected */
597 ept->dst_pid = 0xffffffff;
598 }
599
600 init_waitqueue_head(&ept->wait_q);
601 INIT_LIST_HEAD(&ept->read_q);
602 spin_lock_init(&ept->read_q_lock);
603 INIT_LIST_HEAD(&ept->reply_avail_q);
604 INIT_LIST_HEAD(&ept->reply_pend_q);
605 spin_lock_init(&ept->reply_q_lock);
606 spin_lock_init(&ept->restart_lock);
607 init_waitqueue_head(&ept->restart_wait);
608 ept->restart_state = RESTART_NORMAL;
609 wake_lock_init(&ept->read_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_read");
610 wake_lock_init(&ept->reply_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_reply");
611 INIT_LIST_HEAD(&ept->incomplete);
612 spin_lock_init(&ept->incomplete_lock);
613
614 spin_lock_irqsave(&local_endpoints_lock, flags);
615 list_add_tail(&ept->list, &local_endpoints);
616 spin_unlock_irqrestore(&local_endpoints_lock, flags);
617 return ept;
618}
619
620int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
621{
622 int rc;
623 union rr_control_msg msg;
624 struct msm_rpc_reply *reply, *reply_tmp;
625 unsigned long flags;
626 struct rpcrouter_xprt_info *xprt_info;
627
628 /* An endpoint with dst_pid == 0xffffffff corresponds to the router
629 ** port itself, so don't send a REMOVE_CLIENT message while
630 ** destroying it. */
631 spin_lock_irqsave(&local_endpoints_lock, flags);
632 list_del(&ept->list);
633 spin_unlock_irqrestore(&local_endpoints_lock, flags);
634 if (ept->dst_pid != 0xffffffff) {
635 msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
636 msg.cli.pid = ept->pid;
637 msg.cli.cid = ept->cid;
638
639 RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
640 mutex_lock(&xprt_info_list_lock);
641 list_for_each_entry(xprt_info, &xprt_info_list, list) {
642 rc = rpcrouter_send_control_msg(xprt_info, &msg);
643 if (rc < 0) {
644 mutex_unlock(&xprt_info_list_lock);
645 return rc;
646 }
647 }
648 mutex_unlock(&xprt_info_list_lock);
649 }
650
651 /* Free replies */
652 spin_lock_irqsave(&ept->reply_q_lock, flags);
653 list_for_each_entry_safe(reply, reply_tmp, &ept->reply_pend_q, list) {
654 list_del(&reply->list);
655 kfree(reply);
656 }
657 list_for_each_entry_safe(reply, reply_tmp, &ept->reply_avail_q, list) {
658 list_del(&reply->list);
659 kfree(reply);
660 }
661 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
662
663 wake_lock_destroy(&ept->read_q_wake_lock);
664 wake_lock_destroy(&ept->reply_q_wake_lock);
665 kfree(ept);
666 return 0;
667}
668
669static int rpcrouter_create_remote_endpoint(uint32_t pid, uint32_t cid)
670{
671 struct rr_remote_endpoint *new_c;
672 unsigned long flags;
673
674 new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
675 if (!new_c)
676 return -ENOMEM;
677 memset(new_c, 0, sizeof(struct rr_remote_endpoint));
678
679 new_c->cid = cid;
680 new_c->pid = pid;
681 init_waitqueue_head(&new_c->quota_wait);
682 spin_lock_init(&new_c->quota_lock);
683
684 spin_lock_irqsave(&remote_endpoints_lock, flags);
685 list_add_tail(&new_c->list, &remote_endpoints);
686 new_c->quota_restart_state = RESTART_NORMAL;
687 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
688 return 0;
689}
690
691static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
692{
693 struct msm_rpc_endpoint *ept;
694
695 list_for_each_entry(ept, &local_endpoints, list) {
696 if (ept->cid == cid)
697 return ept;
698 }
699 return NULL;
700}
701
702static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t pid,
703 uint32_t cid)
704{
705 struct rr_remote_endpoint *ept;
706 unsigned long flags;
707
708 spin_lock_irqsave(&remote_endpoints_lock, flags);
709 list_for_each_entry(ept, &remote_endpoints, list) {
710 if ((ept->pid == pid) && (ept->cid == cid)) {
711 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
712 return ept;
713 }
714 }
715 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
716 return NULL;
717}
718
719static void handle_server_restart(struct rr_server *server,
720 uint32_t pid, uint32_t cid,
721 uint32_t prog, uint32_t vers)
722{
723 struct rr_remote_endpoint *r_ept;
724 struct msm_rpc_endpoint *ept;
725 unsigned long flags;
726 r_ept = rpcrouter_lookup_remote_endpoint(pid, cid);
727 if (r_ept && (r_ept->quota_restart_state !=
728 RESTART_NORMAL)) {
729 spin_lock_irqsave(&r_ept->quota_lock, flags);
730 r_ept->tx_quota_cntr = 0;
731 r_ept->quota_restart_state =
732 RESTART_NORMAL;
733 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
734 D(KERN_INFO "rpcrouter: Remote EPT Reset %0x\n",
735 (unsigned int)r_ept);
736 wake_up(&r_ept->quota_wait);
737 }
738 spin_lock_irqsave(&local_endpoints_lock, flags);
739 list_for_each_entry(ept, &local_endpoints, list) {
740 if ((be32_to_cpu(ept->dst_prog) == prog) &&
741 (be32_to_cpu(ept->dst_vers) == vers) &&
742 (ept->restart_state & RESTART_PEND_SVR)) {
743 spin_lock(&ept->restart_lock);
744 ept->restart_state &= ~RESTART_PEND_SVR;
745 spin_unlock(&ept->restart_lock);
746 D("rpcrouter: Local EPT Reset %08x:%08x \n",
747 prog, vers);
748 wake_up(&ept->restart_wait);
749 wake_up(&ept->wait_q);
750 }
751 }
752 spin_unlock_irqrestore(&local_endpoints_lock, flags);
753}
754
755static int process_control_msg(struct rpcrouter_xprt_info *xprt_info,
756 union rr_control_msg *msg, int len)
757{
758 union rr_control_msg ctl;
759 struct rr_server *server;
760 struct rr_remote_endpoint *r_ept;
761 int rc = 0;
762 unsigned long flags;
763 static int first = 1;
764
765 if (len != sizeof(*msg)) {
766 RR(KERN_ERR "rpcrouter: r2r msg size %d != %d\n",
767 len, sizeof(*msg));
768 return -EINVAL;
769 }
770
771 switch (msg->cmd) {
772 case RPCROUTER_CTRL_CMD_HELLO:
773 RR("o HELLO PID %d\n", xprt_info->remote_pid);
774 memset(&ctl, 0, sizeof(ctl));
775 ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
776 rpcrouter_send_control_msg(xprt_info, &ctl);
777
778 xprt_info->initialized = 1;
779
780 /* Send list of servers one at a time */
781 ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
782
783 /* TODO: long time to hold a spinlock... */
784 spin_lock_irqsave(&server_list_lock, flags);
785 list_for_each_entry(server, &server_list, list) {
786 if (server->pid != RPCROUTER_PID_LOCAL)
787 continue;
788 ctl.srv.pid = server->pid;
789 ctl.srv.cid = server->cid;
790 ctl.srv.prog = server->prog;
791 ctl.srv.vers = server->vers;
792
793 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
794 server->pid, server->cid,
795 server->prog, server->vers);
796
797 rpcrouter_send_control_msg(xprt_info, &ctl);
798 }
799 spin_unlock_irqrestore(&server_list_lock, flags);
800
801 if (first) {
802 first = 0;
803 queue_work(rpcrouter_workqueue,
804 &work_create_rpcrouter_pdev);
805 }
806 break;
807
808 case RPCROUTER_CTRL_CMD_RESUME_TX:
809 RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
810
811 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.pid,
812 msg->cli.cid);
813 if (!r_ept) {
814 printk(KERN_ERR
815 "rpcrouter: Unable to resume client\n");
816 break;
817 }
818 spin_lock_irqsave(&r_ept->quota_lock, flags);
819 r_ept->tx_quota_cntr = 0;
820 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
821 wake_up(&r_ept->quota_wait);
822 break;
823
824 case RPCROUTER_CTRL_CMD_NEW_SERVER:
825 if (msg->srv.vers == 0) {
826 pr_err(
827 "rpcrouter: Server create rejected, version = 0, "
828 "program = %08x\n", msg->srv.prog);
829 break;
830 }
831
832 RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
833 msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);
834
835 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
836
837 if (!server) {
838 server = rpcrouter_create_server(
839 msg->srv.pid, msg->srv.cid,
840 msg->srv.prog, msg->srv.vers);
841 if (IS_ERR(server))
842 return PTR_ERR(server);
843 /*
844 * XXX: Verify that it's okay to add the
845 * client to our remote client list
846 * if we get a NEW_SERVER notification
847 */
848 if (!rpcrouter_lookup_remote_endpoint(msg->srv.pid,
849 msg->srv.cid)) {
850 rc = rpcrouter_create_remote_endpoint(
851 msg->srv.pid, msg->srv.cid);
852 if (rc < 0)
853 printk(KERN_ERR
854 "rpcrouter: Client create "
855 "error (%d)\n", rc);
856 }
857 rpcrouter_register_board_dev(server);
858 schedule_work(&work_create_pdevs);
859 wake_up(&newserver_wait);
860 } else {
861 if ((server->pid == msg->srv.pid) &&
862 (server->cid == msg->srv.cid)) {
863 handle_server_restart(server,
864 msg->srv.pid,
865 msg->srv.cid,
866 msg->srv.prog,
867 msg->srv.vers);
868 } else {
869 server->pid = msg->srv.pid;
870 server->cid = msg->srv.cid;
871 }
872 }
873 break;
874
875 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
876 RR("o REMOVE_SERVER prog=%08x:%d\n",
877 msg->srv.prog, msg->srv.vers);
878 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
879 if (server)
880 rpcrouter_destroy_server(server);
881 break;
882
883 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
884 RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
885 if (msg->cli.pid == RPCROUTER_PID_LOCAL) {
886 printk(KERN_ERR
887 "rpcrouter: Denying remote removal of "
888 "local client\n");
889 break;
890 }
891 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.pid,
892 msg->cli.cid);
893 if (r_ept) {
894 spin_lock_irqsave(&remote_endpoints_lock, flags);
895 list_del(&r_ept->list);
896 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
897 kfree(r_ept);
898 }
899
900 /* Notify local clients of this event */
901 printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n");
902 rc = -ENOSYS;
903
904 break;
905 case RPCROUTER_CTRL_CMD_PING:
906 /* No action needed for ping messages received */
907 RR("o PING\n");
908 break;
909 default:
910 RR("o UNKNOWN(%08x)\n", msg->cmd);
911 rc = -ENOSYS;
912 }
913
914 return rc;
915}
916
917static void do_create_rpcrouter_pdev(struct work_struct *work)
918{
919 D("%s: modem rpc router up\n", __func__);
920 platform_device_register(&rpcrouter_pdev);
921 complete_all(&rpc_remote_router_up);
922}
923
924static void do_create_pdevs(struct work_struct *work)
925{
926 unsigned long flags;
927 struct rr_server *server;
928
929 /* TODO: race if destroyed while being registered */
930 spin_lock_irqsave(&server_list_lock, flags);
931 list_for_each_entry(server, &server_list, list) {
932 if (server->pid != RPCROUTER_PID_LOCAL) {
933 if (server->pdev_name[0] == 0) {
934 sprintf(server->pdev_name, "rs%.8x",
935 server->prog);
936 spin_unlock_irqrestore(&server_list_lock,
937 flags);
938 msm_rpcrouter_create_server_pdev(server);
939 schedule_work(&work_create_pdevs);
940 return;
941 }
942 }
943 }
944 spin_unlock_irqrestore(&server_list_lock, flags);
945}
946
947static void *rr_malloc(unsigned sz)
948{
949 void *ptr = kmalloc(sz, GFP_KERNEL);
950 if (ptr)
951 return ptr;
952
953 printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz);
954 do {
955 ptr = kmalloc(sz, GFP_KERNEL);
956 } while (!ptr);
957
958 return ptr;
959}
960
961static int rr_read(struct rpcrouter_xprt_info *xprt_info,
962 void *data, uint32_t len)
963{
964 int rc;
965 unsigned long flags;
966
967 while (!xprt_info->abort_data_read) {
968 spin_lock_irqsave(&xprt_info->lock, flags);
969 if (xprt_info->xprt->read_avail() >= len) {
970 rc = xprt_info->xprt->read(data, len);
971 spin_unlock_irqrestore(&xprt_info->lock, flags);
972 if (rc == len && !xprt_info->abort_data_read)
973 return 0;
974 else
975 return -EIO;
976 }
977 xprt_info->need_len = len;
978 wake_unlock(&xprt_info->wakelock);
979 spin_unlock_irqrestore(&xprt_info->lock, flags);
980
981 wait_event(xprt_info->read_wait,
982 xprt_info->xprt->read_avail() >= len
983 || xprt_info->abort_data_read);
984 }
985 return -EIO;
986}
987
988#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
989static char *type_to_str(int i)
990{
991 switch (i) {
992 case RPCROUTER_CTRL_CMD_DATA:
993 return "data ";
994 case RPCROUTER_CTRL_CMD_HELLO:
995 return "hello ";
996 case RPCROUTER_CTRL_CMD_BYE:
997 return "bye ";
998 case RPCROUTER_CTRL_CMD_NEW_SERVER:
999 return "new_srvr";
1000 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
1001 return "rmv_srvr";
1002 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
1003 return "rmv_clnt";
1004 case RPCROUTER_CTRL_CMD_RESUME_TX:
1005 return "resum_tx";
1006 case RPCROUTER_CTRL_CMD_EXIT:
1007 return "cmd_exit";
1008 default:
1009 return "invalid";
1010 }
1011}
1012#endif
1013
1014static void do_read_data(struct work_struct *work)
1015{
1016 struct rr_header hdr;
1017 struct rr_packet *pkt;
1018 struct rr_fragment *frag;
1019 struct msm_rpc_endpoint *ept;
1020#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1021 struct rpc_request_hdr *rq;
1022#endif
1023 uint32_t pm, mid;
1024 unsigned long flags;
1025
1026 struct rpcrouter_xprt_info *xprt_info =
1027 container_of(work,
1028 struct rpcrouter_xprt_info,
1029 read_data);
1030
1031 if (rr_read(xprt_info, &hdr, sizeof(hdr)))
1032 goto fail_io;
1033
1034 RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
1035 hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
1036 hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
1037 RAW_HDR("[r rr_h] "
1038 "ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
1039 "confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
1040 hdr.version, type_to_str(hdr.type), hdr.src_pid, hdr.src_cid,
1041 hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
1042
1043 if (hdr.version != RPCROUTER_VERSION) {
1044 DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
1045 goto fail_data;
1046 }
1047 if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
1048 DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
1049 goto fail_data;
1050 }
1051
1052 if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
1053 if (xprt_info->remote_pid == -1) {
1054 xprt_info->remote_pid = hdr.src_pid;
1055
1056 /* do restart notification */
1057 modem_reset_startup(xprt_info);
1058 }
1059
1060 if (rr_read(xprt_info, xprt_info->r2r_buf, hdr.size))
1061 goto fail_io;
1062 process_control_msg(xprt_info,
1063 (void *) xprt_info->r2r_buf, hdr.size);
1064 goto done;
1065 }
1066
1067 if (hdr.size < sizeof(pm)) {
1068 DIAG("runt packet (no pacmark)\n");
1069 goto fail_data;
1070 }
1071 if (rr_read(xprt_info, &pm, sizeof(pm)))
1072 goto fail_io;
1073
1074 hdr.size -= sizeof(pm);
1075
1076 frag = rr_malloc(sizeof(*frag));
1077 frag->next = NULL;
1078 frag->length = hdr.size;
1079 if (rr_read(xprt_info, frag->data, hdr.size)) {
1080 kfree(frag);
1081 goto fail_io;
1082 }
1083
1084#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1085 if ((smd_rpcrouter_debug_mask & RAW_PMR) &&
1086 ((pm >> 30 & 0x1) || (pm >> 31 & 0x1))) {
1087 uint32_t xid = 0;
1088 if (pm >> 30 & 0x1) {
1089 rq = (struct rpc_request_hdr *) frag->data;
1090 xid = ntohl(rq->xid);
1091 }
1092 if ((pm >> 31 & 0x1) || (pm >> 30 & 0x1))
1093 RAW_PMR_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
1094 "len=%3i,dst_cid=%08x\n",
1095 xid,
1096 pm >> 30 & 0x1,
1097 pm >> 31 & 0x1,
1098 pm >> 16 & 0xFF,
1099 pm & 0xFFFF, hdr.dst_cid);
1100 }
1101
1102 if (smd_rpcrouter_debug_mask & SMEM_LOG) {
1103 rq = (struct rpc_request_hdr *) frag->data;
1104 if (rq->xid == 0)
1105 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1106 RPC_ROUTER_LOG_EVENT_MID_READ,
1107 PACMARK_MID(pm),
1108 hdr.dst_cid,
1109 hdr.src_cid);
1110 else
1111 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1112 RPC_ROUTER_LOG_EVENT_MSG_READ,
1113 ntohl(rq->xid),
1114 hdr.dst_cid,
1115 hdr.src_cid);
1116 }
1117#endif
1118
1119 spin_lock_irqsave(&local_endpoints_lock, flags);
1120 ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
1121 if (!ept) {
1122 spin_unlock_irqrestore(&local_endpoints_lock, flags);
1123 DIAG("no local ept for cid %08x\n", hdr.dst_cid);
1124 kfree(frag);
1125 goto done;
1126 }
1127
1128 /* See if there is already a partial packet that matches our mid
1129 * and if so, append this fragment to that packet.
1130 */
1131 mid = PACMARK_MID(pm);
1132 spin_lock(&ept->incomplete_lock);
1133 list_for_each_entry(pkt, &ept->incomplete, list) {
1134 if (pkt->mid == mid) {
1135 pkt->last->next = frag;
1136 pkt->last = frag;
1137 pkt->length += frag->length;
1138 if (PACMARK_LAST(pm)) {
1139 list_del(&pkt->list);
1140 spin_unlock(&ept->incomplete_lock);
1141 goto packet_complete;
1142 }
1143 spin_unlock(&ept->incomplete_lock);
1144 spin_unlock_irqrestore(&local_endpoints_lock, flags);
1145 goto done;
1146 }
1147 }
1148 /* This mid is new -- create a packet for it, and put it on
1149 * the incomplete list if this fragment is not a last fragment,
1150 * otherwise put it on the read queue.
1151 */
1152 pkt = rr_malloc(sizeof(struct rr_packet));
1153 pkt->first = frag;
1154 pkt->last = frag;
1155 memcpy(&pkt->hdr, &hdr, sizeof(hdr));
1156 pkt->mid = mid;
1157 pkt->length = frag->length;
1158 if (!PACMARK_LAST(pm)) {
1159 list_add_tail(&pkt->list, &ept->incomplete);
1160 spin_unlock(&ept->incomplete_lock);
1161 spin_unlock_irqrestore(&local_endpoints_lock, flags);
1162 goto done;
1163 }
1164 spin_unlock(&ept->incomplete_lock);
1165
1166packet_complete:
1167 spin_lock(&ept->read_q_lock);
1168 D("%s: take read lock on ept %p\n", __func__, ept);
1169 wake_lock(&ept->read_q_wake_lock);
1170 list_add_tail(&pkt->list, &ept->read_q);
1171 wake_up(&ept->wait_q);
1172 spin_unlock(&ept->read_q_lock);
1173 spin_unlock_irqrestore(&local_endpoints_lock, flags);
1174done:
1175
1176 if (hdr.confirm_rx) {
1177 union rr_control_msg msg;
1178
1179 msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
1180 msg.cli.pid = hdr.dst_pid;
1181 msg.cli.cid = hdr.dst_cid;
1182
1183 RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
1184 rpcrouter_send_control_msg(xprt_info, &msg);
1185
1186#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1187 if (smd_rpcrouter_debug_mask & SMEM_LOG)
1188 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1189 RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT,
1190 RPCROUTER_PID_LOCAL,
1191 hdr.dst_cid,
1192 hdr.src_cid);
1193#endif
1194
1195 }
1196
1197 /* don't requeue if we should be shutting down */
1198 if (!xprt_info->abort_data_read) {
1199 queue_work(xprt_info->workqueue, &xprt_info->read_data);
1200 return;
1201 }
1202
1203 D("rpc_router terminating for '%s'\n",
1204 xprt_info->xprt->name);
1205
1206fail_io:
1207fail_data:
1208 D(KERN_ERR "rpc_router has died for '%s'\n",
1209 xprt_info->xprt->name);
1210}
1211
1212void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
1213 uint32_t vers, uint32_t proc)
1214{
1215 memset(hdr, 0, sizeof(struct rpc_request_hdr));
1216 hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
1217 hdr->rpc_vers = cpu_to_be32(2);
1218 hdr->prog = cpu_to_be32(prog);
1219 hdr->vers = cpu_to_be32(vers);
1220 hdr->procedure = cpu_to_be32(proc);
1221}
1222EXPORT_SYMBOL(msm_rpc_setup_req);
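/*
 * Sketch of the manual request path (my_req, MY_PROG, MY_VERS and
 * MY_PROC are hypothetical names, for illustration only; MY_PROG and
 * MY_VERS must match the values the endpoint was connected with):
 *
 *	struct my_req {
 *		struct rpc_request_hdr hdr;
 *		uint32_t arg;		(big-endian on the wire)
 *	} req;
 *
 *	msm_rpc_setup_req(&req.hdr, MY_PROG, MY_VERS, MY_PROC);
 *	req.arg = cpu_to_be32(1);
 *	rc = msm_rpc_write(ept, &req, sizeof(req));
 */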
1223
1224struct msm_rpc_endpoint *msm_rpc_open(void)
1225{
1226 struct msm_rpc_endpoint *ept;
1227
1228 ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
1229 if (ept == NULL)
1230 return ERR_PTR(-ENOMEM);
1231
1232 return ept;
1233}
1234
1235void msm_rpc_read_wakeup(struct msm_rpc_endpoint *ept)
1236{
1237 ept->forced_wakeup = 1;
1238 wake_up(&ept->wait_q);
1239}
1240
1241int msm_rpc_close(struct msm_rpc_endpoint *ept)
1242{
1243 if (!ept)
1244 return -EINVAL;
1245 return msm_rpcrouter_destroy_local_endpoint(ept);
1246}
1247EXPORT_SYMBOL(msm_rpc_close);
1248
1249static int msm_rpc_write_pkt(
1250 struct rr_header *hdr,
1251 struct msm_rpc_endpoint *ept,
1252 struct rr_remote_endpoint *r_ept,
1253 void *buffer,
1254 int count,
1255 int first,
1256 int last,
1257 uint32_t mid
1258 )
1259{
1260#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1261 struct rpc_request_hdr *rq = buffer;
1262 uint32_t event_id;
1263#endif
1264 uint32_t pacmark;
1265 unsigned long flags = 0;
1266 int rc;
1267 struct rpcrouter_xprt_info *xprt_info;
1268 int needed;
1269
1270 DEFINE_WAIT(__wait);
1271
1272 /* Create routing header */
1273 hdr->type = RPCROUTER_CTRL_CMD_DATA;
1274 hdr->version = RPCROUTER_VERSION;
1275 hdr->src_pid = ept->pid;
1276 hdr->src_cid = ept->cid;
1277 hdr->confirm_rx = 0;
1278 hdr->size = count + sizeof(uint32_t);
1279
1280 rc = wait_for_restart_and_notify(ept);
1281 if (rc)
1282 return rc;
1283
1284 if (r_ept) {
1285 for (;;) {
1286 prepare_to_wait(&r_ept->quota_wait, &__wait,
1287 TASK_INTERRUPTIBLE);
1288 spin_lock_irqsave(&r_ept->quota_lock, flags);
1289 if ((r_ept->tx_quota_cntr <
1290 RPCROUTER_DEFAULT_RX_QUOTA) ||
1291 (r_ept->quota_restart_state != RESTART_NORMAL))
1292 break;
1293 if (signal_pending(current) &&
1294 (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
1295 break;
1296 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
1297 schedule();
1298 }
1299 finish_wait(&r_ept->quota_wait, &__wait);
1300
1301 if (r_ept->quota_restart_state != RESTART_NORMAL) {
1302 spin_lock(&ept->restart_lock);
1303 ept->restart_state &= ~RESTART_PEND_NTFY;
1304 spin_unlock(&ept->restart_lock);
1305 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
1306 return -ENETRESET;
1307 }
1308
1309 if (signal_pending(current) &&
1310 (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
1311 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
1312 return -ERESTARTSYS;
1313 }
1314 r_ept->tx_quota_cntr++;
1315 if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA) {
1316 hdr->confirm_rx = 1;
1317
1318#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1319 if (smd_rpcrouter_debug_mask & SMEM_LOG) {
1320 event_id = (rq->xid == 0) ?
1321 RPC_ROUTER_LOG_EVENT_MID_CFM_REQ :
1322 RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ;
1323
1324 smem_log_event(SMEM_LOG_PROC_ID_APPS | event_id,
1325 hdr->dst_pid,
1326 hdr->dst_cid,
1327 hdr->src_cid);
1328 }
1329#endif
1330
1331 }
1332 }
1333 pacmark = PACMARK(count, mid, first, last);
1334
1335 if (r_ept)
1336 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
1337
1338 mutex_lock(&xprt_info_list_lock);
1339 xprt_info = rpcrouter_get_xprt_info(hdr->dst_pid);
1340 if (!xprt_info) {
1341 mutex_unlock(&xprt_info_list_lock);
1342 return -ENETRESET;
1343 }
1344 spin_lock_irqsave(&xprt_info->lock, flags);
1345 mutex_unlock(&xprt_info_list_lock);
1346 spin_lock(&ept->restart_lock);
1347 if (ept->restart_state != RESTART_NORMAL) {
1348 ept->restart_state &= ~RESTART_PEND_NTFY;
1349 spin_unlock(&ept->restart_lock);
1350 spin_unlock_irqrestore(&xprt_info->lock, flags);
1351 return -ENETRESET;
1352 }
1353
1354 needed = sizeof(*hdr) + hdr->size;
1355 while ((ept->restart_state == RESTART_NORMAL) &&
1356 (xprt_info->xprt->write_avail() < needed)) {
1357 spin_unlock(&ept->restart_lock);
1358 spin_unlock_irqrestore(&xprt_info->lock, flags);
1359 msleep(250);
1360
1361 /* refresh xprt pointer to ensure that it hasn't
1362 * been deleted since our last retrieval */
1363 mutex_lock(&xprt_info_list_lock);
1364 xprt_info = rpcrouter_get_xprt_info(hdr->dst_pid);
1365 if (!xprt_info) {
1366 mutex_unlock(&xprt_info_list_lock);
1367 return -ENETRESET;
1368 }
1369 spin_lock_irqsave(&xprt_info->lock, flags);
1370 mutex_unlock(&xprt_info_list_lock);
1371 spin_lock(&ept->restart_lock);
1372 }
1373 if (ept->restart_state != RESTART_NORMAL) {
1374 ept->restart_state &= ~RESTART_PEND_NTFY;
1375 spin_unlock(&ept->restart_lock);
1376 spin_unlock_irqrestore(&xprt_info->lock, flags);
1377 return -ENETRESET;
1378 }
1379
1380 /* TODO: deal with full fifo */
1381 xprt_info->xprt->write(hdr, sizeof(*hdr), HEADER);
1382 RAW_HDR("[w rr_h] "
1383 "ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
1384 "confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
1385 hdr->version, type_to_str(hdr->type),
1386 hdr->src_pid, hdr->src_cid,
1387 hdr->confirm_rx, hdr->size, hdr->dst_pid, hdr->dst_cid);
1388 xprt_info->xprt->write(&pacmark, sizeof(pacmark), PACKMARK);
1389
1390#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1391 if ((smd_rpcrouter_debug_mask & RAW_PMW) &&
1392 ((pacmark >> 30 & 0x1) || (pacmark >> 31 & 0x1))) {
1393 uint32_t xid = 0;
1394 if (pacmark >> 30 & 0x1)
1395 xid = ntohl(rq->xid);
1396 if ((pacmark >> 31 & 0x1) || (pacmark >> 30 & 0x1))
1397 RAW_PMW_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
1398 "len=%3i,src_cid=%x\n",
1399 xid,
1400 pacmark >> 30 & 0x1,
1401 pacmark >> 31 & 0x1,
1402 pacmark >> 16 & 0xFF,
1403 pacmark & 0xFFFF, hdr->src_cid);
1404 }
1405#endif
1406
1407 xprt_info->xprt->write(buffer, count, PAYLOAD);
1408 spin_unlock(&ept->restart_lock);
1409 spin_unlock_irqrestore(&xprt_info->lock, flags);
1410
1411#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1412 if (smd_rpcrouter_debug_mask & SMEM_LOG) {
1413 if (rq->xid == 0)
1414 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1415 RPC_ROUTER_LOG_EVENT_MID_WRITTEN,
1416 PACMARK_MID(pacmark),
1417 hdr->dst_cid,
1418 hdr->src_cid);
1419 else
1420 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1421 RPC_ROUTER_LOG_EVENT_MSG_WRITTEN,
1422 ntohl(rq->xid),
1423 hdr->dst_cid,
1424 hdr->src_cid);
1425 }
1426#endif
1427
1428 return needed;
1429}
1430
1431static struct msm_rpc_reply *get_pend_reply(struct msm_rpc_endpoint *ept,
1432 uint32_t xid)
1433{
1434 unsigned long flags;
1435 struct msm_rpc_reply *reply;
1436 spin_lock_irqsave(&ept->reply_q_lock, flags);
1437 list_for_each_entry(reply, &ept->reply_pend_q, list) {
1438 if (reply->xid == xid) {
1439 list_del(&reply->list);
1440 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1441 return reply;
1442 }
1443 }
1444 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1445 return NULL;
1446}
1447
1448void get_requesting_client(struct msm_rpc_endpoint *ept, uint32_t xid,
1449 struct msm_rpc_client_info *clnt_info)
1450{
1451 unsigned long flags;
1452 struct msm_rpc_reply *reply;
1453
1454 if (!clnt_info)
1455 return;
1456
1457 spin_lock_irqsave(&ept->reply_q_lock, flags);
1458 list_for_each_entry(reply, &ept->reply_pend_q, list) {
1459 if (reply->xid == xid) {
1460 clnt_info->pid = reply->pid;
1461 clnt_info->cid = reply->cid;
1462 clnt_info->prog = reply->prog;
1463 clnt_info->vers = reply->vers;
1464 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1465 return;
1466 }
1467 }
1468 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1469 return;
1470}
1471
1472static void set_avail_reply(struct msm_rpc_endpoint *ept,
1473 struct msm_rpc_reply *reply)
1474{
1475 unsigned long flags;
1476 spin_lock_irqsave(&ept->reply_q_lock, flags);
1477 list_add_tail(&reply->list, &ept->reply_avail_q);
1478 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1479}
1480
1481static struct msm_rpc_reply *get_avail_reply(struct msm_rpc_endpoint *ept)
1482{
1483 struct msm_rpc_reply *reply;
1484 unsigned long flags;
1485 if (list_empty(&ept->reply_avail_q)) {
1486 if (ept->reply_cnt >= RPCROUTER_PEND_REPLIES_MAX) {
1487 printk(KERN_ERR
1488 "exceeding max replies of %d \n",
1489 RPCROUTER_PEND_REPLIES_MAX);
1490 return 0;
1491 }
1492 reply = kmalloc(sizeof(struct msm_rpc_reply), GFP_KERNEL);
1493 if (!reply)
1494 return 0;
1495 D("Adding reply 0x%08x \n", (unsigned int)reply);
1496 memset(reply, 0, sizeof(struct msm_rpc_reply));
1497 spin_lock_irqsave(&ept->reply_q_lock, flags);
1498 ept->reply_cnt++;
1499 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1500 } else {
1501 spin_lock_irqsave(&ept->reply_q_lock, flags);
1502 reply = list_first_entry(&ept->reply_avail_q,
1503 struct msm_rpc_reply,
1504 list);
1505 list_del(&reply->list);
1506 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1507 }
1508 return reply;
1509}
1510
1511static void set_pend_reply(struct msm_rpc_endpoint *ept,
1512 struct msm_rpc_reply *reply)
1513{
1514 unsigned long flags;
1515 spin_lock_irqsave(&ept->reply_q_lock, flags);
1516 D("%s: take reply lock on ept %p\n", __func__, ept);
1517 wake_lock(&ept->reply_q_wake_lock);
1518 list_add_tail(&reply->list, &ept->reply_pend_q);
1519 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1520}
1521
1522int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
1523{
1524 struct rr_header hdr;
1525 struct rpc_request_hdr *rq = buffer;
1526 struct rr_remote_endpoint *r_ept;
1527 struct msm_rpc_reply *reply = NULL;
1528 int max_tx;
1529 int tx_cnt;
1530 char *tx_buf;
1531 int rc;
1532 int first_pkt = 1;
1533 uint32_t mid;
1534 unsigned long flags;
1535
1536 /* snoop the RPC packet and enforce permissions */
1537
1538 /* has to have at least the xid and type fields */
1539 if (count < (sizeof(uint32_t) * 2)) {
1540 printk(KERN_ERR "rr_write: rejecting runt packet\n");
1541 return -EINVAL;
1542 }
1543
1544 if (rq->type == 0) {
1545 /* RPC CALL */
1546 if (count < (sizeof(uint32_t) * 6)) {
1547 printk(KERN_ERR
1548 "rr_write: rejecting runt call packet\n");
1549 return -EINVAL;
1550 }
1551 if (ept->dst_pid == 0xffffffff) {
1552 printk(KERN_ERR "rr_write: not connected\n");
1553 return -ENOTCONN;
1554 }
1555 if ((ept->dst_prog != rq->prog) ||
1556 ((be32_to_cpu(ept->dst_vers) & 0x0fff0000) !=
1557 (be32_to_cpu(rq->vers) & 0x0fff0000))) {
1558 printk(KERN_ERR
1559 "rr_write: cannot write to %08x:%08x "
1560 "(bound to %08x:%08x)\n",
1561 be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
1562 be32_to_cpu(ept->dst_prog),
1563 be32_to_cpu(ept->dst_vers));
1564 return -EINVAL;
1565 }
1566 hdr.dst_pid = ept->dst_pid;
1567 hdr.dst_cid = ept->dst_cid;
1568 IO("CALL to %08x:%d @ %d:%08x (%d bytes)\n",
1569 be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
1570 ept->dst_pid, ept->dst_cid, count);
1571 } else {
1572 /* RPC REPLY */
1573 reply = get_pend_reply(ept, rq->xid);
1574 if (!reply) {
1575 printk(KERN_ERR
1576 "rr_write: rejecting, reply not found \n");
1577 return -EINVAL;
1578 }
1579 hdr.dst_pid = reply->pid;
1580 hdr.dst_cid = reply->cid;
1581 IO("REPLY to xid=%d @ %d:%08x (%d bytes)\n",
1582 be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
1583 }
1584
1585 r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_pid, hdr.dst_cid);
1586
1587 if ((!r_ept) && (hdr.dst_pid != RPCROUTER_PID_LOCAL)) {
1588 printk(KERN_ERR
1589 "msm_rpc_write(): No route to ept "
1590 "[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
1591 count = -EHOSTUNREACH;
1592 goto write_release_lock;
1593 }
1594
1595 tx_cnt = count;
1596 tx_buf = buffer;
1597 mid = atomic_add_return(1, &pm_mid) & 0xFF;
1598 /* The modem's router can only take 500 bytes of data. The
1599 first 8 bytes it uses on the modem side for addressing,
1600 the next 4 bytes are for the pacmark header. */
1601 max_tx = RPCROUTER_MSGSIZE_MAX - 8 - sizeof(uint32_t);
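	/* For example, if RPCROUTER_MSGSIZE_MAX is 512 (an assumption
	 * consistent with the 500-byte note above), max_tx works out to
	 * 512 - 8 - 4 = 500 bytes of RPC payload per fragment; larger
	 * writes are split into pacmark fragments by the loop below. */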
1602 IO("Writing %d bytes, max pkt size is %d\n",
1603 tx_cnt, max_tx);
1604 while (tx_cnt > 0) {
1605 if (tx_cnt > max_tx) {
1606 rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
1607 tx_buf, max_tx,
1608 first_pkt, 0, mid);
1609 if (rc < 0) {
1610 count = rc;
1611 goto write_release_lock;
1612 }
1613 IO("Wrote %d bytes First %d, Last 0 mid %d\n",
1614 rc, first_pkt, mid);
1615 tx_cnt -= max_tx;
1616 tx_buf += max_tx;
1617 } else {
1618 rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
1619 tx_buf, tx_cnt,
1620 first_pkt, 1, mid);
1621 if (rc < 0) {
1622 count = rc;
1623 goto write_release_lock;
1624 }
1625 IO("Wrote %d bytes First %d Last 1 mid %d\n",
1626 rc, first_pkt, mid);
1627 break;
1628 }
1629 first_pkt = 0;
1630 }
1631
1632 write_release_lock:
1633 /* if reply, release wakelock after writing to the transport */
1634 if (rq->type != 0) {
1635 /* Upon failure, add reply tag to the pending list.
1636 ** Else add reply tag to the avail/free list. */
1637 if (count < 0)
1638 set_pend_reply(ept, reply);
1639 else
1640 set_avail_reply(ept, reply);
1641
1642 spin_lock_irqsave(&ept->reply_q_lock, flags);
1643 if (list_empty(&ept->reply_pend_q)) {
1644 D("%s: release reply lock on ept %p\n", __func__, ept);
1645 wake_unlock(&ept->reply_q_wake_lock);
1646 }
1647 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1648 }
1649
1650 return count;
1651}
1652EXPORT_SYMBOL(msm_rpc_write);
1653
1654/*
1655 * NOTE: It is the responsibility of the caller to kfree the buffer
1656 */
1657int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
1658 unsigned user_len, long timeout)
1659{
1660 struct rr_fragment *frag, *next;
1661 char *buf;
1662 int rc;
1663
1664 rc = __msm_rpc_read(ept, &frag, user_len, timeout);
1665 if (rc <= 0)
1666 return rc;
1667
1668 /* single-fragment messages conveniently can be
1669 * returned as-is (the buffer is at the front)
1670 */
1671 if (frag->next == 0) {
1672 *buffer = (void*) frag;
1673 return rc;
1674 }
1675
1676 /* multi-fragment messages, we have to do it the
1677 * hard way, which is rather disgusting right now
1678 */
1679 buf = rr_malloc(rc);
1680 *buffer = buf;
1681
1682 while (frag != NULL) {
1683 memcpy(buf, frag->data, frag->length);
1684 next = frag->next;
1685 buf += frag->length;
1686 kfree(frag);
1687 frag = next;
1688 }
1689
1690 return rc;
1691}
1692EXPORT_SYMBOL(msm_rpc_read);
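/*
 * Illustrative caller (see msm_rpc_call_reply() below for the in-tree
 * pattern): the returned buffer must be freed by the caller.
 *
 *	void *buf;
 *	int rc = msm_rpc_read(ept, &buf, -1, 5 * HZ);
 *	if (rc > 0) {
 *		...
 *		kfree(buf);
 *	}
 */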
1693
1694int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
1695 void *_request, int request_size,
1696 long timeout)
1697{
1698 return msm_rpc_call_reply(ept, proc,
1699 _request, request_size,
1700 NULL, 0, timeout);
1701}
1702EXPORT_SYMBOL(msm_rpc_call);
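/*
 * Illustrative call (names are hypothetical). msm_rpc_call_reply()
 * fills in the rpc_request_hdr itself; the caller only provides space
 * for it ahead of the big-endian arguments:
 *
 *	struct {
 *		struct rpc_request_hdr hdr;
 *		uint32_t enable;
 *	} req;
 *
 *	req.enable = cpu_to_be32(1);
 *	rc = msm_rpc_call(ept, MY_ONCRPC_PROC, &req, sizeof(req), 5 * HZ);
 */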
1703
1704int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
1705 void *_request, int request_size,
1706 void *_reply, int reply_size,
1707 long timeout)
1708{
1709 struct rpc_request_hdr *req = _request;
1710 struct rpc_reply_hdr *reply;
1711 int rc;
1712
1713 if (request_size < sizeof(*req))
1714 return -ETOOSMALL;
1715
1716 if (ept->dst_pid == 0xffffffff)
1717 return -ENOTCONN;
1718
1719 memset(req, 0, sizeof(*req));
1720 req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
1721 req->rpc_vers = cpu_to_be32(2);
1722 req->prog = ept->dst_prog;
1723 req->vers = ept->dst_vers;
1724 req->procedure = cpu_to_be32(proc);
1725
1726 rc = msm_rpc_write(ept, req, request_size);
1727 if (rc < 0)
1728 return rc;
1729
1730 for (;;) {
1731 rc = msm_rpc_read(ept, (void*) &reply, -1, timeout);
1732 if (rc < 0)
1733 return rc;
1734 if (rc < (3 * sizeof(uint32_t))) {
1735 rc = -EIO;
1736 break;
1737 }
1738 /* we should not get CALL packets -- ignore them */
1739 if (reply->type == 0) {
1740 kfree(reply);
1741 continue;
1742 }
1743 /* If an earlier call timed out, we could get the (no
1744 * longer wanted) reply for it. Ignore replies that
1745 * we don't expect
1746 */
1747 if (reply->xid != req->xid) {
1748 kfree(reply);
1749 continue;
1750 }
1751 if (reply->reply_stat != 0) {
1752 rc = -EPERM;
1753 break;
1754 }
1755 if (reply->data.acc_hdr.accept_stat != 0) {
1756 rc = -EINVAL;
1757 break;
1758 }
1759 if (_reply == NULL) {
1760 rc = 0;
1761 break;
1762 }
1763 if (rc > reply_size) {
1764 rc = -ENOMEM;
1765 } else {
1766 memcpy(_reply, reply, rc);
1767 }
1768 break;
1769 }
1770 kfree(reply);
1771 return rc;
1772}
1773EXPORT_SYMBOL(msm_rpc_call_reply);
1774
1775
1776static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
1777{
1778 unsigned long flags;
1779 int ret;
1780 spin_lock_irqsave(&ept->read_q_lock, flags);
1781 ret = !list_empty(&ept->read_q);
1782 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1783 return ret;
1784}
1785
1786int __msm_rpc_read(struct msm_rpc_endpoint *ept,
1787 struct rr_fragment **frag_ret,
1788 unsigned len, long timeout)
1789{
1790 struct rr_packet *pkt;
1791 struct rpc_request_hdr *rq;
1792 struct msm_rpc_reply *reply;
1793 unsigned long flags;
1794 int rc;
1795
1796 rc = wait_for_restart_and_notify(ept);
1797 if (rc)
1798 return rc;
1799
1800 IO("READ on ept %p\n", ept);
1801 if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
1802 if (timeout < 0) {
1803 wait_event(ept->wait_q, (ept_packet_available(ept) ||
1804 ept->forced_wakeup ||
1805 ept->restart_state));
1806 if (!msm_rpc_clear_netreset(ept))
1807 return -ENETRESET;
1808 } else {
1809 rc = wait_event_timeout(
1810 ept->wait_q,
1811 (ept_packet_available(ept) ||
1812 ept->forced_wakeup ||
1813 ept->restart_state),
1814 timeout);
1815 if (!msm_rpc_clear_netreset(ept))
1816 return -ENETRESET;
1817 if (rc == 0)
1818 return -ETIMEDOUT;
1819 }
1820 } else {
1821 if (timeout < 0) {
1822 rc = wait_event_interruptible(
1823 ept->wait_q, (ept_packet_available(ept) ||
1824 ept->forced_wakeup ||
1825 ept->restart_state));
1826 if (!msm_rpc_clear_netreset(ept))
1827 return -ENETRESET;
1828 if (rc < 0)
1829 return rc;
1830 } else {
1831 rc = wait_event_interruptible_timeout(
1832 ept->wait_q,
1833 (ept_packet_available(ept) ||
1834 ept->forced_wakeup ||
1835 ept->restart_state),
1836 timeout);
1837 if (!msm_rpc_clear_netreset(ept))
1838 return -ENETRESET;
1839 if (rc == 0)
1840 return -ETIMEDOUT;
1841 }
1842 }
1843
1844 if (ept->forced_wakeup) {
1845 ept->forced_wakeup = 0;
1846 return 0;
1847 }
1848
1849 spin_lock_irqsave(&ept->read_q_lock, flags);
1850 if (list_empty(&ept->read_q)) {
1851 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1852 return -EAGAIN;
1853 }
1854 pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
1855 if (pkt->length > len) {
1856 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1857 return -ETOOSMALL;
1858 }
1859 list_del(&pkt->list);
1860 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1861
1862 rc = pkt->length;
1863
1864 *frag_ret = pkt->first;
1865 rq = (void*) pkt->first->data;
1866 if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
1867 /* RPC CALL */
1868 reply = get_avail_reply(ept);
1869 if (!reply) {
1870 rc = -ENOMEM;
1871 goto read_release_lock;
1872 }
1873 reply->cid = pkt->hdr.src_cid;
1874 reply->pid = pkt->hdr.src_pid;
1875 reply->xid = rq->xid;
1876 reply->prog = rq->prog;
1877 reply->vers = rq->vers;
1878 set_pend_reply(ept, reply);
1879 }
1880
1881 kfree(pkt);
1882
1883 IO("READ on ept %p (%d bytes)\n", ept, rc);
1884
1885 read_release_lock:
1886
1887 /* release read wakelock after taking reply wakelock */
1888 spin_lock_irqsave(&ept->read_q_lock, flags);
1889 if (list_empty(&ept->read_q)) {
1890 D("%s: release read lock on ept %p\n", __func__, ept);
1891 wake_unlock(&ept->read_q_wake_lock);
1892 }
1893 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1894
1895 return rc;
1896}
1897
1898int msm_rpc_is_compatible_version(uint32_t server_version,
1899 uint32_t client_version)
1900{
1901
1902 if ((server_version & RPC_VERSION_MODE_MASK) !=
1903 (client_version & RPC_VERSION_MODE_MASK))
1904 return 0;
1905
1906 if (server_version & RPC_VERSION_MODE_MASK)
1907 return server_version == client_version;
1908
1909 return ((server_version & RPC_VERSION_MAJOR_MASK) ==
1910 (client_version & RPC_VERSION_MAJOR_MASK)) &&
1911 ((server_version & RPC_VERSION_MINOR_MASK) >=
1912 (client_version & RPC_VERSION_MINOR_MASK));
1913}
1914EXPORT_SYMBOL(msm_rpc_is_compatible_version);
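/*
 * Example (mask layout assumed: mode flag in the top bits, major number
 * in the upper halfword, minor in the low halfword): a server registered
 * at version 0x00010002 is compatible with a client requesting
 * 0x00010001 (same major, server minor >= client minor), but not with
 * 0x00020001. When the mode bit is set, the versions must match exactly.
 */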
1915
1916static struct rr_server *msm_rpc_get_server(uint32_t prog, uint32_t vers,
1917 uint32_t accept_compatible,
1918 uint32_t *found_prog)
1919{
1920 struct rr_server *server;
1921 unsigned long flags;
1922
1923 if (found_prog == NULL)
1924 return NULL;
1925
1926 *found_prog = 0;
1927 spin_lock_irqsave(&server_list_lock, flags);
1928 list_for_each_entry(server, &server_list, list) {
1929 if (server->prog == prog) {
1930 *found_prog = 1;
1931 spin_unlock_irqrestore(&server_list_lock, flags);
1932 if (accept_compatible) {
1933 if (msm_rpc_is_compatible_version(server->vers,
1934 vers)) {
1935 return server;
1936 } else {
1937 return NULL;
1938 }
1939 } else if (server->vers == vers) {
1940 return server;
1941 } else
1942 return NULL;
1943 }
1944 }
1945 spin_unlock_irqrestore(&server_list_lock, flags);
1946 return NULL;
1947}
1948
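/*
 * Common backend for msm_rpc_connect() and msm_rpc_connect_compatible().
 * Waits on newserver_wait for a matching server to register, bounded by
 * the connect_timeout module parameter (0 means fail immediately when
 * the program is not yet registered). If the program exists but no
 * acceptable version does, -EHOSTUNREACH is returned. On success an
 * endpoint is opened and aimed at the server that was found; on failure
 * an ERR_PTR is returned.
 */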
1949static struct msm_rpc_endpoint *__msm_rpc_connect(uint32_t prog, uint32_t vers,
1950 uint32_t accept_compatible,
1951 unsigned flags)
1952{
1953 struct msm_rpc_endpoint *ept;
1954 struct rr_server *server;
1955 uint32_t found_prog;
1956 int rc = 0;
1957
1958 DEFINE_WAIT(__wait);
1959
1960 for (;;) {
1961 prepare_to_wait(&newserver_wait, &__wait,
1962 TASK_INTERRUPTIBLE);
1963
1964 server = msm_rpc_get_server(prog, vers, accept_compatible,
1965 &found_prog);
1966 if (server)
1967 break;
1968
1969 if (found_prog) {
1970 pr_info("%s: server not found %x:%x\n",
1971 __func__, prog, vers);
1972 rc = -EHOSTUNREACH;
1973 break;
1974 }
1975
1976 if (msm_rpc_connect_timeout_ms == 0) {
1977 rc = -EHOSTUNREACH;
1978 break;
1979 }
1980
1981 if (signal_pending(current)) {
1982 rc = -ERESTARTSYS;
1983 break;
1984 }
1985
1986 rc = schedule_timeout(
1987 msecs_to_jiffies(msm_rpc_connect_timeout_ms));
1988 if (!rc) {
1989 rc = -ETIMEDOUT;
1990 break;
1991 }
1992 }
1993 finish_wait(&newserver_wait, &__wait);
1994
1995 if (!server)
1996 return ERR_PTR(rc);
1997
1998 if (accept_compatible && (server->vers != vers)) {
1999 D("RPC Using new version 0x%08x(0x%08x) prog 0x%08x",
2000 vers, server->vers, prog);
2001 D(" ... Continuing\n");
2002 }
2003
2004 ept = msm_rpc_open();
2005 if (IS_ERR(ept))
2006 return ept;
2007
2008 ept->flags = flags;
2009 ept->dst_pid = server->pid;
2010 ept->dst_cid = server->cid;
2011 ept->dst_prog = cpu_to_be32(prog);
2012 ept->dst_vers = cpu_to_be32(server->vers);
2013
2014 return ept;
2015}
2016
2017struct msm_rpc_endpoint *msm_rpc_connect_compatible(uint32_t prog,
2018 uint32_t vers, unsigned flags)
2019{
2020 return __msm_rpc_connect(prog, vers, 1, flags);
2021}
2022EXPORT_SYMBOL(msm_rpc_connect_compatible);
2023
2024struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog,
2025 uint32_t vers, unsigned flags)
2026{
2027 return __msm_rpc_connect(prog, vers, 0, flags);
2028}
2029EXPORT_SYMBOL(msm_rpc_connect);
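/*
 * Minimal usage sketch (illustrative only; EXAMPLE_PROG and EXAMPLE_VERS
 * are placeholders, not identifiers defined by this driver):
 *
 *	struct msm_rpc_endpoint *ept;
 *
 *	ept = msm_rpc_connect_compatible(EXAMPLE_PROG, EXAMPLE_VERS, 0);
 *	if (IS_ERR(ept))
 *		return PTR_ERR(ept);
 *	... issue RPC calls on ept, then release it with msm_rpc_close() ...
 */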
2030
2031/* TODO: permission check? */
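/*
 * Register @ept as the server endpoint for @prog:@vers and broadcast a
 * NEW_SERVER control message on every registered transport so remote
 * processors learn about it. Returns 0 on success or a negative errno.
 */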
2032int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
2033 uint32_t prog, uint32_t vers)
2034{
2035 int rc;
2036 union rr_control_msg msg;
2037 struct rr_server *server;
2038 struct rpcrouter_xprt_info *xprt_info;
2039
2040 server = rpcrouter_create_server(ept->pid, ept->cid,
2041 prog, vers);
2042 if (!server)
2043 return -ENODEV;
2044
2045 msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
2046 msg.srv.pid = ept->pid;
2047 msg.srv.cid = ept->cid;
2048 msg.srv.prog = prog;
2049 msg.srv.vers = vers;
2050
2051 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
2052 ept->pid, ept->cid, prog, vers);
2053
2054 mutex_lock(&xprt_info_list_lock);
2055 list_for_each_entry(xprt_info, &xprt_info_list, list) {
2056 rc = rpcrouter_send_control_msg(xprt_info, &msg);
2057 if (rc < 0) {
2058 mutex_unlock(&xprt_info_list_lock);
2059 return rc;
2060 }
2061 }
2062 mutex_unlock(&xprt_info_list_lock);
2063 return 0;
2064}
2065
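/*
 * If @ept is in a restart state because its remote processor went down,
 * clear the pending-notification flag and return 0; otherwise return 1.
 * Callers use a zero return to convert pending reads and writes into
 * -ENETRESET.
 */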
2066int msm_rpc_clear_netreset(struct msm_rpc_endpoint *ept)
2067{
2068 unsigned long flags;
2069 int rc = 1;
2070 spin_lock_irqsave(&ept->restart_lock, flags);
2071 if (ept->restart_state != RESTART_NORMAL) {
2072 ept->restart_state &= ~RESTART_PEND_NTFY;
2073 rc = 0;
2074 }
2075 spin_unlock_irqrestore(&ept->restart_lock, flags);
2076 return rc;
2077}
2078
2079/* TODO: permission check -- disallow unreg of somebody else's server */
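/*
 * Remove the server registered for @prog:@vers. Returns -ENOENT if no
 * such server exists.
 */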
2080int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
2081 uint32_t prog, uint32_t vers)
2082{
2083 struct rr_server *server;
2084 server = rpcrouter_lookup_server(prog, vers);
2085
2086 if (!server)
2087 return -ENOENT;
2088 rpcrouter_destroy_server(server);
2089 return 0;
2090}
2091
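/*
 * Return the length of the packet at the head of @ept's read queue, 0
 * if the queue is empty, -EINVAL if @ept is NULL, or -ENETRESET if the
 * endpoint is in a restart state.
 */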
2092int msm_rpc_get_curr_pkt_size(struct msm_rpc_endpoint *ept)
2093{
2094 unsigned long flags;
2095 struct rr_packet *pkt;
2096 int rc = 0;
2097
2098 if (!ept)
2099 return -EINVAL;
2100
2101 if (!msm_rpc_clear_netreset(ept))
2102 return -ENETRESET;
2103
2104 spin_lock_irqsave(&ept->read_q_lock, flags);
2105 if (!list_empty(&ept->read_q)) {
2106 pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
2107 rc = pkt->length;
2108 }
2109 spin_unlock_irqrestore(&ept->read_q_lock, flags);
2110
2111 return rc;
2112}
2113
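/*
 * Send a BYE control message on every registered transport, then close
 * each transport, unlink it from the transport list and free its
 * bookkeeping.
 */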
2114int msm_rpcrouter_close(void)
2115{
2116 struct rpcrouter_xprt_info *xprt_info, *tmp_xprt_info;
2117 union rr_control_msg ctl;
2118
2119 ctl.cmd = RPCROUTER_CTRL_CMD_BYE;
2120 mutex_lock(&xprt_info_list_lock);
2121 list_for_each_entry_safe(xprt_info, tmp_xprt_info,
2122 &xprt_info_list, list) {
2123 rpcrouter_send_control_msg(xprt_info, &ctl);
2124 xprt_info->xprt->close();
2125 list_del(&xprt_info->list);
2126 kfree(xprt_info);
2127 }
2128 mutex_unlock(&xprt_info_list_lock);
2129 return 0;
2130}
2131
2132#if defined(CONFIG_DEBUG_FS)
2133static int dump_servers(char *buf, int max)
2134{
2135 int i = 0;
2136 unsigned long flags;
2137 struct rr_server *svr;
2138 const char *sym;
2139
2140 spin_lock_irqsave(&server_list_lock, flags);
2141 list_for_each_entry(svr, &server_list, list) {
2142 i += scnprintf(buf + i, max - i, "pdev_name: %s\n",
2143 svr->pdev_name);
2144 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", svr->pid);
2145 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", svr->cid);
2146 i += scnprintf(buf + i, max - i, "prog: 0x%08x", svr->prog);
2147 sym = smd_rpc_get_sym(svr->prog);
2148 if (sym)
2149 i += scnprintf(buf + i, max - i, " (%s)\n", sym);
2150 else
2151 i += scnprintf(buf + i, max - i, "\n");
2152 i += scnprintf(buf + i, max - i, "vers: 0x%08x\n", svr->vers);
2153 i += scnprintf(buf + i, max - i, "\n");
2154 }
2155 spin_unlock_irqrestore(&server_list_lock, flags);
2156
2157 return i;
2158}
2159
2160static int dump_remote_endpoints(char *buf, int max)
2161{
2162 int i = 0;
2163 unsigned long flags;
2164 struct rr_remote_endpoint *ept;
2165
2166 spin_lock_irqsave(&remote_endpoints_lock, flags);
2167 list_for_each_entry(ept, &remote_endpoints, list) {
2168 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
2169 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
2170 i += scnprintf(buf + i, max - i, "tx_quota_cntr: %i\n",
2171 ept->tx_quota_cntr);
2172 i += scnprintf(buf + i, max - i, "quota_restart_state: %i\n",
2173 ept->quota_restart_state);
2174 i += scnprintf(buf + i, max - i, "\n");
2175 }
2176 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
2177
2178 return i;
2179}
2180
2181static int dump_msm_rpc_endpoint(char *buf, int max)
2182{
2183 int i = 0;
2184 unsigned long flags;
2185 struct msm_rpc_reply *reply;
2186 struct msm_rpc_endpoint *ept;
2187 struct rr_packet *pkt;
2188 const char *sym;
2189
2190 spin_lock_irqsave(&local_endpoints_lock, flags);
2191 list_for_each_entry(ept, &local_endpoints, list) {
2192 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
2193 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
2194 i += scnprintf(buf + i, max - i, "dst_pid: 0x%08x\n",
2195 ept->dst_pid);
2196 i += scnprintf(buf + i, max - i, "dst_cid: 0x%08x\n",
2197 ept->dst_cid);
2198 i += scnprintf(buf + i, max - i, "dst_prog: 0x%08x",
2199 be32_to_cpu(ept->dst_prog));
2200 sym = smd_rpc_get_sym(be32_to_cpu(ept->dst_prog));
2201 if (sym)
2202 i += scnprintf(buf + i, max - i, " (%s)\n", sym);
2203 else
2204 i += scnprintf(buf + i, max - i, "\n");
2205 i += scnprintf(buf + i, max - i, "dst_vers: 0x%08x\n",
2206 be32_to_cpu(ept->dst_vers));
2207 i += scnprintf(buf + i, max - i, "reply_cnt: %i\n",
2208 ept->reply_cnt);
2209 i += scnprintf(buf + i, max - i, "restart_state: %i\n",
2210 ept->restart_state);
2211
2212 i += scnprintf(buf + i, max - i, "outstanding xids:\n");
2213 spin_lock(&ept->reply_q_lock);
2214 list_for_each_entry(reply, &ept->reply_pend_q, list)
2215 i += scnprintf(buf + i, max - i, " xid = %u\n",
2216 ntohl(reply->xid));
2217 spin_unlock(&ept->reply_q_lock);
2218
2219 i += scnprintf(buf + i, max - i, "complete unread packets:\n");
2220 spin_lock(&ept->read_q_lock);
2221 list_for_each_entry(pkt, &ept->read_q, list) {
2222 i += scnprintf(buf + i, max - i, " mid = %i\n",
2223 pkt->mid);
2224 i += scnprintf(buf + i, max - i, " length = %i\n",
2225 pkt->length);
2226 }
2227 spin_unlock(&ept->read_q_lock);
2228 i += scnprintf(buf + i, max - i, "\n");
2229 }
2230 spin_unlock_irqrestore(&local_endpoints_lock, flags);
2231
2232 return i;
2233}
2234
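/*
 * Each debugfs file stores its dump callback in i_private; debug_open()
 * copies it to file->private_data and debug_read() invokes it to fill
 * debug_buffer before handing the result to userspace.
 */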
2235#define DEBUG_BUFMAX 4096
2236static char debug_buffer[DEBUG_BUFMAX];
2237
2238static ssize_t debug_read(struct file *file, char __user *buf,
2239 size_t count, loff_t *ppos)
2240{
2241 int (*fill)(char *buf, int max) = file->private_data;
2242 int bsize = fill(debug_buffer, DEBUG_BUFMAX);
2243 return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
2244}
2245
2246static int debug_open(struct inode *inode, struct file *file)
2247{
2248 file->private_data = inode->i_private;
2249 return 0;
2250}
2251
2252static const struct file_operations debug_ops = {
2253 .read = debug_read,
2254 .open = debug_open,
2255};
2256
2257static void debug_create(const char *name, mode_t mode,
2258 struct dentry *dent,
2259 int (*fill)(char *buf, int max))
2260{
2261 debugfs_create_file(name, mode, dent, fill, &debug_ops);
2262}
2263
2264static void debugfs_init(void)
2265{
2266 struct dentry *dent;
2267
2268	dent = debugfs_create_dir("smd_rpcrouter", NULL);
2269 if (IS_ERR(dent))
2270 return;
2271
2272 debug_create("dump_msm_rpc_endpoints", 0444, dent,
2273 dump_msm_rpc_endpoint);
2274 debug_create("dump_remote_endpoints", 0444, dent,
2275 dump_remote_endpoints);
2276 debug_create("dump_servers", 0444, dent,
2277 dump_servers);
2278
2279}
2280
2281#else
2282static void debugfs_init(void) {}
2283#endif
2284
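/*
 * Set up bookkeeping for a newly registered transport: allocate its
 * rpcrouter_xprt_info, create a dedicated read workqueue and queue the
 * first read. The loopback transport is marked initialized immediately;
 * for other transports the SMSM_RPCINIT bit is set in the apps SMSM
 * state. Returns 0 on success or -ENOMEM.
 */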
2285static int msm_rpcrouter_add_xprt(struct rpcrouter_xprt *xprt)
2286{
2287 struct rpcrouter_xprt_info *xprt_info;
2288
2289 D("Registering xprt %s to RPC Router\n", xprt->name);
2290
2291 xprt_info = kmalloc(sizeof(struct rpcrouter_xprt_info), GFP_KERNEL);
2292 if (!xprt_info)
2293 return -ENOMEM;
2294
2295 xprt_info->xprt = xprt;
2296 xprt_info->initialized = 0;
2297 xprt_info->remote_pid = -1;
2298 init_waitqueue_head(&xprt_info->read_wait);
2299 spin_lock_init(&xprt_info->lock);
2300 wake_lock_init(&xprt_info->wakelock,
2301 WAKE_LOCK_SUSPEND, xprt->name);
2302 xprt_info->need_len = 0;
2303 xprt_info->abort_data_read = 0;
2304 INIT_WORK(&xprt_info->read_data, do_read_data);
2305 INIT_LIST_HEAD(&xprt_info->list);
2306
2307 xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
2308 if (!xprt_info->workqueue) {
2309 kfree(xprt_info);
2310 return -ENOMEM;
2311 }
2312
2313 if (!strcmp(xprt->name, "rpcrouter_loopback_xprt")) {
2314 xprt_info->remote_pid = RPCROUTER_PID_LOCAL;
2315 xprt_info->initialized = 1;
2316 } else {
2317 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT);
2318 }
2319
2320 mutex_lock(&xprt_info_list_lock);
2321 list_add_tail(&xprt_info->list, &xprt_info_list);
2322 mutex_unlock(&xprt_info_list_lock);
2323
2324 queue_work(xprt_info->workqueue, &xprt_info->read_data);
2325
2326 xprt->priv = xprt_info;
2327
2328 return 0;
2329}
2330
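/*
 * Tear down a transport registered with msm_rpcrouter_add_xprt(): abort
 * the read worker, unlink the transport from the list (taking both the
 * list mutex and the per-transport spinlock to avoid racing with
 * writers), then flush and destroy its workqueue, wakelock and
 * bookkeeping.
 */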
2331static void msm_rpcrouter_remove_xprt(struct rpcrouter_xprt *xprt)
2332{
2333 struct rpcrouter_xprt_info *xprt_info;
2334 unsigned long flags;
2335
2336 if (xprt && xprt->priv) {
2337 xprt_info = xprt->priv;
2338
2339 /* abort rr_read thread */
2340 xprt_info->abort_data_read = 1;
2341 wake_up(&xprt_info->read_wait);
2342
2343 /* remove xprt from available xprts */
2344 mutex_lock(&xprt_info_list_lock);
2345 spin_lock_irqsave(&xprt_info->lock, flags);
2346 list_del(&xprt_info->list);
2347
2348		/* Release the spinlock last to avoid a race with
2349		 * rpcrouter_get_xprt_info() (called from
2350		 * msm_rpc_write_pkt()), which could otherwise
2351		 * return this xprt just as it is being
2352		 * deleted here. */
2353 mutex_unlock(&xprt_info_list_lock);
2354 spin_unlock_irqrestore(&xprt_info->lock, flags);
2355
2356 /* cleanup workqueues and wakelocks */
2357 flush_workqueue(xprt_info->workqueue);
2358 destroy_workqueue(xprt_info->workqueue);
2359 wake_lock_destroy(&xprt_info->wakelock);
2360
2361
2362 /* free memory */
2363		xprt->priv = NULL;
2364 kfree(xprt_info);
2365 }
2366}
2367
2368struct rpcrouter_xprt_work {
2369 struct rpcrouter_xprt *xprt;
2370 struct work_struct work;
2371};
2372
2373static void xprt_open_worker(struct work_struct *work)
2374{
2375 struct rpcrouter_xprt_work *xprt_work =
2376 container_of(work, struct rpcrouter_xprt_work, work);
2377
2378 msm_rpcrouter_add_xprt(xprt_work->xprt);
2379
2380 kfree(xprt_work);
2381}
2382
2383static void xprt_close_worker(struct work_struct *work)
2384{
2385 struct rpcrouter_xprt_work *xprt_work =
2386 container_of(work, struct rpcrouter_xprt_work, work);
2387
2388 modem_reset_cleanup(xprt_work->xprt->priv);
2389 msm_rpcrouter_remove_xprt(xprt_work->xprt);
2390
2391 if (atomic_dec_return(&pending_close_count) == 0)
2392 wake_up(&subsystem_restart_wait);
2393
2394 kfree(xprt_work);
2395}
2396
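/*
 * Called by transports to report OPEN/CLOSE/DATA events. OPEN and CLOSE
 * are deferred to rpcrouter_workqueue via xprt_open_worker() and
 * xprt_close_worker(); for any event on a transport that already has
 * private state, the read waiter is woken and the wakelock is taken
 * once enough data is available.
 */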
2397void msm_rpcrouter_xprt_notify(struct rpcrouter_xprt *xprt, unsigned event)
2398{
2399 struct rpcrouter_xprt_info *xprt_info;
2400 struct rpcrouter_xprt_work *xprt_work;
2401
2402	/* The workqueue is created in the init function, which is early
2403	 * enough for all existing clients. If that ever stops being true,
2404	 * it will need to be created earlier. */
2405 BUG_ON(!rpcrouter_workqueue);
2406
2407 switch (event) {
2408 case RPCROUTER_XPRT_EVENT_OPEN:
2409 D("open event for '%s'\n", xprt->name);
2410 xprt_work = kmalloc(sizeof(struct rpcrouter_xprt_work),
2411 GFP_ATOMIC);
2412 xprt_work->xprt = xprt;
2413 INIT_WORK(&xprt_work->work, xprt_open_worker);
2414 queue_work(rpcrouter_workqueue, &xprt_work->work);
2415 break;
2416
2417 case RPCROUTER_XPRT_EVENT_CLOSE:
2418 D("close event for '%s'\n", xprt->name);
2419
2420 atomic_inc(&pending_close_count);
2421
2422 xprt_work = kmalloc(sizeof(struct rpcrouter_xprt_work),
2423 GFP_ATOMIC);
2424 xprt_work->xprt = xprt;
2425 INIT_WORK(&xprt_work->work, xprt_close_worker);
2426 queue_work(rpcrouter_workqueue, &xprt_work->work);
2427 break;
2428 }
2429
2430 xprt_info = xprt->priv;
2431 if (xprt_info) {
2432		/* Check read_avail even for an OPEN event to handle DATA
2433		 * events missed while the OPEN event was being processed. */
2434 if (xprt->read_avail() >= xprt_info->need_len)
2435 wake_lock(&xprt_info->wakelock);
2436 wake_up(&xprt_info->read_wait);
2437 }
2438}
2439
2440static int modem_restart_notifier_cb(struct notifier_block *this,
2441 unsigned long code,
2442 void *data);
2443static struct notifier_block nb = {
2444 .notifier_call = modem_restart_notifier_cb,
2445};
2446
2447static int modem_restart_notifier_cb(struct notifier_block *this,
2448 unsigned long code,
2449 void *data)
2450{
2451 switch (code) {
2452 case SUBSYS_BEFORE_SHUTDOWN:
2453 D("%s: SUBSYS_BEFORE_SHUTDOWN\n", __func__);
2454 break;
2455
2456 case SUBSYS_BEFORE_POWERUP:
2457 D("%s: waiting for RPC restart to complete\n", __func__);
2458 wait_event(subsystem_restart_wait,
2459 atomic_read(&pending_close_count) == 0);
2460 D("%s: finished restart wait\n", __func__);
2461 break;
2462
2463 default:
2464 break;
2465 }
2466
2467 return NOTIFY_DONE;
2468}
2469
2470static void *restart_notifier_handle;
2471static __init int modem_restart_late_init(void)
2472{
2473 restart_notifier_handle = subsys_notif_register_notifier("modem", &nb);
2474 return 0;
2475}
2476late_initcall(modem_restart_late_init);
2477
2478static int __init rpcrouter_init(void)
2479{
2480 int ret;
2481
2482 msm_rpc_connect_timeout_ms = 0;
2483 smd_rpcrouter_debug_mask |= SMEM_LOG;
2484 debugfs_init();
2485
2486
2487 /* Initialize what we need to start processing */
2488 rpcrouter_workqueue =
2489 create_singlethread_workqueue("rpcrouter");
2490 if (!rpcrouter_workqueue) {
2491 msm_rpcrouter_exit_devices();
2492 return -ENOMEM;
2493 }
2494
2495 init_waitqueue_head(&newserver_wait);
2496 init_waitqueue_head(&subsystem_restart_wait);
2497
2498 ret = msm_rpcrouter_init_devices();
2499 if (ret < 0)
2500 return ret;
2501
2502 return ret;
2503}
2504
2505module_init(rpcrouter_init);
2506MODULE_DESCRIPTION("MSM RPC Router");
2507MODULE_AUTHOR("San Mehat <san@android.com>");
2508MODULE_LICENSE("GPL");