blob: a28b1afb272a8ccd543e88814d887e6440da9935 [file] [log] [blame]
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +05301/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/string.h>
17#include <linux/errno.h>
18#include <linux/init.h>
19#include <linux/types.h>
20#include <linux/delay.h>
21#include <linux/err.h>
22#include <linux/sched.h>
23#include <linux/poll.h>
24#include <linux/pm.h>
25#include <linux/platform_device.h>
26#include <linux/uaccess.h>
27#include <linux/debugfs.h>
28#include <linux/rwsem.h>
29#include <linux/ipc_logging.h>
30#include <linux/uaccess.h>
31#include <linux/ipc_router.h>
32#include <linux/ipc_router_xprt.h>
33#include <linux/kref.h>
34#include <soc/qcom/subsystem_notif.h>
35#include <soc/qcom/subsystem_restart.h>
36
37#include <asm/byteorder.h>
38
39#include "ipc_router_private.h"
40#include "ipc_router_security.h"
41
42enum {
43 SMEM_LOG = 1U << 0,
44 RTR_DBG = 1U << 1,
45};
46
47static int msm_ipc_router_debug_mask;
48module_param_named(debug_mask, msm_ipc_router_debug_mask,
49 int, 0664);
50#define MODULE_NAME "ipc_router"
51
52#define IPC_RTR_INFO_PAGES 6
53
54#define IPC_RTR_INFO(log_ctx, x...) do { \
55typeof(log_ctx) _log_ctx = (log_ctx); \
56if (_log_ctx) \
57 ipc_log_string(_log_ctx, x); \
58if (msm_ipc_router_debug_mask & RTR_DBG) \
59 pr_info("[IPCRTR] "x); \
60} while (0)
61
62#define IPC_ROUTER_LOG_EVENT_TX 0x01
63#define IPC_ROUTER_LOG_EVENT_RX 0x02
64#define IPC_ROUTER_LOG_EVENT_TX_ERR 0x03
65#define IPC_ROUTER_LOG_EVENT_RX_ERR 0x04
66#define IPC_ROUTER_DUMMY_DEST_NODE 0xFFFFFFFF
67
68#define ipc_port_sk(port) ((struct sock *)(port))
69
70static LIST_HEAD(control_ports);
71static DECLARE_RWSEM(control_ports_lock_lha5);
72
73#define LP_HASH_SIZE 32
74static struct list_head local_ports[LP_HASH_SIZE];
75static DECLARE_RWSEM(local_ports_lock_lhc2);
76
77/* Server info is organized as a hash table. The server's service ID is
78 * used to index into the hash table. The instance ID of most of the servers
79 * are 1 or 2. The service IDs are well distributed compared to the instance
80 * IDs and hence choosing service ID to index into this hash table optimizes
81 * the hash table operations like add, lookup, destroy.
82 */
83#define SRV_HASH_SIZE 32
84static struct list_head server_list[SRV_HASH_SIZE];
85static DECLARE_RWSEM(server_list_lock_lha2);
86
87struct msm_ipc_server {
88 struct list_head list;
89 struct kref ref;
90 struct msm_ipc_port_name name;
91 char pdev_name[32];
92 int next_pdev_id;
93 int synced_sec_rule;
94 struct list_head server_port_list;
95};
96
97struct msm_ipc_server_port {
98 struct list_head list;
99 struct platform_device *pdev;
100 struct msm_ipc_port_addr server_addr;
101 struct msm_ipc_router_xprt_info *xprt_info;
102};
103
104struct msm_ipc_resume_tx_port {
105 struct list_head list;
106 u32 port_id;
107 u32 node_id;
108};
109
110struct ipc_router_conn_info {
111 struct list_head list;
112 u32 port_id;
113};
114
115enum {
116 RESET = 0,
117 VALID = 1,
118};
119
120#define RP_HASH_SIZE 32
121struct msm_ipc_router_remote_port {
122 struct list_head list;
123 struct kref ref;
124 struct mutex rport_lock_lhb2; /* lock for remote port state access */
125 u32 node_id;
126 u32 port_id;
127 int status;
128 u32 tx_quota_cnt;
129 struct list_head resume_tx_port_list;
130 struct list_head conn_info_list;
131 void *sec_rule;
132 struct msm_ipc_server *server;
133};
134
135struct msm_ipc_router_xprt_info {
136 struct list_head list;
137 struct msm_ipc_router_xprt *xprt;
138 u32 remote_node_id;
139 u32 initialized;
140 struct list_head pkt_list;
141 struct wakeup_source ws;
142 struct mutex rx_lock_lhb2; /* lock for xprt rx operations */
143 struct mutex tx_lock_lhb2; /* lock for xprt tx operations */
144 u32 need_len;
145 u32 abort_data_read;
146 struct work_struct read_data;
147 struct workqueue_struct *workqueue;
148 void *log_ctx;
149 struct kref ref;
150 struct completion ref_complete;
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +0530151 bool dynamic_ws;
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -0600152};
153
154#define RT_HASH_SIZE 4
155struct msm_ipc_routing_table_entry {
156 struct list_head list;
157 struct kref ref;
158 u32 node_id;
159 u32 neighbor_node_id;
160 struct list_head remote_port_list[RP_HASH_SIZE];
161 struct msm_ipc_router_xprt_info *xprt_info;
162 struct rw_semaphore lock_lha4;
163 unsigned long num_tx_bytes;
164 unsigned long num_rx_bytes;
165};
166
167#define LOG_CTX_NAME_LEN 32
168struct ipc_rtr_log_ctx {
169 struct list_head list;
170 char log_ctx_name[LOG_CTX_NAME_LEN];
171 void *log_ctx;
172};
173
174static struct list_head routing_table[RT_HASH_SIZE];
175static DECLARE_RWSEM(routing_table_lock_lha3);
176static int routing_table_inited;
177
178static void do_read_data(struct work_struct *work);
179
180static LIST_HEAD(xprt_info_list);
181static DECLARE_RWSEM(xprt_info_list_lock_lha5);
182
183static DEFINE_MUTEX(log_ctx_list_lock_lha0);
184static LIST_HEAD(log_ctx_list);
185static DEFINE_MUTEX(ipc_router_init_lock);
186static bool is_ipc_router_inited;
187static int ipc_router_core_init(void);
188#define IPC_ROUTER_INIT_TIMEOUT (10 * HZ)
189
190static u32 next_port_id;
191static DEFINE_MUTEX(next_port_id_lock_lhc1);
192static struct workqueue_struct *msm_ipc_router_workqueue;
193
194static void *local_log_ctx;
195static void *ipc_router_get_log_ctx(char *sub_name);
196static int process_resume_tx_msg(union rr_control_msg *msg,
197 struct rr_packet *pkt);
198static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr);
199static int ipc_router_get_xprt_info_ref(
200 struct msm_ipc_router_xprt_info *xprt_info);
201static void ipc_router_put_xprt_info_ref(
202 struct msm_ipc_router_xprt_info *xprt_info);
203static void ipc_router_release_xprt_info_ref(struct kref *ref);
204
205struct pil_vote_info {
206 void *pil_handle;
207 struct work_struct load_work;
208 struct work_struct unload_work;
209};
210
211#define PIL_SUBSYSTEM_NAME_LEN 32
212static char default_peripheral[PIL_SUBSYSTEM_NAME_LEN];
213
214enum {
215 DOWN,
216 UP,
217};
218
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +0530219static bool is_wakeup_source_allowed;
220
221void msm_ipc_router_set_ws_allowed(bool flag)
222{
223 is_wakeup_source_allowed = flag;
224}
225
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -0600226static void init_routing_table(void)
227{
228 int i;
229
230 for (i = 0; i < RT_HASH_SIZE; i++)
231 INIT_LIST_HEAD(&routing_table[i]);
232}
233
234/**
235 * ipc_router_calc_checksum() - compute the checksum for extended HELLO message
236 * @msg: Reference to the IPC Router HELLO message.
237 *
238 * Return: Computed checksum value, 0 if msg is NULL.
239 */
240static u32 ipc_router_calc_checksum(union rr_control_msg *msg)
241{
242 u32 checksum = 0;
243 int i, len;
244 u16 upper_nb;
245 u16 lower_nb;
246 void *hello;
247
248 if (!msg)
249 return checksum;
250 hello = msg;
251 len = sizeof(*msg);
252
253 for (i = 0; i < len / IPCR_WORD_SIZE; i++) {
254 lower_nb = (*((u32 *)hello)) & IPC_ROUTER_CHECKSUM_MASK;
255 upper_nb = ((*((u32 *)hello)) >> 16) &
256 IPC_ROUTER_CHECKSUM_MASK;
257 checksum = checksum + upper_nb + lower_nb;
258 hello = ((u32 *)hello) + 1;
259 }
260 while (checksum > 0xFFFF)
261 checksum = (checksum & IPC_ROUTER_CHECKSUM_MASK) +
262 ((checksum >> 16) & IPC_ROUTER_CHECKSUM_MASK);
263
264 checksum = ~checksum & IPC_ROUTER_CHECKSUM_MASK;
265 return checksum;
266}
267
268/**
269 * skb_copy_to_log_buf() - copies the required number bytes from the skb_queue
270 * @skb_head: skb_queue head that contains the data.
271 * @pl_len: length of payload need to be copied.
272 * @hdr_offset: length of the header present in first skb
273 * @log_buf: The output buffer which will contain the formatted log string
274 *
275 * This function copies the first specified number of bytes from the skb_queue
276 * to a new buffer and formats them to a string for logging.
277 */
278static void skb_copy_to_log_buf(struct sk_buff_head *skb_head,
279 unsigned int pl_len, unsigned int hdr_offset,
280 u64 *log_buf)
281{
282 struct sk_buff *temp_skb;
283 unsigned int copied_len = 0, copy_len = 0;
284 int remaining;
285
286 if (!skb_head) {
287 IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
288 return;
289 }
290 temp_skb = skb_peek(skb_head);
291 if (unlikely(!temp_skb || !temp_skb->data)) {
292 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
293 return;
294 }
295
296 remaining = temp_skb->len - hdr_offset;
297 skb_queue_walk(skb_head, temp_skb) {
298 copy_len = remaining < pl_len ? remaining : pl_len;
299 memcpy(log_buf + copied_len, temp_skb->data + hdr_offset,
300 copy_len);
301 copied_len += copy_len;
302 hdr_offset = 0;
303 if (copied_len == pl_len)
304 break;
305 remaining = pl_len - remaining;
306 }
307}
308
309/**
310 * ipc_router_log_msg() - log all data messages exchanged
311 * @log_ctx: IPC Logging context specific to each transport
312 * @xchng_type: Identifies the data to be a receive or send.
313 * @data: IPC Router data packet or control msg received or to be send.
314 * @hdr: Reference to the router header
315 * @port_ptr: Local IPC Router port.
316 * @rport_ptr: Remote IPC Router port
317 *
318 * This function builds the log message that would be passed on to the IPC
319 * logging framework. The data messages that would be passed corresponds to
320 * the information that is exchanged between the IPC Router and it's clients.
321 */
322static void ipc_router_log_msg(void *log_ctx, u32 xchng_type,
323 void *data, struct rr_header_v1 *hdr,
324 struct msm_ipc_port *port_ptr,
325 struct msm_ipc_router_remote_port *rport_ptr)
326{
327 struct sk_buff_head *skb_head = NULL;
328 union rr_control_msg *msg = NULL;
329 struct rr_packet *pkt = NULL;
330 u64 pl_buf = 0;
331 struct sk_buff *skb;
332 u32 buf_len = 8;
333 u32 svc_id = 0;
334 u32 svc_ins = 0;
335 unsigned int hdr_offset = 0;
336 u32 port_type = 0;
337
338 if (!log_ctx || !hdr || !data)
339 return;
340
341 if (hdr->type == IPC_ROUTER_CTRL_CMD_DATA) {
342 pkt = (struct rr_packet *)data;
343 skb_head = pkt->pkt_fragment_q;
344 skb = skb_peek(skb_head);
345 if (!skb || !skb->data) {
346 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
347 return;
348 }
349
350 if (skb_queue_len(skb_head) == 1 && skb->len < 8)
351 buf_len = skb->len;
352 if (xchng_type == IPC_ROUTER_LOG_EVENT_TX && hdr->dst_node_id
353 != IPC_ROUTER_NID_LOCAL) {
354 if (hdr->version == IPC_ROUTER_V1)
355 hdr_offset = sizeof(struct rr_header_v1);
356 else if (hdr->version == IPC_ROUTER_V2)
357 hdr_offset = sizeof(struct rr_header_v2);
358 }
359 skb_copy_to_log_buf(skb_head, buf_len, hdr_offset, &pl_buf);
360
361 if (port_ptr && rport_ptr && (port_ptr->type == CLIENT_PORT) &&
362 rport_ptr->server) {
363 svc_id = rport_ptr->server->name.service;
364 svc_ins = rport_ptr->server->name.instance;
365 port_type = CLIENT_PORT;
Dhoat Harpal3ad5fe32017-06-19 21:26:13 +0530366 port_ptr->last_served_svc_id =
367 rport_ptr->server->name.service;
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -0600368 } else if (port_ptr && (port_ptr->type == SERVER_PORT)) {
369 svc_id = port_ptr->port_name.service;
370 svc_ins = port_ptr->port_name.instance;
371 port_type = SERVER_PORT;
372 }
373 IPC_RTR_INFO(log_ctx,
374 "%s %s %s Len:0x%x T:0x%x CF:0x%x SVC:<0x%x:0x%x> SRC:<0x%x:0x%x> DST:<0x%x:0x%x> DATA: %08x %08x",
375 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "" :
376 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ?
377 current->comm : "")),
378 (port_type == CLIENT_PORT ? "CLI" : "SRV"),
379 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
380 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
381 (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
382 (xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
383 "UNKNOWN")))),
384 hdr->size, hdr->type, hdr->control_flag,
385 svc_id, svc_ins, hdr->src_node_id, hdr->src_port_id,
386 hdr->dst_node_id, hdr->dst_port_id,
387 (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));
388
389 } else {
390 msg = (union rr_control_msg *)data;
391 if (msg->cmd == IPC_ROUTER_CTRL_CMD_NEW_SERVER ||
392 msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
393 IPC_RTR_INFO(log_ctx,
394 "CTL MSG: %s cmd:0x%x SVC:<0x%x:0x%x> ADDR:<0x%x:0x%x>",
395 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
396 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
397 (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
398 (xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
399 "UNKNOWN")))),
400 msg->cmd, msg->srv.service, msg->srv.instance,
401 msg->srv.node_id, msg->srv.port_id);
402 else if (msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT ||
403 msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX)
404 IPC_RTR_INFO(log_ctx,
405 "CTL MSG: %s cmd:0x%x ADDR: <0x%x:0x%x>",
406 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
407 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
408 msg->cmd, msg->cli.node_id, msg->cli.port_id);
409 else if (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO && hdr)
410 IPC_RTR_INFO(log_ctx,
411 "CTL MSG %s cmd:0x%x ADDR:0x%x",
412 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
413 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
414 msg->cmd, hdr->src_node_id);
415 else
416 IPC_RTR_INFO(log_ctx,
417 "%s UNKNOWN cmd:0x%x",
418 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
419 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
420 msg->cmd);
421 }
422}
423
424/* Must be called with routing_table_lock_lha3 locked. */
425static struct msm_ipc_routing_table_entry *lookup_routing_table(
426 u32 node_id)
427{
428 u32 key = (node_id % RT_HASH_SIZE);
429 struct msm_ipc_routing_table_entry *rt_entry;
430
431 list_for_each_entry(rt_entry, &routing_table[key], list) {
432 if (rt_entry->node_id == node_id)
433 return rt_entry;
434 }
435 return NULL;
436}
437
438/**
439 * create_routing_table_entry() - Lookup and create a routing table entry
440 * @node_id: Node ID of the routing table entry to be created.
441 * @xprt_info: XPRT through which the node ID is reachable.
442 *
443 * @return: a reference to the routing table entry on success, NULL on failure.
444 */
445static struct msm_ipc_routing_table_entry *create_routing_table_entry(
446 u32 node_id, struct msm_ipc_router_xprt_info *xprt_info)
447{
448 int i;
449 struct msm_ipc_routing_table_entry *rt_entry;
450 u32 key;
451
452 down_write(&routing_table_lock_lha3);
453 rt_entry = lookup_routing_table(node_id);
454 if (rt_entry)
455 goto out_create_rtentry1;
456
457 rt_entry = kmalloc(sizeof(*rt_entry), GFP_KERNEL);
458 if (!rt_entry) {
459 IPC_RTR_ERR("%s: rt_entry allocation failed for %d\n",
460 __func__, node_id);
461 goto out_create_rtentry2;
462 }
463
464 for (i = 0; i < RP_HASH_SIZE; i++)
465 INIT_LIST_HEAD(&rt_entry->remote_port_list[i]);
466 init_rwsem(&rt_entry->lock_lha4);
467 kref_init(&rt_entry->ref);
468 rt_entry->node_id = node_id;
469 rt_entry->xprt_info = xprt_info;
470 if (xprt_info)
471 rt_entry->neighbor_node_id = xprt_info->remote_node_id;
472
473 key = (node_id % RT_HASH_SIZE);
474 list_add_tail(&rt_entry->list, &routing_table[key]);
475out_create_rtentry1:
476 kref_get(&rt_entry->ref);
477out_create_rtentry2:
478 up_write(&routing_table_lock_lha3);
479 return rt_entry;
480}
481
482/**
483 * ipc_router_get_rtentry_ref() - Get a reference to the routing table entry
484 * @node_id: Node ID of the routing table entry.
485 *
486 * @return: a reference to the routing table entry on success, NULL on failure.
487 *
488 * This function is used to obtain a reference to the rounting table entry
489 * corresponding to a node id.
490 */
491static struct msm_ipc_routing_table_entry *ipc_router_get_rtentry_ref(
492 u32 node_id)
493{
494 struct msm_ipc_routing_table_entry *rt_entry;
495
496 down_read(&routing_table_lock_lha3);
497 rt_entry = lookup_routing_table(node_id);
498 if (rt_entry)
499 kref_get(&rt_entry->ref);
500 up_read(&routing_table_lock_lha3);
501 return rt_entry;
502}
503
504/**
505 * ipc_router_release_rtentry() - Cleanup and release the routing table entry
506 * @ref: Reference to the entry.
507 *
508 * This function is called when all references to the routing table entry are
509 * released.
510 */
511void ipc_router_release_rtentry(struct kref *ref)
512{
513 struct msm_ipc_routing_table_entry *rt_entry =
514 container_of(ref, struct msm_ipc_routing_table_entry, ref);
515
516 /* All references to a routing entry will be put only under SSR.
517 * As part of SSR, all the internals of the routing table entry
518 * are cleaned. So just free the routing table entry.
519 */
520 kfree(rt_entry);
521}
522
523struct rr_packet *rr_read(struct msm_ipc_router_xprt_info *xprt_info)
524{
525 struct rr_packet *temp_pkt;
526
527 if (!xprt_info)
528 return NULL;
529
530 mutex_lock(&xprt_info->rx_lock_lhb2);
531 if (xprt_info->abort_data_read) {
532 mutex_unlock(&xprt_info->rx_lock_lhb2);
533 IPC_RTR_ERR("%s detected SSR & exiting now\n",
534 xprt_info->xprt->name);
535 return NULL;
536 }
537
538 if (list_empty(&xprt_info->pkt_list)) {
539 mutex_unlock(&xprt_info->rx_lock_lhb2);
540 return NULL;
541 }
542
543 temp_pkt = list_first_entry(&xprt_info->pkt_list,
544 struct rr_packet, list);
545 list_del(&temp_pkt->list);
546 if (list_empty(&xprt_info->pkt_list))
547 __pm_relax(&xprt_info->ws);
548 mutex_unlock(&xprt_info->rx_lock_lhb2);
549 return temp_pkt;
550}
551
552struct rr_packet *clone_pkt(struct rr_packet *pkt)
553{
554 struct rr_packet *cloned_pkt;
555 struct sk_buff *temp_skb, *cloned_skb;
556 struct sk_buff_head *pkt_fragment_q;
557
558 cloned_pkt = kzalloc(sizeof(*cloned_pkt), GFP_KERNEL);
559 if (!cloned_pkt) {
560 IPC_RTR_ERR("%s: failure\n", __func__);
561 return NULL;
562 }
563 memcpy(&cloned_pkt->hdr, &pkt->hdr, sizeof(struct rr_header_v1));
564 if (pkt->opt_hdr.len > 0) {
565 cloned_pkt->opt_hdr.data = kmalloc(pkt->opt_hdr.len,
566 GFP_KERNEL);
567 if (!cloned_pkt->opt_hdr.data) {
568 IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
569 } else {
570 cloned_pkt->opt_hdr.len = pkt->opt_hdr.len;
571 memcpy(cloned_pkt->opt_hdr.data, pkt->opt_hdr.data,
572 pkt->opt_hdr.len);
573 }
574 }
575
576 pkt_fragment_q = kmalloc(sizeof(*pkt_fragment_q), GFP_KERNEL);
577 if (!pkt_fragment_q) {
578 IPC_RTR_ERR("%s: pkt_frag_q alloc failure\n", __func__);
579 kfree(cloned_pkt);
580 return NULL;
581 }
582 skb_queue_head_init(pkt_fragment_q);
583 kref_init(&cloned_pkt->ref);
584
585 skb_queue_walk(pkt->pkt_fragment_q, temp_skb) {
586 cloned_skb = skb_clone(temp_skb, GFP_KERNEL);
587 if (!cloned_skb)
588 goto fail_clone;
589 skb_queue_tail(pkt_fragment_q, cloned_skb);
590 }
591 cloned_pkt->pkt_fragment_q = pkt_fragment_q;
592 cloned_pkt->length = pkt->length;
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +0530593 cloned_pkt->ws_need = pkt->ws_need;
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -0600594 return cloned_pkt;
595
596fail_clone:
597 while (!skb_queue_empty(pkt_fragment_q)) {
598 temp_skb = skb_dequeue(pkt_fragment_q);
599 kfree_skb(temp_skb);
600 }
601 kfree(pkt_fragment_q);
602 if (cloned_pkt->opt_hdr.len > 0)
603 kfree(cloned_pkt->opt_hdr.data);
604 kfree(cloned_pkt);
605 return NULL;
606}
607
608/**
609 * create_pkt() - Create a Router packet
610 * @data: SKB queue to be contained inside the packet.
611 *
612 * @return: pointer to packet on success, NULL on failure.
613 */
614struct rr_packet *create_pkt(struct sk_buff_head *data)
615{
616 struct rr_packet *pkt;
617 struct sk_buff *temp_skb;
618
619 pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
620 if (!pkt) {
621 IPC_RTR_ERR("%s: failure\n", __func__);
622 return NULL;
623 }
624
625 if (data) {
626 pkt->pkt_fragment_q = data;
627 skb_queue_walk(pkt->pkt_fragment_q, temp_skb)
628 pkt->length += temp_skb->len;
629 } else {
630 pkt->pkt_fragment_q = kmalloc(sizeof(*pkt->pkt_fragment_q),
631 GFP_KERNEL);
632 if (!pkt->pkt_fragment_q) {
633 IPC_RTR_ERR("%s: Couldn't alloc pkt_fragment_q\n",
634 __func__);
635 kfree(pkt);
636 return NULL;
637 }
638 skb_queue_head_init(pkt->pkt_fragment_q);
639 }
640 kref_init(&pkt->ref);
641 return pkt;
642}
643
644void release_pkt(struct rr_packet *pkt)
645{
646 struct sk_buff *temp_skb;
647
648 if (!pkt)
649 return;
650
651 if (!pkt->pkt_fragment_q) {
652 kfree(pkt);
653 return;
654 }
655
656 while (!skb_queue_empty(pkt->pkt_fragment_q)) {
657 temp_skb = skb_dequeue(pkt->pkt_fragment_q);
658 kfree_skb(temp_skb);
659 }
660 kfree(pkt->pkt_fragment_q);
661 if (pkt->opt_hdr.len > 0)
662 kfree(pkt->opt_hdr.data);
663 kfree(pkt);
664}
665
666static struct sk_buff_head *msm_ipc_router_buf_to_skb(void *buf,
667 unsigned int buf_len)
668{
669 struct sk_buff_head *skb_head;
670 struct sk_buff *skb;
671 int first = 1, offset = 0;
672 int skb_size, data_size;
673 void *data;
674 int last = 1;
675 int align_size;
676
677 skb_head = kmalloc(sizeof(*skb_head), GFP_KERNEL);
678 if (!skb_head) {
679 IPC_RTR_ERR("%s: Couldnot allocate skb_head\n", __func__);
680 return NULL;
681 }
682 skb_queue_head_init(skb_head);
683
684 data_size = buf_len;
685 align_size = ALIGN_SIZE(data_size);
686 while (offset != buf_len) {
687 skb_size = data_size;
688 if (first)
689 skb_size += IPC_ROUTER_HDR_SIZE;
690 if (last)
691 skb_size += align_size;
692
693 skb = alloc_skb(skb_size, GFP_KERNEL);
694 if (!skb) {
695 if (skb_size <= (PAGE_SIZE / 2)) {
696 IPC_RTR_ERR("%s: cannot allocate skb\n",
697 __func__);
698 goto buf_to_skb_error;
699 }
700 data_size = data_size / 2;
701 last = 0;
702 continue;
703 }
704
705 if (first) {
706 skb_reserve(skb, IPC_ROUTER_HDR_SIZE);
707 first = 0;
708 }
709
710 data = skb_put(skb, data_size);
711 memcpy(skb->data, buf + offset, data_size);
712 skb_queue_tail(skb_head, skb);
713 offset += data_size;
714 data_size = buf_len - offset;
715 last = 1;
716 }
717 return skb_head;
718
719buf_to_skb_error:
720 while (!skb_queue_empty(skb_head)) {
721 skb = skb_dequeue(skb_head);
722 kfree_skb(skb);
723 }
724 kfree(skb_head);
725 return NULL;
726}
727
728static void *msm_ipc_router_skb_to_buf(struct sk_buff_head *skb_head,
729 unsigned int len)
730{
731 struct sk_buff *temp;
732 unsigned int offset = 0, buf_len = 0, copy_len;
733 void *buf;
734
735 if (!skb_head) {
736 IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
737 return NULL;
738 }
739
740 temp = skb_peek(skb_head);
741 buf_len = len;
742 buf = kmalloc(buf_len, GFP_KERNEL);
743 if (!buf) {
744 IPC_RTR_ERR("%s: cannot allocate buf\n", __func__);
745 return NULL;
746 }
747 skb_queue_walk(skb_head, temp) {
748 copy_len = buf_len < temp->len ? buf_len : temp->len;
749 memcpy(buf + offset, temp->data, copy_len);
750 offset += copy_len;
751 buf_len -= copy_len;
752 }
753 return buf;
754}
755
756void msm_ipc_router_free_skb(struct sk_buff_head *skb_head)
757{
758 struct sk_buff *temp_skb;
759
760 if (!skb_head)
761 return;
762
763 while (!skb_queue_empty(skb_head)) {
764 temp_skb = skb_dequeue(skb_head);
765 kfree_skb(temp_skb);
766 }
767 kfree(skb_head);
768}
769
770/**
771 * extract_optional_header() - Extract the optional header from skb
772 * @pkt: Packet structure into which the header has to be extracted.
773 * @opt_len: The optional header length in word size.
774 *
775 * @return: Length of optional header in bytes if success, zero otherwise.
776 */
777static int extract_optional_header(struct rr_packet *pkt, u8 opt_len)
778{
779 size_t offset = 0, buf_len = 0, copy_len, opt_hdr_len;
780 struct sk_buff *temp;
781 struct sk_buff_head *skb_head;
782
783 opt_hdr_len = opt_len * IPCR_WORD_SIZE;
784 pkt->opt_hdr.data = kmalloc(opt_hdr_len, GFP_KERNEL);
785 if (!pkt->opt_hdr.data) {
786 IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
787 return 0;
788 }
789 skb_head = pkt->pkt_fragment_q;
790 buf_len = opt_hdr_len;
791 skb_queue_walk(skb_head, temp) {
792 copy_len = buf_len < temp->len ? buf_len : temp->len;
793 memcpy(pkt->opt_hdr.data + offset, temp->data, copy_len);
794 offset += copy_len;
795 buf_len -= copy_len;
796 skb_pull(temp, copy_len);
797 if (temp->len == 0) {
798 skb_dequeue(skb_head);
799 kfree_skb(temp);
800 }
801 }
802 pkt->opt_hdr.len = opt_hdr_len;
803 return opt_hdr_len;
804}
805
806/**
807 * extract_header_v1() - Extract IPC Router header of version 1
808 * @pkt: Packet structure into which the header has to be extraced.
809 * @skb: SKB from which the header has to be extracted.
810 *
811 * @return: 0 on success, standard Linux error codes on failure.
812 */
813static int extract_header_v1(struct rr_packet *pkt, struct sk_buff *skb)
814{
815 if (!pkt || !skb) {
816 IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
817 return -EINVAL;
818 }
819
820 memcpy(&pkt->hdr, skb->data, sizeof(struct rr_header_v1));
821 skb_pull(skb, sizeof(struct rr_header_v1));
822 pkt->length -= sizeof(struct rr_header_v1);
823 return 0;
824}
825
826/**
827 * extract_header_v2() - Extract IPC Router header of version 2
828 * @pkt: Packet structure into which the header has to be extraced.
829 * @skb: SKB from which the header has to be extracted.
830 *
831 * @return: 0 on success, standard Linux error codes on failure.
832 */
833static int extract_header_v2(struct rr_packet *pkt, struct sk_buff *skb)
834{
835 struct rr_header_v2 *hdr;
836 u8 opt_len;
837 size_t opt_hdr_len;
838 size_t total_hdr_size = sizeof(*hdr);
839
840 if (!pkt || !skb) {
841 IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
842 return -EINVAL;
843 }
844
845 hdr = (struct rr_header_v2 *)skb->data;
846 pkt->hdr.version = (u32)hdr->version;
847 pkt->hdr.type = (u32)hdr->type;
848 pkt->hdr.src_node_id = (u32)hdr->src_node_id;
849 pkt->hdr.src_port_id = (u32)hdr->src_port_id;
850 pkt->hdr.size = (u32)hdr->size;
851 pkt->hdr.control_flag = (u32)hdr->control_flag;
852 pkt->hdr.dst_node_id = (u32)hdr->dst_node_id;
853 pkt->hdr.dst_port_id = (u32)hdr->dst_port_id;
854 opt_len = hdr->opt_len;
855 skb_pull(skb, total_hdr_size);
856 if (opt_len > 0) {
857 opt_hdr_len = extract_optional_header(pkt, opt_len);
858 total_hdr_size += opt_hdr_len;
859 }
860 pkt->length -= total_hdr_size;
861 return 0;
862}
863
864/**
865 * extract_header() - Extract IPC Router header
866 * @pkt: Packet from which the header has to be extraced.
867 *
868 * @return: 0 on success, standard Linux error codes on failure.
869 *
870 * This function will check if the header version is v1 or v2 and invoke
871 * the corresponding helper function to extract the IPC Router header.
872 */
873static int extract_header(struct rr_packet *pkt)
874{
875 struct sk_buff *temp_skb;
876 int ret;
877
878 if (!pkt) {
879 IPC_RTR_ERR("%s: NULL PKT\n", __func__);
880 return -EINVAL;
881 }
882
883 temp_skb = skb_peek(pkt->pkt_fragment_q);
884 if (!temp_skb || !temp_skb->data) {
885 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
886 return -EINVAL;
887 }
888
889 if (temp_skb->data[0] == IPC_ROUTER_V1) {
890 ret = extract_header_v1(pkt, temp_skb);
891 } else if (temp_skb->data[0] == IPC_ROUTER_V2) {
892 ret = extract_header_v2(pkt, temp_skb);
893 } else {
894 IPC_RTR_ERR("%s: Invalid Header version %02x\n",
895 __func__, temp_skb->data[0]);
896 print_hex_dump(KERN_ERR, "Header: ", DUMP_PREFIX_ADDRESS,
897 16, 1, temp_skb->data, pkt->length, true);
898 return -EINVAL;
899 }
900 return ret;
901}
902
903/**
904 * calc_tx_header_size() - Calculate header size to be reserved in SKB
905 * @pkt: Packet in which the space for header has to be reserved.
906 * @dst_xprt_info: XPRT through which the destination is reachable.
907 *
908 * @return: required header size on success,
909 * starndard Linux error codes on failure.
910 *
911 * This function is used to calculate the header size that has to be reserved
912 * in a transmit SKB. The header size is calculated based on the XPRT through
913 * which the destination node is reachable.
914 */
915static int calc_tx_header_size(struct rr_packet *pkt,
916 struct msm_ipc_router_xprt_info *dst_xprt_info)
917{
918 int hdr_size = 0;
919 int xprt_version = 0;
920 struct msm_ipc_router_xprt_info *xprt_info = dst_xprt_info;
921
922 if (!pkt) {
923 IPC_RTR_ERR("%s: NULL PKT\n", __func__);
924 return -EINVAL;
925 }
926
927 if (xprt_info)
928 xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
929
930 if (xprt_version == IPC_ROUTER_V1) {
931 pkt->hdr.version = IPC_ROUTER_V1;
932 hdr_size = sizeof(struct rr_header_v1);
933 } else if (xprt_version == IPC_ROUTER_V2) {
934 pkt->hdr.version = IPC_ROUTER_V2;
935 hdr_size = sizeof(struct rr_header_v2) + pkt->opt_hdr.len;
936 } else {
937 IPC_RTR_ERR("%s: Invalid xprt_version %d\n",
938 __func__, xprt_version);
939 hdr_size = -EINVAL;
940 }
941
942 return hdr_size;
943}
944
945/**
946 * calc_rx_header_size() - Calculate the RX header size
947 * @xprt_info: XPRT info of the received message.
948 *
949 * @return: valid header size on success, INT_MAX on failure.
950 */
951static int calc_rx_header_size(struct msm_ipc_router_xprt_info *xprt_info)
952{
953 int xprt_version = 0;
954 int hdr_size = INT_MAX;
955
956 if (xprt_info)
957 xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
958
959 if (xprt_version == IPC_ROUTER_V1)
960 hdr_size = sizeof(struct rr_header_v1);
961 else if (xprt_version == IPC_ROUTER_V2)
962 hdr_size = sizeof(struct rr_header_v2);
963 return hdr_size;
964}
965
966/**
967 * prepend_header_v1() - Prepend IPC Router header of version 1
968 * @pkt: Packet structure which contains the header info to be prepended.
969 * @hdr_size: Size of the header
970 *
971 * @return: 0 on success, standard Linux error codes on failure.
972 */
973static int prepend_header_v1(struct rr_packet *pkt, int hdr_size)
974{
975 struct sk_buff *temp_skb;
976 struct rr_header_v1 *hdr;
977
978 if (!pkt || hdr_size <= 0) {
979 IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
980 return -EINVAL;
981 }
982
983 temp_skb = skb_peek(pkt->pkt_fragment_q);
984 if (!temp_skb || !temp_skb->data) {
985 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
986 return -EINVAL;
987 }
988
989 if (skb_headroom(temp_skb) < hdr_size) {
990 temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
991 if (!temp_skb) {
992 IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
993 __func__, hdr_size);
994 return -ENOMEM;
995 }
996 skb_reserve(temp_skb, hdr_size);
997 }
998
999 hdr = (struct rr_header_v1 *)skb_push(temp_skb, hdr_size);
1000 memcpy(hdr, &pkt->hdr, hdr_size);
1001 if (temp_skb != skb_peek(pkt->pkt_fragment_q))
1002 skb_queue_head(pkt->pkt_fragment_q, temp_skb);
1003 pkt->length += hdr_size;
1004 return 0;
1005}
1006
1007/**
1008 * prepend_header_v2() - Prepend IPC Router header of version 2
1009 * @pkt: Packet structure which contains the header info to be prepended.
1010 * @hdr_size: Size of the header
1011 *
1012 * @return: 0 on success, standard Linux error codes on failure.
1013 */
1014static int prepend_header_v2(struct rr_packet *pkt, int hdr_size)
1015{
1016 struct sk_buff *temp_skb;
1017 struct rr_header_v2 *hdr;
1018
1019 if (!pkt || hdr_size <= 0) {
1020 IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
1021 return -EINVAL;
1022 }
1023
1024 temp_skb = skb_peek(pkt->pkt_fragment_q);
1025 if (!temp_skb || !temp_skb->data) {
1026 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
1027 return -EINVAL;
1028 }
1029
1030 if (skb_headroom(temp_skb) < hdr_size) {
1031 temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
1032 if (!temp_skb) {
1033 IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
1034 __func__, hdr_size);
1035 return -ENOMEM;
1036 }
1037 skb_reserve(temp_skb, hdr_size);
1038 }
1039
1040 hdr = (struct rr_header_v2 *)skb_push(temp_skb, hdr_size);
1041 hdr->version = (u8)pkt->hdr.version;
1042 hdr->type = (u8)pkt->hdr.type;
1043 hdr->control_flag = (u8)pkt->hdr.control_flag;
1044 hdr->size = (u32)pkt->hdr.size;
1045 hdr->src_node_id = (u16)pkt->hdr.src_node_id;
1046 hdr->src_port_id = (u16)pkt->hdr.src_port_id;
1047 hdr->dst_node_id = (u16)pkt->hdr.dst_node_id;
1048 hdr->dst_port_id = (u16)pkt->hdr.dst_port_id;
1049 if (pkt->opt_hdr.len > 0) {
1050 hdr->opt_len = pkt->opt_hdr.len / IPCR_WORD_SIZE;
1051 memcpy(hdr + sizeof(*hdr), pkt->opt_hdr.data, pkt->opt_hdr.len);
1052 } else {
1053 hdr->opt_len = 0;
1054 }
1055 if (temp_skb != skb_peek(pkt->pkt_fragment_q))
1056 skb_queue_head(pkt->pkt_fragment_q, temp_skb);
1057 pkt->length += hdr_size;
1058 return 0;
1059}
1060
1061/**
1062 * prepend_header() - Prepend IPC Router header
1063 * @pkt: Packet structure which contains the header info to be prepended.
1064 * @xprt_info: XPRT through which the packet is transmitted.
1065 *
1066 * @return: 0 on success, standard Linux error codes on failure.
1067 *
1068 * This function prepends the header to the packet to be transmitted. The
1069 * IPC Router header version to be prepended depends on the XPRT through
1070 * which the destination is reachable.
1071 */
1072static int prepend_header(struct rr_packet *pkt,
1073 struct msm_ipc_router_xprt_info *xprt_info)
1074{
1075 int hdr_size;
1076 struct sk_buff *temp_skb;
1077
1078 if (!pkt) {
1079 IPC_RTR_ERR("%s: NULL PKT\n", __func__);
1080 return -EINVAL;
1081 }
1082
1083 temp_skb = skb_peek(pkt->pkt_fragment_q);
1084 if (!temp_skb || !temp_skb->data) {
1085 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
1086 return -EINVAL;
1087 }
1088
1089 hdr_size = calc_tx_header_size(pkt, xprt_info);
1090 if (hdr_size <= 0)
1091 return hdr_size;
1092
1093 if (pkt->hdr.version == IPC_ROUTER_V1)
1094 return prepend_header_v1(pkt, hdr_size);
1095 else if (pkt->hdr.version == IPC_ROUTER_V2)
1096 return prepend_header_v2(pkt, hdr_size);
1097 else
1098 return -EINVAL;
1099}
1100
1101/**
1102 * defragment_pkt() - Defragment and linearize the packet
1103 * @pkt: Packet to be linearized.
1104 *
1105 * @return: 0 on success, standard Linux error codes on failure.
1106 *
1107 * Some packets contain fragments of data over multiple SKBs. If an XPRT
1108 * does not supported fragmented writes, linearize multiple SKBs into one
1109 * single SKB.
1110 */
1111static int defragment_pkt(struct rr_packet *pkt)
1112{
1113 struct sk_buff *dst_skb, *src_skb, *temp_skb;
1114 int offset = 0, buf_len = 0, copy_len;
1115 void *buf;
1116 int align_size;
1117
1118 if (!pkt || pkt->length <= 0) {
1119 IPC_RTR_ERR("%s: Invalid PKT\n", __func__);
1120 return -EINVAL;
1121 }
1122
1123 if (skb_queue_len(pkt->pkt_fragment_q) == 1)
1124 return 0;
1125
1126 align_size = ALIGN_SIZE(pkt->length);
1127 dst_skb = alloc_skb(pkt->length + align_size, GFP_KERNEL);
1128 if (!dst_skb) {
1129 IPC_RTR_ERR("%s: could not allocate one skb of size %d\n",
1130 __func__, pkt->length);
1131 return -ENOMEM;
1132 }
1133 buf = skb_put(dst_skb, pkt->length);
1134 buf_len = pkt->length;
1135
1136 skb_queue_walk(pkt->pkt_fragment_q, src_skb) {
1137 copy_len = buf_len < src_skb->len ? buf_len : src_skb->len;
1138 memcpy(buf + offset, src_skb->data, copy_len);
1139 offset += copy_len;
1140 buf_len -= copy_len;
1141 }
1142
1143 while (!skb_queue_empty(pkt->pkt_fragment_q)) {
1144 temp_skb = skb_dequeue(pkt->pkt_fragment_q);
1145 kfree_skb(temp_skb);
1146 }
1147 skb_queue_tail(pkt->pkt_fragment_q, dst_skb);
1148 return 0;
1149}
1150
1151static int post_pkt_to_port(struct msm_ipc_port *port_ptr,
1152 struct rr_packet *pkt, int clone)
1153{
1154 struct rr_packet *temp_pkt = pkt;
1155 void (*notify)(unsigned int event, void *oob_data,
1156 size_t oob_data_len, void *priv);
1157 void (*data_ready)(struct sock *sk) = NULL;
1158 struct sock *sk;
1159 u32 pkt_type;
1160
1161 if (unlikely(!port_ptr || !pkt))
1162 return -EINVAL;
1163
1164 if (clone) {
1165 temp_pkt = clone_pkt(pkt);
1166 if (!temp_pkt) {
1167 IPC_RTR_ERR(
1168 "%s: Error cloning packet for port %08x:%08x\n",
1169 __func__, port_ptr->this_port.node_id,
1170 port_ptr->this_port.port_id);
1171 return -ENOMEM;
1172 }
1173 }
1174
1175 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +05301176 if (pkt->ws_need)
1177 __pm_stay_awake(port_ptr->port_rx_ws);
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06001178 list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q);
1179 wake_up(&port_ptr->port_rx_wait_q);
1180 notify = port_ptr->notify;
1181 pkt_type = temp_pkt->hdr.type;
1182 sk = (struct sock *)port_ptr->endpoint;
1183 if (sk) {
1184 read_lock(&sk->sk_callback_lock);
1185 data_ready = sk->sk_data_ready;
1186 read_unlock(&sk->sk_callback_lock);
1187 }
1188 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
1189 if (notify)
1190 notify(pkt_type, NULL, 0, port_ptr->priv);
1191 else if (sk && data_ready)
1192 data_ready(sk);
1193
1194 return 0;
1195}
1196
1197/**
1198 * ipc_router_peek_pkt_size() - Peek into the packet header to get potential
1199 * packet size
1200 * @data: Starting address of the packet which points to router header.
1201 *
1202 * @returns: potential packet size on success, < 0 on error.
1203 *
1204 * This function is used by the underlying transport abstraction layer to
1205 * peek into the potential packet size of an incoming packet. This information
1206 * is used to perform link layer fragmentation and re-assembly
1207 */
1208int ipc_router_peek_pkt_size(char *data)
1209{
1210 int size;
1211
1212 if (!data) {
1213 pr_err("%s: NULL PKT\n", __func__);
1214 return -EINVAL;
1215 }
1216
1217 if (data[0] == IPC_ROUTER_V1)
1218 size = ((struct rr_header_v1 *)data)->size +
1219 sizeof(struct rr_header_v1);
1220 else if (data[0] == IPC_ROUTER_V2)
1221 size = ((struct rr_header_v2 *)data)->size +
1222 ((struct rr_header_v2 *)data)->opt_len * IPCR_WORD_SIZE
1223 + sizeof(struct rr_header_v2);
1224 else
1225 return -EINVAL;
1226
1227 size += ALIGN_SIZE(size);
1228 return size;
1229}
1230
1231static int post_control_ports(struct rr_packet *pkt)
1232{
1233 struct msm_ipc_port *port_ptr;
1234
1235 if (!pkt)
1236 return -EINVAL;
1237
1238 down_read(&control_ports_lock_lha5);
1239 list_for_each_entry(port_ptr, &control_ports, list)
1240 post_pkt_to_port(port_ptr, pkt, 1);
1241 up_read(&control_ports_lock_lha5);
1242 return 0;
1243}
1244
1245static u32 allocate_port_id(void)
1246{
1247 u32 port_id = 0, prev_port_id, key;
1248 struct msm_ipc_port *port_ptr;
1249
1250 mutex_lock(&next_port_id_lock_lhc1);
1251 prev_port_id = next_port_id;
1252 down_read(&local_ports_lock_lhc2);
1253 do {
1254 next_port_id++;
1255 if ((next_port_id & IPC_ROUTER_ADDRESS) == IPC_ROUTER_ADDRESS)
1256 next_port_id = 1;
1257
1258 key = (next_port_id & (LP_HASH_SIZE - 1));
1259 if (list_empty(&local_ports[key])) {
1260 port_id = next_port_id;
1261 break;
1262 }
1263 list_for_each_entry(port_ptr, &local_ports[key], list) {
1264 if (port_ptr->this_port.port_id == next_port_id) {
1265 port_id = next_port_id;
1266 break;
1267 }
1268 }
1269 if (!port_id) {
1270 port_id = next_port_id;
1271 break;
1272 }
1273 port_id = 0;
1274 } while (next_port_id != prev_port_id);
1275 up_read(&local_ports_lock_lhc2);
1276 mutex_unlock(&next_port_id_lock_lhc1);
1277
1278 return port_id;
1279}
1280
1281void msm_ipc_router_add_local_port(struct msm_ipc_port *port_ptr)
1282{
1283 u32 key;
1284
1285 if (!port_ptr)
1286 return;
1287
1288 key = (port_ptr->this_port.port_id & (LP_HASH_SIZE - 1));
1289 down_write(&local_ports_lock_lhc2);
1290 list_add_tail(&port_ptr->list, &local_ports[key]);
1291 up_write(&local_ports_lock_lhc2);
1292}
1293
1294/**
1295 * msm_ipc_router_create_raw_port() - Create an IPC Router port
1296 * @endpoint: User-space space socket information to be cached.
1297 * @notify: Function to notify incoming events on the port.
1298 * @event: Event ID to be handled.
1299 * @oob_data: Any out-of-band data associated with the event.
1300 * @oob_data_len: Size of the out-of-band data, if valid.
1301 * @priv: Private data registered during the port creation.
1302 * @priv: Private Data to be passed during the event notification.
1303 *
1304 * @return: Valid pointer to port on success, NULL on failure.
1305 *
1306 * This function is used to create an IPC Router port. The port is used for
1307 * communication locally or outside the subsystem.
1308 */
1309struct msm_ipc_port *
1310msm_ipc_router_create_raw_port(void *endpoint,
1311 void (*notify)(unsigned int event,
1312 void *oob_data,
1313 size_t oob_data_len, void *priv),
1314 void *priv)
1315{
1316 struct msm_ipc_port *port_ptr;
1317
1318 port_ptr = kzalloc(sizeof(*port_ptr), GFP_KERNEL);
1319 if (!port_ptr)
1320 return NULL;
1321
1322 port_ptr->this_port.node_id = IPC_ROUTER_NID_LOCAL;
1323 port_ptr->this_port.port_id = allocate_port_id();
1324 if (!port_ptr->this_port.port_id) {
1325 IPC_RTR_ERR("%s: All port ids are in use\n", __func__);
1326 kfree(port_ptr);
1327 return NULL;
1328 }
1329
1330 mutex_init(&port_ptr->port_lock_lhc3);
1331 INIT_LIST_HEAD(&port_ptr->port_rx_q);
1332 mutex_init(&port_ptr->port_rx_q_lock_lhc3);
1333 init_waitqueue_head(&port_ptr->port_rx_wait_q);
1334 snprintf(port_ptr->rx_ws_name, MAX_WS_NAME_SZ,
Dhoat Harpal3ad5fe32017-06-19 21:26:13 +05301335 "ipc%08x_%d_%s",
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06001336 port_ptr->this_port.port_id,
Dhoat Harpal3ad5fe32017-06-19 21:26:13 +05301337 task_pid_nr(current),
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06001338 current->comm);
1339 port_ptr->port_rx_ws = wakeup_source_register(port_ptr->rx_ws_name);
1340 if (!port_ptr->port_rx_ws) {
1341 kfree(port_ptr);
1342 return NULL;
1343 }
1344 init_waitqueue_head(&port_ptr->port_tx_wait_q);
1345 kref_init(&port_ptr->ref);
1346
1347 port_ptr->endpoint = endpoint;
1348 port_ptr->notify = notify;
1349 port_ptr->priv = priv;
1350
1351 msm_ipc_router_add_local_port(port_ptr);
1352 if (endpoint)
1353 sock_hold(ipc_port_sk(endpoint));
1354 return port_ptr;
1355}
1356
1357/**
1358 * ipc_router_get_port_ref() - Get a reference to the local port
1359 * @port_id: Port ID of the local port for which reference is get.
1360 *
1361 * @return: If port is found, a reference to the port is returned.
1362 * Else NULL is returned.
1363 */
1364static struct msm_ipc_port *ipc_router_get_port_ref(u32 port_id)
1365{
1366 int key = (port_id & (LP_HASH_SIZE - 1));
1367 struct msm_ipc_port *port_ptr;
1368
1369 down_read(&local_ports_lock_lhc2);
1370 list_for_each_entry(port_ptr, &local_ports[key], list) {
1371 if (port_ptr->this_port.port_id == port_id) {
1372 kref_get(&port_ptr->ref);
1373 up_read(&local_ports_lock_lhc2);
1374 return port_ptr;
1375 }
1376 }
1377 up_read(&local_ports_lock_lhc2);
1378 return NULL;
1379}
1380
1381/**
1382 * ipc_router_release_port() - Cleanup and release the port
1383 * @ref: Reference to the port.
1384 *
1385 * This function is called when all references to the port are released.
1386 */
1387void ipc_router_release_port(struct kref *ref)
1388{
1389 struct rr_packet *pkt, *temp_pkt;
1390 struct msm_ipc_port *port_ptr =
1391 container_of(ref, struct msm_ipc_port, ref);
1392
1393 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
1394 list_for_each_entry_safe(pkt, temp_pkt, &port_ptr->port_rx_q, list) {
1395 list_del(&pkt->list);
1396 release_pkt(pkt);
1397 }
1398 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
1399 wakeup_source_unregister(port_ptr->port_rx_ws);
1400 if (port_ptr->endpoint)
1401 sock_put(ipc_port_sk(port_ptr->endpoint));
1402 kfree(port_ptr);
1403}
1404
1405/**
1406 * ipc_router_get_rport_ref()- Get reference to the remote port
1407 * @node_id: Node ID corresponding to the remote port.
1408 * @port_id: Port ID corresponding to the remote port.
1409 *
1410 * @return: a reference to the remote port on success, NULL on failure.
1411 */
1412static struct msm_ipc_router_remote_port *ipc_router_get_rport_ref(
1413 u32 node_id, u32 port_id)
1414{
1415 struct msm_ipc_router_remote_port *rport_ptr;
1416 struct msm_ipc_routing_table_entry *rt_entry;
1417 int key = (port_id & (RP_HASH_SIZE - 1));
1418
1419 rt_entry = ipc_router_get_rtentry_ref(node_id);
1420 if (!rt_entry) {
1421 IPC_RTR_ERR("%s: Node is not up\n", __func__);
1422 return NULL;
1423 }
1424
1425 down_read(&rt_entry->lock_lha4);
1426 list_for_each_entry(rport_ptr,
1427 &rt_entry->remote_port_list[key], list) {
1428 if (rport_ptr->port_id == port_id) {
1429 kref_get(&rport_ptr->ref);
1430 goto out_lookup_rmt_port1;
1431 }
1432 }
1433 rport_ptr = NULL;
1434out_lookup_rmt_port1:
1435 up_read(&rt_entry->lock_lha4);
1436 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
1437 return rport_ptr;
1438}
1439
1440/**
1441 * ipc_router_create_rport() - Create a remote port
1442 * @node_id: Node ID corresponding to the remote port.
1443 * @port_id: Port ID corresponding to the remote port.
1444 * @xprt_info: XPRT through which the concerned node is reachable.
1445 *
1446 * @return: a reference to the remote port on success, NULL on failure.
1447 */
1448static struct msm_ipc_router_remote_port *ipc_router_create_rport(
1449 u32 node_id, u32 port_id,
1450 struct msm_ipc_router_xprt_info *xprt_info)
1451{
1452 struct msm_ipc_router_remote_port *rport_ptr;
1453 struct msm_ipc_routing_table_entry *rt_entry;
1454 int key = (port_id & (RP_HASH_SIZE - 1));
1455
1456 rt_entry = create_routing_table_entry(node_id, xprt_info);
1457 if (!rt_entry) {
1458 IPC_RTR_ERR("%s: Node cannot be created\n", __func__);
1459 return NULL;
1460 }
1461
1462 down_write(&rt_entry->lock_lha4);
1463 list_for_each_entry(rport_ptr,
1464 &rt_entry->remote_port_list[key], list) {
1465 if (rport_ptr->port_id == port_id)
1466 goto out_create_rmt_port1;
1467 }
1468
1469 rport_ptr = kmalloc(sizeof(*rport_ptr), GFP_KERNEL);
1470 if (!rport_ptr) {
1471 IPC_RTR_ERR("%s: Remote port alloc failed\n", __func__);
1472 goto out_create_rmt_port2;
1473 }
1474 rport_ptr->port_id = port_id;
1475 rport_ptr->node_id = node_id;
1476 rport_ptr->status = VALID;
1477 rport_ptr->sec_rule = NULL;
1478 rport_ptr->server = NULL;
1479 rport_ptr->tx_quota_cnt = 0;
1480 kref_init(&rport_ptr->ref);
1481 mutex_init(&rport_ptr->rport_lock_lhb2);
1482 INIT_LIST_HEAD(&rport_ptr->resume_tx_port_list);
1483 INIT_LIST_HEAD(&rport_ptr->conn_info_list);
1484 list_add_tail(&rport_ptr->list,
1485 &rt_entry->remote_port_list[key]);
1486out_create_rmt_port1:
1487 kref_get(&rport_ptr->ref);
1488out_create_rmt_port2:
1489 up_write(&rt_entry->lock_lha4);
1490 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
1491 return rport_ptr;
1492}
1493
1494/**
1495 * msm_ipc_router_free_resume_tx_port() - Free the resume_tx ports
1496 * @rport_ptr: Pointer to the remote port.
1497 *
1498 * This function deletes all the resume_tx ports associated with a remote port
1499 * and frees the memory allocated to each resume_tx port.
1500 *
1501 * Must be called with rport_ptr->rport_lock_lhb2 locked.
1502 */
1503static void msm_ipc_router_free_resume_tx_port(
1504 struct msm_ipc_router_remote_port *rport_ptr)
1505{
1506 struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
1507
1508 list_for_each_entry_safe(rtx_port, tmp_rtx_port,
1509 &rport_ptr->resume_tx_port_list, list) {
1510 list_del(&rtx_port->list);
1511 kfree(rtx_port);
1512 }
1513}
1514
1515/**
1516 * msm_ipc_router_lookup_resume_tx_port() - Lookup resume_tx port list
1517 * @rport_ptr: Remote port whose resume_tx port list needs to be looked.
1518 * @port_id: Port ID which needs to be looked from the list.
1519 *
1520 * return 1 if the port_id is found in the list, else 0.
1521 *
1522 * This function is used to lookup the existence of a local port in
1523 * remote port's resume_tx list. This function is used to ensure that
1524 * the same port is not added to the remote_port's resume_tx list repeatedly.
1525 *
1526 * Must be called with rport_ptr->rport_lock_lhb2 locked.
1527 */
1528static int msm_ipc_router_lookup_resume_tx_port(
1529 struct msm_ipc_router_remote_port *rport_ptr, u32 port_id)
1530{
1531 struct msm_ipc_resume_tx_port *rtx_port;
1532
1533 list_for_each_entry(rtx_port, &rport_ptr->resume_tx_port_list, list) {
1534 if (port_id == rtx_port->port_id)
1535 return 1;
1536 }
1537 return 0;
1538}
1539
1540/**
1541 * ipc_router_dummy_write_space() - Dummy write space available callback
1542 * @sk: Socket pointer for which the callback is called.
1543 */
1544void ipc_router_dummy_write_space(struct sock *sk)
1545{
1546}
1547
1548/**
1549 * post_resume_tx() - Post the resume_tx event
1550 * @rport_ptr: Pointer to the remote port
1551 * @pkt : The data packet that is received on a resume_tx event
1552 * @msg: Out of band data to be passed to kernel drivers
1553 *
1554 * This function informs about the reception of the resume_tx message from a
1555 * remote port pointed by rport_ptr to all the local ports that are in the
1556 * resume_tx_ports_list of this remote port. On posting the information, this
1557 * function sequentially deletes each entry in the resume_tx_port_list of the
1558 * remote port.
1559 *
1560 * Must be called with rport_ptr->rport_lock_lhb2 locked.
1561 */
1562static void post_resume_tx(struct msm_ipc_router_remote_port *rport_ptr,
1563 struct rr_packet *pkt, union rr_control_msg *msg)
1564{
1565 struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
1566 struct msm_ipc_port *local_port;
1567 struct sock *sk;
1568 void (*write_space)(struct sock *sk) = NULL;
1569
1570 list_for_each_entry_safe(rtx_port, tmp_rtx_port,
1571 &rport_ptr->resume_tx_port_list, list) {
1572 local_port = ipc_router_get_port_ref(rtx_port->port_id);
1573 if (local_port && local_port->notify) {
1574 wake_up(&local_port->port_tx_wait_q);
1575 local_port->notify(IPC_ROUTER_CTRL_CMD_RESUME_TX, msg,
1576 sizeof(*msg), local_port->priv);
1577 } else if (local_port) {
1578 wake_up(&local_port->port_tx_wait_q);
1579 sk = ipc_port_sk(local_port->endpoint);
1580 if (sk) {
1581 read_lock(&sk->sk_callback_lock);
1582 write_space = sk->sk_write_space;
1583 read_unlock(&sk->sk_callback_lock);
1584 }
1585 if (write_space &&
1586 write_space != ipc_router_dummy_write_space)
1587 write_space(sk);
1588 else
1589 post_pkt_to_port(local_port, pkt, 1);
1590 } else {
1591 IPC_RTR_ERR("%s: Local Port %d not Found",
1592 __func__, rtx_port->port_id);
1593 }
1594 if (local_port)
1595 kref_put(&local_port->ref, ipc_router_release_port);
1596 list_del(&rtx_port->list);
1597 kfree(rtx_port);
1598 }
1599}
1600
1601/**
1602 * signal_rport_exit() - Signal the local ports of remote port exit
1603 * @rport_ptr: Remote port that is exiting.
1604 *
1605 * This function is used to signal the local ports that are waiting
1606 * to resume transmission to a remote port that is exiting.
1607 */
1608static void signal_rport_exit(struct msm_ipc_router_remote_port *rport_ptr)
1609{
1610 struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
1611 struct msm_ipc_port *local_port;
1612
1613 mutex_lock(&rport_ptr->rport_lock_lhb2);
1614 rport_ptr->status = RESET;
1615 list_for_each_entry_safe(rtx_port, tmp_rtx_port,
1616 &rport_ptr->resume_tx_port_list, list) {
1617 local_port = ipc_router_get_port_ref(rtx_port->port_id);
1618 if (local_port) {
1619 wake_up(&local_port->port_tx_wait_q);
1620 kref_put(&local_port->ref, ipc_router_release_port);
1621 }
1622 list_del(&rtx_port->list);
1623 kfree(rtx_port);
1624 }
1625 mutex_unlock(&rport_ptr->rport_lock_lhb2);
1626}
1627
1628/**
1629 * ipc_router_release_rport() - Cleanup and release the remote port
1630 * @ref: Reference to the remote port.
1631 *
1632 * This function is called when all references to the remote port are released.
1633 */
1634static void ipc_router_release_rport(struct kref *ref)
1635{
1636 struct msm_ipc_router_remote_port *rport_ptr =
1637 container_of(ref, struct msm_ipc_router_remote_port, ref);
1638
1639 mutex_lock(&rport_ptr->rport_lock_lhb2);
1640 msm_ipc_router_free_resume_tx_port(rport_ptr);
1641 mutex_unlock(&rport_ptr->rport_lock_lhb2);
1642 kfree(rport_ptr);
1643}
1644
1645/**
1646 * ipc_router_destroy_rport() - Destroy the remote port
1647 * @rport_ptr: Pointer to the remote port to be destroyed.
1648 */
1649static void ipc_router_destroy_rport(
1650 struct msm_ipc_router_remote_port *rport_ptr)
1651{
1652 u32 node_id;
1653 struct msm_ipc_routing_table_entry *rt_entry;
1654
1655 if (!rport_ptr)
1656 return;
1657
1658 node_id = rport_ptr->node_id;
1659 rt_entry = ipc_router_get_rtentry_ref(node_id);
1660 if (!rt_entry) {
1661 IPC_RTR_ERR("%s: Node %d is not up\n", __func__, node_id);
1662 return;
1663 }
1664 down_write(&rt_entry->lock_lha4);
1665 list_del(&rport_ptr->list);
1666 up_write(&rt_entry->lock_lha4);
1667 signal_rport_exit(rport_ptr);
1668 kref_put(&rport_ptr->ref, ipc_router_release_rport);
1669 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
1670}
1671
1672/**
1673 * msm_ipc_router_lookup_server() - Lookup server information
1674 * @service: Service ID of the server info to be looked up.
1675 * @instance: Instance ID of the server info to be looked up.
1676 * @node_id: Node/Processor ID in which the server is hosted.
1677 * @port_id: Port ID within the node in which the server is hosted.
1678 *
1679 * @return: If found Pointer to server structure, else NULL.
1680 *
1681 * Note1: Lock the server_list_lock_lha2 before accessing this function.
1682 * Note2: If the <node_id:port_id> are <0:0>, then the lookup is restricted
1683 * to <service:instance>. Used only when a client wants to send a
1684 * message to any QMI server.
1685 */
1686static struct msm_ipc_server *msm_ipc_router_lookup_server(
1687 u32 service,
1688 u32 instance,
1689 u32 node_id,
1690 u32 port_id)
1691{
1692 struct msm_ipc_server *server;
1693 struct msm_ipc_server_port *server_port;
1694 int key = (service & (SRV_HASH_SIZE - 1));
1695
1696 list_for_each_entry(server, &server_list[key], list) {
1697 if ((server->name.service != service) ||
1698 (server->name.instance != instance))
1699 continue;
1700 if ((node_id == 0) && (port_id == 0))
1701 return server;
1702 list_for_each_entry(server_port, &server->server_port_list,
1703 list) {
1704 if ((server_port->server_addr.node_id == node_id) &&
1705 (server_port->server_addr.port_id == port_id))
1706 return server;
1707 }
1708 }
1709 return NULL;
1710}
1711
1712/**
1713 * ipc_router_get_server_ref() - Get reference to the server
1714 * @svc: Service ID for which the reference is required.
1715 * @ins: Instance ID for which the reference is required.
1716 * @node_id: Node/Processor ID in which the server is hosted.
1717 * @port_id: Port ID within the node in which the server is hosted.
1718 *
1719 * @return: If found return reference to server, else NULL.
1720 */
1721static struct msm_ipc_server *ipc_router_get_server_ref(
1722 u32 svc, u32 ins, u32 node_id, u32 port_id)
1723{
1724 struct msm_ipc_server *server;
1725
1726 down_read(&server_list_lock_lha2);
1727 server = msm_ipc_router_lookup_server(svc, ins, node_id, port_id);
1728 if (server)
1729 kref_get(&server->ref);
1730 up_read(&server_list_lock_lha2);
1731 return server;
1732}
1733
1734/**
1735 * ipc_router_release_server() - Cleanup and release the server
1736 * @ref: Reference to the server.
1737 *
1738 * This function is called when all references to the server are released.
1739 */
1740static void ipc_router_release_server(struct kref *ref)
1741{
1742 struct msm_ipc_server *server =
1743 container_of(ref, struct msm_ipc_server, ref);
1744
1745 kfree(server);
1746}
1747
1748/**
1749 * msm_ipc_router_create_server() - Add server info to hash table
1750 * @service: Service ID of the server info to be created.
1751 * @instance: Instance ID of the server info to be created.
1752 * @node_id: Node/Processor ID in which the server is hosted.
1753 * @port_id: Port ID within the node in which the server is hosted.
1754 * @xprt_info: XPRT through which the node hosting the server is reached.
1755 *
1756 * @return: Pointer to server structure on success, else NULL.
1757 *
1758 * This function adds the server info to the hash table. If the same
1759 * server(i.e. <service_id:instance_id>) is hosted in different nodes,
1760 * they are maintained as list of "server_port" under "server" structure.
1761 */
1762static struct msm_ipc_server *msm_ipc_router_create_server(
1763 u32 service,
1764 u32 instance,
1765 u32 node_id,
1766 u32 port_id,
1767 struct msm_ipc_router_xprt_info *xprt_info)
1768{
1769 struct msm_ipc_server *server = NULL;
1770 struct msm_ipc_server_port *server_port;
1771 struct platform_device *pdev;
1772 int key = (service & (SRV_HASH_SIZE - 1));
1773
1774 down_write(&server_list_lock_lha2);
1775 server = msm_ipc_router_lookup_server(service, instance, 0, 0);
1776 if (server) {
1777 list_for_each_entry(server_port, &server->server_port_list,
1778 list) {
1779 if ((server_port->server_addr.node_id == node_id) &&
1780 (server_port->server_addr.port_id == port_id))
1781 goto return_server;
1782 }
1783 goto create_srv_port;
1784 }
1785
1786 server = kzalloc(sizeof(*server), GFP_KERNEL);
1787 if (!server) {
1788 up_write(&server_list_lock_lha2);
1789 IPC_RTR_ERR("%s: Server allocation failed\n", __func__);
1790 return NULL;
1791 }
1792 server->name.service = service;
1793 server->name.instance = instance;
1794 server->synced_sec_rule = 0;
1795 INIT_LIST_HEAD(&server->server_port_list);
1796 kref_init(&server->ref);
1797 list_add_tail(&server->list, &server_list[key]);
1798 scnprintf(server->pdev_name, sizeof(server->pdev_name),
1799 "SVC%08x:%08x", service, instance);
1800 server->next_pdev_id = 1;
1801
1802create_srv_port:
1803 server_port = kzalloc(sizeof(*server_port), GFP_KERNEL);
1804 pdev = platform_device_alloc(server->pdev_name, server->next_pdev_id);
1805 if (!server_port || !pdev) {
1806 kfree(server_port);
1807 if (pdev)
1808 platform_device_put(pdev);
1809 if (list_empty(&server->server_port_list)) {
1810 list_del(&server->list);
1811 kfree(server);
1812 }
1813 up_write(&server_list_lock_lha2);
1814 IPC_RTR_ERR("%s: Server Port allocation failed\n", __func__);
1815 return NULL;
1816 }
1817 server_port->pdev = pdev;
1818 server_port->server_addr.node_id = node_id;
1819 server_port->server_addr.port_id = port_id;
1820 server_port->xprt_info = xprt_info;
1821 list_add_tail(&server_port->list, &server->server_port_list);
1822 server->next_pdev_id++;
1823 platform_device_add(server_port->pdev);
1824
1825return_server:
1826 /* Add a reference so that the caller can put it back */
1827 kref_get(&server->ref);
1828 up_write(&server_list_lock_lha2);
1829 return server;
1830}
1831
1832/**
1833 * ipc_router_destroy_server_nolock() - Remove server info from hash table
1834 * @server: Server info to be removed.
1835 * @node_id: Node/Processor ID in which the server is hosted.
1836 * @port_id: Port ID within the node in which the server is hosted.
1837 *
1838 * This function removes the server_port identified using <node_id:port_id>
1839 * from the server structure. If the server_port list under server structure
1840 * is empty after removal, then remove the server structure from the server
1841 * hash table. This function must be called with server_list_lock_lha2 locked.
1842 */
1843static void ipc_router_destroy_server_nolock(struct msm_ipc_server *server,
1844 u32 node_id, u32 port_id)
1845{
1846 struct msm_ipc_server_port *server_port;
1847 bool server_port_found = false;
1848
1849 if (!server)
1850 return;
1851
1852 list_for_each_entry(server_port, &server->server_port_list, list) {
1853 if ((server_port->server_addr.node_id == node_id) &&
1854 (server_port->server_addr.port_id == port_id)) {
1855 server_port_found = true;
1856 break;
1857 }
1858 }
1859 if (server_port_found && server_port) {
1860 platform_device_unregister(server_port->pdev);
1861 list_del(&server_port->list);
1862 kfree(server_port);
1863 }
1864 if (list_empty(&server->server_port_list)) {
1865 list_del(&server->list);
1866 kref_put(&server->ref, ipc_router_release_server);
1867 }
1868}
1869
1870/**
1871 * ipc_router_destroy_server() - Remove server info from hash table
1872 * @server: Server info to be removed.
1873 * @node_id: Node/Processor ID in which the server is hosted.
1874 * @port_id: Port ID within the node in which the server is hosted.
1875 *
1876 * This function removes the server_port identified using <node_id:port_id>
1877 * from the server structure. If the server_port list under server structure
1878 * is empty after removal, then remove the server structure from the server
1879 * hash table.
1880 */
1881static void ipc_router_destroy_server(struct msm_ipc_server *server,
1882 u32 node_id, u32 port_id)
1883{
1884 down_write(&server_list_lock_lha2);
1885 ipc_router_destroy_server_nolock(server, node_id, port_id);
1886 up_write(&server_list_lock_lha2);
1887}
1888
1889static int ipc_router_send_ctl_msg(
1890 struct msm_ipc_router_xprt_info *xprt_info,
1891 union rr_control_msg *msg,
1892 u32 dst_node_id)
1893{
1894 struct rr_packet *pkt;
1895 struct sk_buff *ipc_rtr_pkt;
1896 struct rr_header_v1 *hdr;
1897 int pkt_size;
1898 void *data;
1899 int ret = -EINVAL;
1900
1901 pkt = create_pkt(NULL);
1902 if (!pkt) {
1903 IPC_RTR_ERR("%s: pkt alloc failed\n", __func__);
1904 return -ENOMEM;
1905 }
1906
1907 pkt_size = IPC_ROUTER_HDR_SIZE + sizeof(*msg);
1908 ipc_rtr_pkt = alloc_skb(pkt_size, GFP_KERNEL);
1909 if (!ipc_rtr_pkt) {
1910 IPC_RTR_ERR("%s: ipc_rtr_pkt alloc failed\n", __func__);
1911 release_pkt(pkt);
1912 return -ENOMEM;
1913 }
1914
1915 skb_reserve(ipc_rtr_pkt, IPC_ROUTER_HDR_SIZE);
1916 data = skb_put(ipc_rtr_pkt, sizeof(*msg));
1917 memcpy(data, msg, sizeof(*msg));
1918 skb_queue_tail(pkt->pkt_fragment_q, ipc_rtr_pkt);
1919 pkt->length = sizeof(*msg);
1920
1921 hdr = &pkt->hdr;
1922 hdr->version = IPC_ROUTER_V1;
1923 hdr->type = msg->cmd;
1924 hdr->src_node_id = IPC_ROUTER_NID_LOCAL;
1925 hdr->src_port_id = IPC_ROUTER_ADDRESS;
1926 hdr->control_flag = 0;
1927 hdr->size = sizeof(*msg);
1928 if (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX ||
1929 (!xprt_info && dst_node_id == IPC_ROUTER_NID_LOCAL))
1930 hdr->dst_node_id = dst_node_id;
1931 else if (xprt_info)
1932 hdr->dst_node_id = xprt_info->remote_node_id;
1933 hdr->dst_port_id = IPC_ROUTER_ADDRESS;
1934
1935 if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
1936 msg->cmd != IPC_ROUTER_CTRL_CMD_RESUME_TX) {
1937 ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
1938 hdr, NULL, NULL);
1939 ret = post_control_ports(pkt);
1940 } else if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
1941 msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX) {
1942 ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
1943 hdr, NULL, NULL);
1944 ret = process_resume_tx_msg(msg, pkt);
1945 } else if (xprt_info && (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO ||
1946 xprt_info->initialized)) {
1947 mutex_lock(&xprt_info->tx_lock_lhb2);
1948 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_TX,
1949 msg, hdr, NULL, NULL);
1950 ret = prepend_header(pkt, xprt_info);
1951 if (ret < 0) {
1952 mutex_unlock(&xprt_info->tx_lock_lhb2);
1953 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
1954 release_pkt(pkt);
1955 return ret;
1956 }
1957
1958 ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
1959 mutex_unlock(&xprt_info->tx_lock_lhb2);
1960 }
1961
1962 release_pkt(pkt);
1963 return ret;
1964}
1965
1966static int
1967msm_ipc_router_send_server_list(u32 node_id,
1968 struct msm_ipc_router_xprt_info *xprt_info)
1969{
1970 union rr_control_msg ctl;
1971 struct msm_ipc_server *server;
1972 struct msm_ipc_server_port *server_port;
1973 int i;
1974
1975 if (!xprt_info || !xprt_info->initialized) {
1976 IPC_RTR_ERR("%s: Xprt info not initialized\n", __func__);
1977 return -EINVAL;
1978 }
1979
1980 memset(&ctl, 0, sizeof(ctl));
1981 ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
1982
1983 for (i = 0; i < SRV_HASH_SIZE; i++) {
1984 list_for_each_entry(server, &server_list[i], list) {
1985 ctl.srv.service = server->name.service;
1986 ctl.srv.instance = server->name.instance;
1987 list_for_each_entry(server_port,
1988 &server->server_port_list, list) {
1989 if (server_port->server_addr.node_id !=
1990 node_id)
1991 continue;
1992
1993 ctl.srv.node_id =
1994 server_port->server_addr.node_id;
1995 ctl.srv.port_id =
1996 server_port->server_addr.port_id;
1997 ipc_router_send_ctl_msg
1998 (xprt_info, &ctl,
1999 IPC_ROUTER_DUMMY_DEST_NODE);
2000 }
2001 }
2002 }
2003
2004 return 0;
2005}
2006
2007static int broadcast_ctl_msg_locally(union rr_control_msg *msg)
2008{
2009 return ipc_router_send_ctl_msg(NULL, msg, IPC_ROUTER_NID_LOCAL);
2010}
2011
2012static int broadcast_ctl_msg(union rr_control_msg *ctl)
2013{
2014 struct msm_ipc_router_xprt_info *xprt_info;
2015
2016 down_read(&xprt_info_list_lock_lha5);
2017 list_for_each_entry(xprt_info, &xprt_info_list, list) {
2018 ipc_router_send_ctl_msg(xprt_info, ctl,
2019 IPC_ROUTER_DUMMY_DEST_NODE);
2020 }
2021 up_read(&xprt_info_list_lock_lha5);
2022 broadcast_ctl_msg_locally(ctl);
2023
2024 return 0;
2025}
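
/*
 * Minimal sketch (illustrative only) of how control messages are built in
 * this file: zero the union, select a command, fill the matching member and
 * hand it to broadcast_ctl_msg(), which fans it out on every transport and
 * then loops it back to the local control ports.
 */
static void __maybe_unused example_broadcast_remove_client(u32 node_id,
							    u32 port_id)
{
	union rr_control_msg ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
	ctl.cli.node_id = node_id;
	ctl.cli.port_id = port_id;
	broadcast_ctl_msg(&ctl);
}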
2026
2027static int relay_ctl_msg(struct msm_ipc_router_xprt_info *xprt_info,
2028 union rr_control_msg *ctl)
2029{
2030 struct msm_ipc_router_xprt_info *fwd_xprt_info;
2031
2032 if (!xprt_info || !ctl)
2033 return -EINVAL;
2034
2035 down_read(&xprt_info_list_lock_lha5);
2036 list_for_each_entry(fwd_xprt_info, &xprt_info_list, list) {
2037 if (xprt_info->xprt->link_id != fwd_xprt_info->xprt->link_id)
2038 ipc_router_send_ctl_msg(fwd_xprt_info, ctl,
2039 IPC_ROUTER_DUMMY_DEST_NODE);
2040 }
2041 up_read(&xprt_info_list_lock_lha5);
2042
2043 return 0;
2044}
2045
2046static int forward_msg(struct msm_ipc_router_xprt_info *xprt_info,
2047 struct rr_packet *pkt)
2048{
2049 struct rr_header_v1 *hdr;
2050 struct msm_ipc_router_xprt_info *fwd_xprt_info;
2051 struct msm_ipc_routing_table_entry *rt_entry;
2052 int ret = 0;
2053 int fwd_xprt_option;
2054
2055 if (!xprt_info || !pkt)
2056 return -EINVAL;
2057
2058 hdr = &pkt->hdr;
2059 rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
2060 if (!(rt_entry) || !(rt_entry->xprt_info)) {
2061 IPC_RTR_ERR("%s: Routing table not initialized\n", __func__);
2062 ret = -ENODEV;
2063 goto fm_error1;
2064 }
2065
2066 down_read(&rt_entry->lock_lha4);
2067 fwd_xprt_info = rt_entry->xprt_info;
2068 ret = ipc_router_get_xprt_info_ref(fwd_xprt_info);
2069 if (ret < 0) {
2070 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
2071 goto fm_error_xprt;
2072 }
2073 ret = prepend_header(pkt, fwd_xprt_info);
2074 if (ret < 0) {
2075 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
2076 goto fm_error2;
2077 }
2078 fwd_xprt_option = fwd_xprt_info->xprt->get_option(fwd_xprt_info->xprt);
2079 if (!(fwd_xprt_option & FRAG_PKT_WRITE_ENABLE)) {
2080 ret = defragment_pkt(pkt);
2081 if (ret < 0)
2082 goto fm_error2;
2083 }
2084
2085 mutex_lock(&fwd_xprt_info->tx_lock_lhb2);
2086 if (xprt_info->remote_node_id == fwd_xprt_info->remote_node_id) {
2087 IPC_RTR_ERR("%s: Discarding Command to route back\n", __func__);
2088 ret = -EINVAL;
2089 goto fm_error3;
2090 }
2091
2092 if (xprt_info->xprt->link_id == fwd_xprt_info->xprt->link_id) {
2093 IPC_RTR_ERR("%s: DST in the same cluster\n", __func__);
2094 ret = 0;
2095 goto fm_error3;
2096 }
2097 fwd_xprt_info->xprt->write(pkt, pkt->length, fwd_xprt_info->xprt);
2098 IPC_RTR_INFO(fwd_xprt_info->log_ctx,
2099 "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
2100 "FWD", "TX", hdr->size, hdr->type, hdr->control_flag,
2101 hdr->src_node_id, hdr->src_port_id,
2102 hdr->dst_node_id, hdr->dst_port_id);
2103
2104fm_error3:
2105 mutex_unlock(&fwd_xprt_info->tx_lock_lhb2);
2106fm_error2:
2107 ipc_router_put_xprt_info_ref(fwd_xprt_info);
2108fm_error_xprt:
2109 up_read(&rt_entry->lock_lha4);
2110fm_error1:
2111 if (rt_entry)
2112 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2113 return ret;
2114}
2115
2116static int msm_ipc_router_send_remove_client(struct comm_mode_info *mode_info,
2117 u32 node_id, u32 port_id)
2118{
2119 union rr_control_msg msg;
2120 struct msm_ipc_router_xprt_info *tmp_xprt_info;
2121 int mode;
2122 void *xprt_info;
2123 int rc = 0;
2124
2125 if (!mode_info) {
2126 IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
2127 return -EINVAL;
2128 }
2129 mode = mode_info->mode;
2130 xprt_info = mode_info->xprt_info;
2131
2132 memset(&msg, 0, sizeof(msg));
2133 msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
2134 msg.cli.node_id = node_id;
2135 msg.cli.port_id = port_id;
2136
2137 if ((mode == SINGLE_LINK_MODE) && xprt_info) {
2138 down_read(&xprt_info_list_lock_lha5);
2139 list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
2140 if (tmp_xprt_info != xprt_info)
2141 continue;
2142 ipc_router_send_ctl_msg(tmp_xprt_info, &msg,
2143 IPC_ROUTER_DUMMY_DEST_NODE);
2144 break;
2145 }
2146 up_read(&xprt_info_list_lock_lha5);
2147 } else if ((mode == SINGLE_LINK_MODE) && !xprt_info) {
2148 broadcast_ctl_msg_locally(&msg);
2149 } else if (mode == MULTI_LINK_MODE) {
2150 broadcast_ctl_msg(&msg);
2151 } else if (mode != NULL_MODE) {
2152 IPC_RTR_ERR(
2153 "%s: Invalid mode(%d) + xprt_inf(%p) for %08x:%08x\n",
2154 __func__, mode, xprt_info, node_id, port_id);
2155 rc = -EINVAL;
2156 }
2157 return rc;
2158}
2159
2160static void update_comm_mode_info(struct comm_mode_info *mode_info,
2161 struct msm_ipc_router_xprt_info *xprt_info)
2162{
2163 if (!mode_info) {
2164 IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
2165 return;
2166 }
2167
2168 if (mode_info->mode == NULL_MODE) {
2169 mode_info->xprt_info = xprt_info;
2170 mode_info->mode = SINGLE_LINK_MODE;
2171 } else if (mode_info->mode == SINGLE_LINK_MODE &&
2172 mode_info->xprt_info != xprt_info) {
2173 mode_info->mode = MULTI_LINK_MODE;
2174 }
2175}
2176
2177/**
2178 * cleanup_rmt_server() - Cleanup server hosted in the remote port
2179 * @xprt_info: XPRT through which this cleanup event is handled.
2180 * @rport_ptr: Remote port that is being cleaned up.
2181 * @server: Server that is hosted in the remote port.
2182 */
2183static void cleanup_rmt_server(struct msm_ipc_router_xprt_info *xprt_info,
2184 struct msm_ipc_router_remote_port *rport_ptr,
2185 struct msm_ipc_server *server)
2186{
2187 union rr_control_msg ctl;
2188
2189 memset(&ctl, 0, sizeof(ctl));
2190 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
2191 ctl.srv.service = server->name.service;
2192 ctl.srv.instance = server->name.instance;
2193 ctl.srv.node_id = rport_ptr->node_id;
2194 ctl.srv.port_id = rport_ptr->port_id;
2195 if (xprt_info)
2196 relay_ctl_msg(xprt_info, &ctl);
2197 broadcast_ctl_msg_locally(&ctl);
2198 ipc_router_destroy_server_nolock(server, rport_ptr->node_id,
2199 rport_ptr->port_id);
2200}
2201
2202static void cleanup_rmt_ports(struct msm_ipc_router_xprt_info *xprt_info,
2203 struct msm_ipc_routing_table_entry *rt_entry)
2204{
2205 struct msm_ipc_router_remote_port *rport_ptr, *tmp_rport_ptr;
2206 struct msm_ipc_server *server;
2207 union rr_control_msg ctl;
2208 int j;
2209
2210 memset(&ctl, 0, sizeof(ctl));
2211 for (j = 0; j < RP_HASH_SIZE; j++) {
2212 list_for_each_entry_safe(rport_ptr, tmp_rport_ptr,
2213 &rt_entry->remote_port_list[j], list) {
2214 list_del(&rport_ptr->list);
2215 mutex_lock(&rport_ptr->rport_lock_lhb2);
2216 server = rport_ptr->server;
2217 rport_ptr->server = NULL;
2218 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2219 ipc_router_reset_conn(rport_ptr);
2220 if (server) {
2221 cleanup_rmt_server(xprt_info, rport_ptr,
2222 server);
2223 server = NULL;
2224 }
2225
2226 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
2227 ctl.cli.node_id = rport_ptr->node_id;
2228 ctl.cli.port_id = rport_ptr->port_id;
2229 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2230
2231 relay_ctl_msg(xprt_info, &ctl);
2232 broadcast_ctl_msg_locally(&ctl);
2233 }
2234 }
2235}
2236
2237static void msm_ipc_cleanup_routing_table(
2238 struct msm_ipc_router_xprt_info *xprt_info)
2239{
2240 int i;
2241 struct msm_ipc_routing_table_entry *rt_entry, *tmp_rt_entry;
2242
2243 if (!xprt_info) {
2244 IPC_RTR_ERR("%s: Invalid xprt_info\n", __func__);
2245 return;
2246 }
2247
2248 down_write(&server_list_lock_lha2);
2249 down_write(&routing_table_lock_lha3);
2250 for (i = 0; i < RT_HASH_SIZE; i++) {
2251 list_for_each_entry_safe(rt_entry, tmp_rt_entry,
2252 &routing_table[i], list) {
2253 down_write(&rt_entry->lock_lha4);
2254 if (rt_entry->xprt_info != xprt_info) {
2255 up_write(&rt_entry->lock_lha4);
2256 continue;
2257 }
2258 cleanup_rmt_ports(xprt_info, rt_entry);
2259 rt_entry->xprt_info = NULL;
2260 up_write(&rt_entry->lock_lha4);
2261 list_del(&rt_entry->list);
2262 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2263 }
2264 }
2265 up_write(&routing_table_lock_lha3);
2266 up_write(&server_list_lock_lha2);
2267}
2268
2269/**
2270 * sync_sec_rule() - Synchronize the security rule into the server structure
2271 * @server: Server structure where the rule has to be synchronized.
2272 * @rule: Security rule to be synchronized.
2273 *
2274 * This function is used to update the server structure with the security
2275 * rule configured for the <service:instance> corresponding to that server.
2276 */
2277static void sync_sec_rule(struct msm_ipc_server *server, void *rule)
2278{
2279 struct msm_ipc_server_port *server_port;
2280 struct msm_ipc_router_remote_port *rport_ptr = NULL;
2281
2282 list_for_each_entry(server_port, &server->server_port_list, list) {
2283 rport_ptr = ipc_router_get_rport_ref(
2284 server_port->server_addr.node_id,
2285 server_port->server_addr.port_id);
2286 if (!rport_ptr)
2287 continue;
2288 rport_ptr->sec_rule = rule;
2289 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2290 }
2291 server->synced_sec_rule = 1;
2292}
2293
2294/**
2295 * msm_ipc_sync_sec_rule() - Sync the security rule to the service
2296 * @service: Service for which the rule has to be synchronized.
2297 * @instance: Instance for which the rule has to be synchronized.
2298 * @rule: Security rule to be synchronized.
2299 *
2300 * This function is used to synchronize the security rule with the server
2301 * hash table, if the user-space script configures the rule after the service
2302 * has come up. This function is used to synchronize the security rule to a
2303 * specific service and optionally a specific instance.
2304 */
2305void msm_ipc_sync_sec_rule(u32 service, u32 instance, void *rule)
2306{
2307 int key = (service & (SRV_HASH_SIZE - 1));
2308 struct msm_ipc_server *server;
2309
2310 down_write(&server_list_lock_lha2);
2311 list_for_each_entry(server, &server_list[key], list) {
2312 if (server->name.service != service)
2313 continue;
2314
2315 if (server->name.instance != instance &&
2316 instance != ALL_INSTANCE)
2317 continue;
2318
2319 /* If the rule applies to all instances and if the specific
2320 * instance of a service has a rule synchronized already,
2321 * do not apply the rule for that specific instance.
2322 */
2323 if (instance == ALL_INSTANCE && server->synced_sec_rule)
2324 continue;
2325
2326 sync_sec_rule(server, rule);
2327 }
2328 up_write(&server_list_lock_lha2);
2329}
2330
2331/**
2332 * msm_ipc_sync_default_sec_rule() - Default security rule to all services
2333 * @rule: Security rule to be synchronized.
2334 *
2335 * This function is used to synchronize the security rule with the server
2336 * hash table, if the user-space script configures the rule after the service
2337 * has come up. This function is used to synchronize the security rule that
2338 * applies to all services, if the concerned service does not have any rule
2339 * defined.
2340 */
2341void msm_ipc_sync_default_sec_rule(void *rule)
2342{
2343 int key;
2344 struct msm_ipc_server *server;
2345
2346 down_write(&server_list_lock_lha2);
2347 for (key = 0; key < SRV_HASH_SIZE; key++) {
2348 list_for_each_entry(server, &server_list[key], list) {
2349 if (server->synced_sec_rule)
2350 continue;
2351
2352 sync_sec_rule(server, rule);
2353 }
2354 }
2355 up_write(&server_list_lock_lha2);
2356}
2357
2358/**
2359 * ipc_router_reset_conn() - Reset the connection to remote port
2360 * @rport_ptr: Pointer to the remote port to be disconnected.
2361 *
2362 * This function is used to reset all the local ports that are connected to
2363 * the remote port being passed.
2364 */
2365static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr)
2366{
2367 struct msm_ipc_port *port_ptr;
2368 struct ipc_router_conn_info *conn_info, *tmp_conn_info;
2369
2370 mutex_lock(&rport_ptr->rport_lock_lhb2);
2371 list_for_each_entry_safe(conn_info, tmp_conn_info,
2372 &rport_ptr->conn_info_list, list) {
2373 port_ptr = ipc_router_get_port_ref(conn_info->port_id);
2374 if (port_ptr) {
2375 mutex_lock(&port_ptr->port_lock_lhc3);
2376 port_ptr->conn_status = CONNECTION_RESET;
2377 mutex_unlock(&port_ptr->port_lock_lhc3);
2378 wake_up(&port_ptr->port_rx_wait_q);
2379 kref_put(&port_ptr->ref, ipc_router_release_port);
2380 }
2381
2382 list_del(&conn_info->list);
2383 kfree(conn_info);
2384 }
2385 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2386}
2387
2388/**
2389 * ipc_router_set_conn() - Set the connection by initializing dest address
2390 * @port_ptr: Local port in which the connection has to be set.
2391 * @addr: Destination address of the connection.
2392 *
2393 * @return: 0 on success, standard Linux error codes on failure.
2394 */
2395int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
2396 struct msm_ipc_addr *addr)
2397{
2398 struct msm_ipc_router_remote_port *rport_ptr;
2399 struct ipc_router_conn_info *conn_info;
2400
2401 if (unlikely(!port_ptr || !addr))
2402 return -EINVAL;
2403
2404 if (addr->addrtype != MSM_IPC_ADDR_ID) {
2405 IPC_RTR_ERR("%s: Invalid Address type\n", __func__);
2406 return -EINVAL;
2407 }
2408
2409 if (port_ptr->type == SERVER_PORT) {
2410 IPC_RTR_ERR("%s: Connection refused on a server port\n",
2411 __func__);
2412 return -ECONNREFUSED;
2413 }
2414
2415 if (port_ptr->conn_status == CONNECTED) {
2416 IPC_RTR_ERR("%s: Port %08x already connected\n",
2417 __func__, port_ptr->this_port.port_id);
2418 return -EISCONN;
2419 }
2420
2421 conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
2422 if (!conn_info) {
2423 IPC_RTR_ERR("%s: Error allocating conn_info\n", __func__);
2424 return -ENOMEM;
2425 }
2426 INIT_LIST_HEAD(&conn_info->list);
2427 conn_info->port_id = port_ptr->this_port.port_id;
2428
2429 rport_ptr = ipc_router_get_rport_ref(addr->addr.port_addr.node_id,
2430 addr->addr.port_addr.port_id);
2431 if (!rport_ptr) {
2432 IPC_RTR_ERR("%s: Invalid remote endpoint\n", __func__);
2433 kfree(conn_info);
2434 return -ENODEV;
2435 }
2436 mutex_lock(&rport_ptr->rport_lock_lhb2);
2437 list_add_tail(&conn_info->list, &rport_ptr->conn_info_list);
2438 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2439
2440 mutex_lock(&port_ptr->port_lock_lhc3);
2441 memcpy(&port_ptr->dest_addr, &addr->addr.port_addr,
2442 sizeof(struct msm_ipc_port_addr));
2443 port_ptr->conn_status = CONNECTED;
2444 mutex_unlock(&port_ptr->port_lock_lhc3);
2445 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2446 return 0;
2447}
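
/*
 * Minimal sketch (illustrative only) of connecting a local client port with
 * ipc_router_set_conn(). A real caller would obtain the <node_id:port_id>
 * pair from a server lookup; here they are passed in as placeholders.
 */
static int __maybe_unused example_connect_port(struct msm_ipc_port *port,
					       u32 node_id, u32 port_id)
{
	struct msm_ipc_addr dest;

	memset(&dest, 0, sizeof(dest));
	dest.addrtype = MSM_IPC_ADDR_ID;
	dest.addr.port_addr.node_id = node_id;
	dest.addr.port_addr.port_id = port_id;

	/* Returns -EISCONN if @port is already connected */
	return ipc_router_set_conn(port, &dest);
}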
2448
2449/**
2450 * do_version_negotiation() - perform a version negotiation and set the version
2451 * @xprt_info: Pointer to the IPC Router transport info structure.
2452 * @msg: Pointer to the IPC Router HELLO message.
2453 *
2454 * This function performs the version negotiation by verifying the computed
2455 * checksum first. If the checksum matches the magic number, it sets the
2456 * negotiated IPC Router version on the transport.
2457 */
2458static void do_version_negotiation(struct msm_ipc_router_xprt_info *xprt_info,
2459 union rr_control_msg *msg)
2460{
2461 u32 magic;
2462 unsigned int version;
2463
2464 if (!xprt_info)
2465 return;
2466 magic = ipc_router_calc_checksum(msg);
2467 if (magic == IPC_ROUTER_HELLO_MAGIC) {
2468 version = fls(msg->hello.versions & IPC_ROUTER_VER_BITMASK) - 1;
2469		/* Bits 0 & 31 are reserved for future usage */
2470 if ((version > 0) &&
2471 (version != (sizeof(version) * BITS_PER_BYTE - 1)) &&
2472 xprt_info->xprt->set_version)
2473 xprt_info->xprt->set_version(xprt_info->xprt, version);
2474 }
2475}
2476
2477static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
2478 union rr_control_msg *msg,
2479 struct rr_header_v1 *hdr)
2480{
2481 int i, rc = 0;
2482 union rr_control_msg ctl;
2483 struct msm_ipc_routing_table_entry *rt_entry;
2484
2485 if (!hdr)
2486 return -EINVAL;
2487
2488 xprt_info->remote_node_id = hdr->src_node_id;
2489 rt_entry = create_routing_table_entry(hdr->src_node_id, xprt_info);
2490 if (!rt_entry) {
2491 IPC_RTR_ERR("%s: rt_entry allocation failed\n", __func__);
2492 return -ENOMEM;
2493 }
2494 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2495
2496 do_version_negotiation(xprt_info, msg);
2497 /* Send a reply HELLO message */
2498 memset(&ctl, 0, sizeof(ctl));
2499 ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
2500 ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
2501 ctl.hello.versions = (u32)IPC_ROUTER_VER_BITMASK;
2502 ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
2503 rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
2504 IPC_ROUTER_DUMMY_DEST_NODE);
2505 if (rc < 0) {
2506 IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
2507 __func__);
2508 return rc;
2509 }
2510 xprt_info->initialized = 1;
2511
2512 /* Send list of servers from the local node and from nodes
2513 * outside the mesh network of which this XPRT is a part.
2514 */
2515 down_read(&server_list_lock_lha2);
2516 down_read(&routing_table_lock_lha3);
2517 for (i = 0; i < RT_HASH_SIZE; i++) {
2518 list_for_each_entry(rt_entry, &routing_table[i], list) {
2519 if ((rt_entry->node_id != IPC_ROUTER_NID_LOCAL) &&
2520 (!rt_entry->xprt_info ||
2521 (rt_entry->xprt_info->xprt->link_id ==
2522 xprt_info->xprt->link_id)))
2523 continue;
2524 rc = msm_ipc_router_send_server_list(rt_entry->node_id,
2525 xprt_info);
2526 if (rc < 0) {
2527 up_read(&routing_table_lock_lha3);
2528 up_read(&server_list_lock_lha2);
2529 return rc;
2530 }
2531 }
2532 }
2533 up_read(&routing_table_lock_lha3);
2534 up_read(&server_list_lock_lha2);
2535 return rc;
2536}
2537
2538static int process_resume_tx_msg(union rr_control_msg *msg,
2539 struct rr_packet *pkt)
2540{
2541 struct msm_ipc_router_remote_port *rport_ptr;
2542
2543 rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
2544 msg->cli.port_id);
2545 if (!rport_ptr) {
2546 IPC_RTR_ERR("%s: Unable to resume client\n", __func__);
2547 return -ENODEV;
2548 }
2549 mutex_lock(&rport_ptr->rport_lock_lhb2);
2550 rport_ptr->tx_quota_cnt = 0;
2551 post_resume_tx(rport_ptr, pkt, msg);
2552 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2553 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2554 return 0;
2555}
2556
2557static int process_new_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
2558 union rr_control_msg *msg,
2559 struct rr_packet *pkt)
2560{
2561 struct msm_ipc_routing_table_entry *rt_entry;
2562 struct msm_ipc_server *server;
2563 struct msm_ipc_router_remote_port *rport_ptr;
2564
2565 if (msg->srv.instance == 0) {
2566 IPC_RTR_ERR("%s: Server %08x create rejected, version = 0\n",
2567 __func__, msg->srv.service);
2568 return -EINVAL;
2569 }
2570
2571 rt_entry = ipc_router_get_rtentry_ref(msg->srv.node_id);
2572 if (!rt_entry) {
2573 rt_entry = create_routing_table_entry(msg->srv.node_id,
2574 xprt_info);
2575 if (!rt_entry) {
2576 IPC_RTR_ERR("%s: rt_entry allocation failed\n",
2577 __func__);
2578 return -ENOMEM;
2579 }
2580 }
2581 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2582
2583 /* If the service already exists in the table, create_server returns
2584 * a reference to it.
2585 */
2586 rport_ptr = ipc_router_create_rport(msg->srv.node_id,
2587 msg->srv.port_id, xprt_info);
2588 if (!rport_ptr)
2589 return -ENOMEM;
2590
2591 server = msm_ipc_router_create_server(
2592 msg->srv.service, msg->srv.instance,
2593 msg->srv.node_id, msg->srv.port_id, xprt_info);
2594 if (!server) {
2595 IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
2596 __func__, msg->srv.service, msg->srv.instance);
2597 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2598 ipc_router_destroy_rport(rport_ptr);
2599 return -ENOMEM;
2600 }
2601 mutex_lock(&rport_ptr->rport_lock_lhb2);
2602 rport_ptr->server = server;
2603 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2604 rport_ptr->sec_rule = msm_ipc_get_security_rule(
2605 msg->srv.service, msg->srv.instance);
2606 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2607 kref_put(&server->ref, ipc_router_release_server);
2608
2609 /* Relay the new server message to other subsystems that do not belong
2610 * to the cluster from which this message is received. Notify the
2611 * local clients waiting for this service.
2612 */
2613 relay_ctl_msg(xprt_info, msg);
2614 post_control_ports(pkt);
2615 return 0;
2616}
2617
2618static int process_rmv_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
2619 union rr_control_msg *msg,
2620 struct rr_packet *pkt)
2621{
2622 struct msm_ipc_server *server;
2623 struct msm_ipc_router_remote_port *rport_ptr;
2624
2625 server = ipc_router_get_server_ref(msg->srv.service, msg->srv.instance,
2626 msg->srv.node_id, msg->srv.port_id);
2627 rport_ptr = ipc_router_get_rport_ref(msg->srv.node_id,
2628 msg->srv.port_id);
2629 if (rport_ptr) {
2630 mutex_lock(&rport_ptr->rport_lock_lhb2);
2631 if (rport_ptr->server == server)
2632 rport_ptr->server = NULL;
2633 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2634 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2635 }
2636
2637 if (server) {
2638 kref_put(&server->ref, ipc_router_release_server);
2639 ipc_router_destroy_server(server, msg->srv.node_id,
2640 msg->srv.port_id);
2641		/* Relay the remove server message to other subsystems that do not
2642 * belong to the cluster from which this message is received.
2643 * Notify the local clients communicating with the service.
2644 */
2645 relay_ctl_msg(xprt_info, msg);
2646 post_control_ports(pkt);
2647 }
2648 return 0;
2649}
2650
2651static int process_rmv_client_msg(struct msm_ipc_router_xprt_info *xprt_info,
2652 union rr_control_msg *msg,
2653 struct rr_packet *pkt)
2654{
2655 struct msm_ipc_router_remote_port *rport_ptr;
2656 struct msm_ipc_server *server;
2657
2658 rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
2659 msg->cli.port_id);
2660 if (rport_ptr) {
2661 mutex_lock(&rport_ptr->rport_lock_lhb2);
2662 server = rport_ptr->server;
2663 rport_ptr->server = NULL;
2664 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2665 ipc_router_reset_conn(rport_ptr);
2666 down_write(&server_list_lock_lha2);
2667 if (server)
2668 cleanup_rmt_server(NULL, rport_ptr, server);
2669 up_write(&server_list_lock_lha2);
2670 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2671 ipc_router_destroy_rport(rport_ptr);
2672 }
2673
2674 relay_ctl_msg(xprt_info, msg);
2675 post_control_ports(pkt);
2676 return 0;
2677}
2678
2679static int process_control_msg(struct msm_ipc_router_xprt_info *xprt_info,
2680 struct rr_packet *pkt)
2681{
2682 union rr_control_msg *msg;
2683 int rc = 0;
2684 struct rr_header_v1 *hdr;
2685
2686 if (pkt->length != sizeof(*msg)) {
2687 IPC_RTR_ERR("%s: r2r msg size %d != %zu\n", __func__,
2688 pkt->length, sizeof(*msg));
2689 return -EINVAL;
2690 }
2691
2692 hdr = &pkt->hdr;
2693 msg = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, sizeof(*msg));
2694 if (!msg) {
2695 IPC_RTR_ERR("%s: Error extracting control msg\n", __func__);
2696 return -ENOMEM;
2697 }
2698
2699 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX, msg,
2700 hdr, NULL, NULL);
2701
2702 switch (msg->cmd) {
2703 case IPC_ROUTER_CTRL_CMD_HELLO:
2704 rc = process_hello_msg(xprt_info, msg, hdr);
2705 break;
2706 case IPC_ROUTER_CTRL_CMD_RESUME_TX:
2707 rc = process_resume_tx_msg(msg, pkt);
2708 break;
2709 case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
2710 rc = process_new_server_msg(xprt_info, msg, pkt);
2711 break;
2712 case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
2713 rc = process_rmv_server_msg(xprt_info, msg, pkt);
2714 break;
2715 case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
2716 rc = process_rmv_client_msg(xprt_info, msg, pkt);
2717 break;
2718 default:
2719 rc = -EINVAL;
2720 }
2721 kfree(msg);
2722 return rc;
2723}
2724
2725static void do_read_data(struct work_struct *work)
2726{
2727 struct rr_header_v1 *hdr;
2728 struct rr_packet *pkt = NULL;
2729 struct msm_ipc_port *port_ptr;
2730 struct msm_ipc_router_remote_port *rport_ptr;
2731 int ret;
2732
2733 struct msm_ipc_router_xprt_info *xprt_info =
2734 container_of(work,
2735 struct msm_ipc_router_xprt_info,
2736 read_data);
2737
2738 while ((pkt = rr_read(xprt_info)) != NULL) {
2739 if (pkt->length < calc_rx_header_size(xprt_info) ||
2740 pkt->length > MAX_IPC_PKT_SIZE) {
2741 IPC_RTR_ERR("%s: Invalid pkt length %d\n", __func__,
2742 pkt->length);
2743 goto read_next_pkt1;
2744 }
2745
2746 ret = extract_header(pkt);
2747 if (ret < 0)
2748 goto read_next_pkt1;
2749 hdr = &pkt->hdr;
2750
2751 if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
2752 ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
2753 (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
2754 IPC_RTR_INFO(xprt_info->log_ctx,
2755 "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
2756 "FWD", "RX", hdr->size, hdr->type,
2757 hdr->control_flag, hdr->src_node_id,
2758 hdr->src_port_id, hdr->dst_node_id,
2759 hdr->dst_port_id);
2760 forward_msg(xprt_info, pkt);
2761 goto read_next_pkt1;
2762 }
2763
2764 if (hdr->type != IPC_ROUTER_CTRL_CMD_DATA) {
2765 process_control_msg(xprt_info, pkt);
2766 goto read_next_pkt1;
2767 }
2768
2769 port_ptr = ipc_router_get_port_ref(hdr->dst_port_id);
2770 if (!port_ptr) {
2771 IPC_RTR_ERR("%s: No local port id %08x\n", __func__,
2772 hdr->dst_port_id);
2773 goto read_next_pkt1;
2774 }
2775
2776 rport_ptr = ipc_router_get_rport_ref(hdr->src_node_id,
2777 hdr->src_port_id);
2778 if (!rport_ptr) {
2779 rport_ptr = ipc_router_create_rport(hdr->src_node_id,
2780 hdr->src_port_id,
2781 xprt_info);
2782 if (!rport_ptr) {
2783 IPC_RTR_ERR(
2784 "%s: Rmt Prt %08x:%08x create failed\n",
2785 __func__, hdr->src_node_id,
2786 hdr->src_port_id);
2787 goto read_next_pkt2;
2788 }
2789 }
2790
2791 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX,
2792 pkt, hdr, port_ptr, rport_ptr);
2793 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2794 post_pkt_to_port(port_ptr, pkt, 0);
2795 kref_put(&port_ptr->ref, ipc_router_release_port);
2796 continue;
2797read_next_pkt2:
2798 kref_put(&port_ptr->ref, ipc_router_release_port);
2799read_next_pkt1:
2800 release_pkt(pkt);
2801 }
2802}
2803
2804int msm_ipc_router_register_server(struct msm_ipc_port *port_ptr,
2805 struct msm_ipc_addr *name)
2806{
2807 struct msm_ipc_server *server;
2808 union rr_control_msg ctl;
2809 struct msm_ipc_router_remote_port *rport_ptr;
2810
2811 if (!port_ptr || !name)
2812 return -EINVAL;
2813
2814	if (port_ptr->type != CLIENT_PORT)
2815 return -EINVAL;
2816
2817	if (name->addrtype != MSM_IPC_ADDR_NAME)
2818 return -EINVAL;
2819
2820 rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
2821 port_ptr->this_port.port_id, NULL);
2822 if (!rport_ptr) {
2823 IPC_RTR_ERR("%s: RPort %08x:%08x creation failed\n", __func__,
2824 IPC_ROUTER_NID_LOCAL, port_ptr->this_port.port_id);
2825 return -ENOMEM;
2826 }
2827
2828 server = msm_ipc_router_create_server(name->addr.port_name.service,
2829 name->addr.port_name.instance,
2830 IPC_ROUTER_NID_LOCAL,
2831 port_ptr->this_port.port_id,
2832 NULL);
2833 if (!server) {
2834 IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
2835 __func__, name->addr.port_name.service,
2836 name->addr.port_name.instance);
2837 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2838 ipc_router_destroy_rport(rport_ptr);
2839 return -ENOMEM;
2840 }
2841
2842 memset(&ctl, 0, sizeof(ctl));
2843 ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
2844 ctl.srv.service = server->name.service;
2845 ctl.srv.instance = server->name.instance;
2846 ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
2847 ctl.srv.port_id = port_ptr->this_port.port_id;
2848 broadcast_ctl_msg(&ctl);
2849 mutex_lock(&port_ptr->port_lock_lhc3);
2850 port_ptr->type = SERVER_PORT;
2851 port_ptr->mode_info.mode = MULTI_LINK_MODE;
2852 port_ptr->port_name.service = server->name.service;
2853 port_ptr->port_name.instance = server->name.instance;
2854 port_ptr->rport_info = rport_ptr;
2855 mutex_unlock(&port_ptr->port_lock_lhc3);
2856 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2857 kref_put(&server->ref, ipc_router_release_server);
2858 return 0;
2859}
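
/*
 * Minimal sketch (illustrative only) of publishing an in-kernel service:
 * create a port and register it under a <service:instance> name. The name
 * 0x42:0x1 and the NULL notify callback are hypothetical choices.
 */
static struct msm_ipc_port * __maybe_unused example_publish_service(void)
{
	struct msm_ipc_port *port;
	struct msm_ipc_addr name;
	int rc;

	port = msm_ipc_router_create_port(NULL, NULL);
	if (!port)
		return NULL;

	memset(&name, 0, sizeof(name));
	name.addrtype = MSM_IPC_ADDR_NAME;
	name.addr.port_name.service = 0x42;
	name.addr.port_name.instance = 0x1;

	rc = msm_ipc_router_register_server(port, &name);
	if (rc < 0) {
		msm_ipc_router_close_port(port);
		return NULL;
	}
	return port;
}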
2860
2861int msm_ipc_router_unregister_server(struct msm_ipc_port *port_ptr)
2862{
2863 struct msm_ipc_server *server;
2864 union rr_control_msg ctl;
2865 struct msm_ipc_router_remote_port *rport_ptr;
2866
2867 if (!port_ptr)
2868 return -EINVAL;
2869
2870 if (port_ptr->type != SERVER_PORT) {
2871 IPC_RTR_ERR("%s: Trying to unregister a non-server port\n",
2872 __func__);
2873 return -EINVAL;
2874 }
2875
2876 if (port_ptr->this_port.node_id != IPC_ROUTER_NID_LOCAL) {
2877 IPC_RTR_ERR(
2878 "%s: Trying to unregister a remote server locally\n",
2879 __func__);
2880 return -EINVAL;
2881 }
2882
2883 server = ipc_router_get_server_ref(port_ptr->port_name.service,
2884 port_ptr->port_name.instance,
2885 port_ptr->this_port.node_id,
2886 port_ptr->this_port.port_id);
2887 if (!server) {
2888 IPC_RTR_ERR("%s: Server lookup failed\n", __func__);
2889 return -ENODEV;
2890 }
2891
2892 mutex_lock(&port_ptr->port_lock_lhc3);
2893 port_ptr->type = CLIENT_PORT;
2894 rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
2895 mutex_unlock(&port_ptr->port_lock_lhc3);
2896 if (rport_ptr)
2897 ipc_router_reset_conn(rport_ptr);
2898 memset(&ctl, 0, sizeof(ctl));
2899 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
2900 ctl.srv.service = server->name.service;
2901 ctl.srv.instance = server->name.instance;
2902 ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
2903 ctl.srv.port_id = port_ptr->this_port.port_id;
2904 kref_put(&server->ref, ipc_router_release_server);
2905 ipc_router_destroy_server(server, port_ptr->this_port.node_id,
2906 port_ptr->this_port.port_id);
2907 broadcast_ctl_msg(&ctl);
2908 mutex_lock(&port_ptr->port_lock_lhc3);
2909 port_ptr->type = CLIENT_PORT;
2910 mutex_unlock(&port_ptr->port_lock_lhc3);
2911 return 0;
2912}
2913
2914static int loopback_data(struct msm_ipc_port *src,
2915 u32 port_id,
2916 struct rr_packet *pkt)
2917{
2918 struct msm_ipc_port *port_ptr;
2919 struct sk_buff *temp_skb;
2920 int align_size;
2921
2922 if (!pkt) {
2923 IPC_RTR_ERR("%s: Invalid pkt pointer\n", __func__);
2924 return -EINVAL;
2925 }
2926
2927 temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
2928 align_size = ALIGN_SIZE(pkt->length);
2929 skb_put(temp_skb, align_size);
2930 pkt->length += align_size;
2931
2932 port_ptr = ipc_router_get_port_ref(port_id);
2933 if (!port_ptr) {
2934 IPC_RTR_ERR("%s: Local port %d not present\n", __func__,
2935 port_id);
2936 return -ENODEV;
2937 }
2938 post_pkt_to_port(port_ptr, pkt, 1);
2939 update_comm_mode_info(&src->mode_info, NULL);
2940 kref_put(&port_ptr->ref, ipc_router_release_port);
2941
2942 return pkt->hdr.size;
2943}
2944
2945static int ipc_router_tx_wait(struct msm_ipc_port *src,
2946 struct msm_ipc_router_remote_port *rport_ptr,
2947 u32 *set_confirm_rx,
2948 long timeout)
2949{
2950 struct msm_ipc_resume_tx_port *resume_tx_port;
2951 int ret;
2952
2953 if (unlikely(!src || !rport_ptr))
2954 return -EINVAL;
2955
2956 for (;;) {
2957 mutex_lock(&rport_ptr->rport_lock_lhb2);
2958 if (rport_ptr->status == RESET) {
2959 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2960 IPC_RTR_ERR("%s: RPort %08x:%08x is in reset state\n",
2961 __func__, rport_ptr->node_id,
2962 rport_ptr->port_id);
2963 return -ENETRESET;
2964 }
2965
2966 if (rport_ptr->tx_quota_cnt < IPC_ROUTER_HIGH_RX_QUOTA)
2967 break;
2968
2969 if (msm_ipc_router_lookup_resume_tx_port(
2970 rport_ptr, src->this_port.port_id))
2971 goto check_timeo;
2972
2973 resume_tx_port =
2974 kzalloc(sizeof(struct msm_ipc_resume_tx_port),
2975 GFP_KERNEL);
2976 if (!resume_tx_port) {
2977 IPC_RTR_ERR("%s: Resume_Tx port allocation failed\n",
2978 __func__);
2979 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2980 return -ENOMEM;
2981 }
2982 INIT_LIST_HEAD(&resume_tx_port->list);
2983 resume_tx_port->port_id = src->this_port.port_id;
2984 resume_tx_port->node_id = src->this_port.node_id;
2985 list_add_tail(&resume_tx_port->list,
2986 &rport_ptr->resume_tx_port_list);
2987check_timeo:
2988 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2989 if (!timeout) {
2990 return -EAGAIN;
2991 } else if (timeout < 0) {
2992 ret =
2993 wait_event_interruptible(src->port_tx_wait_q,
2994 (rport_ptr->tx_quota_cnt !=
2995 IPC_ROUTER_HIGH_RX_QUOTA ||
2996 rport_ptr->status == RESET));
2997 if (ret)
2998 return ret;
2999 } else {
3000 ret = wait_event_interruptible_timeout(
3001 src->port_tx_wait_q,
3002 (rport_ptr->tx_quota_cnt !=
3003 IPC_ROUTER_HIGH_RX_QUOTA ||
3004 rport_ptr->status == RESET),
3005 msecs_to_jiffies(timeout));
3006 if (ret < 0) {
3007 return ret;
3008 } else if (ret == 0) {
3009 IPC_RTR_ERR("%s: Resume_tx Timeout %08x:%08x\n",
3010 __func__, rport_ptr->node_id,
3011 rport_ptr->port_id);
3012 return -ETIMEDOUT;
3013 }
3014 }
3015 }
3016 rport_ptr->tx_quota_cnt++;
3017 if (rport_ptr->tx_quota_cnt == IPC_ROUTER_LOW_RX_QUOTA)
3018 *set_confirm_rx = 1;
3019 mutex_unlock(&rport_ptr->rport_lock_lhb2);
3020 return 0;
3021}
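
/*
 * Flow-control summary (derived from the handlers in this file): each message
 * sent to a remote port bumps tx_quota_cnt; when the count hits
 * IPC_ROUTER_LOW_RX_QUOTA the CONFIRM_RX flag is set on that message's header,
 * prompting the receiver to answer with a RESUME_TX control message, and at
 * IPC_ROUTER_HIGH_RX_QUOTA the sender blocks here until that RESUME_TX resets
 * the count (see process_resume_tx_msg()).
 */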
3022
3023static int
3024msm_ipc_router_write_pkt(struct msm_ipc_port *src,
3025 struct msm_ipc_router_remote_port *rport_ptr,
3026 struct rr_packet *pkt, long timeout)
3027{
3028 struct rr_header_v1 *hdr;
3029 struct msm_ipc_router_xprt_info *xprt_info;
3030 struct msm_ipc_routing_table_entry *rt_entry;
3031 struct sk_buff *temp_skb;
3032 int xprt_option;
3033 int ret;
3034 int align_size;
3035 u32 set_confirm_rx = 0;
3036
3037 if (!rport_ptr || !src || !pkt)
3038 return -EINVAL;
3039
3040 hdr = &pkt->hdr;
3041 hdr->version = IPC_ROUTER_V1;
3042 hdr->type = IPC_ROUTER_CTRL_CMD_DATA;
3043 hdr->src_node_id = src->this_port.node_id;
3044 hdr->src_port_id = src->this_port.port_id;
3045 hdr->size = pkt->length;
3046 hdr->control_flag = 0;
3047 hdr->dst_node_id = rport_ptr->node_id;
3048 hdr->dst_port_id = rport_ptr->port_id;
3049
3050 ret = ipc_router_tx_wait(src, rport_ptr, &set_confirm_rx, timeout);
3051 if (ret < 0)
3052 return ret;
3053 if (set_confirm_rx)
3054 hdr->control_flag |= CONTROL_FLAG_CONFIRM_RX;
3055
3056 if (hdr->dst_node_id == IPC_ROUTER_NID_LOCAL) {
3057 ipc_router_log_msg(local_log_ctx,
3058 IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src,
3059 rport_ptr);
3060 ret = loopback_data(src, hdr->dst_port_id, pkt);
3061 return ret;
3062 }
3063
3064 rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
3065 if (!rt_entry) {
3066 IPC_RTR_ERR("%s: Remote node %d not up\n",
3067 __func__, hdr->dst_node_id);
3068 return -ENODEV;
3069 }
3070 down_read(&rt_entry->lock_lha4);
3071 xprt_info = rt_entry->xprt_info;
3072 ret = ipc_router_get_xprt_info_ref(xprt_info);
3073 if (ret < 0) {
3074 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
3075 up_read(&rt_entry->lock_lha4);
3076 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3077 return ret;
3078 }
3079 ret = prepend_header(pkt, xprt_info);
3080 if (ret < 0) {
3081 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
3082 goto out_write_pkt;
3083 }
3084 xprt_option = xprt_info->xprt->get_option(xprt_info->xprt);
3085 if (!(xprt_option & FRAG_PKT_WRITE_ENABLE)) {
3086 ret = defragment_pkt(pkt);
3087 if (ret < 0)
3088 goto out_write_pkt;
3089 }
3090
3091 temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
3092 align_size = ALIGN_SIZE(pkt->length);
3093 skb_put(temp_skb, align_size);
3094 pkt->length += align_size;
3095 mutex_lock(&xprt_info->tx_lock_lhb2);
3096 ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
3097 mutex_unlock(&xprt_info->tx_lock_lhb2);
3098out_write_pkt:
3099 up_read(&rt_entry->lock_lha4);
3100 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3101
3102 if (ret < 0) {
3103 IPC_RTR_ERR("%s: Write on XPRT failed\n", __func__);
3104 ipc_router_log_msg(xprt_info->log_ctx,
3105 IPC_ROUTER_LOG_EVENT_TX_ERR, pkt, hdr, src,
3106 rport_ptr);
3107
3108 ipc_router_put_xprt_info_ref(xprt_info);
3109 return ret;
3110 }
3111 update_comm_mode_info(&src->mode_info, xprt_info);
3112 ipc_router_log_msg(xprt_info->log_ctx,
3113 IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src, rport_ptr);
3114
3115 ipc_router_put_xprt_info_ref(xprt_info);
3116 return hdr->size;
3117}
3118
3119int msm_ipc_router_send_to(struct msm_ipc_port *src,
3120 struct sk_buff_head *data,
3121 struct msm_ipc_addr *dest,
3122 long timeout)
3123{
3124 u32 dst_node_id = 0, dst_port_id = 0;
3125 struct msm_ipc_server *server;
3126 struct msm_ipc_server_port *server_port;
3127 struct msm_ipc_router_remote_port *rport_ptr = NULL;
3128 struct msm_ipc_router_remote_port *src_rport_ptr = NULL;
3129 struct rr_packet *pkt;
3130 int ret;
3131
3132 if (!src || !data || !dest) {
3133 IPC_RTR_ERR("%s: Invalid Parameters\n", __func__);
3134 return -EINVAL;
3135 }
3136
3137	/* Resolve Address */
3138 if (dest->addrtype == MSM_IPC_ADDR_ID) {
3139 dst_node_id = dest->addr.port_addr.node_id;
3140 dst_port_id = dest->addr.port_addr.port_id;
3141 } else if (dest->addrtype == MSM_IPC_ADDR_NAME) {
3142 server =
3143 ipc_router_get_server_ref(dest->addr.port_name.service,
3144 dest->addr.port_name.instance,
3145 0, 0);
3146 if (!server) {
3147 IPC_RTR_ERR("%s: Destination not reachable\n",
3148 __func__);
3149 return -ENODEV;
3150 }
3151 server_port = list_first_entry(&server->server_port_list,
3152 struct msm_ipc_server_port,
3153 list);
3154 dst_node_id = server_port->server_addr.node_id;
3155 dst_port_id = server_port->server_addr.port_id;
3156 kref_put(&server->ref, ipc_router_release_server);
3157 }
3158
3159 rport_ptr = ipc_router_get_rport_ref(dst_node_id, dst_port_id);
3160 if (!rport_ptr) {
3161 IPC_RTR_ERR("%s: Remote port not found\n", __func__);
3162 return -ENODEV;
3163 }
3164
3165 if (src->check_send_permissions) {
3166 ret = src->check_send_permissions(rport_ptr->sec_rule);
3167 if (ret <= 0) {
3168 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3169 IPC_RTR_ERR("%s: permission failure for %s\n",
3170 __func__, current->comm);
3171 return -EPERM;
3172 }
3173 }
3174
3175 if (dst_node_id == IPC_ROUTER_NID_LOCAL && !src->rport_info) {
3176 src_rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
3177 src->this_port.port_id,
3178 NULL);
3179 if (!src_rport_ptr) {
3180 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3181 IPC_RTR_ERR("%s: RPort creation failed\n", __func__);
3182 return -ENOMEM;
3183 }
3184 mutex_lock(&src->port_lock_lhc3);
3185 src->rport_info = src_rport_ptr;
3186 mutex_unlock(&src->port_lock_lhc3);
3187 kref_put(&src_rport_ptr->ref, ipc_router_release_rport);
3188 }
3189
3190 pkt = create_pkt(data);
3191 if (!pkt) {
3192 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3193 IPC_RTR_ERR("%s: Pkt creation failed\n", __func__);
3194 return -ENOMEM;
3195 }
3196
3197 ret = msm_ipc_router_write_pkt(src, rport_ptr, pkt, timeout);
3198 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3199 if (ret < 0)
3200 pkt->pkt_fragment_q = NULL;
3201 release_pkt(pkt);
3202
3203 return ret;
3204}
3205
3206int msm_ipc_router_send_msg(struct msm_ipc_port *src,
3207 struct msm_ipc_addr *dest,
3208 void *data, unsigned int data_len)
3209{
3210 struct sk_buff_head *out_skb_head;
3211 int ret;
3212
3213 out_skb_head = msm_ipc_router_buf_to_skb(data, data_len);
3214 if (!out_skb_head) {
3215 IPC_RTR_ERR("%s: SKB conversion failed\n", __func__);
3216 return -EFAULT;
3217 }
3218
3219 ret = msm_ipc_router_send_to(src, out_skb_head, dest, 0);
3220 if (ret < 0) {
3221 if (ret != -EAGAIN)
3222 IPC_RTR_ERR(
3223 "%s: msm_ipc_router_send_to failed - ret: %d\n",
3224 __func__, ret);
3225 msm_ipc_router_free_skb(out_skb_head);
3226 return ret;
3227 }
3228 return 0;
3229}
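
/*
 * Minimal sketch (illustrative only) of a datagram send to a named service
 * through msm_ipc_router_send_msg(). The 0x42:0x1 name is hypothetical; the
 * router resolves it to the first registered server port for that name.
 */
static int __maybe_unused example_send_to_named_service(
					struct msm_ipc_port *port,
					void *buf, unsigned int len)
{
	struct msm_ipc_addr dest;

	memset(&dest, 0, sizeof(dest));
	dest.addrtype = MSM_IPC_ADDR_NAME;
	dest.addr.port_name.service = 0x42;
	dest.addr.port_name.instance = 0x1;

	/* -EAGAIN indicates the remote port's rx quota is exhausted */
	return msm_ipc_router_send_msg(port, &dest, buf, len);
}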
3230
3231/**
3232 * msm_ipc_router_send_resume_tx() - Send Resume_Tx message
3233 * @data: Pointer to received data packet that has confirm_rx bit set
3234 *
3235 * @return: On success, number of bytes transferred is returned, else
3236 * standard linux error code is returned.
3237 *
3238 * This function sends the Resume_Tx event to the remote node that
3239 * sent the data with the confirm_rx field set. In a multi-hop scenario,
3240 * this function also makes sure that the resume_tx event is routed to
3241 * the correct destination node_id.
3242 */
3243static int msm_ipc_router_send_resume_tx(void *data)
3244{
3245 union rr_control_msg msg;
3246 struct rr_header_v1 *hdr = (struct rr_header_v1 *)data;
3247 struct msm_ipc_routing_table_entry *rt_entry;
3248 int ret;
3249
3250 memset(&msg, 0, sizeof(msg));
3251 msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
3252 msg.cli.node_id = hdr->dst_node_id;
3253 msg.cli.port_id = hdr->dst_port_id;
3254 rt_entry = ipc_router_get_rtentry_ref(hdr->src_node_id);
3255 if (!rt_entry) {
3256 IPC_RTR_ERR("%s: %d Node is not present", __func__,
3257 hdr->src_node_id);
3258 return -ENODEV;
3259 }
3260 ret = ipc_router_get_xprt_info_ref(rt_entry->xprt_info);
3261 if (ret < 0) {
3262 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
3263 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3264 return ret;
3265 }
3266 ret = ipc_router_send_ctl_msg(rt_entry->xprt_info, &msg,
3267 hdr->src_node_id);
3268 ipc_router_put_xprt_info_ref(rt_entry->xprt_info);
3269 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3270 if (ret < 0)
3271 IPC_RTR_ERR(
3272 "%s: Send Resume_Tx Failed SRC_NODE: %d SRC_PORT: %d DEST_NODE: %d",
3273 __func__, hdr->dst_node_id, hdr->dst_port_id,
3274 hdr->src_node_id);
3275
3276 return ret;
3277}
3278
3279int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
3280 struct rr_packet **read_pkt,
3281 size_t buf_len)
3282{
3283 struct rr_packet *pkt;
3284
3285 if (!port_ptr || !read_pkt)
3286 return -EINVAL;
3287
3288 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3289 if (list_empty(&port_ptr->port_rx_q)) {
3290 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3291 return -EAGAIN;
3292 }
3293
3294 pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet, list);
3295 if ((buf_len) && (pkt->hdr.size > buf_len)) {
3296 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3297 return -ETOOSMALL;
3298 }
3299 list_del(&pkt->list);
3300 if (list_empty(&port_ptr->port_rx_q))
3301 __pm_relax(port_ptr->port_rx_ws);
3302 *read_pkt = pkt;
3303 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3304 if (pkt->hdr.control_flag & CONTROL_FLAG_CONFIRM_RX)
3305 msm_ipc_router_send_resume_tx(&pkt->hdr);
3306
3307 return pkt->length;
3308}
3309
3310/**
3311 * msm_ipc_router_rx_data_wait() - Wait for new message destined to a local
3312 * port.
3313 * @port_ptr: Pointer to the local port
3314 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
3315 * > 0 timeout indicates the wait time.
3316 * 0 indicates that we do not wait.
3317 * @return: 0 if there are pending messages to read,
3318 * standard Linux error code otherwise.
3319 *
3320 * Checks for the availability of messages that are destined to a local port.
3321 * If no messages are present then waits as per @timeout.
3322 */
3323int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout)
3324{
3325 int ret = 0;
3326
3327 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3328 while (list_empty(&port_ptr->port_rx_q)) {
3329 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3330 if (timeout < 0) {
3331 ret = wait_event_interruptible(
3332 port_ptr->port_rx_wait_q,
3333 !list_empty(&port_ptr->port_rx_q));
3334 if (ret)
3335 return ret;
3336 } else if (timeout > 0) {
3337 timeout = wait_event_interruptible_timeout(
3338 port_ptr->port_rx_wait_q,
3339 !list_empty(&port_ptr->port_rx_q),
3340 timeout);
3341 if (timeout < 0)
3342 return -EFAULT;
3343 }
3344 if (timeout == 0)
3345 return -ENOMSG;
3346 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3347 }
3348 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3349
3350 return ret;
3351}
3352
3353/**
3354 * msm_ipc_router_recv_from() - Receive messages destined to a local port.
3355 * @port_ptr: Pointer to the local port
3356 * @pkt : Pointer to the router-to-router packet
3357 * @src: Pointer to local port address
3358 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
3359 * > 0 timeout indicates the wait time.
3360 * 0 indicates that we do not wait.
3361 * @return: = Number of bytes read(On successful read operation).
3362 * = -ENOMSG (If there are no pending messages and timeout is 0).
3363 * = -EINVAL (If either of the arguments, port_ptr or pkt, is invalid)
3364 * = -EFAULT (If there are no pending messages when timeout is > 0
3365 * and the wait_event_interruptible_timeout has returned value > 0)
3366 * = -ERESTARTSYS (If there are no pending messages when timeout
3367 * is < 0 and wait_event_interruptible was interrupted by a signal)
3368 *
3369 * This function reads the messages that are destined for a local port. It
3370 * is used by modules that exist within the kernel and use IPC Router for
3371 * transport. The function checks if there are any messages that are already
3372 * received. If yes, it reads them, else it waits as per the timeout value.
3373 * On a successful read, the return value of the function indicates the number
3374 * of bytes that are read.
3375 */
3376int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
3377 struct rr_packet **pkt,
3378 struct msm_ipc_addr *src,
3379 long timeout)
3380{
3381 int ret, data_len, align_size;
3382 struct sk_buff *temp_skb;
3383 struct rr_header_v1 *hdr = NULL;
3384
3385 if (!port_ptr || !pkt) {
3386 IPC_RTR_ERR("%s: Invalid pointers being passed\n", __func__);
3387 return -EINVAL;
3388 }
3389
3390 *pkt = NULL;
3391
3392 ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
3393 if (ret)
3394 return ret;
3395
3396 ret = msm_ipc_router_read(port_ptr, pkt, 0);
3397 if (ret <= 0 || !(*pkt))
3398 return ret;
3399
3400 hdr = &((*pkt)->hdr);
3401 if (src) {
3402 src->addrtype = MSM_IPC_ADDR_ID;
3403 src->addr.port_addr.node_id = hdr->src_node_id;
3404 src->addr.port_addr.port_id = hdr->src_port_id;
3405 }
3406
3407 data_len = hdr->size;
3408 align_size = ALIGN_SIZE(data_len);
3409 if (align_size) {
3410 temp_skb = skb_peek_tail((*pkt)->pkt_fragment_q);
3411 skb_trim(temp_skb, (temp_skb->len - align_size));
3412 }
3413 return data_len;
3414}
3415
3416int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
3417 struct msm_ipc_addr *src,
3418 unsigned char **data,
3419 unsigned int *len)
3420{
3421 struct rr_packet *pkt;
3422 int ret;
3423
3424 ret = msm_ipc_router_recv_from(port_ptr, &pkt, src, 0);
3425 if (ret < 0) {
3426 if (ret != -ENOMSG)
3427 IPC_RTR_ERR(
3428 "%s: msm_ipc_router_recv_from failed - ret: %d\n",
3429 __func__, ret);
3430 return ret;
3431 }
3432
3433 *data = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, ret);
3434 if (!(*data)) {
3435 IPC_RTR_ERR("%s: Buf conversion failed\n", __func__);
3436 release_pkt(pkt);
3437 return -ENOMEM;
3438 }
3439
3440 *len = ret;
3441 release_pkt(pkt);
3442 return 0;
3443}
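
/*
 * Minimal sketch (illustrative only) of a non-blocking receive with
 * msm_ipc_router_read_msg(). On success *data points to a flat copy of the
 * message; it is assumed here that the caller frees it with kfree().
 */
static void __maybe_unused example_drain_port(struct msm_ipc_port *port)
{
	struct msm_ipc_addr src;
	unsigned char *data;
	unsigned int len;

	while (!msm_ipc_router_read_msg(port, &src, &data, &len)) {
		pr_debug("%u bytes from <0x%x:0x%x>\n", len,
			 src.addr.port_addr.node_id,
			 src.addr.port_addr.port_id);
		kfree(data);
	}
}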
3444
3445/**
3446 * msm_ipc_router_create_port() - Create a IPC Router port/endpoint
3447 * @notify: Callback function to notify any event on the port.
3448 * @event: Event ID to be handled.
3449 * @oob_data: Any out-of-band data associated with the event.
3450 * @oob_data_len: Size of the out-of-band data, if valid.
3451 * @priv: Private data registered during the port creation; it is passed
3452 * back to @notify when a notification is generated.
3453 *
3454 * @return: Pointer to the port on success, NULL on error.
3455 */
3456struct msm_ipc_port *msm_ipc_router_create_port(
3457 void (*notify)(unsigned int event, void *oob_data,
3458 size_t oob_data_len, void *priv),
3459 void *priv)
3460{
3461 struct msm_ipc_port *port_ptr;
3462 int ret;
3463
3464 ret = ipc_router_core_init();
3465 if (ret < 0) {
3466 IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
3467 __func__, ret);
3468 return NULL;
3469 }
3470
3471 port_ptr = msm_ipc_router_create_raw_port(NULL, notify, priv);
3472 if (!port_ptr)
3473 IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
3474
3475 return port_ptr;
3476}
3477
3478int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
3479{
3480 union rr_control_msg msg;
3481 struct msm_ipc_server *server;
3482 struct msm_ipc_router_remote_port *rport_ptr;
3483
3484 if (!port_ptr)
3485 return -EINVAL;
3486
3487 if (port_ptr->type == SERVER_PORT || port_ptr->type == CLIENT_PORT) {
3488 down_write(&local_ports_lock_lhc2);
3489 list_del(&port_ptr->list);
3490 up_write(&local_ports_lock_lhc2);
3491
3492 mutex_lock(&port_ptr->port_lock_lhc3);
3493 rport_ptr = (struct msm_ipc_router_remote_port *)
3494 port_ptr->rport_info;
3495 port_ptr->rport_info = NULL;
3496 mutex_unlock(&port_ptr->port_lock_lhc3);
3497 if (rport_ptr) {
3498 ipc_router_reset_conn(rport_ptr);
3499 ipc_router_destroy_rport(rport_ptr);
3500 }
3501
3502 if (port_ptr->type == SERVER_PORT) {
3503 memset(&msg, 0, sizeof(msg));
3504 msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
3505 msg.srv.service = port_ptr->port_name.service;
3506 msg.srv.instance = port_ptr->port_name.instance;
3507 msg.srv.node_id = port_ptr->this_port.node_id;
3508 msg.srv.port_id = port_ptr->this_port.port_id;
3509 broadcast_ctl_msg(&msg);
3510 }
3511
3512 /* Server port could have been a client port earlier.
3513 * Send REMOVE_CLIENT message in either case.
3514 */
3515 msm_ipc_router_send_remove_client(&port_ptr->mode_info,
3516 port_ptr->this_port.node_id,
3517 port_ptr->this_port.port_id);
3518 } else if (port_ptr->type == CONTROL_PORT) {
3519 down_write(&control_ports_lock_lha5);
3520 list_del(&port_ptr->list);
3521 up_write(&control_ports_lock_lha5);
3522 } else if (port_ptr->type == IRSC_PORT) {
3523 down_write(&local_ports_lock_lhc2);
3524 list_del(&port_ptr->list);
3525 up_write(&local_ports_lock_lhc2);
3526 signal_irsc_completion();
3527 }
3528
3529 if (port_ptr->type == SERVER_PORT) {
3530 server = ipc_router_get_server_ref(
3531 port_ptr->port_name.service,
3532 port_ptr->port_name.instance,
3533 port_ptr->this_port.node_id,
3534 port_ptr->this_port.port_id);
3535 if (server) {
3536 kref_put(&server->ref, ipc_router_release_server);
3537 ipc_router_destroy_server(server,
3538 port_ptr->this_port.node_id,
3539 port_ptr->this_port.port_id);
3540 }
3541 }
3542
3543 mutex_lock(&port_ptr->port_lock_lhc3);
3544 rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
3545 port_ptr->rport_info = NULL;
3546 mutex_unlock(&port_ptr->port_lock_lhc3);
3547 if (rport_ptr)
3548 ipc_router_destroy_rport(rport_ptr);
3549
3550 kref_put(&port_ptr->ref, ipc_router_release_port);
3551 return 0;
3552}
3553
3554int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr)
3555{
3556 struct rr_packet *pkt;
3557 int rc = 0;
3558
3559 if (!port_ptr)
3560 return -EINVAL;
3561
3562 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3563 if (!list_empty(&port_ptr->port_rx_q)) {
3564 pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet,
3565 list);
3566 rc = pkt->hdr.size;
3567 }
3568 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3569
3570 return rc;
3571}
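/*
 * Usage sketch (illustrative only): peeking at the size of the packet at
 * the head of the receive queue before reading it. A return value of 0
 * means the queue is empty and a negative value indicates an error.
 *
 *	int sz = msm_ipc_router_get_curr_pkt_size(port);
 *
 *	if (sz > 0)
 *		pr_debug("next packet carries %d bytes\n", sz);
 */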
3572
3573int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr)
3574{
3575 if (unlikely(!port_ptr || port_ptr->type != CLIENT_PORT))
3576 return -EINVAL;
3577
3578 down_write(&local_ports_lock_lhc2);
3579 list_del(&port_ptr->list);
3580 up_write(&local_ports_lock_lhc2);
3581 port_ptr->type = CONTROL_PORT;
3582 down_write(&control_ports_lock_lha5);
3583 list_add_tail(&port_ptr->list, &control_ports);
3584 up_write(&control_ports_lock_lha5);
3585
3586 return 0;
3587}
3588
3589int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
3590 struct msm_ipc_server_info *srv_info,
3591 int num_entries_in_array, u32 lookup_mask)
3592{
3593 struct msm_ipc_server *server;
3594 struct msm_ipc_server_port *server_port;
3595	int key, i = 0; /* number of matching entries found */
3596
3597 if (!srv_name) {
3598 IPC_RTR_ERR("%s: Invalid srv_name\n", __func__);
3599 return -EINVAL;
3600 }
3601
3602 if (num_entries_in_array && !srv_info) {
3603 IPC_RTR_ERR("%s: srv_info NULL\n", __func__);
3604 return -EINVAL;
3605 }
3606
3607 down_read(&server_list_lock_lha2);
3608 key = (srv_name->service & (SRV_HASH_SIZE - 1));
3609 list_for_each_entry(server, &server_list[key], list) {
3610 if ((server->name.service != srv_name->service) ||
3611 ((server->name.instance & lookup_mask) !=
3612 srv_name->instance))
3613 continue;
3614
3615 list_for_each_entry(server_port, &server->server_port_list,
3616 list) {
3617 if (i < num_entries_in_array) {
3618 srv_info[i].node_id =
3619 server_port->server_addr.node_id;
3620 srv_info[i].port_id =
3621 server_port->server_addr.port_id;
3622 srv_info[i].service = server->name.service;
3623 srv_info[i].instance = server->name.instance;
3624 }
3625 i++;
3626 }
3627 }
3628 up_read(&server_list_lock_lha2);
3629
3630 return i;
3631}
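/*
 * Usage sketch (illustrative only): a two-pass lookup that first sizes the
 * result set and then fills a caller-allocated array. The service and
 * instance values are placeholders; 0xFFFFFFFF requests an exact instance
 * match.
 *
 *	struct msm_ipc_port_name name = { .service = 0x42, .instance = 1 };
 *	struct msm_ipc_server_info *info;
 *	int n;
 *
 *	n = msm_ipc_router_lookup_server_name(&name, NULL, 0, 0xFFFFFFFF);
 *	if (n <= 0)
 *		return n;
 *	info = kcalloc(n, sizeof(*info), GFP_KERNEL);
 *	if (!info)
 *		return -ENOMEM;
 *	n = msm_ipc_router_lookup_server_name(&name, info, n, 0xFFFFFFFF);
 */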
3632
3633int msm_ipc_router_close(void)
3634{
3635 struct msm_ipc_router_xprt_info *xprt_info, *tmp_xprt_info;
3636
3637 down_write(&xprt_info_list_lock_lha5);
3638 list_for_each_entry_safe(xprt_info, tmp_xprt_info,
3639 &xprt_info_list, list) {
3640 xprt_info->xprt->close(xprt_info->xprt);
3641 list_del(&xprt_info->list);
3642 kfree(xprt_info);
3643 }
3644 up_write(&xprt_info_list_lock_lha5);
3645 return 0;
3646}
3647
3648/**
3649 * pil_vote_load_worker() - Process vote to load the modem
3650 *
3651 * @work: Work item to process
3652 *
3653 * This function is called to process votes to load the modem that have been
3654 * queued by msm_ipc_load_default_node().
3655 */
3656static void pil_vote_load_worker(struct work_struct *work)
3657{
3658 struct pil_vote_info *vote_info;
3659
3660 vote_info = container_of(work, struct pil_vote_info, load_work);
3661 if (strlen(default_peripheral)) {
3662 vote_info->pil_handle = subsystem_get(default_peripheral);
3663 if (IS_ERR(vote_info->pil_handle)) {
3664 IPC_RTR_ERR("%s: Failed to load %s\n",
3665 __func__, default_peripheral);
3666 vote_info->pil_handle = NULL;
3667 }
3668 } else {
3669 vote_info->pil_handle = NULL;
3670 }
3671}
3672
3673/**
3674 * pil_vote_unload_worker() - Process vote to unload the modem
3675 *
3676 * @work: Work item to process
3677 *
3678 * This function is called to process votes to unload the modem that have been
3679 * queued by msm_ipc_unload_default_node().
3680 */
3681static void pil_vote_unload_worker(struct work_struct *work)
3682{
3683 struct pil_vote_info *vote_info;
3684
3685 vote_info = container_of(work, struct pil_vote_info, unload_work);
3686
3687 if (vote_info->pil_handle) {
3688 subsystem_put(vote_info->pil_handle);
3689 vote_info->pil_handle = NULL;
3690 }
3691 kfree(vote_info);
3692}
3693
3694/**
3695 * msm_ipc_load_default_node() - Queue a vote to load the modem.
3696 *
3697 * @return: PIL vote info structure on success, NULL on failure.
3698 *
3699 * This function places a work item that loads the modem on the
3700 * single-threaded workqueue used for processing PIL votes to load
3701 * or unload the modem.
3702 */
3703void *msm_ipc_load_default_node(void)
3704{
3705 struct pil_vote_info *vote_info;
3706
3707 vote_info = kmalloc(sizeof(*vote_info), GFP_KERNEL);
3708 if (!vote_info)
3709 return vote_info;
3710
3711 INIT_WORK(&vote_info->load_work, pil_vote_load_worker);
3712 queue_work(msm_ipc_router_workqueue, &vote_info->load_work);
3713
3714 return vote_info;
3715}
3716
3717/**
3718 * msm_ipc_unload_default_node() - Queue a vote to unload the modem.
3719 *
3720 * @pil_vote: PIL vote info structure, containing the PIL handle
3721 * and work structure.
3722 *
3723 * This function places a work item that unloads the modem on the
3724 * single-threaded workqueue used for processing PIL votes to load
3725 * or unload the modem.
3726 */
3727void msm_ipc_unload_default_node(void *pil_vote)
3728{
3729 struct pil_vote_info *vote_info;
3730
3731 if (pil_vote) {
3732 vote_info = (struct pil_vote_info *)pil_vote;
3733 INIT_WORK(&vote_info->unload_work, pil_vote_unload_worker);
3734 queue_work(msm_ipc_router_workqueue, &vote_info->unload_work);
3735 }
3736}
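/*
 * Usage sketch (illustrative only): the load and unload votes are meant to
 * be paired. The opaque handle returned by the load call is passed back to
 * the unload call once the remote node is no longer needed.
 *
 *	void *pil_vote = msm_ipc_load_default_node();
 *
 *	(communicate with the default node)
 *
 *	msm_ipc_unload_default_node(pil_vote);
 */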
3737
3738#if defined(CONFIG_DEBUG_FS)
3739static void dump_routing_table(struct seq_file *s)
3740{
3741 int j;
3742 struct msm_ipc_routing_table_entry *rt_entry;
3743
3744 seq_printf(s, "%-10s|%-20s|%-10s|\n", "Node Id", "XPRT Name",
3745 "Next Hop");
3746 seq_puts(s, "----------------------------------------------\n");
3747 for (j = 0; j < RT_HASH_SIZE; j++) {
3748 down_read(&routing_table_lock_lha3);
3749 list_for_each_entry(rt_entry, &routing_table[j], list) {
3750 down_read(&rt_entry->lock_lha4);
3751 seq_printf(s, "0x%08x|", rt_entry->node_id);
3752 if (rt_entry->node_id == IPC_ROUTER_NID_LOCAL)
3753 seq_printf(s, "%-20s|0x%08x|\n", "Loopback",
3754 rt_entry->node_id);
3755 else
3756 seq_printf(s, "%-20s|0x%08x|\n",
3757 rt_entry->xprt_info->xprt->name,
3758 rt_entry->node_id);
3759 up_read(&rt_entry->lock_lha4);
3760 }
3761 up_read(&routing_table_lock_lha3);
3762 }
3763}
3764
3765static void dump_xprt_info(struct seq_file *s)
3766{
3767 struct msm_ipc_router_xprt_info *xprt_info;
3768
3769 seq_printf(s, "%-20s|%-10s|%-12s|%-15s|\n", "XPRT Name", "Link ID",
3770 "Initialized", "Remote Node Id");
3771 seq_puts(s, "------------------------------------------------------------\n");
3772 down_read(&xprt_info_list_lock_lha5);
3773 list_for_each_entry(xprt_info, &xprt_info_list, list)
3774 seq_printf(s, "%-20s|0x%08x|%-12s|0x%08x|\n",
3775 xprt_info->xprt->name, xprt_info->xprt->link_id,
3776 (xprt_info->initialized ? "Y" : "N"),
3777 xprt_info->remote_node_id);
3778 up_read(&xprt_info_list_lock_lha5);
3779}
3780
3781static void dump_servers(struct seq_file *s)
3782{
3783 int j;
3784 struct msm_ipc_server *server;
3785 struct msm_ipc_server_port *server_port;
3786
3787 seq_printf(s, "%-11s|%-11s|%-11s|%-11s|\n", "Service", "Instance",
3788 "Node_id", "Port_id");
3789 seq_puts(s, "------------------------------------------------------------\n");
3790 down_read(&server_list_lock_lha2);
3791 for (j = 0; j < SRV_HASH_SIZE; j++) {
3792 list_for_each_entry(server, &server_list[j], list) {
3793 list_for_each_entry(server_port,
3794 &server->server_port_list,
3795 list)
3796 seq_printf(s, "0x%08x |0x%08x |0x%08x |0x%08x |\n",
3797 server->name.service,
3798 server->name.instance,
3799 server_port->server_addr.node_id,
3800 server_port->server_addr.port_id);
3801 }
3802 }
3803 up_read(&server_list_lock_lha2);
3804}
3805
3806static void dump_remote_ports(struct seq_file *s)
3807{
3808 int j, k;
3809 struct msm_ipc_router_remote_port *rport_ptr;
3810 struct msm_ipc_routing_table_entry *rt_entry;
3811
3812 seq_printf(s, "%-11s|%-11s|%-10s|\n", "Node_id", "Port_id",
3813 "Quota_cnt");
3814 seq_puts(s, "------------------------------------------------------------\n");
3815 for (j = 0; j < RT_HASH_SIZE; j++) {
3816 down_read(&routing_table_lock_lha3);
3817 list_for_each_entry(rt_entry, &routing_table[j], list) {
3818 down_read(&rt_entry->lock_lha4);
3819 for (k = 0; k < RP_HASH_SIZE; k++) {
3820 list_for_each_entry
3821 (rport_ptr,
3822 &rt_entry->remote_port_list[k],
3823 list)
3824 seq_printf(s, "0x%08x |0x%08x |0x%08x|\n",
3825 rport_ptr->node_id,
3826 rport_ptr->port_id,
3827 rport_ptr->tx_quota_cnt);
3828 }
3829 up_read(&rt_entry->lock_lha4);
3830 }
3831 up_read(&routing_table_lock_lha3);
3832 }
3833}
3834
3835static void dump_control_ports(struct seq_file *s)
3836{
3837 struct msm_ipc_port *port_ptr;
3838
3839 seq_printf(s, "%-11s|%-11s|\n", "Node_id", "Port_id");
3840 seq_puts(s, "------------------------------------------------------------\n");
3841 down_read(&control_ports_lock_lha5);
3842 list_for_each_entry(port_ptr, &control_ports, list)
3843 seq_printf(s, "0x%08x |0x%08x |\n", port_ptr->this_port.node_id,
3844 port_ptr->this_port.port_id);
3845 up_read(&control_ports_lock_lha5);
3846}
3847
3848static void dump_local_ports(struct seq_file *s)
3849{
3850 int j;
3851 struct msm_ipc_port *port_ptr;
3852
3853	seq_printf(s, "%-11s|%-11s|%-32s|%-11s|\n",
3854		   "Node_id", "Port_id", "Wakelock", "Last SVCID");
3855	seq_puts(s, "------------------------------------------------------------\n");
3856 down_read(&local_ports_lock_lhc2);
3857 for (j = 0; j < LP_HASH_SIZE; j++) {
3858 list_for_each_entry(port_ptr, &local_ports[j], list) {
3859 mutex_lock(&port_ptr->port_lock_lhc3);
3860			seq_printf(s, "0x%08x |0x%08x |%-32s|0x%08x |\n",
3861				   port_ptr->this_port.node_id,
3862				   port_ptr->this_port.port_id,
3863				   port_ptr->rx_ws_name,
3864				   port_ptr->last_served_svc_id);
3865			mutex_unlock(&port_ptr->port_lock_lhc3);
3866 }
3867 }
3868 up_read(&local_ports_lock_lhc2);
3869}
3870
3871static int debugfs_show(struct seq_file *s, void *data)
3872{
3873 void (*show)(struct seq_file *) = s->private;
3874
3875 show(s);
3876 return 0;
3877}
3878
3879static int debug_open(struct inode *inode, struct file *file)
3880{
3881 return single_open(file, debugfs_show, inode->i_private);
3882}
3883
3884static const struct file_operations debug_ops = {
3885 .open = debug_open,
3886 .release = single_release,
3887 .read = seq_read,
3888 .llseek = seq_lseek,
3889};
3890
3891static void debug_create(const char *name, struct dentry *dent,
3892 void (*show)(struct seq_file *))
3893{
3894 debugfs_create_file(name, 0444, dent, show, &debug_ops);
3895}
3896
3897static void debugfs_init(void)
3898{
3899 struct dentry *dent;
3900
3901	dent = debugfs_create_dir("msm_ipc_router", NULL);
3902 if (IS_ERR(dent))
3903 return;
3904
3905 debug_create("dump_local_ports", dent, dump_local_ports);
3906 debug_create("dump_remote_ports", dent, dump_remote_ports);
3907 debug_create("dump_control_ports", dent, dump_control_ports);
3908 debug_create("dump_servers", dent, dump_servers);
3909 debug_create("dump_xprt_info", dent, dump_xprt_info);
3910 debug_create("dump_routing_table", dent, dump_routing_table);
3911}
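/*
 * The entries created above are read-only text dumps. Assuming debugfs is
 * mounted at /sys/kernel/debug, they can be inspected from userspace, for
 * example:
 *
 *	cat /sys/kernel/debug/msm_ipc_router/dump_servers
 *	cat /sys/kernel/debug/msm_ipc_router/dump_routing_table
 */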
3912
3913#else
3914static void debugfs_init(void) {}
3915#endif
3916
3917/**
3918 * ipc_router_create_log_ctx() - Create and add the log context based on
3919 * transport
3920 * @name: subsystem name
3921 *
3922 * Return: a reference to the log context created
3923 *
3924 * This function creates an IPC log context based on the transport and adds
3925 * it to a global list. The log context can be reused from the list in case
3926 * of a subsystem restart.
3927 */
3928static void *ipc_router_create_log_ctx(char *name)
3929{
3930 struct ipc_rtr_log_ctx *sub_log_ctx;
3931
3932 sub_log_ctx = kmalloc(sizeof(*sub_log_ctx), GFP_KERNEL);
3933 if (!sub_log_ctx)
3934 return NULL;
3935 sub_log_ctx->log_ctx = ipc_log_context_create(
3936 IPC_RTR_INFO_PAGES, name, 0);
3937 if (!sub_log_ctx->log_ctx) {
3938		IPC_RTR_ERR("%s: Unable to create IPC logging for [%s]\n",
3939 __func__, name);
3940 kfree(sub_log_ctx);
3941 return NULL;
3942 }
3943 strlcpy(sub_log_ctx->log_ctx_name, name, LOG_CTX_NAME_LEN);
3944 INIT_LIST_HEAD(&sub_log_ctx->list);
3945 list_add_tail(&sub_log_ctx->list, &log_ctx_list);
3946 return sub_log_ctx->log_ctx;
3947}
3948
3949static void ipc_router_log_ctx_init(void)
3950{
3951 mutex_lock(&log_ctx_list_lock_lha0);
3952 local_log_ctx = ipc_router_create_log_ctx("local_IPCRTR");
3953 mutex_unlock(&log_ctx_list_lock_lha0);
3954}
3955
3956/**
3957 * ipc_router_get_log_ctx() - Retrieves the ipc log context based on subsystem
3958 * name.
3959 * @sub_name: subsystem name
3960 *
3961 * Return: a reference to the log context
3962 */
3963static void *ipc_router_get_log_ctx(char *sub_name)
3964{
3965 void *log_ctx = NULL;
3966 struct ipc_rtr_log_ctx *temp_log_ctx;
3967
3968 mutex_lock(&log_ctx_list_lock_lha0);
3969 list_for_each_entry(temp_log_ctx, &log_ctx_list, list)
3970 if (!strcmp(temp_log_ctx->log_ctx_name, sub_name)) {
3971 log_ctx = temp_log_ctx->log_ctx;
3972 mutex_unlock(&log_ctx_list_lock_lha0);
3973 return log_ctx;
3974 }
3975 log_ctx = ipc_router_create_log_ctx(sub_name);
3976 mutex_unlock(&log_ctx_list_lock_lha0);
3977
3978 return log_ctx;
3979}
3980
3981/**
3982 * ipc_router_get_xprt_info_ref() - Get a reference to the xprt_info structure
3983 * @xprt_info: pointer to the xprt_info.
3984 *
3985 * @return: Zero on success, -ENODEV on failure.
3986 *
3987 * This function is used to obtain a reference to the xprt_info structure
3988 * corresponding to the requested @xprt_info pointer.
3989 */
3990static int ipc_router_get_xprt_info_ref(
3991 struct msm_ipc_router_xprt_info *xprt_info)
3992{
3993 int ret = -ENODEV;
3994 struct msm_ipc_router_xprt_info *tmp_xprt_info;
3995
3996 if (!xprt_info)
3997 return 0;
3998
3999 down_read(&xprt_info_list_lock_lha5);
4000 list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
4001 if (tmp_xprt_info == xprt_info) {
4002 kref_get(&xprt_info->ref);
4003 ret = 0;
4004 break;
4005 }
4006 }
4007 up_read(&xprt_info_list_lock_lha5);
4008
4009 return ret;
4010}
4011
4012/**
4013 * ipc_router_put_xprt_info_ref() - Put a reference to the xprt_info structure
4014 * @xprt_info: pointer to the xprt_info.
4015 *
4016 * This function is used to put the reference to the xprt_info structure
4017 * corresponding to the requested @xprt_info pointer.
4018 */
4019static void ipc_router_put_xprt_info_ref(
4020 struct msm_ipc_router_xprt_info *xprt_info)
4021{
4022 if (xprt_info)
4023 kref_put(&xprt_info->ref, ipc_router_release_xprt_info_ref);
4024}
4025
4026/**
4027 * ipc_router_release_xprt_info_ref() - release the xprt_info last reference
4028 * @ref: Reference to the xprt_info structure.
4029 *
4030 * This function is called when all references to the xprt_info structure
4031 * are released.
4032 */
4033static void ipc_router_release_xprt_info_ref(struct kref *ref)
4034{
4035 struct msm_ipc_router_xprt_info *xprt_info =
4036 container_of(ref, struct msm_ipc_router_xprt_info, ref);
4037
4038 complete_all(&xprt_info->ref_complete);
4039}
4040
4041static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt)
4042{
4043 struct msm_ipc_router_xprt_info *xprt_info;
4044
4045 xprt_info = kmalloc(sizeof(*xprt_info), GFP_KERNEL);
4046 if (!xprt_info)
4047 return -ENOMEM;
4048
4049 xprt_info->xprt = xprt;
4050 xprt_info->initialized = 0;
4051 xprt_info->remote_node_id = -1;
4052 INIT_LIST_HEAD(&xprt_info->pkt_list);
4053 mutex_init(&xprt_info->rx_lock_lhb2);
4054 mutex_init(&xprt_info->tx_lock_lhb2);
4055 wakeup_source_init(&xprt_info->ws, xprt->name);
4056 xprt_info->need_len = 0;
4057 xprt_info->abort_data_read = 0;
4058 INIT_WORK(&xprt_info->read_data, do_read_data);
4059 INIT_LIST_HEAD(&xprt_info->list);
4060 kref_init(&xprt_info->ref);
4061 init_completion(&xprt_info->ref_complete);
4062	xprt_info->dynamic_ws = 0;
4063	if (xprt->get_ws_info)
4064		xprt_info->dynamic_ws = xprt->get_ws_info(xprt);
4065
4066 xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
4067 if (!xprt_info->workqueue) {
4068 kfree(xprt_info);
4069 return -ENOMEM;
4070 }
4071
4072 xprt_info->log_ctx = ipc_router_get_log_ctx(xprt->name);
4073
4074 if (!strcmp(xprt->name, "msm_ipc_router_loopback_xprt")) {
4075 xprt_info->remote_node_id = IPC_ROUTER_NID_LOCAL;
4076 xprt_info->initialized = 1;
4077 }
4078
4079 IPC_RTR_INFO(xprt_info->log_ctx, "Adding xprt: [%s]\n", xprt->name);
4080 down_write(&xprt_info_list_lock_lha5);
4081 list_add_tail(&xprt_info->list, &xprt_info_list);
4082 up_write(&xprt_info_list_lock_lha5);
4083
4084 down_write(&routing_table_lock_lha3);
4085 if (!routing_table_inited) {
4086 init_routing_table();
4087 routing_table_inited = 1;
4088 }
4089 up_write(&routing_table_lock_lha3);
4090
4091 xprt->priv = xprt_info;
4092
4093 return 0;
4094}
4095
4096static void msm_ipc_router_remove_xprt(struct msm_ipc_router_xprt *xprt)
4097{
4098 struct msm_ipc_router_xprt_info *xprt_info;
4099 struct rr_packet *temp_pkt, *pkt;
4100
4101 if (xprt && xprt->priv) {
4102 xprt_info = xprt->priv;
4103
4104 IPC_RTR_INFO(xprt_info->log_ctx, "Removing xprt: [%s]\n",
4105 xprt->name);
4106 mutex_lock(&xprt_info->rx_lock_lhb2);
4107 xprt_info->abort_data_read = 1;
4108 mutex_unlock(&xprt_info->rx_lock_lhb2);
4109 flush_workqueue(xprt_info->workqueue);
4110 destroy_workqueue(xprt_info->workqueue);
4111 mutex_lock(&xprt_info->rx_lock_lhb2);
4112 list_for_each_entry_safe(pkt, temp_pkt,
4113 &xprt_info->pkt_list, list) {
4114 list_del(&pkt->list);
4115 release_pkt(pkt);
4116 }
4117 mutex_unlock(&xprt_info->rx_lock_lhb2);
4118
4119 down_write(&xprt_info_list_lock_lha5);
4120 list_del(&xprt_info->list);
4121 up_write(&xprt_info_list_lock_lha5);
4122
4123 msm_ipc_cleanup_routing_table(xprt_info);
4124
4125 wakeup_source_trash(&xprt_info->ws);
4126
4127 ipc_router_put_xprt_info_ref(xprt_info);
4128 wait_for_completion(&xprt_info->ref_complete);
4129
4130		xprt->priv = NULL;
4131 kfree(xprt_info);
4132 }
4133}
4134
4135struct msm_ipc_router_xprt_work {
4136 struct msm_ipc_router_xprt *xprt;
4137 struct work_struct work;
4138};
4139
4140static void xprt_open_worker(struct work_struct *work)
4141{
4142 struct msm_ipc_router_xprt_work *xprt_work =
4143 container_of(work, struct msm_ipc_router_xprt_work, work);
4144
4145 msm_ipc_router_add_xprt(xprt_work->xprt);
4146 kfree(xprt_work);
4147}
4148
4149static void xprt_close_worker(struct work_struct *work)
4150{
4151 struct msm_ipc_router_xprt_work *xprt_work =
4152 container_of(work, struct msm_ipc_router_xprt_work, work);
4153
4154 msm_ipc_router_remove_xprt(xprt_work->xprt);
4155 xprt_work->xprt->sft_close_done(xprt_work->xprt);
4156 kfree(xprt_work);
4157}
4158
4159void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
4160 unsigned int event,
4161 void *data)
4162{
4163 struct msm_ipc_router_xprt_info *xprt_info = xprt->priv;
4164 struct msm_ipc_router_xprt_work *xprt_work;
4165 struct rr_packet *pkt;
4166 int ret;
4167
4168 ret = ipc_router_core_init();
4169 if (ret < 0) {
4170 IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
4171 __func__, ret);
4172 return;
4173 }
4174
4175 switch (event) {
4176 case IPC_ROUTER_XPRT_EVENT_OPEN:
4177 xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
4178 if (xprt_work) {
4179 xprt_work->xprt = xprt;
4180 INIT_WORK(&xprt_work->work, xprt_open_worker);
4181 queue_work(msm_ipc_router_workqueue, &xprt_work->work);
4182 } else {
4183 IPC_RTR_ERR(
4184 "%s: malloc failure - Couldn't notify OPEN event",
4185 __func__);
4186 }
4187 break;
4188
4189 case IPC_ROUTER_XPRT_EVENT_CLOSE:
4190 xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
4191 if (xprt_work) {
4192 xprt_work->xprt = xprt;
4193 INIT_WORK(&xprt_work->work, xprt_close_worker);
4194 queue_work(msm_ipc_router_workqueue, &xprt_work->work);
4195 } else {
4196 IPC_RTR_ERR(
4197 "%s: malloc failure - Couldn't notify CLOSE event",
4198 __func__);
4199 }
4200 break;
4201 }
4202
4203 if (!data)
4204 return;
4205
4206 while (!xprt_info) {
4207 msleep(100);
4208 xprt_info = xprt->priv;
4209 }
4210
4211 pkt = clone_pkt((struct rr_packet *)data);
4212 if (!pkt)
4213 return;
4214
4215	pkt->ws_need = false;
4216	mutex_lock(&xprt_info->rx_lock_lhb2);
4217	list_add_tail(&pkt->list, &xprt_info->pkt_list);
4218	if (!xprt_info->dynamic_ws) {
4219 __pm_stay_awake(&xprt_info->ws);
4220 pkt->ws_need = true;
4221 } else {
4222 if (is_wakeup_source_allowed) {
4223 __pm_stay_awake(&xprt_info->ws);
4224 pkt->ws_need = true;
4225 }
4226 }
4227	mutex_unlock(&xprt_info->rx_lock_lhb2);
4228 queue_work(xprt_info->workqueue, &xprt_info->read_data);
4229}
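/*
 * Usage sketch (illustrative only): a transport driver is expected to call
 * this notifier when its link comes up, when a packet arrives and when the
 * link goes down. my_xprt is a hypothetical transport; the data event name
 * below assumes the IPC_ROUTER_XPRT_EVENT_* convention used for the OPEN
 * and CLOSE events handled above.
 *
 *	msm_ipc_router_xprt_notify(&my_xprt, IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
 *	msm_ipc_router_xprt_notify(&my_xprt, IPC_ROUTER_XPRT_EVENT_DATA, pkt);
 *	msm_ipc_router_xprt_notify(&my_xprt, IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
 */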
4230
4231/**
4232 * parse_devicetree() - parse device tree binding
4233 *
4234 * @node: pointer to device tree node
4235 *
4236 * @return: 0; the qcom,default-peripheral property is optional.
4237 */
4238static int parse_devicetree(struct device_node *node)
4239{
4240 char *key;
4241 const char *peripheral = NULL;
4242
4243 key = "qcom,default-peripheral";
4244 peripheral = of_get_property(node, key, NULL);
4245 if (peripheral)
4246 strlcpy(default_peripheral, peripheral, PIL_SUBSYSTEM_NAME_LEN);
4247
4248 return 0;
4249}
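/*
 * Illustrative device tree fragment for the binding parsed above. The node
 * name and the peripheral value are examples only; the property itself is
 * optional.
 *
 *	qcom,ipc_router {
 *		compatible = "qcom,ipc_router";
 *		qcom,default-peripheral = "modem";
 *	};
 */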
4250
4251/**
4252 * ipc_router_probe() - Probe the IPC Router
4253 *
4254 * @pdev: Platform device corresponding to IPC Router.
4255 *
4256 * @return: 0 on success, standard Linux error codes on error.
4257 *
4258 * This function is called when the underlying device tree driver registers
4259 * a platform device, mapped to IPC Router.
4260 */
4261static int ipc_router_probe(struct platform_device *pdev)
4262{
4263 int ret = 0;
4264
4265 if (pdev && pdev->dev.of_node) {
4266 ret = parse_devicetree(pdev->dev.of_node);
4267 if (ret)
4268 IPC_RTR_ERR("%s: Failed to parse device tree\n",
4269 __func__);
4270 }
4271 return ret;
4272}
4273
4274static const struct of_device_id ipc_router_match_table[] = {
4275 { .compatible = "qcom,ipc_router" },
4276 {},
4277};
4278
4279static struct platform_driver ipc_router_driver = {
4280 .probe = ipc_router_probe,
4281 .driver = {
4282 .name = MODULE_NAME,
4283 .owner = THIS_MODULE,
4284 .of_match_table = ipc_router_match_table,
4285 },
4286};
4287
4288/**
4289 * ipc_router_core_init() - Initialize all IPC Router core data structures
4290 *
4291 * Return: 0 on success or a standard error code otherwise.
4292 *
4293 * This function initializes only the core data structures of the IPC Router
4294 * module. The remaining initialization is done in msm_ipc_router_init().
4295 */
4296static int ipc_router_core_init(void)
4297{
4298 int i;
4299 int ret;
4300 struct msm_ipc_routing_table_entry *rt_entry;
4301
4302 mutex_lock(&ipc_router_init_lock);
4303 if (likely(is_ipc_router_inited)) {
4304 mutex_unlock(&ipc_router_init_lock);
4305 return 0;
4306 }
4307
4308 debugfs_init();
4309
4310 for (i = 0; i < SRV_HASH_SIZE; i++)
4311 INIT_LIST_HEAD(&server_list[i]);
4312
4313 for (i = 0; i < LP_HASH_SIZE; i++)
4314 INIT_LIST_HEAD(&local_ports[i]);
4315
4316 down_write(&routing_table_lock_lha3);
4317 if (!routing_table_inited) {
4318 init_routing_table();
4319 routing_table_inited = 1;
4320 }
4321 up_write(&routing_table_lock_lha3);
4322 rt_entry = create_routing_table_entry(IPC_ROUTER_NID_LOCAL, NULL);
4323 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
4324
4325 msm_ipc_router_workqueue =
4326 create_singlethread_workqueue("msm_ipc_router");
4327 if (!msm_ipc_router_workqueue) {
4328 mutex_unlock(&ipc_router_init_lock);
4329 return -ENOMEM;
4330 }
4331
4332 ret = msm_ipc_router_security_init();
4333 if (ret < 0)
4334 IPC_RTR_ERR("%s: Security Init failed\n", __func__);
4335 else
4336 is_ipc_router_inited = true;
4337 mutex_unlock(&ipc_router_init_lock);
4338
4339 return ret;
4340}
4341
4342static int msm_ipc_router_init(void)
4343{
4344 int ret;
4345
4346 ret = ipc_router_core_init();
4347 if (ret < 0)
4348 return ret;
4349
4350 ret = platform_driver_register(&ipc_router_driver);
4351 if (ret)
4352 IPC_RTR_ERR(
4353 "%s: ipc_router_driver register failed %d\n", __func__, ret);
4354
4355 ret = msm_ipc_router_init_sockets();
4356 if (ret < 0)
4357 IPC_RTR_ERR("%s: Init sockets failed\n", __func__);
4358
4359 ipc_router_log_ctx_init();
4360 return ret;
4361}
4362
4363module_init(msm_ipc_router_init);
4364MODULE_DESCRIPTION("MSM IPC Router");
4365MODULE_LICENSE("GPL v2");