  1/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/string.h>
17#include <linux/errno.h>
18#include <linux/init.h>
19#include <linux/types.h>
20#include <linux/delay.h>
21#include <linux/err.h>
22#include <linux/sched.h>
23#include <linux/poll.h>
24#include <linux/pm.h>
25#include <linux/platform_device.h>
26#include <linux/uaccess.h>
27#include <linux/debugfs.h>
28#include <linux/rwsem.h>
29#include <linux/ipc_logging.h>
30#include <linux/uaccess.h>
31#include <linux/ipc_router.h>
32#include <linux/ipc_router_xprt.h>
33#include <linux/kref.h>
34#include <soc/qcom/subsystem_notif.h>
35#include <soc/qcom/subsystem_restart.h>
36
37#include <asm/byteorder.h>
38
39#include "ipc_router_private.h"
40#include "ipc_router_security.h"
41
42enum {
43 SMEM_LOG = 1U << 0,
44 RTR_DBG = 1U << 1,
45};
46
47static int msm_ipc_router_debug_mask;
48module_param_named(debug_mask, msm_ipc_router_debug_mask,
49 int, 0664);
50#define MODULE_NAME "ipc_router"
51
52#define IPC_RTR_INFO_PAGES 6
53
54#define IPC_RTR_INFO(log_ctx, x...) do { \
55typeof(log_ctx) _log_ctx = (log_ctx); \
56if (_log_ctx) \
57 ipc_log_string(_log_ctx, x); \
58if (msm_ipc_router_debug_mask & RTR_DBG) \
59 pr_info("[IPCRTR] "x); \
60} while (0)
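
/*
 * Illustrative usage (not taken from the original code): the macro accepts a
 * printf-style format string, e.g.
 *
 *	IPC_RTR_INFO(xprt_info->log_ctx, "%s: link up", __func__);
 *
 * The message is written to the given IPC logging context and, when the
 * RTR_DBG bit is set in debug_mask, also printed to the kernel log with an
 * "[IPCRTR] " prefix.
 */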
61
62#define IPC_ROUTER_LOG_EVENT_TX 0x01
63#define IPC_ROUTER_LOG_EVENT_RX 0x02
64#define IPC_ROUTER_LOG_EVENT_TX_ERR 0x03
65#define IPC_ROUTER_LOG_EVENT_RX_ERR 0x04
66#define IPC_ROUTER_DUMMY_DEST_NODE 0xFFFFFFFF
67
68#define ipc_port_sk(port) ((struct sock *)(port))
69
70static LIST_HEAD(control_ports);
71static DECLARE_RWSEM(control_ports_lock_lha5);
72
73#define LP_HASH_SIZE 32
74static struct list_head local_ports[LP_HASH_SIZE];
75static DECLARE_RWSEM(local_ports_lock_lhc2);
76
 77/* Server info is organized as a hash table indexed by the server's
 78 * service ID. The instance IDs of most servers are 1 or 2, whereas the
 79 * service IDs are well distributed; hashing on the service ID therefore
 80 * keeps the add, lookup and destroy operations efficient. An illustrative
 81 * bucket example follows the declarations below.
 82 */
83#define SRV_HASH_SIZE 32
84static struct list_head server_list[SRV_HASH_SIZE];
85static DECLARE_RWSEM(server_list_lock_lha2);
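
/*
 * For example (consistent with the lookup code further below), a server with
 * service ID 0x4A always hashes to bucket 0x4A & (SRV_HASH_SIZE - 1) = 10 of
 * server_list[], regardless of its instance ID.
 */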
86
87struct msm_ipc_server {
88 struct list_head list;
89 struct kref ref;
90 struct msm_ipc_port_name name;
91 char pdev_name[32];
92 int next_pdev_id;
93 int synced_sec_rule;
94 struct list_head server_port_list;
95};
96
97struct msm_ipc_server_port {
98 struct list_head list;
99 struct platform_device *pdev;
100 struct msm_ipc_port_addr server_addr;
101 struct msm_ipc_router_xprt_info *xprt_info;
102};
103
104struct msm_ipc_resume_tx_port {
105 struct list_head list;
106 u32 port_id;
107 u32 node_id;
108};
109
110struct ipc_router_conn_info {
111 struct list_head list;
112 u32 port_id;
113};
114
115enum {
116 RESET = 0,
117 VALID = 1,
118};
119
120#define RP_HASH_SIZE 32
121struct msm_ipc_router_remote_port {
122 struct list_head list;
123 struct kref ref;
124 struct mutex rport_lock_lhb2; /* lock for remote port state access */
125 u32 node_id;
126 u32 port_id;
127 int status;
128 u32 tx_quota_cnt;
129 struct list_head resume_tx_port_list;
130 struct list_head conn_info_list;
131 void *sec_rule;
132 struct msm_ipc_server *server;
133};
134
135struct msm_ipc_router_xprt_info {
136 struct list_head list;
137 struct msm_ipc_router_xprt *xprt;
138 u32 remote_node_id;
139 u32 initialized;
140 struct list_head pkt_list;
141 struct wakeup_source ws;
142 struct mutex rx_lock_lhb2; /* lock for xprt rx operations */
143 struct mutex tx_lock_lhb2; /* lock for xprt tx operations */
144 u32 need_len;
145 u32 abort_data_read;
146 struct work_struct read_data;
147 struct workqueue_struct *workqueue;
148 void *log_ctx;
149 struct kref ref;
150 struct completion ref_complete;
 151	bool dynamic_ws;
 152};
153
154#define RT_HASH_SIZE 4
155struct msm_ipc_routing_table_entry {
156 struct list_head list;
157 struct kref ref;
158 u32 node_id;
159 u32 neighbor_node_id;
160 struct list_head remote_port_list[RP_HASH_SIZE];
161 struct msm_ipc_router_xprt_info *xprt_info;
162 struct rw_semaphore lock_lha4;
163 unsigned long num_tx_bytes;
164 unsigned long num_rx_bytes;
165};
166
167#define LOG_CTX_NAME_LEN 32
168struct ipc_rtr_log_ctx {
169 struct list_head list;
170 char log_ctx_name[LOG_CTX_NAME_LEN];
171 void *log_ctx;
172};
173
174static struct list_head routing_table[RT_HASH_SIZE];
175static DECLARE_RWSEM(routing_table_lock_lha3);
176static int routing_table_inited;
177
178static void do_read_data(struct work_struct *work);
179
180static LIST_HEAD(xprt_info_list);
181static DECLARE_RWSEM(xprt_info_list_lock_lha5);
182
183static DEFINE_MUTEX(log_ctx_list_lock_lha0);
184static LIST_HEAD(log_ctx_list);
185static DEFINE_MUTEX(ipc_router_init_lock);
186static bool is_ipc_router_inited;
187static int ipc_router_core_init(void);
188#define IPC_ROUTER_INIT_TIMEOUT (10 * HZ)
189
190static u32 next_port_id;
191static DEFINE_MUTEX(next_port_id_lock_lhc1);
192static struct workqueue_struct *msm_ipc_router_workqueue;
193
194static void *local_log_ctx;
195static void *ipc_router_get_log_ctx(char *sub_name);
196static int process_resume_tx_msg(union rr_control_msg *msg,
197 struct rr_packet *pkt);
198static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr);
199static int ipc_router_get_xprt_info_ref(
200 struct msm_ipc_router_xprt_info *xprt_info);
201static void ipc_router_put_xprt_info_ref(
202 struct msm_ipc_router_xprt_info *xprt_info);
203static void ipc_router_release_xprt_info_ref(struct kref *ref);
204
205struct pil_vote_info {
206 void *pil_handle;
207 struct work_struct load_work;
208 struct work_struct unload_work;
209};
210
211#define PIL_SUBSYSTEM_NAME_LEN 32
212static char default_peripheral[PIL_SUBSYSTEM_NAME_LEN];
213
214enum {
215 DOWN,
216 UP,
217};
218
 219static bool is_wakeup_source_allowed;
220
221void msm_ipc_router_set_ws_allowed(bool flag)
222{
223 is_wakeup_source_allowed = flag;
224}
225
 226static void init_routing_table(void)
227{
228 int i;
229
230 for (i = 0; i < RT_HASH_SIZE; i++)
231 INIT_LIST_HEAD(&routing_table[i]);
232}
233
234/**
235 * ipc_router_calc_checksum() - compute the checksum for extended HELLO message
236 * @msg: Reference to the IPC Router HELLO message.
237 *
238 * Return: Computed checksum value, 0 if msg is NULL.
239 */
240static u32 ipc_router_calc_checksum(union rr_control_msg *msg)
241{
242 u32 checksum = 0;
243 int i, len;
244 u16 upper_nb;
245 u16 lower_nb;
246 void *hello;
247
248 if (!msg)
249 return checksum;
250 hello = msg;
251 len = sizeof(*msg);
252
253 for (i = 0; i < len / IPCR_WORD_SIZE; i++) {
254 lower_nb = (*((u32 *)hello)) & IPC_ROUTER_CHECKSUM_MASK;
255 upper_nb = ((*((u32 *)hello)) >> 16) &
256 IPC_ROUTER_CHECKSUM_MASK;
257 checksum = checksum + upper_nb + lower_nb;
258 hello = ((u32 *)hello) + 1;
259 }
260 while (checksum > 0xFFFF)
261 checksum = (checksum & IPC_ROUTER_CHECKSUM_MASK) +
262 ((checksum >> 16) & IPC_ROUTER_CHECKSUM_MASK);
263
264 checksum = ~checksum & IPC_ROUTER_CHECKSUM_MASK;
265 return checksum;
266}
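
/*
 * Worked example (assuming IPC_ROUTER_CHECKSUM_MASK is 0xFFFF): if the 16-bit
 * partial sums add up to 0x2345A, one folding pass yields 0x345A + 0x2 =
 * 0x345C, and the returned checksum is the complement ~0x345C & 0xFFFF =
 * 0xCBA3.
 */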
267
268/**
 269 * skb_copy_to_log_buf() - copy the requested number of bytes from the skb_queue
 270 * @skb_head: skb_queue head that contains the data.
 271 * @pl_len: length of the payload to be copied.
 272 * @hdr_offset: length of the header present in the first skb.
 273 * @log_buf: output buffer that receives the copied payload bytes for logging.
 274 *
 275 * This function copies the first @pl_len bytes of payload from the skb_queue
 276 * into @log_buf so that they can be included in the formatted log string.
277 */
278static void skb_copy_to_log_buf(struct sk_buff_head *skb_head,
279 unsigned int pl_len, unsigned int hdr_offset,
280 u64 *log_buf)
281{
282 struct sk_buff *temp_skb;
283 unsigned int copied_len = 0, copy_len = 0;
284 int remaining;
285
286 if (!skb_head) {
287 IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
288 return;
289 }
290 temp_skb = skb_peek(skb_head);
291 if (unlikely(!temp_skb || !temp_skb->data)) {
292 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
293 return;
294 }
295
296 remaining = temp_skb->len - hdr_offset;
297 skb_queue_walk(skb_head, temp_skb) {
298 copy_len = remaining < pl_len ? remaining : pl_len;
299 memcpy(log_buf + copied_len, temp_skb->data + hdr_offset,
300 copy_len);
301 copied_len += copy_len;
302 hdr_offset = 0;
303 if (copied_len == pl_len)
304 break;
305 remaining = pl_len - remaining;
306 }
307}
308
309/**
310 * ipc_router_log_msg() - log all data messages exchanged
311 * @log_ctx: IPC Logging context specific to each transport
 312 * @xchng_type: Identifies whether the data is received or sent.
 313 * @data: IPC Router data packet or control msg received or to be sent.
 314 * @hdr: Reference to the router header.
 315 * @port_ptr: Local IPC Router port.
 316 * @rport_ptr: Remote IPC Router port.
 317 *
 318 * This function builds the log message that is passed on to the IPC
 319 * logging framework. The logged messages correspond to the information
 320 * exchanged between the IPC Router and its clients.
321 */
322static void ipc_router_log_msg(void *log_ctx, u32 xchng_type,
323 void *data, struct rr_header_v1 *hdr,
324 struct msm_ipc_port *port_ptr,
325 struct msm_ipc_router_remote_port *rport_ptr)
326{
327 struct sk_buff_head *skb_head = NULL;
328 union rr_control_msg *msg = NULL;
329 struct rr_packet *pkt = NULL;
330 u64 pl_buf = 0;
331 struct sk_buff *skb;
332 u32 buf_len = 8;
333 u32 svc_id = 0;
334 u32 svc_ins = 0;
335 unsigned int hdr_offset = 0;
336 u32 port_type = 0;
337
338 if (!log_ctx || !hdr || !data)
339 return;
340
341 if (hdr->type == IPC_ROUTER_CTRL_CMD_DATA) {
342 pkt = (struct rr_packet *)data;
343 skb_head = pkt->pkt_fragment_q;
344 skb = skb_peek(skb_head);
345 if (!skb || !skb->data) {
346 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
347 return;
348 }
349
350 if (skb_queue_len(skb_head) == 1 && skb->len < 8)
351 buf_len = skb->len;
352 if (xchng_type == IPC_ROUTER_LOG_EVENT_TX && hdr->dst_node_id
353 != IPC_ROUTER_NID_LOCAL) {
354 if (hdr->version == IPC_ROUTER_V1)
355 hdr_offset = sizeof(struct rr_header_v1);
356 else if (hdr->version == IPC_ROUTER_V2)
357 hdr_offset = sizeof(struct rr_header_v2);
358 }
359 skb_copy_to_log_buf(skb_head, buf_len, hdr_offset, &pl_buf);
360
361 if (port_ptr && rport_ptr && (port_ptr->type == CLIENT_PORT) &&
362 rport_ptr->server) {
363 svc_id = rport_ptr->server->name.service;
364 svc_ins = rport_ptr->server->name.instance;
365 port_type = CLIENT_PORT;
 366			port_ptr->last_served_svc_id =
367 rport_ptr->server->name.service;
 368		} else if (port_ptr && (port_ptr->type == SERVER_PORT)) {
369 svc_id = port_ptr->port_name.service;
370 svc_ins = port_ptr->port_name.instance;
371 port_type = SERVER_PORT;
372 }
373 IPC_RTR_INFO(log_ctx,
374 "%s %s %s Len:0x%x T:0x%x CF:0x%x SVC:<0x%x:0x%x> SRC:<0x%x:0x%x> DST:<0x%x:0x%x> DATA: %08x %08x",
375 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "" :
376 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ?
377 current->comm : "")),
378 (port_type == CLIENT_PORT ? "CLI" : "SRV"),
379 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
380 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
381 (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
382 (xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
383 "UNKNOWN")))),
384 hdr->size, hdr->type, hdr->control_flag,
385 svc_id, svc_ins, hdr->src_node_id, hdr->src_port_id,
386 hdr->dst_node_id, hdr->dst_port_id,
387 (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));
388
389 } else {
390 msg = (union rr_control_msg *)data;
391 if (msg->cmd == IPC_ROUTER_CTRL_CMD_NEW_SERVER ||
392 msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
393 IPC_RTR_INFO(log_ctx,
394 "CTL MSG: %s cmd:0x%x SVC:<0x%x:0x%x> ADDR:<0x%x:0x%x>",
395 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
396 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
397 (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
398 (xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
399 "UNKNOWN")))),
400 msg->cmd, msg->srv.service, msg->srv.instance,
401 msg->srv.node_id, msg->srv.port_id);
402 else if (msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT ||
403 msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX)
404 IPC_RTR_INFO(log_ctx,
405 "CTL MSG: %s cmd:0x%x ADDR: <0x%x:0x%x>",
406 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
407 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
408 msg->cmd, msg->cli.node_id, msg->cli.port_id);
409 else if (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO && hdr)
410 IPC_RTR_INFO(log_ctx,
411 "CTL MSG %s cmd:0x%x ADDR:0x%x",
412 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
413 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
414 msg->cmd, hdr->src_node_id);
415 else
416 IPC_RTR_INFO(log_ctx,
417 "%s UNKNOWN cmd:0x%x",
418 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
419 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
420 msg->cmd);
421 }
422}
423
424/* Must be called with routing_table_lock_lha3 locked. */
425static struct msm_ipc_routing_table_entry *lookup_routing_table(
426 u32 node_id)
427{
428 u32 key = (node_id % RT_HASH_SIZE);
429 struct msm_ipc_routing_table_entry *rt_entry;
430
431 list_for_each_entry(rt_entry, &routing_table[key], list) {
432 if (rt_entry->node_id == node_id)
433 return rt_entry;
434 }
435 return NULL;
436}
437
438/**
439 * create_routing_table_entry() - Lookup and create a routing table entry
440 * @node_id: Node ID of the routing table entry to be created.
441 * @xprt_info: XPRT through which the node ID is reachable.
442 *
443 * @return: a reference to the routing table entry on success, NULL on failure.
444 */
445static struct msm_ipc_routing_table_entry *create_routing_table_entry(
446 u32 node_id, struct msm_ipc_router_xprt_info *xprt_info)
447{
448 int i;
449 struct msm_ipc_routing_table_entry *rt_entry;
450 u32 key;
451
452 down_write(&routing_table_lock_lha3);
453 rt_entry = lookup_routing_table(node_id);
454 if (rt_entry)
455 goto out_create_rtentry1;
456
457 rt_entry = kmalloc(sizeof(*rt_entry), GFP_KERNEL);
458 if (!rt_entry) {
459 IPC_RTR_ERR("%s: rt_entry allocation failed for %d\n",
460 __func__, node_id);
461 goto out_create_rtentry2;
462 }
463
464 for (i = 0; i < RP_HASH_SIZE; i++)
465 INIT_LIST_HEAD(&rt_entry->remote_port_list[i]);
466 init_rwsem(&rt_entry->lock_lha4);
467 kref_init(&rt_entry->ref);
468 rt_entry->node_id = node_id;
469 rt_entry->xprt_info = xprt_info;
470 if (xprt_info)
471 rt_entry->neighbor_node_id = xprt_info->remote_node_id;
472
473 key = (node_id % RT_HASH_SIZE);
474 list_add_tail(&rt_entry->list, &routing_table[key]);
475out_create_rtentry1:
476 kref_get(&rt_entry->ref);
477out_create_rtentry2:
478 up_write(&routing_table_lock_lha3);
479 return rt_entry;
480}
481
482/**
483 * ipc_router_get_rtentry_ref() - Get a reference to the routing table entry
484 * @node_id: Node ID of the routing table entry.
485 *
486 * @return: a reference to the routing table entry on success, NULL on failure.
487 *
 488 * This function is used to obtain a reference to the routing table entry
489 * corresponding to a node id.
490 */
491static struct msm_ipc_routing_table_entry *ipc_router_get_rtentry_ref(
492 u32 node_id)
493{
494 struct msm_ipc_routing_table_entry *rt_entry;
495
496 down_read(&routing_table_lock_lha3);
497 rt_entry = lookup_routing_table(node_id);
498 if (rt_entry)
499 kref_get(&rt_entry->ref);
500 up_read(&routing_table_lock_lha3);
501 return rt_entry;
502}
503
504/**
505 * ipc_router_release_rtentry() - Cleanup and release the routing table entry
506 * @ref: Reference to the entry.
507 *
508 * This function is called when all references to the routing table entry are
509 * released.
510 */
511void ipc_router_release_rtentry(struct kref *ref)
512{
513 struct msm_ipc_routing_table_entry *rt_entry =
514 container_of(ref, struct msm_ipc_routing_table_entry, ref);
515
516 /* All references to a routing entry will be put only under SSR.
517 * As part of SSR, all the internals of the routing table entry
518 * are cleaned. So just free the routing table entry.
519 */
520 kfree(rt_entry);
521}
522
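/**
 * rr_read() - Dequeue the next packet received on an XPRT
 * @xprt_info: XPRT from whose receive queue the packet is dequeued.
 *
 * @return: Pointer to the packet on success, NULL if the queue is empty or
 *          the read is aborted due to SSR.
 */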
523struct rr_packet *rr_read(struct msm_ipc_router_xprt_info *xprt_info)
524{
525 struct rr_packet *temp_pkt;
526
527 if (!xprt_info)
528 return NULL;
529
530 mutex_lock(&xprt_info->rx_lock_lhb2);
531 if (xprt_info->abort_data_read) {
532 mutex_unlock(&xprt_info->rx_lock_lhb2);
533 IPC_RTR_ERR("%s detected SSR & exiting now\n",
534 xprt_info->xprt->name);
535 return NULL;
536 }
537
538 if (list_empty(&xprt_info->pkt_list)) {
539 mutex_unlock(&xprt_info->rx_lock_lhb2);
540 return NULL;
541 }
542
543 temp_pkt = list_first_entry(&xprt_info->pkt_list,
544 struct rr_packet, list);
545 list_del(&temp_pkt->list);
546 if (list_empty(&xprt_info->pkt_list))
547 __pm_relax(&xprt_info->ws);
548 mutex_unlock(&xprt_info->rx_lock_lhb2);
549 return temp_pkt;
550}
551
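/**
 * clone_pkt() - Clone an IPC Router packet
 * @pkt: Packet to be cloned.
 *
 * @return: Pointer to the cloned packet on success, NULL on failure.
 *
 * The header, the optional header and every SKB in the fragment queue are
 * duplicated so that the clone can be queued independently of the original.
 */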
552struct rr_packet *clone_pkt(struct rr_packet *pkt)
553{
554 struct rr_packet *cloned_pkt;
555 struct sk_buff *temp_skb, *cloned_skb;
556 struct sk_buff_head *pkt_fragment_q;
557
558 cloned_pkt = kzalloc(sizeof(*cloned_pkt), GFP_KERNEL);
559 if (!cloned_pkt) {
560 IPC_RTR_ERR("%s: failure\n", __func__);
561 return NULL;
562 }
563 memcpy(&cloned_pkt->hdr, &pkt->hdr, sizeof(struct rr_header_v1));
564 if (pkt->opt_hdr.len > 0) {
565 cloned_pkt->opt_hdr.data = kmalloc(pkt->opt_hdr.len,
566 GFP_KERNEL);
567 if (!cloned_pkt->opt_hdr.data) {
568 IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
569 } else {
570 cloned_pkt->opt_hdr.len = pkt->opt_hdr.len;
571 memcpy(cloned_pkt->opt_hdr.data, pkt->opt_hdr.data,
572 pkt->opt_hdr.len);
573 }
574 }
575
576 pkt_fragment_q = kmalloc(sizeof(*pkt_fragment_q), GFP_KERNEL);
577 if (!pkt_fragment_q) {
578 IPC_RTR_ERR("%s: pkt_frag_q alloc failure\n", __func__);
579 kfree(cloned_pkt);
580 return NULL;
581 }
582 skb_queue_head_init(pkt_fragment_q);
583 kref_init(&cloned_pkt->ref);
584
585 skb_queue_walk(pkt->pkt_fragment_q, temp_skb) {
586 cloned_skb = skb_clone(temp_skb, GFP_KERNEL);
587 if (!cloned_skb)
588 goto fail_clone;
589 skb_queue_tail(pkt_fragment_q, cloned_skb);
590 }
591 cloned_pkt->pkt_fragment_q = pkt_fragment_q;
592 cloned_pkt->length = pkt->length;
 593	cloned_pkt->ws_need = pkt->ws_need;
 594	return cloned_pkt;
595
596fail_clone:
597 while (!skb_queue_empty(pkt_fragment_q)) {
598 temp_skb = skb_dequeue(pkt_fragment_q);
599 kfree_skb(temp_skb);
600 }
601 kfree(pkt_fragment_q);
602 if (cloned_pkt->opt_hdr.len > 0)
603 kfree(cloned_pkt->opt_hdr.data);
604 kfree(cloned_pkt);
605 return NULL;
606}
607
608/**
609 * create_pkt() - Create a Router packet
610 * @data: SKB queue to be contained inside the packet.
611 *
612 * @return: pointer to packet on success, NULL on failure.
613 */
614struct rr_packet *create_pkt(struct sk_buff_head *data)
615{
616 struct rr_packet *pkt;
617 struct sk_buff *temp_skb;
618
619 pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
620 if (!pkt) {
621 IPC_RTR_ERR("%s: failure\n", __func__);
622 return NULL;
623 }
624
625 if (data) {
626 pkt->pkt_fragment_q = data;
627 skb_queue_walk(pkt->pkt_fragment_q, temp_skb)
628 pkt->length += temp_skb->len;
629 } else {
630 pkt->pkt_fragment_q = kmalloc(sizeof(*pkt->pkt_fragment_q),
631 GFP_KERNEL);
632 if (!pkt->pkt_fragment_q) {
633 IPC_RTR_ERR("%s: Couldn't alloc pkt_fragment_q\n",
634 __func__);
635 kfree(pkt);
636 return NULL;
637 }
638 skb_queue_head_init(pkt->pkt_fragment_q);
639 }
640 kref_init(&pkt->ref);
641 return pkt;
642}
643
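/**
 * release_pkt() - Release an IPC Router packet
 * @pkt: Packet to be freed along with its SKB fragment queue and
 *       optional header.
 */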
644void release_pkt(struct rr_packet *pkt)
645{
646 struct sk_buff *temp_skb;
647
648 if (!pkt)
649 return;
650
651 if (!pkt->pkt_fragment_q) {
652 kfree(pkt);
653 return;
654 }
655
656 while (!skb_queue_empty(pkt->pkt_fragment_q)) {
657 temp_skb = skb_dequeue(pkt->pkt_fragment_q);
658 kfree_skb(temp_skb);
659 }
660 kfree(pkt->pkt_fragment_q);
661 if (pkt->opt_hdr.len > 0)
662 kfree(pkt->opt_hdr.data);
663 kfree(pkt);
664}
665
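/**
 * msm_ipc_router_buf_to_skb() - Copy a linear buffer into an SKB queue
 * @buf: Source buffer.
 * @buf_len: Length of the source buffer in bytes.
 *
 * @return: Pointer to the SKB queue on success, NULL on failure.
 *
 * Headroom for the IPC Router header is reserved in the first SKB. If one
 * SKB covering the whole buffer cannot be allocated, the copy is retried
 * with progressively halved chunk sizes; only an allocation failure at or
 * below PAGE_SIZE / 2 is treated as fatal.
 */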
666static struct sk_buff_head *msm_ipc_router_buf_to_skb(void *buf,
667 unsigned int buf_len)
668{
669 struct sk_buff_head *skb_head;
670 struct sk_buff *skb;
671 int first = 1, offset = 0;
672 int skb_size, data_size;
673 void *data;
674 int last = 1;
675 int align_size;
676
677 skb_head = kmalloc(sizeof(*skb_head), GFP_KERNEL);
678 if (!skb_head) {
 679		IPC_RTR_ERR("%s: Could not allocate skb_head\n", __func__);
680 return NULL;
681 }
682 skb_queue_head_init(skb_head);
683
684 data_size = buf_len;
685 align_size = ALIGN_SIZE(data_size);
686 while (offset != buf_len) {
687 skb_size = data_size;
688 if (first)
689 skb_size += IPC_ROUTER_HDR_SIZE;
690 if (last)
691 skb_size += align_size;
692
693 skb = alloc_skb(skb_size, GFP_KERNEL);
694 if (!skb) {
695 if (skb_size <= (PAGE_SIZE / 2)) {
696 IPC_RTR_ERR("%s: cannot allocate skb\n",
697 __func__);
698 goto buf_to_skb_error;
699 }
700 data_size = data_size / 2;
701 last = 0;
702 continue;
703 }
704
705 if (first) {
706 skb_reserve(skb, IPC_ROUTER_HDR_SIZE);
707 first = 0;
708 }
709
710 data = skb_put(skb, data_size);
711 memcpy(skb->data, buf + offset, data_size);
712 skb_queue_tail(skb_head, skb);
713 offset += data_size;
714 data_size = buf_len - offset;
715 last = 1;
716 }
717 return skb_head;
718
719buf_to_skb_error:
720 while (!skb_queue_empty(skb_head)) {
721 skb = skb_dequeue(skb_head);
722 kfree_skb(skb);
723 }
724 kfree(skb_head);
725 return NULL;
726}
727
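/**
 * msm_ipc_router_skb_to_buf() - Copy an SKB queue into a linear buffer
 * @skb_head: SKB queue that contains the data.
 * @len: Number of bytes to copy.
 *
 * @return: Pointer to a newly allocated buffer on success, NULL on failure.
 *          The caller is responsible for freeing the buffer.
 */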
728static void *msm_ipc_router_skb_to_buf(struct sk_buff_head *skb_head,
729 unsigned int len)
730{
731 struct sk_buff *temp;
732 unsigned int offset = 0, buf_len = 0, copy_len;
733 void *buf;
734
735 if (!skb_head) {
736 IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
737 return NULL;
738 }
739
740 temp = skb_peek(skb_head);
741 buf_len = len;
742 buf = kmalloc(buf_len, GFP_KERNEL);
743 if (!buf) {
744 IPC_RTR_ERR("%s: cannot allocate buf\n", __func__);
745 return NULL;
746 }
747 skb_queue_walk(skb_head, temp) {
748 copy_len = buf_len < temp->len ? buf_len : temp->len;
749 memcpy(buf + offset, temp->data, copy_len);
750 offset += copy_len;
751 buf_len -= copy_len;
752 }
753 return buf;
754}
755
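/**
 * msm_ipc_router_free_skb() - Free an SKB queue and all SKBs queued on it
 * @skb_head: SKB queue to be freed.
 */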
756void msm_ipc_router_free_skb(struct sk_buff_head *skb_head)
757{
758 struct sk_buff *temp_skb;
759
760 if (!skb_head)
761 return;
762
763 while (!skb_queue_empty(skb_head)) {
764 temp_skb = skb_dequeue(skb_head);
765 kfree_skb(temp_skb);
766 }
767 kfree(skb_head);
768}
769
770/**
771 * extract_optional_header() - Extract the optional header from skb
772 * @pkt: Packet structure into which the header has to be extracted.
 773 * @opt_len: The optional header length in words (IPCR_WORD_SIZE units).
774 *
775 * @return: Length of optional header in bytes if success, zero otherwise.
776 */
777static int extract_optional_header(struct rr_packet *pkt, u8 opt_len)
778{
779 size_t offset = 0, buf_len = 0, copy_len, opt_hdr_len;
780 struct sk_buff *temp;
781 struct sk_buff_head *skb_head;
782
783 opt_hdr_len = opt_len * IPCR_WORD_SIZE;
784 pkt->opt_hdr.data = kmalloc(opt_hdr_len, GFP_KERNEL);
785 if (!pkt->opt_hdr.data) {
786 IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
787 return 0;
788 }
789 skb_head = pkt->pkt_fragment_q;
790 buf_len = opt_hdr_len;
791 skb_queue_walk(skb_head, temp) {
792 copy_len = buf_len < temp->len ? buf_len : temp->len;
793 memcpy(pkt->opt_hdr.data + offset, temp->data, copy_len);
794 offset += copy_len;
795 buf_len -= copy_len;
796 skb_pull(temp, copy_len);
797 if (temp->len == 0) {
798 skb_dequeue(skb_head);
799 kfree_skb(temp);
800 }
801 }
802 pkt->opt_hdr.len = opt_hdr_len;
803 return opt_hdr_len;
804}
805
806/**
807 * extract_header_v1() - Extract IPC Router header of version 1
 808 * @pkt: Packet structure into which the header has to be extracted.
809 * @skb: SKB from which the header has to be extracted.
810 *
811 * @return: 0 on success, standard Linux error codes on failure.
812 */
813static int extract_header_v1(struct rr_packet *pkt, struct sk_buff *skb)
814{
815 if (!pkt || !skb) {
816 IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
817 return -EINVAL;
818 }
819
820 memcpy(&pkt->hdr, skb->data, sizeof(struct rr_header_v1));
821 skb_pull(skb, sizeof(struct rr_header_v1));
822 pkt->length -= sizeof(struct rr_header_v1);
823 return 0;
824}
825
826/**
827 * extract_header_v2() - Extract IPC Router header of version 2
 828 * @pkt: Packet structure into which the header has to be extracted.
829 * @skb: SKB from which the header has to be extracted.
830 *
831 * @return: 0 on success, standard Linux error codes on failure.
832 */
833static int extract_header_v2(struct rr_packet *pkt, struct sk_buff *skb)
834{
835 struct rr_header_v2 *hdr;
836 u8 opt_len;
837 size_t opt_hdr_len;
838 size_t total_hdr_size = sizeof(*hdr);
839
840 if (!pkt || !skb) {
841 IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
842 return -EINVAL;
843 }
844
845 hdr = (struct rr_header_v2 *)skb->data;
846 pkt->hdr.version = (u32)hdr->version;
847 pkt->hdr.type = (u32)hdr->type;
848 pkt->hdr.src_node_id = (u32)hdr->src_node_id;
849 pkt->hdr.src_port_id = (u32)hdr->src_port_id;
850 pkt->hdr.size = (u32)hdr->size;
851 pkt->hdr.control_flag = (u32)hdr->control_flag;
852 pkt->hdr.dst_node_id = (u32)hdr->dst_node_id;
853 pkt->hdr.dst_port_id = (u32)hdr->dst_port_id;
854 opt_len = hdr->opt_len;
855 skb_pull(skb, total_hdr_size);
856 if (opt_len > 0) {
857 opt_hdr_len = extract_optional_header(pkt, opt_len);
858 total_hdr_size += opt_hdr_len;
859 }
860 pkt->length -= total_hdr_size;
861 return 0;
862}
863
864/**
865 * extract_header() - Extract IPC Router header
 866 * @pkt: Packet from which the header has to be extracted.
867 *
868 * @return: 0 on success, standard Linux error codes on failure.
869 *
870 * This function will check if the header version is v1 or v2 and invoke
871 * the corresponding helper function to extract the IPC Router header.
872 */
873static int extract_header(struct rr_packet *pkt)
874{
875 struct sk_buff *temp_skb;
876 int ret;
877
878 if (!pkt) {
879 IPC_RTR_ERR("%s: NULL PKT\n", __func__);
880 return -EINVAL;
881 }
882
883 temp_skb = skb_peek(pkt->pkt_fragment_q);
884 if (!temp_skb || !temp_skb->data) {
885 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
886 return -EINVAL;
887 }
888
889 if (temp_skb->data[0] == IPC_ROUTER_V1) {
890 ret = extract_header_v1(pkt, temp_skb);
891 } else if (temp_skb->data[0] == IPC_ROUTER_V2) {
892 ret = extract_header_v2(pkt, temp_skb);
893 } else {
894 IPC_RTR_ERR("%s: Invalid Header version %02x\n",
895 __func__, temp_skb->data[0]);
896 print_hex_dump(KERN_ERR, "Header: ", DUMP_PREFIX_ADDRESS,
897 16, 1, temp_skb->data, pkt->length, true);
898 return -EINVAL;
899 }
900 return ret;
901}
902
903/**
904 * calc_tx_header_size() - Calculate header size to be reserved in SKB
905 * @pkt: Packet in which the space for header has to be reserved.
906 * @dst_xprt_info: XPRT through which the destination is reachable.
907 *
908 * @return: required header size on success,
 909 * standard Linux error codes on failure.
910 *
911 * This function is used to calculate the header size that has to be reserved
912 * in a transmit SKB. The header size is calculated based on the XPRT through
913 * which the destination node is reachable.
914 */
915static int calc_tx_header_size(struct rr_packet *pkt,
916 struct msm_ipc_router_xprt_info *dst_xprt_info)
917{
918 int hdr_size = 0;
919 int xprt_version = 0;
920 struct msm_ipc_router_xprt_info *xprt_info = dst_xprt_info;
921
922 if (!pkt) {
923 IPC_RTR_ERR("%s: NULL PKT\n", __func__);
924 return -EINVAL;
925 }
926
927 if (xprt_info)
928 xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
929
930 if (xprt_version == IPC_ROUTER_V1) {
931 pkt->hdr.version = IPC_ROUTER_V1;
932 hdr_size = sizeof(struct rr_header_v1);
933 } else if (xprt_version == IPC_ROUTER_V2) {
934 pkt->hdr.version = IPC_ROUTER_V2;
935 hdr_size = sizeof(struct rr_header_v2) + pkt->opt_hdr.len;
936 } else {
937 IPC_RTR_ERR("%s: Invalid xprt_version %d\n",
938 __func__, xprt_version);
939 hdr_size = -EINVAL;
940 }
941
942 return hdr_size;
943}
944
945/**
946 * calc_rx_header_size() - Calculate the RX header size
947 * @xprt_info: XPRT info of the received message.
948 *
949 * @return: valid header size on success, INT_MAX on failure.
950 */
951static int calc_rx_header_size(struct msm_ipc_router_xprt_info *xprt_info)
952{
953 int xprt_version = 0;
954 int hdr_size = INT_MAX;
955
956 if (xprt_info)
957 xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
958
959 if (xprt_version == IPC_ROUTER_V1)
960 hdr_size = sizeof(struct rr_header_v1);
961 else if (xprt_version == IPC_ROUTER_V2)
962 hdr_size = sizeof(struct rr_header_v2);
963 return hdr_size;
964}
965
966/**
967 * prepend_header_v1() - Prepend IPC Router header of version 1
968 * @pkt: Packet structure which contains the header info to be prepended.
969 * @hdr_size: Size of the header
970 *
971 * @return: 0 on success, standard Linux error codes on failure.
972 */
973static int prepend_header_v1(struct rr_packet *pkt, int hdr_size)
974{
975 struct sk_buff *temp_skb;
976 struct rr_header_v1 *hdr;
977
978 if (!pkt || hdr_size <= 0) {
979 IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
980 return -EINVAL;
981 }
982
983 temp_skb = skb_peek(pkt->pkt_fragment_q);
984 if (!temp_skb || !temp_skb->data) {
985 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
986 return -EINVAL;
987 }
988
989 if (skb_headroom(temp_skb) < hdr_size) {
990 temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
991 if (!temp_skb) {
992 IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
993 __func__, hdr_size);
994 return -ENOMEM;
995 }
996 skb_reserve(temp_skb, hdr_size);
997 }
998
999 hdr = (struct rr_header_v1 *)skb_push(temp_skb, hdr_size);
1000 memcpy(hdr, &pkt->hdr, hdr_size);
1001 if (temp_skb != skb_peek(pkt->pkt_fragment_q))
1002 skb_queue_head(pkt->pkt_fragment_q, temp_skb);
1003 pkt->length += hdr_size;
1004 return 0;
1005}
1006
1007/**
1008 * prepend_header_v2() - Prepend IPC Router header of version 2
1009 * @pkt: Packet structure which contains the header info to be prepended.
1010 * @hdr_size: Size of the header
1011 *
1012 * @return: 0 on success, standard Linux error codes on failure.
1013 */
1014static int prepend_header_v2(struct rr_packet *pkt, int hdr_size)
1015{
1016 struct sk_buff *temp_skb;
1017 struct rr_header_v2 *hdr;
1018
1019 if (!pkt || hdr_size <= 0) {
1020 IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
1021 return -EINVAL;
1022 }
1023
1024 temp_skb = skb_peek(pkt->pkt_fragment_q);
1025 if (!temp_skb || !temp_skb->data) {
1026 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
1027 return -EINVAL;
1028 }
1029
1030 if (skb_headroom(temp_skb) < hdr_size) {
1031 temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
1032 if (!temp_skb) {
1033 IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
1034 __func__, hdr_size);
1035 return -ENOMEM;
1036 }
1037 skb_reserve(temp_skb, hdr_size);
1038 }
1039
1040 hdr = (struct rr_header_v2 *)skb_push(temp_skb, hdr_size);
1041 hdr->version = (u8)pkt->hdr.version;
1042 hdr->type = (u8)pkt->hdr.type;
1043 hdr->control_flag = (u8)pkt->hdr.control_flag;
1044 hdr->size = (u32)pkt->hdr.size;
1045 hdr->src_node_id = (u16)pkt->hdr.src_node_id;
1046 hdr->src_port_id = (u16)pkt->hdr.src_port_id;
1047 hdr->dst_node_id = (u16)pkt->hdr.dst_node_id;
1048 hdr->dst_port_id = (u16)pkt->hdr.dst_port_id;
1049 if (pkt->opt_hdr.len > 0) {
1050 hdr->opt_len = pkt->opt_hdr.len / IPCR_WORD_SIZE;
 1051		memcpy(hdr + 1, pkt->opt_hdr.data, pkt->opt_hdr.len);
1052 } else {
1053 hdr->opt_len = 0;
1054 }
1055 if (temp_skb != skb_peek(pkt->pkt_fragment_q))
1056 skb_queue_head(pkt->pkt_fragment_q, temp_skb);
1057 pkt->length += hdr_size;
1058 return 0;
1059}
1060
1061/**
1062 * prepend_header() - Prepend IPC Router header
1063 * @pkt: Packet structure which contains the header info to be prepended.
1064 * @xprt_info: XPRT through which the packet is transmitted.
1065 *
1066 * @return: 0 on success, standard Linux error codes on failure.
1067 *
1068 * This function prepends the header to the packet to be transmitted. The
1069 * IPC Router header version to be prepended depends on the XPRT through
1070 * which the destination is reachable.
1071 */
1072static int prepend_header(struct rr_packet *pkt,
1073 struct msm_ipc_router_xprt_info *xprt_info)
1074{
1075 int hdr_size;
1076 struct sk_buff *temp_skb;
1077
1078 if (!pkt) {
1079 IPC_RTR_ERR("%s: NULL PKT\n", __func__);
1080 return -EINVAL;
1081 }
1082
1083 temp_skb = skb_peek(pkt->pkt_fragment_q);
1084 if (!temp_skb || !temp_skb->data) {
1085 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
1086 return -EINVAL;
1087 }
1088
1089 hdr_size = calc_tx_header_size(pkt, xprt_info);
1090 if (hdr_size <= 0)
1091 return hdr_size;
1092
1093 if (pkt->hdr.version == IPC_ROUTER_V1)
1094 return prepend_header_v1(pkt, hdr_size);
1095 else if (pkt->hdr.version == IPC_ROUTER_V2)
1096 return prepend_header_v2(pkt, hdr_size);
1097 else
1098 return -EINVAL;
1099}
1100
1101/**
1102 * defragment_pkt() - Defragment and linearize the packet
1103 * @pkt: Packet to be linearized.
1104 *
1105 * @return: 0 on success, standard Linux error codes on failure.
1106 *
1107 * Some packets contain fragments of data over multiple SKBs. If an XPRT
 1108 * does not support fragmented writes, linearize multiple SKBs into one
1109 * single SKB.
1110 */
1111static int defragment_pkt(struct rr_packet *pkt)
1112{
1113 struct sk_buff *dst_skb, *src_skb, *temp_skb;
1114 int offset = 0, buf_len = 0, copy_len;
1115 void *buf;
1116 int align_size;
1117
1118 if (!pkt || pkt->length <= 0) {
1119 IPC_RTR_ERR("%s: Invalid PKT\n", __func__);
1120 return -EINVAL;
1121 }
1122
1123 if (skb_queue_len(pkt->pkt_fragment_q) == 1)
1124 return 0;
1125
1126 align_size = ALIGN_SIZE(pkt->length);
1127 dst_skb = alloc_skb(pkt->length + align_size, GFP_KERNEL);
1128 if (!dst_skb) {
1129 IPC_RTR_ERR("%s: could not allocate one skb of size %d\n",
1130 __func__, pkt->length);
1131 return -ENOMEM;
1132 }
1133 buf = skb_put(dst_skb, pkt->length);
1134 buf_len = pkt->length;
1135
1136 skb_queue_walk(pkt->pkt_fragment_q, src_skb) {
1137 copy_len = buf_len < src_skb->len ? buf_len : src_skb->len;
1138 memcpy(buf + offset, src_skb->data, copy_len);
1139 offset += copy_len;
1140 buf_len -= copy_len;
1141 }
1142
1143 while (!skb_queue_empty(pkt->pkt_fragment_q)) {
1144 temp_skb = skb_dequeue(pkt->pkt_fragment_q);
1145 kfree_skb(temp_skb);
1146 }
1147 skb_queue_tail(pkt->pkt_fragment_q, dst_skb);
1148 return 0;
1149}
1150
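/**
 * post_pkt_to_port() - Post a packet to a local port's receive queue
 * @port_ptr: Local port to which the packet has to be posted.
 * @pkt: Packet to be posted.
 * @clone: If set, a clone of the packet is queued instead of the original.
 *
 * @return: 0 on success, standard Linux error codes on failure.
 *
 * After queueing, the port owner is notified either through the registered
 * notify() callback or through the endpoint socket's data_ready() callback.
 */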
1151static int post_pkt_to_port(struct msm_ipc_port *port_ptr,
1152 struct rr_packet *pkt, int clone)
1153{
1154 struct rr_packet *temp_pkt = pkt;
1155 void (*notify)(unsigned int event, void *oob_data,
1156 size_t oob_data_len, void *priv);
1157 void (*data_ready)(struct sock *sk) = NULL;
1158 struct sock *sk;
1159 u32 pkt_type;
1160
1161 if (unlikely(!port_ptr || !pkt))
1162 return -EINVAL;
1163
1164 if (clone) {
1165 temp_pkt = clone_pkt(pkt);
1166 if (!temp_pkt) {
1167 IPC_RTR_ERR(
1168 "%s: Error cloning packet for port %08x:%08x\n",
1169 __func__, port_ptr->this_port.node_id,
1170 port_ptr->this_port.port_id);
1171 return -ENOMEM;
1172 }
1173 }
1174
1175 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
 1176	if (pkt->ws_need)
1177 __pm_stay_awake(port_ptr->port_rx_ws);
 1178	list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q);
1179 wake_up(&port_ptr->port_rx_wait_q);
1180 notify = port_ptr->notify;
1181 pkt_type = temp_pkt->hdr.type;
1182 sk = (struct sock *)port_ptr->endpoint;
1183 if (sk) {
1184 read_lock(&sk->sk_callback_lock);
1185 data_ready = sk->sk_data_ready;
1186 read_unlock(&sk->sk_callback_lock);
1187 }
1188 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
1189 if (notify)
1190 notify(pkt_type, NULL, 0, port_ptr->priv);
1191 else if (sk && data_ready)
1192 data_ready(sk);
1193
1194 return 0;
1195}
1196
1197/**
1198 * ipc_router_peek_pkt_size() - Peek into the packet header to get potential
1199 * packet size
1200 * @data: Starting address of the packet which points to router header.
1201 *
1202 * @returns: potential packet size on success, < 0 on error.
1203 *
1204 * This function is used by the underlying transport abstraction layer to
1205 * peek into the potential packet size of an incoming packet. This information
1206 * is used to perform link layer fragmentation and re-assembly
1207 */
1208int ipc_router_peek_pkt_size(char *data)
1209{
1210 int size;
1211
1212 if (!data) {
1213 pr_err("%s: NULL PKT\n", __func__);
1214 return -EINVAL;
1215 }
1216
1217 if (data[0] == IPC_ROUTER_V1)
1218 size = ((struct rr_header_v1 *)data)->size +
1219 sizeof(struct rr_header_v1);
1220 else if (data[0] == IPC_ROUTER_V2)
1221 size = ((struct rr_header_v2 *)data)->size +
1222 ((struct rr_header_v2 *)data)->opt_len * IPCR_WORD_SIZE
1223 + sizeof(struct rr_header_v2);
1224 else
1225 return -EINVAL;
1226
1227 size += ALIGN_SIZE(size);
1228 return size;
1229}
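
/*
 * For example, a v1 packet carrying a 100-byte payload is reported as
 * 100 + sizeof(struct rr_header_v1) bytes, plus whatever padding
 * ALIGN_SIZE() adds for that total.
 */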
1230
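/**
 * post_control_ports() - Post a control packet to all registered control ports
 * @pkt: Control packet to be posted; a clone is queued to each control port.
 *
 * @return: 0 on success, -EINVAL if @pkt is NULL.
 */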
1231static int post_control_ports(struct rr_packet *pkt)
1232{
1233 struct msm_ipc_port *port_ptr;
1234
1235 if (!pkt)
1236 return -EINVAL;
1237
1238 down_read(&control_ports_lock_lha5);
1239 list_for_each_entry(port_ptr, &control_ports, list)
1240 post_pkt_to_port(port_ptr, pkt, 1);
1241 up_read(&control_ports_lock_lha5);
1242 return 0;
1243}
1244
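/**
 * allocate_port_id() - Allocate a unique local port ID
 *
 * @return: A non-zero port ID on success, 0 if all port IDs are in use.
 *
 * The IDs come from a monotonically increasing counter that wraps back to 1
 * once it would collide with the IPC_ROUTER_ADDRESS flag bits; any ID that
 * is already present in the local port hash table is skipped.
 */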
1245static u32 allocate_port_id(void)
1246{
1247 u32 port_id = 0, prev_port_id, key;
1248 struct msm_ipc_port *port_ptr;
1249
1250 mutex_lock(&next_port_id_lock_lhc1);
1251 prev_port_id = next_port_id;
1252 down_read(&local_ports_lock_lhc2);
1253 do {
1254 next_port_id++;
1255 if ((next_port_id & IPC_ROUTER_ADDRESS) == IPC_ROUTER_ADDRESS)
1256 next_port_id = 1;
1257
1258 key = (next_port_id & (LP_HASH_SIZE - 1));
1259 if (list_empty(&local_ports[key])) {
1260 port_id = next_port_id;
1261 break;
1262 }
1263 list_for_each_entry(port_ptr, &local_ports[key], list) {
1264 if (port_ptr->this_port.port_id == next_port_id) {
1265 port_id = next_port_id;
1266 break;
1267 }
1268 }
1269 if (!port_id) {
1270 port_id = next_port_id;
1271 break;
1272 }
1273 port_id = 0;
1274 } while (next_port_id != prev_port_id);
1275 up_read(&local_ports_lock_lhc2);
1276 mutex_unlock(&next_port_id_lock_lhc1);
1277
1278 return port_id;
1279}
1280
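/**
 * msm_ipc_router_add_local_port() - Add a port to the local port hash table
 * @port_ptr: Port to be added; it is hashed on its port ID.
 */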
1281void msm_ipc_router_add_local_port(struct msm_ipc_port *port_ptr)
1282{
1283 u32 key;
1284
1285 if (!port_ptr)
1286 return;
1287
1288 key = (port_ptr->this_port.port_id & (LP_HASH_SIZE - 1));
1289 down_write(&local_ports_lock_lhc2);
1290 list_add_tail(&port_ptr->list, &local_ports[key]);
1291 up_write(&local_ports_lock_lhc2);
1292}
1293
1294/**
1295 * msm_ipc_router_create_raw_port() - Create an IPC Router port
 1296 * @endpoint: User-space socket information to be cached.
 1297 * @notify: Callback used to notify incoming events on the port.
 1298 *          It is invoked with the event ID, any out-of-band data
 1299 *          associated with the event, the size of that data, and
 1300 *          the private data pointer registered below.
 1301 * @priv: Private data registered during port creation and passed
 1302 *        back during event notification.
1303 *
1304 * @return: Valid pointer to port on success, NULL on failure.
1305 *
1306 * This function is used to create an IPC Router port. The port is used for
1307 * communication locally or outside the subsystem.
1308 */
1309struct msm_ipc_port *
1310msm_ipc_router_create_raw_port(void *endpoint,
1311 void (*notify)(unsigned int event,
1312 void *oob_data,
1313 size_t oob_data_len, void *priv),
1314 void *priv)
1315{
1316 struct msm_ipc_port *port_ptr;
1317
1318 port_ptr = kzalloc(sizeof(*port_ptr), GFP_KERNEL);
1319 if (!port_ptr)
1320 return NULL;
1321
1322 port_ptr->this_port.node_id = IPC_ROUTER_NID_LOCAL;
1323 port_ptr->this_port.port_id = allocate_port_id();
1324 if (!port_ptr->this_port.port_id) {
1325 IPC_RTR_ERR("%s: All port ids are in use\n", __func__);
1326 kfree(port_ptr);
1327 return NULL;
1328 }
1329
1330 mutex_init(&port_ptr->port_lock_lhc3);
1331 INIT_LIST_HEAD(&port_ptr->port_rx_q);
1332 mutex_init(&port_ptr->port_rx_q_lock_lhc3);
1333 init_waitqueue_head(&port_ptr->port_rx_wait_q);
1334 snprintf(port_ptr->rx_ws_name, MAX_WS_NAME_SZ,
 1335		 "ipc%08x_%d_%s",
 1336		 port_ptr->this_port.port_id,
 1337		 task_pid_nr(current),
 1338		 current->comm);
1339 port_ptr->port_rx_ws = wakeup_source_register(port_ptr->rx_ws_name);
1340 if (!port_ptr->port_rx_ws) {
1341 kfree(port_ptr);
1342 return NULL;
1343 }
1344 init_waitqueue_head(&port_ptr->port_tx_wait_q);
1345 kref_init(&port_ptr->ref);
1346
1347 port_ptr->endpoint = endpoint;
1348 port_ptr->notify = notify;
1349 port_ptr->priv = priv;
1350
1351 msm_ipc_router_add_local_port(port_ptr);
1352 if (endpoint)
1353 sock_hold(ipc_port_sk(endpoint));
1354 return port_ptr;
1355}
1356
1357/**
1358 * ipc_router_get_port_ref() - Get a reference to the local port
1359 * @port_id: Port ID of the local port for which reference is get.
1360 *
1361 * @return: If port is found, a reference to the port is returned.
1362 * Else NULL is returned.
1363 */
1364static struct msm_ipc_port *ipc_router_get_port_ref(u32 port_id)
1365{
1366 int key = (port_id & (LP_HASH_SIZE - 1));
1367 struct msm_ipc_port *port_ptr;
1368
1369 down_read(&local_ports_lock_lhc2);
1370 list_for_each_entry(port_ptr, &local_ports[key], list) {
1371 if (port_ptr->this_port.port_id == port_id) {
1372 kref_get(&port_ptr->ref);
1373 up_read(&local_ports_lock_lhc2);
1374 return port_ptr;
1375 }
1376 }
1377 up_read(&local_ports_lock_lhc2);
1378 return NULL;
1379}
1380
1381/**
1382 * ipc_router_release_port() - Cleanup and release the port
1383 * @ref: Reference to the port.
1384 *
1385 * This function is called when all references to the port are released.
1386 */
1387void ipc_router_release_port(struct kref *ref)
1388{
1389 struct rr_packet *pkt, *temp_pkt;
1390 struct msm_ipc_port *port_ptr =
1391 container_of(ref, struct msm_ipc_port, ref);
1392
1393 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
1394 list_for_each_entry_safe(pkt, temp_pkt, &port_ptr->port_rx_q, list) {
1395 list_del(&pkt->list);
1396 release_pkt(pkt);
1397 }
1398 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
1399 wakeup_source_unregister(port_ptr->port_rx_ws);
1400 if (port_ptr->endpoint)
1401 sock_put(ipc_port_sk(port_ptr->endpoint));
1402 kfree(port_ptr);
1403}
1404
1405/**
1406 * ipc_router_get_rport_ref()- Get reference to the remote port
1407 * @node_id: Node ID corresponding to the remote port.
1408 * @port_id: Port ID corresponding to the remote port.
1409 *
1410 * @return: a reference to the remote port on success, NULL on failure.
1411 */
1412static struct msm_ipc_router_remote_port *ipc_router_get_rport_ref(
1413 u32 node_id, u32 port_id)
1414{
1415 struct msm_ipc_router_remote_port *rport_ptr;
1416 struct msm_ipc_routing_table_entry *rt_entry;
1417 int key = (port_id & (RP_HASH_SIZE - 1));
1418
1419 rt_entry = ipc_router_get_rtentry_ref(node_id);
1420 if (!rt_entry) {
1421 IPC_RTR_ERR("%s: Node is not up\n", __func__);
1422 return NULL;
1423 }
1424
1425 down_read(&rt_entry->lock_lha4);
1426 list_for_each_entry(rport_ptr,
1427 &rt_entry->remote_port_list[key], list) {
1428 if (rport_ptr->port_id == port_id) {
1429 kref_get(&rport_ptr->ref);
1430 goto out_lookup_rmt_port1;
1431 }
1432 }
1433 rport_ptr = NULL;
1434out_lookup_rmt_port1:
1435 up_read(&rt_entry->lock_lha4);
1436 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
1437 return rport_ptr;
1438}
1439
1440/**
1441 * ipc_router_create_rport() - Create a remote port
1442 * @node_id: Node ID corresponding to the remote port.
1443 * @port_id: Port ID corresponding to the remote port.
1444 * @xprt_info: XPRT through which the concerned node is reachable.
1445 *
1446 * @return: a reference to the remote port on success, NULL on failure.
1447 */
1448static struct msm_ipc_router_remote_port *ipc_router_create_rport(
1449 u32 node_id, u32 port_id,
1450 struct msm_ipc_router_xprt_info *xprt_info)
1451{
1452 struct msm_ipc_router_remote_port *rport_ptr;
1453 struct msm_ipc_routing_table_entry *rt_entry;
1454 int key = (port_id & (RP_HASH_SIZE - 1));
1455
1456 rt_entry = create_routing_table_entry(node_id, xprt_info);
1457 if (!rt_entry) {
1458 IPC_RTR_ERR("%s: Node cannot be created\n", __func__);
1459 return NULL;
1460 }
1461
1462 down_write(&rt_entry->lock_lha4);
1463 list_for_each_entry(rport_ptr,
1464 &rt_entry->remote_port_list[key], list) {
1465 if (rport_ptr->port_id == port_id)
1466 goto out_create_rmt_port1;
1467 }
1468
1469 rport_ptr = kmalloc(sizeof(*rport_ptr), GFP_KERNEL);
1470 if (!rport_ptr) {
1471 IPC_RTR_ERR("%s: Remote port alloc failed\n", __func__);
1472 goto out_create_rmt_port2;
1473 }
1474 rport_ptr->port_id = port_id;
1475 rport_ptr->node_id = node_id;
1476 rport_ptr->status = VALID;
1477 rport_ptr->sec_rule = NULL;
1478 rport_ptr->server = NULL;
1479 rport_ptr->tx_quota_cnt = 0;
1480 kref_init(&rport_ptr->ref);
1481 mutex_init(&rport_ptr->rport_lock_lhb2);
1482 INIT_LIST_HEAD(&rport_ptr->resume_tx_port_list);
1483 INIT_LIST_HEAD(&rport_ptr->conn_info_list);
1484 list_add_tail(&rport_ptr->list,
1485 &rt_entry->remote_port_list[key]);
1486out_create_rmt_port1:
1487 kref_get(&rport_ptr->ref);
1488out_create_rmt_port2:
1489 up_write(&rt_entry->lock_lha4);
1490 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
1491 return rport_ptr;
1492}
1493
1494/**
1495 * msm_ipc_router_free_resume_tx_port() - Free the resume_tx ports
1496 * @rport_ptr: Pointer to the remote port.
1497 *
1498 * This function deletes all the resume_tx ports associated with a remote port
1499 * and frees the memory allocated to each resume_tx port.
1500 *
1501 * Must be called with rport_ptr->rport_lock_lhb2 locked.
1502 */
1503static void msm_ipc_router_free_resume_tx_port(
1504 struct msm_ipc_router_remote_port *rport_ptr)
1505{
1506 struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
1507
1508 list_for_each_entry_safe(rtx_port, tmp_rtx_port,
1509 &rport_ptr->resume_tx_port_list, list) {
1510 list_del(&rtx_port->list);
1511 kfree(rtx_port);
1512 }
1513}
1514
1515/**
1516 * msm_ipc_router_lookup_resume_tx_port() - Lookup resume_tx port list
1517 * @rport_ptr: Remote port whose resume_tx port list needs to be looked.
1518 * @port_id: Port ID which needs to be looked from the list.
1519 *
1520 * return 1 if the port_id is found in the list, else 0.
1521 *
1522 * This function is used to lookup the existence of a local port in
1523 * remote port's resume_tx list. This function is used to ensure that
1524 * the same port is not added to the remote_port's resume_tx list repeatedly.
1525 *
1526 * Must be called with rport_ptr->rport_lock_lhb2 locked.
1527 */
1528static int msm_ipc_router_lookup_resume_tx_port(
1529 struct msm_ipc_router_remote_port *rport_ptr, u32 port_id)
1530{
1531 struct msm_ipc_resume_tx_port *rtx_port;
1532
1533 list_for_each_entry(rtx_port, &rport_ptr->resume_tx_port_list, list) {
1534 if (port_id == rtx_port->port_id)
1535 return 1;
1536 }
1537 return 0;
1538}
1539
1540/**
1541 * ipc_router_dummy_write_space() - Dummy write space available callback
1542 * @sk: Socket pointer for which the callback is called.
1543 */
1544void ipc_router_dummy_write_space(struct sock *sk)
1545{
1546}
1547
1548/**
1549 * post_resume_tx() - Post the resume_tx event
1550 * @rport_ptr: Pointer to the remote port
1551 * @pkt : The data packet that is received on a resume_tx event
1552 * @msg: Out of band data to be passed to kernel drivers
1553 *
1554 * This function informs about the reception of the resume_tx message from a
1555 * remote port pointed by rport_ptr to all the local ports that are in the
1556 * resume_tx_ports_list of this remote port. On posting the information, this
1557 * function sequentially deletes each entry in the resume_tx_port_list of the
1558 * remote port.
1559 *
1560 * Must be called with rport_ptr->rport_lock_lhb2 locked.
1561 */
1562static void post_resume_tx(struct msm_ipc_router_remote_port *rport_ptr,
1563 struct rr_packet *pkt, union rr_control_msg *msg)
1564{
1565 struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
1566 struct msm_ipc_port *local_port;
1567 struct sock *sk;
1568 void (*write_space)(struct sock *sk) = NULL;
1569
1570 list_for_each_entry_safe(rtx_port, tmp_rtx_port,
1571 &rport_ptr->resume_tx_port_list, list) {
1572 local_port = ipc_router_get_port_ref(rtx_port->port_id);
1573 if (local_port && local_port->notify) {
1574 wake_up(&local_port->port_tx_wait_q);
1575 local_port->notify(IPC_ROUTER_CTRL_CMD_RESUME_TX, msg,
1576 sizeof(*msg), local_port->priv);
1577 } else if (local_port) {
1578 wake_up(&local_port->port_tx_wait_q);
1579 sk = ipc_port_sk(local_port->endpoint);
1580 if (sk) {
1581 read_lock(&sk->sk_callback_lock);
1582 write_space = sk->sk_write_space;
1583 read_unlock(&sk->sk_callback_lock);
1584 }
1585 if (write_space &&
1586 write_space != ipc_router_dummy_write_space)
1587 write_space(sk);
1588 else
1589 post_pkt_to_port(local_port, pkt, 1);
1590 } else {
1591 IPC_RTR_ERR("%s: Local Port %d not Found",
1592 __func__, rtx_port->port_id);
1593 }
1594 if (local_port)
1595 kref_put(&local_port->ref, ipc_router_release_port);
1596 list_del(&rtx_port->list);
1597 kfree(rtx_port);
1598 }
1599}
1600
1601/**
1602 * signal_rport_exit() - Signal the local ports of remote port exit
1603 * @rport_ptr: Remote port that is exiting.
1604 *
1605 * This function is used to signal the local ports that are waiting
1606 * to resume transmission to a remote port that is exiting.
1607 */
1608static void signal_rport_exit(struct msm_ipc_router_remote_port *rport_ptr)
1609{
1610 struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
1611 struct msm_ipc_port *local_port;
1612
1613 mutex_lock(&rport_ptr->rport_lock_lhb2);
1614 rport_ptr->status = RESET;
1615 list_for_each_entry_safe(rtx_port, tmp_rtx_port,
1616 &rport_ptr->resume_tx_port_list, list) {
1617 local_port = ipc_router_get_port_ref(rtx_port->port_id);
1618 if (local_port) {
1619 wake_up(&local_port->port_tx_wait_q);
1620 kref_put(&local_port->ref, ipc_router_release_port);
1621 }
1622 list_del(&rtx_port->list);
1623 kfree(rtx_port);
1624 }
1625 mutex_unlock(&rport_ptr->rport_lock_lhb2);
1626}
1627
1628/**
1629 * ipc_router_release_rport() - Cleanup and release the remote port
1630 * @ref: Reference to the remote port.
1631 *
1632 * This function is called when all references to the remote port are released.
1633 */
1634static void ipc_router_release_rport(struct kref *ref)
1635{
1636 struct msm_ipc_router_remote_port *rport_ptr =
1637 container_of(ref, struct msm_ipc_router_remote_port, ref);
1638
1639 mutex_lock(&rport_ptr->rport_lock_lhb2);
1640 msm_ipc_router_free_resume_tx_port(rport_ptr);
1641 mutex_unlock(&rport_ptr->rport_lock_lhb2);
1642 kfree(rport_ptr);
1643}
1644
1645/**
1646 * ipc_router_destroy_rport() - Destroy the remote port
1647 * @rport_ptr: Pointer to the remote port to be destroyed.
1648 */
1649static void ipc_router_destroy_rport(
1650 struct msm_ipc_router_remote_port *rport_ptr)
1651{
1652 u32 node_id;
1653 struct msm_ipc_routing_table_entry *rt_entry;
1654
1655 if (!rport_ptr)
1656 return;
1657
1658 node_id = rport_ptr->node_id;
1659 rt_entry = ipc_router_get_rtentry_ref(node_id);
1660 if (!rt_entry) {
1661 IPC_RTR_ERR("%s: Node %d is not up\n", __func__, node_id);
1662 return;
1663 }
1664 down_write(&rt_entry->lock_lha4);
1665 list_del(&rport_ptr->list);
1666 up_write(&rt_entry->lock_lha4);
1667 signal_rport_exit(rport_ptr);
1668 kref_put(&rport_ptr->ref, ipc_router_release_rport);
1669 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
1670}
1671
1672/**
1673 * msm_ipc_router_lookup_server() - Lookup server information
1674 * @service: Service ID of the server info to be looked up.
1675 * @instance: Instance ID of the server info to be looked up.
1676 * @node_id: Node/Processor ID in which the server is hosted.
1677 * @port_id: Port ID within the node in which the server is hosted.
1678 *
1679 * @return: If found Pointer to server structure, else NULL.
1680 *
1681 * Note1: Lock the server_list_lock_lha2 before accessing this function.
1682 * Note2: If the <node_id:port_id> are <0:0>, then the lookup is restricted
1683 * to <service:instance>. Used only when a client wants to send a
1684 * message to any QMI server.
1685 */
1686static struct msm_ipc_server *msm_ipc_router_lookup_server(
1687 u32 service,
1688 u32 instance,
1689 u32 node_id,
1690 u32 port_id)
1691{
1692 struct msm_ipc_server *server;
1693 struct msm_ipc_server_port *server_port;
1694 int key = (service & (SRV_HASH_SIZE - 1));
1695
1696 list_for_each_entry(server, &server_list[key], list) {
1697 if ((server->name.service != service) ||
1698 (server->name.instance != instance))
1699 continue;
1700 if ((node_id == 0) && (port_id == 0))
1701 return server;
1702 list_for_each_entry(server_port, &server->server_port_list,
1703 list) {
1704 if ((server_port->server_addr.node_id == node_id) &&
1705 (server_port->server_addr.port_id == port_id))
1706 return server;
1707 }
1708 }
1709 return NULL;
1710}
1711
1712/**
1713 * ipc_router_get_server_ref() - Get reference to the server
1714 * @svc: Service ID for which the reference is required.
1715 * @ins: Instance ID for which the reference is required.
1716 * @node_id: Node/Processor ID in which the server is hosted.
1717 * @port_id: Port ID within the node in which the server is hosted.
1718 *
1719 * @return: If found return reference to server, else NULL.
1720 */
1721static struct msm_ipc_server *ipc_router_get_server_ref(
1722 u32 svc, u32 ins, u32 node_id, u32 port_id)
1723{
1724 struct msm_ipc_server *server;
1725
1726 down_read(&server_list_lock_lha2);
1727 server = msm_ipc_router_lookup_server(svc, ins, node_id, port_id);
1728 if (server)
1729 kref_get(&server->ref);
1730 up_read(&server_list_lock_lha2);
1731 return server;
1732}
1733
1734/**
1735 * ipc_router_release_server() - Cleanup and release the server
1736 * @ref: Reference to the server.
1737 *
1738 * This function is called when all references to the server are released.
1739 */
1740static void ipc_router_release_server(struct kref *ref)
1741{
1742 struct msm_ipc_server *server =
1743 container_of(ref, struct msm_ipc_server, ref);
1744
1745 kfree(server);
1746}
1747
1748/**
1749 * msm_ipc_router_create_server() - Add server info to hash table
1750 * @service: Service ID of the server info to be created.
1751 * @instance: Instance ID of the server info to be created.
1752 * @node_id: Node/Processor ID in which the server is hosted.
1753 * @port_id: Port ID within the node in which the server is hosted.
1754 * @xprt_info: XPRT through which the node hosting the server is reached.
1755 *
1756 * @return: Pointer to server structure on success, else NULL.
1757 *
1758 * This function adds the server info to the hash table. If the same
 1759 * server (i.e. <service_id:instance_id>) is hosted on different nodes, the
 1760 * ports are maintained as a list of "server_port" entries under "server".
1761 */
1762static struct msm_ipc_server *msm_ipc_router_create_server(
1763 u32 service,
1764 u32 instance,
1765 u32 node_id,
1766 u32 port_id,
1767 struct msm_ipc_router_xprt_info *xprt_info)
1768{
1769 struct msm_ipc_server *server = NULL;
1770 struct msm_ipc_server_port *server_port;
1771 struct platform_device *pdev;
1772 int key = (service & (SRV_HASH_SIZE - 1));
1773
1774 down_write(&server_list_lock_lha2);
1775 server = msm_ipc_router_lookup_server(service, instance, 0, 0);
1776 if (server) {
1777 list_for_each_entry(server_port, &server->server_port_list,
1778 list) {
1779 if ((server_port->server_addr.node_id == node_id) &&
1780 (server_port->server_addr.port_id == port_id))
1781 goto return_server;
1782 }
1783 goto create_srv_port;
1784 }
1785
1786 server = kzalloc(sizeof(*server), GFP_KERNEL);
1787 if (!server) {
1788 up_write(&server_list_lock_lha2);
1789 IPC_RTR_ERR("%s: Server allocation failed\n", __func__);
1790 return NULL;
1791 }
1792 server->name.service = service;
1793 server->name.instance = instance;
1794 server->synced_sec_rule = 0;
1795 INIT_LIST_HEAD(&server->server_port_list);
1796 kref_init(&server->ref);
1797 list_add_tail(&server->list, &server_list[key]);
1798 scnprintf(server->pdev_name, sizeof(server->pdev_name),
1799 "SVC%08x:%08x", service, instance);
1800 server->next_pdev_id = 1;
1801
1802create_srv_port:
1803 server_port = kzalloc(sizeof(*server_port), GFP_KERNEL);
1804 pdev = platform_device_alloc(server->pdev_name, server->next_pdev_id);
1805 if (!server_port || !pdev) {
1806 kfree(server_port);
1807 if (pdev)
1808 platform_device_put(pdev);
1809 if (list_empty(&server->server_port_list)) {
1810 list_del(&server->list);
1811 kfree(server);
1812 }
1813 up_write(&server_list_lock_lha2);
1814 IPC_RTR_ERR("%s: Server Port allocation failed\n", __func__);
1815 return NULL;
1816 }
1817 server_port->pdev = pdev;
1818 server_port->server_addr.node_id = node_id;
1819 server_port->server_addr.port_id = port_id;
1820 server_port->xprt_info = xprt_info;
1821 list_add_tail(&server_port->list, &server->server_port_list);
1822 server->next_pdev_id++;
1823 platform_device_add(server_port->pdev);
1824
1825return_server:
1826 /* Add a reference so that the caller can put it back */
1827 kref_get(&server->ref);
1828 up_write(&server_list_lock_lha2);
1829 return server;
1830}
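
/*
 * Illustrative sketch (not part of the driver): every <service:instance>
 * registered above is exposed as a platform device named
 * "SVC<service>:<instance>" (see the scnprintf() above), so a kernel
 * client can be probed on service arrival with an ordinary platform
 * driver. The service/instance values and the probe body are hypothetical.
 */
#if 0
static int example_svc_probe(struct platform_device *pdev)
{
	/* Runs when hypothetical service 0xb, instance 0x1 is published */
	pr_info("%s: %s is now available\n", __func__, pdev->name);
	return 0;
}

static struct platform_driver example_svc_driver = {
	.probe	= example_svc_probe,
	.driver	= {
		.name	= "SVC0000000b:00000001",
	},
};
#endif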
1831
1832/**
1833 * ipc_router_destroy_server_nolock() - Remove server info from hash table
1834 * @server: Server info to be removed.
1835 * @node_id: Node/Processor ID in which the server is hosted.
1836 * @port_id: Port ID within the node in which the server is hosted.
1837 *
1838 * This function removes the server_port identified using <node_id:port_id>
 1839 * from the server structure. If the server_port list under the server
 1840 * structure is empty after removal, the server structure is removed from the
 1841 * server hash table. This function must be called with server_list_lock_lha2 locked.
1842 */
1843static void ipc_router_destroy_server_nolock(struct msm_ipc_server *server,
1844 u32 node_id, u32 port_id)
1845{
1846 struct msm_ipc_server_port *server_port;
1847 bool server_port_found = false;
1848
1849 if (!server)
1850 return;
1851
1852 list_for_each_entry(server_port, &server->server_port_list, list) {
1853 if ((server_port->server_addr.node_id == node_id) &&
1854 (server_port->server_addr.port_id == port_id)) {
1855 server_port_found = true;
1856 break;
1857 }
1858 }
1859 if (server_port_found && server_port) {
1860 platform_device_unregister(server_port->pdev);
1861 list_del(&server_port->list);
1862 kfree(server_port);
1863 }
1864 if (list_empty(&server->server_port_list)) {
1865 list_del(&server->list);
1866 kref_put(&server->ref, ipc_router_release_server);
1867 }
1868}
1869
1870/**
1871 * ipc_router_destroy_server() - Remove server info from hash table
1872 * @server: Server info to be removed.
1873 * @node_id: Node/Processor ID in which the server is hosted.
1874 * @port_id: Port ID within the node in which the server is hosted.
1875 *
1876 * This function removes the server_port identified using <node_id:port_id>
 1877 * from the server structure. If the server_port list under the server
 1878 * structure is empty after removal, the server structure is removed from
 1879 * the server hash table.
1880 */
1881static void ipc_router_destroy_server(struct msm_ipc_server *server,
1882 u32 node_id, u32 port_id)
1883{
1884 down_write(&server_list_lock_lha2);
1885 ipc_router_destroy_server_nolock(server, node_id, port_id);
1886 up_write(&server_list_lock_lha2);
1887}
1888
1889static int ipc_router_send_ctl_msg(
1890 struct msm_ipc_router_xprt_info *xprt_info,
1891 union rr_control_msg *msg,
1892 u32 dst_node_id)
1893{
1894 struct rr_packet *pkt;
1895 struct sk_buff *ipc_rtr_pkt;
1896 struct rr_header_v1 *hdr;
1897 int pkt_size;
1898 void *data;
1899 int ret = -EINVAL;
1900
1901 pkt = create_pkt(NULL);
1902 if (!pkt) {
1903 IPC_RTR_ERR("%s: pkt alloc failed\n", __func__);
1904 return -ENOMEM;
1905 }
1906
1907 pkt_size = IPC_ROUTER_HDR_SIZE + sizeof(*msg);
1908 ipc_rtr_pkt = alloc_skb(pkt_size, GFP_KERNEL);
1909 if (!ipc_rtr_pkt) {
1910 IPC_RTR_ERR("%s: ipc_rtr_pkt alloc failed\n", __func__);
1911 release_pkt(pkt);
1912 return -ENOMEM;
1913 }
1914
1915 skb_reserve(ipc_rtr_pkt, IPC_ROUTER_HDR_SIZE);
1916 data = skb_put(ipc_rtr_pkt, sizeof(*msg));
1917 memcpy(data, msg, sizeof(*msg));
1918 skb_queue_tail(pkt->pkt_fragment_q, ipc_rtr_pkt);
1919 pkt->length = sizeof(*msg);
1920
1921 hdr = &pkt->hdr;
1922 hdr->version = IPC_ROUTER_V1;
1923 hdr->type = msg->cmd;
1924 hdr->src_node_id = IPC_ROUTER_NID_LOCAL;
1925 hdr->src_port_id = IPC_ROUTER_ADDRESS;
1926 hdr->control_flag = 0;
1927 hdr->size = sizeof(*msg);
1928 if (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX ||
1929 (!xprt_info && dst_node_id == IPC_ROUTER_NID_LOCAL))
1930 hdr->dst_node_id = dst_node_id;
1931 else if (xprt_info)
1932 hdr->dst_node_id = xprt_info->remote_node_id;
1933 hdr->dst_port_id = IPC_ROUTER_ADDRESS;
1934
1935 if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
1936 msg->cmd != IPC_ROUTER_CTRL_CMD_RESUME_TX) {
1937 ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
1938 hdr, NULL, NULL);
1939 ret = post_control_ports(pkt);
1940 } else if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
1941 msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX) {
1942 ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
1943 hdr, NULL, NULL);
1944 ret = process_resume_tx_msg(msg, pkt);
1945 } else if (xprt_info && (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO ||
1946 xprt_info->initialized)) {
1947 mutex_lock(&xprt_info->tx_lock_lhb2);
1948 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_TX,
1949 msg, hdr, NULL, NULL);
1950 ret = prepend_header(pkt, xprt_info);
1951 if (ret < 0) {
1952 mutex_unlock(&xprt_info->tx_lock_lhb2);
1953 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
1954 release_pkt(pkt);
1955 return ret;
1956 }
1957
1958 ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
1959 mutex_unlock(&xprt_info->tx_lock_lhb2);
1960 }
1961
1962 release_pkt(pkt);
1963 return ret;
1964}
1965
1966static int
1967msm_ipc_router_send_server_list(u32 node_id,
1968 struct msm_ipc_router_xprt_info *xprt_info)
1969{
1970 union rr_control_msg ctl;
1971 struct msm_ipc_server *server;
1972 struct msm_ipc_server_port *server_port;
1973 int i;
1974
1975 if (!xprt_info || !xprt_info->initialized) {
1976 IPC_RTR_ERR("%s: Xprt info not initialized\n", __func__);
1977 return -EINVAL;
1978 }
1979
1980 memset(&ctl, 0, sizeof(ctl));
1981 ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
1982
1983 for (i = 0; i < SRV_HASH_SIZE; i++) {
1984 list_for_each_entry(server, &server_list[i], list) {
1985 ctl.srv.service = server->name.service;
1986 ctl.srv.instance = server->name.instance;
1987 list_for_each_entry(server_port,
1988 &server->server_port_list, list) {
1989 if (server_port->server_addr.node_id !=
1990 node_id)
1991 continue;
1992
1993 ctl.srv.node_id =
1994 server_port->server_addr.node_id;
1995 ctl.srv.port_id =
1996 server_port->server_addr.port_id;
1997 ipc_router_send_ctl_msg
1998 (xprt_info, &ctl,
1999 IPC_ROUTER_DUMMY_DEST_NODE);
2000 }
2001 }
2002 }
2003
2004 return 0;
2005}
2006
2007static int broadcast_ctl_msg_locally(union rr_control_msg *msg)
2008{
2009 return ipc_router_send_ctl_msg(NULL, msg, IPC_ROUTER_NID_LOCAL);
2010}
2011
2012static int broadcast_ctl_msg(union rr_control_msg *ctl)
2013{
2014 struct msm_ipc_router_xprt_info *xprt_info;
2015
2016 down_read(&xprt_info_list_lock_lha5);
2017 list_for_each_entry(xprt_info, &xprt_info_list, list) {
2018 ipc_router_send_ctl_msg(xprt_info, ctl,
2019 IPC_ROUTER_DUMMY_DEST_NODE);
2020 }
2021 up_read(&xprt_info_list_lock_lha5);
2022 broadcast_ctl_msg_locally(ctl);
2023
2024 return 0;
2025}
2026
2027static int relay_ctl_msg(struct msm_ipc_router_xprt_info *xprt_info,
2028 union rr_control_msg *ctl)
2029{
2030 struct msm_ipc_router_xprt_info *fwd_xprt_info;
2031
2032 if (!xprt_info || !ctl)
2033 return -EINVAL;
2034
2035 down_read(&xprt_info_list_lock_lha5);
2036 list_for_each_entry(fwd_xprt_info, &xprt_info_list, list) {
2037 if (xprt_info->xprt->link_id != fwd_xprt_info->xprt->link_id)
2038 ipc_router_send_ctl_msg(fwd_xprt_info, ctl,
2039 IPC_ROUTER_DUMMY_DEST_NODE);
2040 }
2041 up_read(&xprt_info_list_lock_lha5);
2042
2043 return 0;
2044}
2045
2046static int forward_msg(struct msm_ipc_router_xprt_info *xprt_info,
2047 struct rr_packet *pkt)
2048{
2049 struct rr_header_v1 *hdr;
2050 struct msm_ipc_router_xprt_info *fwd_xprt_info;
2051 struct msm_ipc_routing_table_entry *rt_entry;
2052 int ret = 0;
2053 int fwd_xprt_option;
2054
2055 if (!xprt_info || !pkt)
2056 return -EINVAL;
2057
2058 hdr = &pkt->hdr;
2059 rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
2060 if (!(rt_entry) || !(rt_entry->xprt_info)) {
2061 IPC_RTR_ERR("%s: Routing table not initialized\n", __func__);
2062 ret = -ENODEV;
2063 goto fm_error1;
2064 }
2065
2066 down_read(&rt_entry->lock_lha4);
2067 fwd_xprt_info = rt_entry->xprt_info;
2068 ret = ipc_router_get_xprt_info_ref(fwd_xprt_info);
2069 if (ret < 0) {
2070 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
2071 goto fm_error_xprt;
2072 }
2073 ret = prepend_header(pkt, fwd_xprt_info);
2074 if (ret < 0) {
2075 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
2076 goto fm_error2;
2077 }
2078 fwd_xprt_option = fwd_xprt_info->xprt->get_option(fwd_xprt_info->xprt);
2079 if (!(fwd_xprt_option & FRAG_PKT_WRITE_ENABLE)) {
2080 ret = defragment_pkt(pkt);
2081 if (ret < 0)
2082 goto fm_error2;
2083 }
2084
2085 mutex_lock(&fwd_xprt_info->tx_lock_lhb2);
2086 if (xprt_info->remote_node_id == fwd_xprt_info->remote_node_id) {
2087 IPC_RTR_ERR("%s: Discarding Command to route back\n", __func__);
2088 ret = -EINVAL;
2089 goto fm_error3;
2090 }
2091
2092 if (xprt_info->xprt->link_id == fwd_xprt_info->xprt->link_id) {
2093 IPC_RTR_ERR("%s: DST in the same cluster\n", __func__);
2094 ret = 0;
2095 goto fm_error3;
2096 }
2097 fwd_xprt_info->xprt->write(pkt, pkt->length, fwd_xprt_info->xprt);
2098 IPC_RTR_INFO(fwd_xprt_info->log_ctx,
2099 "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
2100 "FWD", "TX", hdr->size, hdr->type, hdr->control_flag,
2101 hdr->src_node_id, hdr->src_port_id,
2102 hdr->dst_node_id, hdr->dst_port_id);
2103
2104fm_error3:
2105 mutex_unlock(&fwd_xprt_info->tx_lock_lhb2);
2106fm_error2:
2107 ipc_router_put_xprt_info_ref(fwd_xprt_info);
2108fm_error_xprt:
2109 up_read(&rt_entry->lock_lha4);
2110fm_error1:
2111 if (rt_entry)
2112 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2113 return ret;
2114}
2115
2116static int msm_ipc_router_send_remove_client(struct comm_mode_info *mode_info,
2117 u32 node_id, u32 port_id)
2118{
2119 union rr_control_msg msg;
2120 struct msm_ipc_router_xprt_info *tmp_xprt_info;
2121 int mode;
2122 void *xprt_info;
2123 int rc = 0;
2124
2125 if (!mode_info) {
2126 IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
2127 return -EINVAL;
2128 }
2129 mode = mode_info->mode;
2130 xprt_info = mode_info->xprt_info;
2131
2132 memset(&msg, 0, sizeof(msg));
2133 msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
2134 msg.cli.node_id = node_id;
2135 msg.cli.port_id = port_id;
2136
2137 if ((mode == SINGLE_LINK_MODE) && xprt_info) {
2138 down_read(&xprt_info_list_lock_lha5);
2139 list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
2140 if (tmp_xprt_info != xprt_info)
2141 continue;
2142 ipc_router_send_ctl_msg(tmp_xprt_info, &msg,
2143 IPC_ROUTER_DUMMY_DEST_NODE);
2144 break;
2145 }
2146 up_read(&xprt_info_list_lock_lha5);
2147 } else if ((mode == SINGLE_LINK_MODE) && !xprt_info) {
2148 broadcast_ctl_msg_locally(&msg);
2149 } else if (mode == MULTI_LINK_MODE) {
2150 broadcast_ctl_msg(&msg);
2151 } else if (mode != NULL_MODE) {
2152 IPC_RTR_ERR(
2153 "%s: Invalid mode(%d) + xprt_inf(%p) for %08x:%08x\n",
2154 __func__, mode, xprt_info, node_id, port_id);
2155 rc = -EINVAL;
2156 }
2157 return rc;
2158}
2159
2160static void update_comm_mode_info(struct comm_mode_info *mode_info,
2161 struct msm_ipc_router_xprt_info *xprt_info)
2162{
2163 if (!mode_info) {
2164 IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
2165 return;
2166 }
2167
2168 if (mode_info->mode == NULL_MODE) {
2169 mode_info->xprt_info = xprt_info;
2170 mode_info->mode = SINGLE_LINK_MODE;
2171 } else if (mode_info->mode == SINGLE_LINK_MODE &&
2172 mode_info->xprt_info != xprt_info) {
2173 mode_info->mode = MULTI_LINK_MODE;
2174 }
2175}
2176
2177/**
2178 * cleanup_rmt_server() - Cleanup server hosted in the remote port
2179 * @xprt_info: XPRT through which this cleanup event is handled.
2180 * @rport_ptr: Remote port that is being cleaned up.
2181 * @server: Server that is hosted in the remote port.
2182 */
2183static void cleanup_rmt_server(struct msm_ipc_router_xprt_info *xprt_info,
2184 struct msm_ipc_router_remote_port *rport_ptr,
2185 struct msm_ipc_server *server)
2186{
2187 union rr_control_msg ctl;
2188
2189 memset(&ctl, 0, sizeof(ctl));
2190 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
2191 ctl.srv.service = server->name.service;
2192 ctl.srv.instance = server->name.instance;
2193 ctl.srv.node_id = rport_ptr->node_id;
2194 ctl.srv.port_id = rport_ptr->port_id;
2195 if (xprt_info)
2196 relay_ctl_msg(xprt_info, &ctl);
2197 broadcast_ctl_msg_locally(&ctl);
2198 ipc_router_destroy_server_nolock(server, rport_ptr->node_id,
2199 rport_ptr->port_id);
2200}
2201
2202static void cleanup_rmt_ports(struct msm_ipc_router_xprt_info *xprt_info,
2203 struct msm_ipc_routing_table_entry *rt_entry)
2204{
2205 struct msm_ipc_router_remote_port *rport_ptr, *tmp_rport_ptr;
2206 struct msm_ipc_server *server;
2207 union rr_control_msg ctl;
2208 int j;
2209
2210 memset(&ctl, 0, sizeof(ctl));
2211 for (j = 0; j < RP_HASH_SIZE; j++) {
2212 list_for_each_entry_safe(rport_ptr, tmp_rport_ptr,
2213 &rt_entry->remote_port_list[j], list) {
2214 list_del(&rport_ptr->list);
2215 mutex_lock(&rport_ptr->rport_lock_lhb2);
2216 server = rport_ptr->server;
2217 rport_ptr->server = NULL;
2218 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2219 ipc_router_reset_conn(rport_ptr);
2220 if (server) {
2221 cleanup_rmt_server(xprt_info, rport_ptr,
2222 server);
2223 server = NULL;
2224 }
2225
2226 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
2227 ctl.cli.node_id = rport_ptr->node_id;
2228 ctl.cli.port_id = rport_ptr->port_id;
2229 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2230
2231 relay_ctl_msg(xprt_info, &ctl);
2232 broadcast_ctl_msg_locally(&ctl);
2233 }
2234 }
2235}
2236
2237static void msm_ipc_cleanup_routing_table(
2238 struct msm_ipc_router_xprt_info *xprt_info)
2239{
2240 int i;
2241 struct msm_ipc_routing_table_entry *rt_entry, *tmp_rt_entry;
2242
2243 if (!xprt_info) {
2244 IPC_RTR_ERR("%s: Invalid xprt_info\n", __func__);
2245 return;
2246 }
2247
2248 down_write(&server_list_lock_lha2);
2249 down_write(&routing_table_lock_lha3);
2250 for (i = 0; i < RT_HASH_SIZE; i++) {
2251 list_for_each_entry_safe(rt_entry, tmp_rt_entry,
2252 &routing_table[i], list) {
2253 down_write(&rt_entry->lock_lha4);
2254 if (rt_entry->xprt_info != xprt_info) {
2255 up_write(&rt_entry->lock_lha4);
2256 continue;
2257 }
2258 cleanup_rmt_ports(xprt_info, rt_entry);
2259 rt_entry->xprt_info = NULL;
2260 up_write(&rt_entry->lock_lha4);
2261 list_del(&rt_entry->list);
2262 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2263 }
2264 }
2265 up_write(&routing_table_lock_lha3);
2266 up_write(&server_list_lock_lha2);
2267}
2268
2269/**
 2270 * sync_sec_rule() - Synchronize the security rule into the server structure
 2271 * @server: Server structure where the rule has to be synchronized.
 2272 * @rule: Security rule to be synchronized.
2273 *
2274 * This function is used to update the server structure with the security
2275 * rule configured for the <service:instance> corresponding to that server.
2276 */
2277static void sync_sec_rule(struct msm_ipc_server *server, void *rule)
2278{
2279 struct msm_ipc_server_port *server_port;
2280 struct msm_ipc_router_remote_port *rport_ptr = NULL;
2281
2282 list_for_each_entry(server_port, &server->server_port_list, list) {
2283 rport_ptr = ipc_router_get_rport_ref(
2284 server_port->server_addr.node_id,
2285 server_port->server_addr.port_id);
2286 if (!rport_ptr)
2287 continue;
2288 rport_ptr->sec_rule = rule;
2289 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2290 }
2291 server->synced_sec_rule = 1;
2292}
2293
2294/**
2295 * msm_ipc_sync_sec_rule() - Sync the security rule to the service
2296 * @service: Service for which the rule has to be synchronized.
2297 * @instance: Instance for which the rule has to be synchronized.
2298 * @rule: Security rule to be synchronized.
2299 *
 2300 * This function synchronizes the security rule with the server hash table
 2301 * when the user-space script configures the rule after the service has
 2302 * come up. The rule is applied to a specific service and, optionally, to a
 2303 * specific instance.
2304 */
2305void msm_ipc_sync_sec_rule(u32 service, u32 instance, void *rule)
2306{
2307 int key = (service & (SRV_HASH_SIZE - 1));
2308 struct msm_ipc_server *server;
2309
2310 down_write(&server_list_lock_lha2);
2311 list_for_each_entry(server, &server_list[key], list) {
2312 if (server->name.service != service)
2313 continue;
2314
2315 if (server->name.instance != instance &&
2316 instance != ALL_INSTANCE)
2317 continue;
2318
2319 /* If the rule applies to all instances and if the specific
2320 * instance of a service has a rule synchronized already,
2321 * do not apply the rule for that specific instance.
2322 */
2323 if (instance == ALL_INSTANCE && server->synced_sec_rule)
2324 continue;
2325
2326 sync_sec_rule(server, rule);
2327 }
2328 up_write(&server_list_lock_lha2);
2329}
2330
2331/**
2332 * msm_ipc_sync_default_sec_rule() - Default security rule to all services
2333 * @rule: Security rule to be synchronized.
2334 *
 2335 * This function synchronizes the security rule with the server hash table
 2336 * when the user-space script configures the rule after the service has
 2337 * come up. It applies the default security rule, i.e. the rule that applies
 2338 * to all services, to every service that does not have a specific rule
 2339 * defined.
2340 */
2341void msm_ipc_sync_default_sec_rule(void *rule)
2342{
2343 int key;
2344 struct msm_ipc_server *server;
2345
2346 down_write(&server_list_lock_lha2);
2347 for (key = 0; key < SRV_HASH_SIZE; key++) {
2348 list_for_each_entry(server, &server_list[key], list) {
2349 if (server->synced_sec_rule)
2350 continue;
2351
2352 sync_sec_rule(server, rule);
2353 }
2354 }
2355 up_write(&server_list_lock_lha2);
2356}
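
/*
 * Illustrative sketch (not part of the driver): how a security module might
 * push rules that arrive after the corresponding services are already up.
 * The service/instance values and the rule pointers are hypothetical; the
 * rule is an opaque object that is later handed to the port's
 * check_send_permissions() callback.
 */
#if 0
static void example_sync_late_rules(void *rule, void *default_rule)
{
	/* Apply a rule to one specific <service:instance> */
	msm_ipc_sync_sec_rule(0xb, 0x1, rule);

	/* Apply a default rule to every service without its own rule */
	msm_ipc_sync_default_sec_rule(default_rule);
}
#endif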
2357
2358/**
2359 * ipc_router_reset_conn() - Reset the connection to remote port
2360 * @rport_ptr: Pointer to the remote port to be disconnected.
2361 *
2362 * This function is used to reset all the local ports that are connected to
2363 * the remote port being passed.
2364 */
2365static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr)
2366{
2367 struct msm_ipc_port *port_ptr;
2368 struct ipc_router_conn_info *conn_info, *tmp_conn_info;
2369
2370 mutex_lock(&rport_ptr->rport_lock_lhb2);
2371 list_for_each_entry_safe(conn_info, tmp_conn_info,
2372 &rport_ptr->conn_info_list, list) {
2373 port_ptr = ipc_router_get_port_ref(conn_info->port_id);
2374 if (port_ptr) {
2375 mutex_lock(&port_ptr->port_lock_lhc3);
2376 port_ptr->conn_status = CONNECTION_RESET;
2377 mutex_unlock(&port_ptr->port_lock_lhc3);
2378 wake_up(&port_ptr->port_rx_wait_q);
2379 kref_put(&port_ptr->ref, ipc_router_release_port);
2380 }
2381
2382 list_del(&conn_info->list);
2383 kfree(conn_info);
2384 }
2385 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2386}
2387
2388/**
2389 * ipc_router_set_conn() - Set the connection by initializing dest address
2390 * @port_ptr: Local port in which the connection has to be set.
2391 * @addr: Destination address of the connection.
2392 *
2393 * @return: 0 on success, standard Linux error codes on failure.
2394 */
2395int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
2396 struct msm_ipc_addr *addr)
2397{
2398 struct msm_ipc_router_remote_port *rport_ptr;
2399 struct ipc_router_conn_info *conn_info;
2400
2401 if (unlikely(!port_ptr || !addr))
2402 return -EINVAL;
2403
2404 if (addr->addrtype != MSM_IPC_ADDR_ID) {
2405 IPC_RTR_ERR("%s: Invalid Address type\n", __func__);
2406 return -EINVAL;
2407 }
2408
2409 if (port_ptr->type == SERVER_PORT) {
2410 IPC_RTR_ERR("%s: Connection refused on a server port\n",
2411 __func__);
2412 return -ECONNREFUSED;
2413 }
2414
2415 if (port_ptr->conn_status == CONNECTED) {
2416 IPC_RTR_ERR("%s: Port %08x already connected\n",
2417 __func__, port_ptr->this_port.port_id);
2418 return -EISCONN;
2419 }
2420
2421 conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
2422 if (!conn_info) {
2423 IPC_RTR_ERR("%s: Error allocating conn_info\n", __func__);
2424 return -ENOMEM;
2425 }
2426 INIT_LIST_HEAD(&conn_info->list);
2427 conn_info->port_id = port_ptr->this_port.port_id;
2428
2429 rport_ptr = ipc_router_get_rport_ref(addr->addr.port_addr.node_id,
2430 addr->addr.port_addr.port_id);
2431 if (!rport_ptr) {
2432 IPC_RTR_ERR("%s: Invalid remote endpoint\n", __func__);
2433 kfree(conn_info);
2434 return -ENODEV;
2435 }
2436 mutex_lock(&rport_ptr->rport_lock_lhb2);
2437 list_add_tail(&conn_info->list, &rport_ptr->conn_info_list);
2438 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2439
2440 mutex_lock(&port_ptr->port_lock_lhc3);
2441 memcpy(&port_ptr->dest_addr, &addr->addr.port_addr,
2442 sizeof(struct msm_ipc_port_addr));
2443 port_ptr->conn_status = CONNECTED;
2444 mutex_unlock(&port_ptr->port_lock_lhc3);
2445 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2446 return 0;
2447}
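
/*
 * Illustrative sketch (not part of the driver): connecting a local client
 * port to a known remote endpoint through ipc_router_set_conn(). The node
 * and port IDs below are hypothetical; a real caller would normally obtain
 * them from a server lookup.
 */
#if 0
static int example_connect(struct msm_ipc_port *port_ptr)
{
	struct msm_ipc_addr dest = {
		.addrtype = MSM_IPC_ADDR_ID,
		.addr.port_addr.node_id = 1,
		.addr.port_addr.port_id = 0x4000,
	};

	return ipc_router_set_conn(port_ptr, &dest);
}
#endif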
2448
2449/**
2450 * do_version_negotiation() - perform a version negotiation and set the version
2451 * @xprt_info: Pointer to the IPC Router transport info structure.
2452 * @msg: Pointer to the IPC Router HELLO message.
2453 *
 2454 * This function performs the version negotiation by first verifying the
 2455 * computed checksum. If the checksum matches the magic number, it sets the
 2456 * negotiated IPC Router version on the transport.
2457 */
2458static void do_version_negotiation(struct msm_ipc_router_xprt_info *xprt_info,
2459 union rr_control_msg *msg)
2460{
2461 u32 magic;
2462 unsigned int version;
2463
2464 if (!xprt_info)
2465 return;
2466 magic = ipc_router_calc_checksum(msg);
2467 if (magic == IPC_ROUTER_HELLO_MAGIC) {
2468 version = fls(msg->hello.versions & IPC_ROUTER_VER_BITMASK) - 1;
 2469		/* Bits 0 & 31 are reserved for future usage */
2470 if ((version > 0) &&
2471 (version != (sizeof(version) * BITS_PER_BYTE - 1)) &&
2472 xprt_info->xprt->set_version)
2473 xprt_info->xprt->set_version(xprt_info->xprt, version);
2474 }
2475}
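
/*
 * Worked example of the negotiation above (assuming the advertised bits all
 * survive the IPC_ROUTER_VER_BITMASK filter): a HELLO advertising versions
 * 0x0000000e (bits 1-3 set) gives fls(0x0e) = 4, so the negotiated version
 * is 3, i.e. the highest version common to both sides. Versions 0 and 31
 * never qualify because those bits are reserved.
 */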
2476
2477static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
2478 union rr_control_msg *msg,
2479 struct rr_header_v1 *hdr)
2480{
2481 int i, rc = 0;
2482 union rr_control_msg ctl;
2483 struct msm_ipc_routing_table_entry *rt_entry;
2484
2485 if (!hdr)
2486 return -EINVAL;
2487
2488 xprt_info->remote_node_id = hdr->src_node_id;
2489 rt_entry = create_routing_table_entry(hdr->src_node_id, xprt_info);
2490 if (!rt_entry) {
2491 IPC_RTR_ERR("%s: rt_entry allocation failed\n", __func__);
2492 return -ENOMEM;
2493 }
2494 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2495
2496 do_version_negotiation(xprt_info, msg);
2497 /* Send a reply HELLO message */
2498 memset(&ctl, 0, sizeof(ctl));
2499 ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
2500 ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
2501 ctl.hello.versions = (u32)IPC_ROUTER_VER_BITMASK;
2502 ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
2503 rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
2504 IPC_ROUTER_DUMMY_DEST_NODE);
2505 if (rc < 0) {
2506 IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
2507 __func__);
2508 return rc;
2509 }
2510 xprt_info->initialized = 1;
2511
 2512	/* Send the list of servers from the local node and from nodes
 2513	 * outside the mesh network of which this XPRT is a part.
2514 */
2515 down_read(&server_list_lock_lha2);
2516 down_read(&routing_table_lock_lha3);
2517 for (i = 0; i < RT_HASH_SIZE; i++) {
2518 list_for_each_entry(rt_entry, &routing_table[i], list) {
2519 if ((rt_entry->node_id != IPC_ROUTER_NID_LOCAL) &&
2520 (!rt_entry->xprt_info ||
2521 (rt_entry->xprt_info->xprt->link_id ==
2522 xprt_info->xprt->link_id)))
2523 continue;
2524 rc = msm_ipc_router_send_server_list(rt_entry->node_id,
2525 xprt_info);
2526 if (rc < 0) {
2527 up_read(&routing_table_lock_lha3);
2528 up_read(&server_list_lock_lha2);
2529 return rc;
2530 }
2531 }
2532 }
2533 up_read(&routing_table_lock_lha3);
2534 up_read(&server_list_lock_lha2);
2535 return rc;
2536}
2537
2538static int process_resume_tx_msg(union rr_control_msg *msg,
2539 struct rr_packet *pkt)
2540{
2541 struct msm_ipc_router_remote_port *rport_ptr;
2542
2543 rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
2544 msg->cli.port_id);
2545 if (!rport_ptr) {
2546 IPC_RTR_ERR("%s: Unable to resume client\n", __func__);
2547 return -ENODEV;
2548 }
2549 mutex_lock(&rport_ptr->rport_lock_lhb2);
2550 rport_ptr->tx_quota_cnt = 0;
2551 post_resume_tx(rport_ptr, pkt, msg);
2552 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2553 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2554 return 0;
2555}
2556
2557static int process_new_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
2558 union rr_control_msg *msg,
2559 struct rr_packet *pkt)
2560{
2561 struct msm_ipc_routing_table_entry *rt_entry;
2562 struct msm_ipc_server *server;
2563 struct msm_ipc_router_remote_port *rport_ptr;
2564
2565 if (msg->srv.instance == 0) {
2566 IPC_RTR_ERR("%s: Server %08x create rejected, version = 0\n",
2567 __func__, msg->srv.service);
2568 return -EINVAL;
2569 }
2570
2571 rt_entry = ipc_router_get_rtentry_ref(msg->srv.node_id);
2572 if (!rt_entry) {
2573 rt_entry = create_routing_table_entry(msg->srv.node_id,
2574 xprt_info);
2575 if (!rt_entry) {
2576 IPC_RTR_ERR("%s: rt_entry allocation failed\n",
2577 __func__);
2578 return -ENOMEM;
2579 }
2580 }
2581 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2582
2583 /* If the service already exists in the table, create_server returns
2584 * a reference to it.
2585 */
2586 rport_ptr = ipc_router_create_rport(msg->srv.node_id,
2587 msg->srv.port_id, xprt_info);
2588 if (!rport_ptr)
2589 return -ENOMEM;
2590
2591 server = msm_ipc_router_create_server(
2592 msg->srv.service, msg->srv.instance,
2593 msg->srv.node_id, msg->srv.port_id, xprt_info);
2594 if (!server) {
2595 IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
2596 __func__, msg->srv.service, msg->srv.instance);
2597 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2598 ipc_router_destroy_rport(rport_ptr);
2599 return -ENOMEM;
2600 }
2601 mutex_lock(&rport_ptr->rport_lock_lhb2);
2602 rport_ptr->server = server;
2603 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2604 rport_ptr->sec_rule = msm_ipc_get_security_rule(
2605 msg->srv.service, msg->srv.instance);
2606 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2607 kref_put(&server->ref, ipc_router_release_server);
2608
2609 /* Relay the new server message to other subsystems that do not belong
2610 * to the cluster from which this message is received. Notify the
2611 * local clients waiting for this service.
2612 */
2613 relay_ctl_msg(xprt_info, msg);
2614 post_control_ports(pkt);
2615 return 0;
2616}
2617
2618static int process_rmv_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
2619 union rr_control_msg *msg,
2620 struct rr_packet *pkt)
2621{
2622 struct msm_ipc_server *server;
2623 struct msm_ipc_router_remote_port *rport_ptr;
2624
2625 server = ipc_router_get_server_ref(msg->srv.service, msg->srv.instance,
2626 msg->srv.node_id, msg->srv.port_id);
2627 rport_ptr = ipc_router_get_rport_ref(msg->srv.node_id,
2628 msg->srv.port_id);
2629 if (rport_ptr) {
2630 mutex_lock(&rport_ptr->rport_lock_lhb2);
2631 if (rport_ptr->server == server)
2632 rport_ptr->server = NULL;
2633 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2634 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2635 }
2636
2637 if (server) {
2638 kref_put(&server->ref, ipc_router_release_server);
2639 ipc_router_destroy_server(server, msg->srv.node_id,
2640 msg->srv.port_id);
2641 /* Relay the new server message to other subsystems that do not
2642 * belong to the cluster from which this message is received.
2643 * Notify the local clients communicating with the service.
2644 */
2645 relay_ctl_msg(xprt_info, msg);
2646 post_control_ports(pkt);
2647 }
2648 return 0;
2649}
2650
2651static int process_rmv_client_msg(struct msm_ipc_router_xprt_info *xprt_info,
2652 union rr_control_msg *msg,
2653 struct rr_packet *pkt)
2654{
2655 struct msm_ipc_router_remote_port *rport_ptr;
2656 struct msm_ipc_server *server;
2657
2658 rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
2659 msg->cli.port_id);
2660 if (rport_ptr) {
2661 mutex_lock(&rport_ptr->rport_lock_lhb2);
2662 server = rport_ptr->server;
2663 rport_ptr->server = NULL;
2664 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2665 ipc_router_reset_conn(rport_ptr);
2666 down_write(&server_list_lock_lha2);
2667 if (server)
2668 cleanup_rmt_server(NULL, rport_ptr, server);
2669 up_write(&server_list_lock_lha2);
2670 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2671 ipc_router_destroy_rport(rport_ptr);
2672 }
2673
2674 relay_ctl_msg(xprt_info, msg);
2675 post_control_ports(pkt);
2676 return 0;
2677}
2678
2679static int process_control_msg(struct msm_ipc_router_xprt_info *xprt_info,
2680 struct rr_packet *pkt)
2681{
2682 union rr_control_msg *msg;
2683 int rc = 0;
2684 struct rr_header_v1 *hdr;
2685
2686 if (pkt->length != sizeof(*msg)) {
2687 IPC_RTR_ERR("%s: r2r msg size %d != %zu\n", __func__,
2688 pkt->length, sizeof(*msg));
2689 return -EINVAL;
2690 }
2691
2692 hdr = &pkt->hdr;
2693 msg = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, sizeof(*msg));
2694 if (!msg) {
2695 IPC_RTR_ERR("%s: Error extracting control msg\n", __func__);
2696 return -ENOMEM;
2697 }
2698
2699 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX, msg,
2700 hdr, NULL, NULL);
2701
2702 switch (msg->cmd) {
2703 case IPC_ROUTER_CTRL_CMD_HELLO:
2704 rc = process_hello_msg(xprt_info, msg, hdr);
2705 break;
2706 case IPC_ROUTER_CTRL_CMD_RESUME_TX:
2707 rc = process_resume_tx_msg(msg, pkt);
2708 break;
2709 case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
2710 rc = process_new_server_msg(xprt_info, msg, pkt);
2711 break;
2712 case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
2713 rc = process_rmv_server_msg(xprt_info, msg, pkt);
2714 break;
2715 case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
2716 rc = process_rmv_client_msg(xprt_info, msg, pkt);
2717 break;
2718 default:
2719 rc = -EINVAL;
2720 }
2721 kfree(msg);
2722 return rc;
2723}
2724
2725static void do_read_data(struct work_struct *work)
2726{
2727 struct rr_header_v1 *hdr;
2728 struct rr_packet *pkt = NULL;
2729 struct msm_ipc_port *port_ptr;
2730 struct msm_ipc_router_remote_port *rport_ptr;
2731 int ret;
2732
2733 struct msm_ipc_router_xprt_info *xprt_info =
2734 container_of(work,
2735 struct msm_ipc_router_xprt_info,
2736 read_data);
2737
2738 while ((pkt = rr_read(xprt_info)) != NULL) {
2739 if (pkt->length < calc_rx_header_size(xprt_info) ||
2740 pkt->length > MAX_IPC_PKT_SIZE) {
2741 IPC_RTR_ERR("%s: Invalid pkt length %d\n", __func__,
2742 pkt->length);
2743 goto read_next_pkt1;
2744 }
2745
2746 ret = extract_header(pkt);
2747 if (ret < 0)
2748 goto read_next_pkt1;
2749 hdr = &pkt->hdr;
2750
2751 if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
2752 ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
2753 (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
2754 IPC_RTR_INFO(xprt_info->log_ctx,
2755 "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
2756 "FWD", "RX", hdr->size, hdr->type,
2757 hdr->control_flag, hdr->src_node_id,
2758 hdr->src_port_id, hdr->dst_node_id,
2759 hdr->dst_port_id);
2760 forward_msg(xprt_info, pkt);
2761 goto read_next_pkt1;
2762 }
2763
2764 if (hdr->type != IPC_ROUTER_CTRL_CMD_DATA) {
2765 process_control_msg(xprt_info, pkt);
2766 goto read_next_pkt1;
2767 }
2768
2769 port_ptr = ipc_router_get_port_ref(hdr->dst_port_id);
2770 if (!port_ptr) {
2771 IPC_RTR_ERR("%s: No local port id %08x\n", __func__,
2772 hdr->dst_port_id);
2773 goto read_next_pkt1;
2774 }
2775
2776 rport_ptr = ipc_router_get_rport_ref(hdr->src_node_id,
2777 hdr->src_port_id);
2778 if (!rport_ptr) {
2779 rport_ptr = ipc_router_create_rport(hdr->src_node_id,
2780 hdr->src_port_id,
2781 xprt_info);
2782 if (!rport_ptr) {
2783 IPC_RTR_ERR(
2784 "%s: Rmt Prt %08x:%08x create failed\n",
2785 __func__, hdr->src_node_id,
2786 hdr->src_port_id);
2787 goto read_next_pkt2;
2788 }
2789 }
2790
2791 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX,
2792 pkt, hdr, port_ptr, rport_ptr);
2793 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2794 post_pkt_to_port(port_ptr, pkt, 0);
2795 kref_put(&port_ptr->ref, ipc_router_release_port);
2796 continue;
2797read_next_pkt2:
2798 kref_put(&port_ptr->ref, ipc_router_release_port);
2799read_next_pkt1:
2800 release_pkt(pkt);
2801 }
2802}
2803
2804int msm_ipc_router_register_server(struct msm_ipc_port *port_ptr,
2805 struct msm_ipc_addr *name)
2806{
2807 struct msm_ipc_server *server;
2808 union rr_control_msg ctl;
2809 struct msm_ipc_router_remote_port *rport_ptr;
2810
2811 if (!port_ptr || !name)
2812 return -EINVAL;
2813
 2814	if (port_ptr->type != CLIENT_PORT)
2815 return -EINVAL;
2816
 2817	if (name->addrtype != MSM_IPC_ADDR_NAME)
2818 return -EINVAL;
2819
2820 rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
2821 port_ptr->this_port.port_id, NULL);
2822 if (!rport_ptr) {
2823 IPC_RTR_ERR("%s: RPort %08x:%08x creation failed\n", __func__,
2824 IPC_ROUTER_NID_LOCAL, port_ptr->this_port.port_id);
2825 return -ENOMEM;
2826 }
2827
2828 server = msm_ipc_router_create_server(name->addr.port_name.service,
2829 name->addr.port_name.instance,
2830 IPC_ROUTER_NID_LOCAL,
2831 port_ptr->this_port.port_id,
2832 NULL);
2833 if (!server) {
2834 IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
2835 __func__, name->addr.port_name.service,
2836 name->addr.port_name.instance);
2837 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2838 ipc_router_destroy_rport(rport_ptr);
2839 return -ENOMEM;
2840 }
2841
2842 memset(&ctl, 0, sizeof(ctl));
2843 ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
2844 ctl.srv.service = server->name.service;
2845 ctl.srv.instance = server->name.instance;
2846 ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
2847 ctl.srv.port_id = port_ptr->this_port.port_id;
2848 broadcast_ctl_msg(&ctl);
2849 mutex_lock(&port_ptr->port_lock_lhc3);
2850 port_ptr->type = SERVER_PORT;
2851 port_ptr->mode_info.mode = MULTI_LINK_MODE;
2852 port_ptr->port_name.service = server->name.service;
2853 port_ptr->port_name.instance = server->name.instance;
2854 port_ptr->rport_info = rport_ptr;
2855 mutex_unlock(&port_ptr->port_lock_lhc3);
2856 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2857 kref_put(&server->ref, ipc_router_release_server);
2858 return 0;
2859}
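
/*
 * Illustrative sketch (not part of the driver): a kernel client hosting a
 * service. The service/instance values are hypothetical; on success the
 * port becomes a SERVER_PORT and a NEW_SERVER control message is broadcast
 * on all links.
 */
#if 0
static struct msm_ipc_port *example_host_service(void)
{
	struct msm_ipc_port *port_ptr;
	struct msm_ipc_addr name = {
		.addrtype = MSM_IPC_ADDR_NAME,
		.addr.port_name.service = 0xb,
		.addr.port_name.instance = 0x1,
	};

	port_ptr = msm_ipc_router_create_port(NULL, NULL);
	if (!port_ptr)
		return NULL;

	if (msm_ipc_router_register_server(port_ptr, &name) < 0) {
		msm_ipc_router_close_port(port_ptr);
		return NULL;
	}
	return port_ptr;
}
#endif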
2860
2861int msm_ipc_router_unregister_server(struct msm_ipc_port *port_ptr)
2862{
2863 struct msm_ipc_server *server;
2864 union rr_control_msg ctl;
2865 struct msm_ipc_router_remote_port *rport_ptr;
2866
2867 if (!port_ptr)
2868 return -EINVAL;
2869
2870 if (port_ptr->type != SERVER_PORT) {
2871 IPC_RTR_ERR("%s: Trying to unregister a non-server port\n",
2872 __func__);
2873 return -EINVAL;
2874 }
2875
2876 if (port_ptr->this_port.node_id != IPC_ROUTER_NID_LOCAL) {
2877 IPC_RTR_ERR(
2878 "%s: Trying to unregister a remote server locally\n",
2879 __func__);
2880 return -EINVAL;
2881 }
2882
2883 server = ipc_router_get_server_ref(port_ptr->port_name.service,
2884 port_ptr->port_name.instance,
2885 port_ptr->this_port.node_id,
2886 port_ptr->this_port.port_id);
2887 if (!server) {
2888 IPC_RTR_ERR("%s: Server lookup failed\n", __func__);
2889 return -ENODEV;
2890 }
2891
2892 mutex_lock(&port_ptr->port_lock_lhc3);
2893 port_ptr->type = CLIENT_PORT;
2894 rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
2895 mutex_unlock(&port_ptr->port_lock_lhc3);
2896 if (rport_ptr)
2897 ipc_router_reset_conn(rport_ptr);
2898 memset(&ctl, 0, sizeof(ctl));
2899 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
2900 ctl.srv.service = server->name.service;
2901 ctl.srv.instance = server->name.instance;
2902 ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
2903 ctl.srv.port_id = port_ptr->this_port.port_id;
2904 kref_put(&server->ref, ipc_router_release_server);
2905 ipc_router_destroy_server(server, port_ptr->this_port.node_id,
2906 port_ptr->this_port.port_id);
2907 broadcast_ctl_msg(&ctl);
2908 mutex_lock(&port_ptr->port_lock_lhc3);
2909 port_ptr->type = CLIENT_PORT;
2910 mutex_unlock(&port_ptr->port_lock_lhc3);
2911 return 0;
2912}
2913
2914static int loopback_data(struct msm_ipc_port *src,
2915 u32 port_id,
2916 struct rr_packet *pkt)
2917{
2918 struct msm_ipc_port *port_ptr;
2919 struct sk_buff *temp_skb;
2920 int align_size;
2921
2922 if (!pkt) {
2923 IPC_RTR_ERR("%s: Invalid pkt pointer\n", __func__);
2924 return -EINVAL;
2925 }
2926
2927 temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
 2928	if (!temp_skb) {
2929 IPC_RTR_ERR("%s: Empty skb\n", __func__);
2930 return -EINVAL;
2931 }
 2932	align_size = ALIGN_SIZE(pkt->length);
2933 skb_put(temp_skb, align_size);
2934 pkt->length += align_size;
2935
2936 port_ptr = ipc_router_get_port_ref(port_id);
2937 if (!port_ptr) {
2938 IPC_RTR_ERR("%s: Local port %d not present\n", __func__,
2939 port_id);
2940 return -ENODEV;
2941 }
2942 post_pkt_to_port(port_ptr, pkt, 1);
2943 update_comm_mode_info(&src->mode_info, NULL);
2944 kref_put(&port_ptr->ref, ipc_router_release_port);
2945
2946 return pkt->hdr.size;
2947}
2948
2949static int ipc_router_tx_wait(struct msm_ipc_port *src,
2950 struct msm_ipc_router_remote_port *rport_ptr,
2951 u32 *set_confirm_rx,
2952 long timeout)
2953{
2954 struct msm_ipc_resume_tx_port *resume_tx_port;
2955 int ret;
2956
2957 if (unlikely(!src || !rport_ptr))
2958 return -EINVAL;
2959
2960 for (;;) {
2961 mutex_lock(&rport_ptr->rport_lock_lhb2);
2962 if (rport_ptr->status == RESET) {
2963 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2964 IPC_RTR_ERR("%s: RPort %08x:%08x is in reset state\n",
2965 __func__, rport_ptr->node_id,
2966 rport_ptr->port_id);
2967 return -ENETRESET;
2968 }
2969
2970 if (rport_ptr->tx_quota_cnt < IPC_ROUTER_HIGH_RX_QUOTA)
2971 break;
2972
2973 if (msm_ipc_router_lookup_resume_tx_port(
2974 rport_ptr, src->this_port.port_id))
2975 goto check_timeo;
2976
2977 resume_tx_port =
2978 kzalloc(sizeof(struct msm_ipc_resume_tx_port),
2979 GFP_KERNEL);
2980 if (!resume_tx_port) {
2981 IPC_RTR_ERR("%s: Resume_Tx port allocation failed\n",
2982 __func__);
2983 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2984 return -ENOMEM;
2985 }
2986 INIT_LIST_HEAD(&resume_tx_port->list);
2987 resume_tx_port->port_id = src->this_port.port_id;
2988 resume_tx_port->node_id = src->this_port.node_id;
2989 list_add_tail(&resume_tx_port->list,
2990 &rport_ptr->resume_tx_port_list);
2991check_timeo:
2992 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2993 if (!timeout) {
2994 return -EAGAIN;
2995 } else if (timeout < 0) {
2996 ret =
2997 wait_event_interruptible(src->port_tx_wait_q,
2998 (rport_ptr->tx_quota_cnt !=
2999 IPC_ROUTER_HIGH_RX_QUOTA ||
3000 rport_ptr->status == RESET));
3001 if (ret)
3002 return ret;
3003 } else {
3004 ret = wait_event_interruptible_timeout(
3005 src->port_tx_wait_q,
3006 (rport_ptr->tx_quota_cnt !=
3007 IPC_ROUTER_HIGH_RX_QUOTA ||
3008 rport_ptr->status == RESET),
3009 msecs_to_jiffies(timeout));
3010 if (ret < 0) {
3011 return ret;
3012 } else if (ret == 0) {
3013 IPC_RTR_ERR("%s: Resume_tx Timeout %08x:%08x\n",
3014 __func__, rport_ptr->node_id,
3015 rport_ptr->port_id);
3016 return -ETIMEDOUT;
3017 }
3018 }
3019 }
3020 rport_ptr->tx_quota_cnt++;
3021 if (rport_ptr->tx_quota_cnt == IPC_ROUTER_LOW_RX_QUOTA)
3022 *set_confirm_rx = 1;
3023 mutex_unlock(&rport_ptr->rport_lock_lhb2);
3024 return 0;
3025}
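
/*
 * Flow-control summary for the wait above: a remote port accepts at most
 * IPC_ROUTER_HIGH_RX_QUOTA unacknowledged packets. A sender that hits the
 * quota queues itself on the remote port's resume_tx_port_list and sleeps
 * until a RESUME_TX control message (see process_resume_tx_msg()) resets
 * tx_quota_cnt and wakes it. The packet that takes the count to
 * IPC_ROUTER_LOW_RX_QUOTA carries CONTROL_FLAG_CONFIRM_RX, telling the
 * receiver to send RESUME_TX once that packet has been read.
 */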
3026
3027static int
3028msm_ipc_router_write_pkt(struct msm_ipc_port *src,
3029 struct msm_ipc_router_remote_port *rport_ptr,
3030 struct rr_packet *pkt, long timeout)
3031{
3032 struct rr_header_v1 *hdr;
3033 struct msm_ipc_router_xprt_info *xprt_info;
3034 struct msm_ipc_routing_table_entry *rt_entry;
3035 struct sk_buff *temp_skb;
3036 int xprt_option;
3037 int ret;
3038 int align_size;
3039 u32 set_confirm_rx = 0;
3040
3041 if (!rport_ptr || !src || !pkt)
3042 return -EINVAL;
3043
3044 hdr = &pkt->hdr;
3045 hdr->version = IPC_ROUTER_V1;
3046 hdr->type = IPC_ROUTER_CTRL_CMD_DATA;
3047 hdr->src_node_id = src->this_port.node_id;
3048 hdr->src_port_id = src->this_port.port_id;
3049 hdr->size = pkt->length;
3050 hdr->control_flag = 0;
3051 hdr->dst_node_id = rport_ptr->node_id;
3052 hdr->dst_port_id = rport_ptr->port_id;
3053
3054 ret = ipc_router_tx_wait(src, rport_ptr, &set_confirm_rx, timeout);
3055 if (ret < 0)
3056 return ret;
3057 if (set_confirm_rx)
3058 hdr->control_flag |= CONTROL_FLAG_CONFIRM_RX;
3059
3060 if (hdr->dst_node_id == IPC_ROUTER_NID_LOCAL) {
3061 ipc_router_log_msg(local_log_ctx,
3062 IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src,
3063 rport_ptr);
3064 ret = loopback_data(src, hdr->dst_port_id, pkt);
3065 return ret;
3066 }
3067
3068 rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
3069 if (!rt_entry) {
3070 IPC_RTR_ERR("%s: Remote node %d not up\n",
3071 __func__, hdr->dst_node_id);
3072 return -ENODEV;
3073 }
3074 down_read(&rt_entry->lock_lha4);
3075 xprt_info = rt_entry->xprt_info;
3076 ret = ipc_router_get_xprt_info_ref(xprt_info);
3077 if (ret < 0) {
3078 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
3079 up_read(&rt_entry->lock_lha4);
3080 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3081 return ret;
3082 }
3083 ret = prepend_header(pkt, xprt_info);
3084 if (ret < 0) {
3085 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
3086 goto out_write_pkt;
3087 }
3088 xprt_option = xprt_info->xprt->get_option(xprt_info->xprt);
3089 if (!(xprt_option & FRAG_PKT_WRITE_ENABLE)) {
3090 ret = defragment_pkt(pkt);
3091 if (ret < 0)
3092 goto out_write_pkt;
3093 }
3094
3095 temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
 3096	if (!temp_skb) {
3097 IPC_RTR_ERR("%s: Abort invalid pkt\n", __func__);
3098 ret = -EINVAL;
3099 goto out_write_pkt;
3100 }
 3101	align_size = ALIGN_SIZE(pkt->length);
3102 skb_put(temp_skb, align_size);
3103 pkt->length += align_size;
3104 mutex_lock(&xprt_info->tx_lock_lhb2);
3105 ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
3106 mutex_unlock(&xprt_info->tx_lock_lhb2);
3107out_write_pkt:
3108 up_read(&rt_entry->lock_lha4);
3109 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3110
3111 if (ret < 0) {
3112 IPC_RTR_ERR("%s: Write on XPRT failed\n", __func__);
3113 ipc_router_log_msg(xprt_info->log_ctx,
3114 IPC_ROUTER_LOG_EVENT_TX_ERR, pkt, hdr, src,
3115 rport_ptr);
3116
3117 ipc_router_put_xprt_info_ref(xprt_info);
3118 return ret;
3119 }
3120 update_comm_mode_info(&src->mode_info, xprt_info);
3121 ipc_router_log_msg(xprt_info->log_ctx,
3122 IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src, rport_ptr);
3123
3124 ipc_router_put_xprt_info_ref(xprt_info);
3125 return hdr->size;
3126}
3127
3128int msm_ipc_router_send_to(struct msm_ipc_port *src,
3129 struct sk_buff_head *data,
3130 struct msm_ipc_addr *dest,
3131 long timeout)
3132{
3133 u32 dst_node_id = 0, dst_port_id = 0;
3134 struct msm_ipc_server *server;
3135 struct msm_ipc_server_port *server_port;
3136 struct msm_ipc_router_remote_port *rport_ptr = NULL;
3137 struct msm_ipc_router_remote_port *src_rport_ptr = NULL;
3138 struct rr_packet *pkt;
3139 int ret;
3140
3141 if (!src || !data || !dest) {
3142 IPC_RTR_ERR("%s: Invalid Parameters\n", __func__);
3143 return -EINVAL;
3144 }
3145
 3146	/* Resolve Address */
3147 if (dest->addrtype == MSM_IPC_ADDR_ID) {
3148 dst_node_id = dest->addr.port_addr.node_id;
3149 dst_port_id = dest->addr.port_addr.port_id;
3150 } else if (dest->addrtype == MSM_IPC_ADDR_NAME) {
3151 server =
3152 ipc_router_get_server_ref(dest->addr.port_name.service,
3153 dest->addr.port_name.instance,
3154 0, 0);
3155 if (!server) {
3156 IPC_RTR_ERR("%s: Destination not reachable\n",
3157 __func__);
3158 return -ENODEV;
3159 }
3160 server_port = list_first_entry(&server->server_port_list,
3161 struct msm_ipc_server_port,
3162 list);
3163 dst_node_id = server_port->server_addr.node_id;
3164 dst_port_id = server_port->server_addr.port_id;
3165 kref_put(&server->ref, ipc_router_release_server);
3166 }
3167
3168 rport_ptr = ipc_router_get_rport_ref(dst_node_id, dst_port_id);
3169 if (!rport_ptr) {
3170 IPC_RTR_ERR("%s: Remote port not found\n", __func__);
3171 return -ENODEV;
3172 }
3173
3174 if (src->check_send_permissions) {
3175 ret = src->check_send_permissions(rport_ptr->sec_rule);
3176 if (ret <= 0) {
3177 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3178 IPC_RTR_ERR("%s: permission failure for %s\n",
3179 __func__, current->comm);
3180 return -EPERM;
3181 }
3182 }
3183
3184 if (dst_node_id == IPC_ROUTER_NID_LOCAL && !src->rport_info) {
3185 src_rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
3186 src->this_port.port_id,
3187 NULL);
3188 if (!src_rport_ptr) {
3189 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3190 IPC_RTR_ERR("%s: RPort creation failed\n", __func__);
3191 return -ENOMEM;
3192 }
3193 mutex_lock(&src->port_lock_lhc3);
3194 src->rport_info = src_rport_ptr;
3195 mutex_unlock(&src->port_lock_lhc3);
3196 kref_put(&src_rport_ptr->ref, ipc_router_release_rport);
3197 }
3198
3199 pkt = create_pkt(data);
3200 if (!pkt) {
3201 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3202 IPC_RTR_ERR("%s: Pkt creation failed\n", __func__);
3203 return -ENOMEM;
3204 }
3205
3206 ret = msm_ipc_router_write_pkt(src, rport_ptr, pkt, timeout);
3207 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3208 if (ret < 0)
3209 pkt->pkt_fragment_q = NULL;
3210 release_pkt(pkt);
3211
3212 return ret;
3213}
3214
3215int msm_ipc_router_send_msg(struct msm_ipc_port *src,
3216 struct msm_ipc_addr *dest,
3217 void *data, unsigned int data_len)
3218{
3219 struct sk_buff_head *out_skb_head;
3220 int ret;
3221
3222 out_skb_head = msm_ipc_router_buf_to_skb(data, data_len);
3223 if (!out_skb_head) {
3224 IPC_RTR_ERR("%s: SKB conversion failed\n", __func__);
3225 return -EFAULT;
3226 }
3227
3228 ret = msm_ipc_router_send_to(src, out_skb_head, dest, 0);
3229 if (ret < 0) {
3230 if (ret != -EAGAIN)
3231 IPC_RTR_ERR(
3232 "%s: msm_ipc_router_send_to failed - ret: %d\n",
3233 __func__, ret);
3234 msm_ipc_router_free_skb(out_skb_head);
3235 return ret;
3236 }
3237 return 0;
3238}
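
/*
 * Illustrative sketch (not part of the driver): sending a request to a
 * service by name through msm_ipc_router_send_msg(). The service/instance
 * values and the payload are hypothetical.
 */
#if 0
static int example_send_request(struct msm_ipc_port *port_ptr)
{
	u8 req[4] = { 0x01, 0x00, 0x00, 0x00 };
	struct msm_ipc_addr dest = {
		.addrtype = MSM_IPC_ADDR_NAME,
		.addr.port_name.service = 0xb,
		.addr.port_name.instance = 0x1,
	};

	/* Timeout of 0 underneath: returns -EAGAIN if the remote is throttled */
	return msm_ipc_router_send_msg(port_ptr, &dest, req, sizeof(req));
}
#endif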
3239
3240/**
3241 * msm_ipc_router_send_resume_tx() - Send Resume_Tx message
3242 * @data: Pointer to received data packet that has confirm_rx bit set
3243 *
3244 * @return: On success, number of bytes transferred is returned, else
3245 * standard linux error code is returned.
3246 *
3247 * This function sends the Resume_Tx event to the remote node that
 3248 * sent the data with the confirm_rx field set. In a multi-hop scenario,
 3249 * this function also ensures that the resume_tx event is addressed to the
 3250 * correct destination node_id.
3251 */
3252static int msm_ipc_router_send_resume_tx(void *data)
3253{
3254 union rr_control_msg msg;
3255 struct rr_header_v1 *hdr = (struct rr_header_v1 *)data;
3256 struct msm_ipc_routing_table_entry *rt_entry;
3257 int ret;
3258
3259 memset(&msg, 0, sizeof(msg));
3260 msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
3261 msg.cli.node_id = hdr->dst_node_id;
3262 msg.cli.port_id = hdr->dst_port_id;
3263 rt_entry = ipc_router_get_rtentry_ref(hdr->src_node_id);
3264 if (!rt_entry) {
3265 IPC_RTR_ERR("%s: %d Node is not present", __func__,
3266 hdr->src_node_id);
3267 return -ENODEV;
3268 }
3269 ret = ipc_router_get_xprt_info_ref(rt_entry->xprt_info);
3270 if (ret < 0) {
3271 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
3272 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3273 return ret;
3274 }
3275 ret = ipc_router_send_ctl_msg(rt_entry->xprt_info, &msg,
3276 hdr->src_node_id);
3277 ipc_router_put_xprt_info_ref(rt_entry->xprt_info);
3278 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3279 if (ret < 0)
3280 IPC_RTR_ERR(
3281 "%s: Send Resume_Tx Failed SRC_NODE: %d SRC_PORT: %d DEST_NODE: %d",
3282 __func__, hdr->dst_node_id, hdr->dst_port_id,
3283 hdr->src_node_id);
3284
3285 return ret;
3286}
3287
3288int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
3289 struct rr_packet **read_pkt,
3290 size_t buf_len)
3291{
3292 struct rr_packet *pkt;
3293
3294 if (!port_ptr || !read_pkt)
3295 return -EINVAL;
3296
3297 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3298 if (list_empty(&port_ptr->port_rx_q)) {
3299 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3300 return -EAGAIN;
3301 }
3302
3303 pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet, list);
3304 if ((buf_len) && (pkt->hdr.size > buf_len)) {
3305 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3306 return -ETOOSMALL;
3307 }
3308 list_del(&pkt->list);
3309 if (list_empty(&port_ptr->port_rx_q))
3310 __pm_relax(port_ptr->port_rx_ws);
3311 *read_pkt = pkt;
3312 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3313 if (pkt->hdr.control_flag & CONTROL_FLAG_CONFIRM_RX)
3314 msm_ipc_router_send_resume_tx(&pkt->hdr);
3315
3316 return pkt->length;
3317}
3318
3319/**
3320 * msm_ipc_router_rx_data_wait() - Wait for new message destined to a local
3321 * port.
3322 * @port_ptr: Pointer to the local port
3323 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
3324 * > 0 timeout indicates the wait time.
3325 * 0 indicates that we do not wait.
3326 * @return: 0 if there are pending messages to read,
3327 * standard Linux error code otherwise.
3328 *
3329 * Checks for the availability of messages that are destined to a local port.
3330 * If no messages are present then waits as per @timeout.
3331 */
3332int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout)
3333{
3334 int ret = 0;
3335
3336 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3337 while (list_empty(&port_ptr->port_rx_q)) {
3338 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3339 if (timeout < 0) {
3340 ret = wait_event_interruptible(
3341 port_ptr->port_rx_wait_q,
3342 !list_empty(&port_ptr->port_rx_q));
3343 if (ret)
3344 return ret;
3345 } else if (timeout > 0) {
3346 timeout = wait_event_interruptible_timeout(
3347 port_ptr->port_rx_wait_q,
3348 !list_empty(&port_ptr->port_rx_q),
3349 timeout);
3350 if (timeout < 0)
3351 return -EFAULT;
3352 }
3353 if (timeout == 0)
3354 return -ENOMSG;
3355 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3356 }
3357 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3358
3359 return ret;
3360}
3361
3362/**
3363 * msm_ipc_router_recv_from() - Receive messages destined to a local port.
3364 * @port_ptr: Pointer to the local port
3365 * @pkt : Pointer to the router-to-router packet
3366 * @src: Pointer to local port address
3367 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
3368 * > 0 timeout indicates the wait time.
3369 * 0 indicates that we do not wait.
 3370 * @return: = Number of bytes read (on a successful read operation).
3371 * = -ENOMSG (If there are no pending messages and timeout is 0).
3372 * = -EINVAL (If either of the arguments, port_ptr or data is invalid)
3373 * = -EFAULT (If there are no pending messages when timeout is > 0
 3374 * and wait_event_interruptible_timeout() has returned a value < 0)
3375 * = -ERESTARTSYS (If there are no pending messages when timeout
3376 * is < 0 and wait_event_interruptible was interrupted by a signal)
3377 *
3378 * This function reads the messages that are destined for a local port. It
3379 * is used by modules that exist with-in the kernel and use IPC Router for
3380 * transport. The function checks if there are any messages that are already
3381 * received. If yes, it reads them, else it waits as per the timeout value.
3382 * On a successful read, the return value of the function indicates the number
3383 * of bytes that are read.
3384 */
3385int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
3386 struct rr_packet **pkt,
3387 struct msm_ipc_addr *src,
3388 long timeout)
3389{
3390 int ret, data_len, align_size;
3391 struct sk_buff *temp_skb;
3392 struct rr_header_v1 *hdr = NULL;
3393
3394 if (!port_ptr || !pkt) {
3395 IPC_RTR_ERR("%s: Invalid pointers being passed\n", __func__);
3396 return -EINVAL;
3397 }
3398
3399 *pkt = NULL;
3400
3401 ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
3402 if (ret)
3403 return ret;
3404
3405 ret = msm_ipc_router_read(port_ptr, pkt, 0);
3406 if (ret <= 0 || !(*pkt))
3407 return ret;
3408
3409 hdr = &((*pkt)->hdr);
3410 if (src) {
3411 src->addrtype = MSM_IPC_ADDR_ID;
3412 src->addr.port_addr.node_id = hdr->src_node_id;
3413 src->addr.port_addr.port_id = hdr->src_port_id;
3414 }
3415
3416 data_len = hdr->size;
3417 align_size = ALIGN_SIZE(data_len);
3418 if (align_size) {
3419 temp_skb = skb_peek_tail((*pkt)->pkt_fragment_q);
 3420		if (temp_skb)
3421 skb_trim(temp_skb, (temp_skb->len - align_size));
 3422	}
3423 return data_len;
3424}
3425
3426int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
3427 struct msm_ipc_addr *src,
3428 unsigned char **data,
3429 unsigned int *len)
3430{
3431 struct rr_packet *pkt;
3432 int ret;
3433
3434 ret = msm_ipc_router_recv_from(port_ptr, &pkt, src, 0);
3435 if (ret < 0) {
3436 if (ret != -ENOMSG)
3437 IPC_RTR_ERR(
3438 "%s: msm_ipc_router_recv_from failed - ret: %d\n",
3439 __func__, ret);
3440 return ret;
3441 }
3442
3443 *data = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, ret);
3444 if (!(*data)) {
3445 IPC_RTR_ERR("%s: Buf conversion failed\n", __func__);
3446 release_pkt(pkt);
3447 return -ENOMEM;
3448 }
3449
3450 *len = ret;
3451 release_pkt(pkt);
3452 return 0;
3453}
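
/*
 * Illustrative sketch (not part of the driver): draining a port with
 * msm_ipc_router_read_msg(). On success the returned buffer is owned by
 * the caller; it is released with kfree() here on the assumption that
 * msm_ipc_router_skb_to_buf() kmalloc'ed it. The handler name is
 * hypothetical.
 */
#if 0
static void example_drain_port(struct msm_ipc_port *port_ptr)
{
	struct msm_ipc_addr src;
	unsigned char *data;
	unsigned int len;

	/* -ENOMSG simply means the receive queue is empty right now */
	while (!msm_ipc_router_read_msg(port_ptr, &src, &data, &len)) {
		pr_info("%s: %u bytes from <0x%x:0x%x>\n", __func__, len,
			src.addr.port_addr.node_id,
			src.addr.port_addr.port_id);
		kfree(data);
	}
}
#endif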
3454
3455/**
 3456 * msm_ipc_router_create_port() - Create an IPC Router port/endpoint
 3457 * @notify: Callback function to notify any event on the port. It is called
 3458 * with the event ID, any out-of-band data associated with the
 3459 * event, the size of that out-of-band data (if valid), and the
 3460 * private data registered during port creation.
 3461 * @priv: Private info to be passed back when the notification is
 3462 * generated.
3463 *
3464 * @return: Pointer to the port on success, NULL on error.
3465 */
3466struct msm_ipc_port *msm_ipc_router_create_port(
3467 void (*notify)(unsigned int event, void *oob_data,
3468 size_t oob_data_len, void *priv),
3469 void *priv)
3470{
3471 struct msm_ipc_port *port_ptr;
3472 int ret;
3473
3474 ret = ipc_router_core_init();
3475 if (ret < 0) {
3476 IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
3477 __func__, ret);
3478 return NULL;
3479 }
3480
3481 port_ptr = msm_ipc_router_create_raw_port(NULL, notify, priv);
3482 if (!port_ptr)
3483 IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
3484
3485 return port_ptr;
3486}
3487
3488int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
3489{
3490 union rr_control_msg msg;
3491 struct msm_ipc_server *server;
3492 struct msm_ipc_router_remote_port *rport_ptr;
3493
3494 if (!port_ptr)
3495 return -EINVAL;
3496
3497 if (port_ptr->type == SERVER_PORT || port_ptr->type == CLIENT_PORT) {
3498 down_write(&local_ports_lock_lhc2);
3499 list_del(&port_ptr->list);
3500 up_write(&local_ports_lock_lhc2);
3501
3502 mutex_lock(&port_ptr->port_lock_lhc3);
3503 rport_ptr = (struct msm_ipc_router_remote_port *)
3504 port_ptr->rport_info;
3505 port_ptr->rport_info = NULL;
3506 mutex_unlock(&port_ptr->port_lock_lhc3);
3507 if (rport_ptr) {
3508 ipc_router_reset_conn(rport_ptr);
3509 ipc_router_destroy_rport(rport_ptr);
3510 }
3511
3512 if (port_ptr->type == SERVER_PORT) {
3513 memset(&msg, 0, sizeof(msg));
3514 msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
3515 msg.srv.service = port_ptr->port_name.service;
3516 msg.srv.instance = port_ptr->port_name.instance;
3517 msg.srv.node_id = port_ptr->this_port.node_id;
3518 msg.srv.port_id = port_ptr->this_port.port_id;
3519 broadcast_ctl_msg(&msg);
3520 }
3521
3522 /* Server port could have been a client port earlier.
3523 * Send REMOVE_CLIENT message in either case.
3524 */
3525 msm_ipc_router_send_remove_client(&port_ptr->mode_info,
3526 port_ptr->this_port.node_id,
3527 port_ptr->this_port.port_id);
3528 } else if (port_ptr->type == CONTROL_PORT) {
3529 down_write(&control_ports_lock_lha5);
3530 list_del(&port_ptr->list);
3531 up_write(&control_ports_lock_lha5);
3532 } else if (port_ptr->type == IRSC_PORT) {
3533 down_write(&local_ports_lock_lhc2);
3534 list_del(&port_ptr->list);
3535 up_write(&local_ports_lock_lhc2);
3536 signal_irsc_completion();
3537 }
3538
3539 if (port_ptr->type == SERVER_PORT) {
3540 server = ipc_router_get_server_ref(
3541 port_ptr->port_name.service,
3542 port_ptr->port_name.instance,
3543 port_ptr->this_port.node_id,
3544 port_ptr->this_port.port_id);
3545 if (server) {
3546 kref_put(&server->ref, ipc_router_release_server);
3547 ipc_router_destroy_server(server,
3548 port_ptr->this_port.node_id,
3549 port_ptr->this_port.port_id);
3550 }
3551 }
3552
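	/* Drop any remote-port reference that is still attached; for SERVER
	 * and CLIENT ports this was already cleared and destroyed above.
	 */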
3553 mutex_lock(&port_ptr->port_lock_lhc3);
3554 rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
3555 port_ptr->rport_info = NULL;
3556 mutex_unlock(&port_ptr->port_lock_lhc3);
3557 if (rport_ptr)
3558 ipc_router_destroy_rport(rport_ptr);
3559
3560 kref_put(&port_ptr->ref, ipc_router_release_port);
3561 return 0;
3562}
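
/*
 * Usage sketch (illustrative only; "my_notify", "my_ctx" and "port" are
 * hypothetical names, not part of this driver): kernel clients pair
 * msm_ipc_router_create_port() with msm_ipc_router_close_port():
 *
 *	static void my_notify(unsigned int event, void *oob_data,
 *			      size_t oob_data_len, void *priv)
 *	{
 *		...handle port events here...
 *	}
 *
 *	port = msm_ipc_router_create_port(my_notify, my_ctx);
 *	if (!port)
 *		...bail out...
 *	...
 *	msm_ipc_router_close_port(port);
 */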
3563
3564int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr)
3565{
3566 struct rr_packet *pkt;
3567 int rc = 0;
3568
3569 if (!port_ptr)
3570 return -EINVAL;
3571
3572 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3573 if (!list_empty(&port_ptr->port_rx_q)) {
3574 pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet,
3575 list);
3576 rc = pkt->hdr.size;
3577 }
3578 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3579
3580 return rc;
3581}
3582
3583int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr)
3584{
3585 if (unlikely(!port_ptr || port_ptr->type != CLIENT_PORT))
3586 return -EINVAL;
3587
3588 down_write(&local_ports_lock_lhc2);
3589 list_del(&port_ptr->list);
3590 up_write(&local_ports_lock_lhc2);
3591 port_ptr->type = CONTROL_PORT;
3592 down_write(&control_ports_lock_lha5);
3593 list_add_tail(&port_ptr->list, &control_ports);
3594 up_write(&control_ports_lock_lha5);
3595
3596 return 0;
3597}
3598
3599int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
3600 struct msm_ipc_server_info *srv_info,
3601 int num_entries_in_array, u32 lookup_mask)
3602{
3603 struct msm_ipc_server *server;
3604 struct msm_ipc_server_port *server_port;
3605 int key, i = 0; /* number of matching entries found */
3606
3607 if (!srv_name) {
3608 IPC_RTR_ERR("%s: Invalid srv_name\n", __func__);
3609 return -EINVAL;
3610 }
3611
3612 if (num_entries_in_array && !srv_info) {
3613 IPC_RTR_ERR("%s: srv_info NULL\n", __func__);
3614 return -EINVAL;
3615 }
3616
3617 down_read(&server_list_lock_lha2);
3618 key = (srv_name->service & (SRV_HASH_SIZE - 1));
3619 list_for_each_entry(server, &server_list[key], list) {
3620 if ((server->name.service != srv_name->service) ||
3621 ((server->name.instance & lookup_mask) !=
3622 srv_name->instance))
3623 continue;
3624
3625 list_for_each_entry(server_port, &server->server_port_list,
3626 list) {
3627 if (i < num_entries_in_array) {
3628 srv_info[i].node_id =
3629 server_port->server_addr.node_id;
3630 srv_info[i].port_id =
3631 server_port->server_addr.port_id;
3632 srv_info[i].service = server->name.service;
3633 srv_info[i].instance = server->name.instance;
3634 }
3635 i++;
3636 }
3637 }
3638 up_read(&server_list_lock_lha2);
3639
3640 return i;
3641}
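
/*
 * Illustrative two-pass lookup (hypothetical caller; "n", "srv" and "mask"
 * are placeholder names): the return value is the total number of matching
 * servers while at most num_entries_in_array entries are copied out, so a
 * caller can size its array first and then fetch the entries. The count can
 * change between the two calls, so the second return value is authoritative:
 *
 *	int n = msm_ipc_router_lookup_server_name(&name, NULL, 0, mask);
 *	if (n > 0) {
 *		srv = kcalloc(n, sizeof(*srv), GFP_KERNEL);
 *		if (srv)
 *			n = msm_ipc_router_lookup_server_name(&name, srv,
 *							      n, mask);
 *	}
 */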
3642
3643int msm_ipc_router_close(void)
3644{
3645 struct msm_ipc_router_xprt_info *xprt_info, *tmp_xprt_info;
3646
3647 down_write(&xprt_info_list_lock_lha5);
3648 list_for_each_entry_safe(xprt_info, tmp_xprt_info,
3649 &xprt_info_list, list) {
3650 xprt_info->xprt->close(xprt_info->xprt);
3651 list_del(&xprt_info->list);
3652 kfree(xprt_info);
3653 }
3654 up_write(&xprt_info_list_lock_lha5);
3655 return 0;
3656}
3657
3658/**
3659 * pil_vote_load_worker() - Process vote to load the modem
3660 *
3661 * @work: Work item to process
3662 *
3663 * This function is called to process votes to load the modem that have been
3664 * queued by msm_ipc_load_default_node().
3665 */
3666static void pil_vote_load_worker(struct work_struct *work)
3667{
3668 struct pil_vote_info *vote_info;
3669
3670 vote_info = container_of(work, struct pil_vote_info, load_work);
3671 if (strlen(default_peripheral)) {
3672 vote_info->pil_handle = subsystem_get(default_peripheral);
3673 if (IS_ERR(vote_info->pil_handle)) {
3674 IPC_RTR_ERR("%s: Failed to load %s\n",
3675 __func__, default_peripheral);
3676 vote_info->pil_handle = NULL;
3677 }
3678 } else {
3679 vote_info->pil_handle = NULL;
3680 }
3681}
3682
3683/**
3684 * pil_vote_unload_worker() - Process vote to unload the modem
3685 *
3686 * @work: Work item to process
3687 *
3688 * This function is called to process votes to unload the modem that have been
3689 * queued by msm_ipc_unload_default_node().
3690 */
3691static void pil_vote_unload_worker(struct work_struct *work)
3692{
3693 struct pil_vote_info *vote_info;
3694
3695 vote_info = container_of(work, struct pil_vote_info, unload_work);
3696
3697 if (vote_info->pil_handle) {
3698 subsystem_put(vote_info->pil_handle);
3699 vote_info->pil_handle = NULL;
3700 }
3701 kfree(vote_info);
3702}
3703
3704/**
3705 * msm_ipc_load_default_node() - Queue a vote to load the modem.
3706 *
3707 * @return: PIL vote info structure on success, NULL on failure.
3708 *
3709 * This function places a work item that loads the modem on the
3710 * single-threaded workqueue used for processing PIL votes to load
3711 * or unload the modem.
3712 */
3713void *msm_ipc_load_default_node(void)
3714{
3715 struct pil_vote_info *vote_info;
3716
3717 vote_info = kmalloc(sizeof(*vote_info), GFP_KERNEL);
3718 if (!vote_info)
3719 return vote_info;
3720
3721 INIT_WORK(&vote_info->load_work, pil_vote_load_worker);
3722 queue_work(msm_ipc_router_workqueue, &vote_info->load_work);
3723
3724 return vote_info;
3725}
3726
3727/**
3728 * msm_ipc_unload_default_node() - Queue a vote to unload the modem.
3729 *
3730 * @pil_vote: PIL vote info structure, containing the PIL handle
3731 * and work structure.
3732 *
3733 * This function places a work item that unloads the modem on the
3734 * single-threaded workqueue used for processing PIL votes to load
3735 * or unload the modem.
3736 */
3737void msm_ipc_unload_default_node(void *pil_vote)
3738{
3739 struct pil_vote_info *vote_info;
3740
3741 if (pil_vote) {
3742 vote_info = (struct pil_vote_info *)pil_vote;
3743 INIT_WORK(&vote_info->unload_work, pil_vote_unload_worker);
3744 queue_work(msm_ipc_router_workqueue, &vote_info->unload_work);
3745 }
3746}
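
/*
 * Illustrative pairing (hypothetical caller): a client that needs the modem
 * loaded for the duration of a transaction votes and un-votes as follows.
 * msm_ipc_unload_default_node() tolerates a NULL vote, so the sequence is
 * safe even if the load-vote allocation failed:
 *
 *	void *pil_vote = msm_ipc_load_default_node();
 *	...
 *	msm_ipc_unload_default_node(pil_vote);
 */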
3747
3748#if defined(CONFIG_DEBUG_FS)
3749static void dump_routing_table(struct seq_file *s)
3750{
3751 int j;
3752 struct msm_ipc_routing_table_entry *rt_entry;
3753
3754 seq_printf(s, "%-10s|%-20s|%-10s|\n", "Node Id", "XPRT Name",
3755 "Next Hop");
3756 seq_puts(s, "----------------------------------------------\n");
3757 for (j = 0; j < RT_HASH_SIZE; j++) {
3758 down_read(&routing_table_lock_lha3);
3759 list_for_each_entry(rt_entry, &routing_table[j], list) {
3760 down_read(&rt_entry->lock_lha4);
3761 seq_printf(s, "0x%08x|", rt_entry->node_id);
3762 if (rt_entry->node_id == IPC_ROUTER_NID_LOCAL)
3763 seq_printf(s, "%-20s|0x%08x|\n", "Loopback",
3764 rt_entry->node_id);
3765 else
3766 seq_printf(s, "%-20s|0x%08x|\n",
3767 rt_entry->xprt_info->xprt->name,
3768 rt_entry->node_id);
3769 up_read(&rt_entry->lock_lha4);
3770 }
3771 up_read(&routing_table_lock_lha3);
3772 }
3773}
3774
3775static void dump_xprt_info(struct seq_file *s)
3776{
3777 struct msm_ipc_router_xprt_info *xprt_info;
3778
3779 seq_printf(s, "%-20s|%-10s|%-12s|%-15s|\n", "XPRT Name", "Link ID",
3780 "Initialized", "Remote Node Id");
3781 seq_puts(s, "------------------------------------------------------------\n");
3782 down_read(&xprt_info_list_lock_lha5);
3783 list_for_each_entry(xprt_info, &xprt_info_list, list)
3784 seq_printf(s, "%-20s|0x%08x|%-12s|0x%08x|\n",
3785 xprt_info->xprt->name, xprt_info->xprt->link_id,
3786 (xprt_info->initialized ? "Y" : "N"),
3787 xprt_info->remote_node_id);
3788 up_read(&xprt_info_list_lock_lha5);
3789}
3790
3791static void dump_servers(struct seq_file *s)
3792{
3793 int j;
3794 struct msm_ipc_server *server;
3795 struct msm_ipc_server_port *server_port;
3796
3797 seq_printf(s, "%-11s|%-11s|%-11s|%-11s|\n", "Service", "Instance",
3798 "Node_id", "Port_id");
3799 seq_puts(s, "------------------------------------------------------------\n");
3800 down_read(&server_list_lock_lha2);
3801 for (j = 0; j < SRV_HASH_SIZE; j++) {
3802 list_for_each_entry(server, &server_list[j], list) {
3803 list_for_each_entry(server_port,
3804 &server->server_port_list,
3805 list)
3806 seq_printf(s, "0x%08x |0x%08x |0x%08x |0x%08x |\n",
3807 server->name.service,
3808 server->name.instance,
3809 server_port->server_addr.node_id,
3810 server_port->server_addr.port_id);
3811 }
3812 }
3813 up_read(&server_list_lock_lha2);
3814}
3815
3816static void dump_remote_ports(struct seq_file *s)
3817{
3818 int j, k;
3819 struct msm_ipc_router_remote_port *rport_ptr;
3820 struct msm_ipc_routing_table_entry *rt_entry;
3821
3822 seq_printf(s, "%-11s|%-11s|%-10s|\n", "Node_id", "Port_id",
3823 "Quota_cnt");
3824 seq_puts(s, "------------------------------------------------------------\n");
3825 for (j = 0; j < RT_HASH_SIZE; j++) {
3826 down_read(&routing_table_lock_lha3);
3827 list_for_each_entry(rt_entry, &routing_table[j], list) {
3828 down_read(&rt_entry->lock_lha4);
3829 for (k = 0; k < RP_HASH_SIZE; k++) {
3830 list_for_each_entry
3831 (rport_ptr,
3832 &rt_entry->remote_port_list[k],
3833 list)
3834 seq_printf(s, "0x%08x |0x%08x |0x%08x|\n",
3835 rport_ptr->node_id,
3836 rport_ptr->port_id,
3837 rport_ptr->tx_quota_cnt);
3838 }
3839 up_read(&rt_entry->lock_lha4);
3840 }
3841 up_read(&routing_table_lock_lha3);
3842 }
3843}
3844
3845static void dump_control_ports(struct seq_file *s)
3846{
3847 struct msm_ipc_port *port_ptr;
3848
3849 seq_printf(s, "%-11s|%-11s|\n", "Node_id", "Port_id");
3850 seq_puts(s, "------------------------------------------------------------\n");
3851 down_read(&control_ports_lock_lha5);
3852 list_for_each_entry(port_ptr, &control_ports, list)
3853 seq_printf(s, "0x%08x |0x%08x |\n", port_ptr->this_port.node_id,
3854 port_ptr->this_port.port_id);
3855 up_read(&control_ports_lock_lha5);
3856}
3857
3858static void dump_local_ports(struct seq_file *s)
3859{
3860 int j;
3861 struct msm_ipc_port *port_ptr;
3862
Dhoat Harpal3ad5fe32017-06-19 21:26:13 +05303863 seq_printf(s, "%-11s|%-11s|%-32s|%-11s|\n",
3864 "Node_id", "Port_id", "Wakelock", "Last SVCID");
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06003865 seq_puts(s, "------------------------------------------------------------\n");
3866 down_read(&local_ports_lock_lhc2);
3867 for (j = 0; j < LP_HASH_SIZE; j++) {
3868 list_for_each_entry(port_ptr, &local_ports[j], list) {
3869 mutex_lock(&port_ptr->port_lock_lhc3);
Dhoat Harpal3ad5fe32017-06-19 21:26:13 +05303870 seq_printf(s, "0x%08x |0x%08x |%-32s|0x%08x |\n",
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06003871 port_ptr->this_port.node_id,
Dhoat Harpal3ad5fe32017-06-19 21:26:13 +05303872 port_ptr->this_port.port_id,
3873 port_ptr->rx_ws_name,
3874 port_ptr->last_served_svc_id);
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06003875 mutex_unlock(&port_ptr->port_lock_lhc3);
3876 }
3877 }
3878 up_read(&local_ports_lock_lhc2);
3879}
3880
3881static int debugfs_show(struct seq_file *s, void *data)
3882{
3883 void (*show)(struct seq_file *) = s->private;
3884
3885 show(s);
3886 return 0;
3887}
3888
3889static int debug_open(struct inode *inode, struct file *file)
3890{
3891 return single_open(file, debugfs_show, inode->i_private);
3892}
3893
3894static const struct file_operations debug_ops = {
3895 .open = debug_open,
3896 .release = single_release,
3897 .read = seq_read,
3898 .llseek = seq_lseek,
3899};
3900
3901static void debug_create(const char *name, struct dentry *dent,
3902 void (*show)(struct seq_file *))
3903{
3904 debugfs_create_file(name, 0444, dent, show, &debug_ops);
3905}
3906
3907static void debugfs_init(void)
3908{
3909 struct dentry *dent;
3910
3911 dent = debugfs_create_dir("msm_ipc_router", NULL);
3912 if (IS_ERR_OR_NULL(dent))
3913 return;
3914
3915 debug_create("dump_local_ports", dent, dump_local_ports);
3916 debug_create("dump_remote_ports", dent, dump_remote_ports);
3917 debug_create("dump_control_ports", dent, dump_control_ports);
3918 debug_create("dump_servers", dent, dump_servers);
3919 debug_create("dump_xprt_info", dent, dump_xprt_info);
3920 debug_create("dump_routing_table", dent, dump_routing_table);
3921}
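
/*
 * With CONFIG_DEBUG_FS enabled and debugfs mounted (typically at
 * /sys/kernel/debug), these dumps can be read directly, e.g.:
 *
 *	cat /sys/kernel/debug/msm_ipc_router/dump_servers
 *
 * The mount point shown here is the conventional one and may differ.
 */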
3922
3923#else
3924static void debugfs_init(void) {}
3925#endif
3926
3927/**
3928 * ipc_router_create_log_ctx() - Create and add the log context based on
3929 * transport
3930 * @name: subsystem name
3931 *
3932 * Return: a reference to the log context created
3933 *
3934 * This function creates an IPC log context based on the transport name and
3935 * adds it to a global list. The log context can be reused from the list in
3936 * case of a subsystem restart.
3937 */
3938static void *ipc_router_create_log_ctx(char *name)
3939{
3940 struct ipc_rtr_log_ctx *sub_log_ctx;
3941
3942 sub_log_ctx = kmalloc(sizeof(*sub_log_ctx), GFP_KERNEL);
3943 if (!sub_log_ctx)
3944 return NULL;
3945 sub_log_ctx->log_ctx = ipc_log_context_create(
3946 IPC_RTR_INFO_PAGES, name, 0);
3947 if (!sub_log_ctx->log_ctx) {
3948 IPC_RTR_ERR("%s: Unable to create IPC logging for [%s]",
3949 __func__, name);
3950 kfree(sub_log_ctx);
3951 return NULL;
3952 }
3953 strlcpy(sub_log_ctx->log_ctx_name, name, LOG_CTX_NAME_LEN);
3954 INIT_LIST_HEAD(&sub_log_ctx->list);
3955 list_add_tail(&sub_log_ctx->list, &log_ctx_list);
3956 return sub_log_ctx->log_ctx;
3957}
3958
3959static void ipc_router_log_ctx_init(void)
3960{
3961 mutex_lock(&log_ctx_list_lock_lha0);
3962 local_log_ctx = ipc_router_create_log_ctx("local_IPCRTR");
3963 mutex_unlock(&log_ctx_list_lock_lha0);
3964}
3965
3966/**
3967 * ipc_router_get_log_ctx() - Retrieves the ipc log context based on subsystem
3968 * name.
3969 * @sub_name: subsystem name
3970 *
3971 * Return: a reference to the log context
3972 */
3973static void *ipc_router_get_log_ctx(char *sub_name)
3974{
3975 void *log_ctx = NULL;
3976 struct ipc_rtr_log_ctx *temp_log_ctx;
3977
3978 mutex_lock(&log_ctx_list_lock_lha0);
3979 list_for_each_entry(temp_log_ctx, &log_ctx_list, list)
3980 if (!strcmp(temp_log_ctx->log_ctx_name, sub_name)) {
3981 log_ctx = temp_log_ctx->log_ctx;
3982 mutex_unlock(&log_ctx_list_lock_lha0);
3983 return log_ctx;
3984 }
3985 log_ctx = ipc_router_create_log_ctx(sub_name);
3986 mutex_unlock(&log_ctx_list_lock_lha0);
3987
3988 return log_ctx;
3989}
3990
3991/**
3992 * ipc_router_get_xprt_info_ref() - Get a reference to the xprt_info structure
3993 * @xprt_info: pointer to the xprt_info.
3994 *
3995 * @return: Zero on success, -ENODEV on failure.
3996 *
3997 * This function is used to obtain a reference to the xprt_info structure
3998 * corresponding to the requested @xprt_info pointer.
3999 */
4000static int ipc_router_get_xprt_info_ref(
4001 struct msm_ipc_router_xprt_info *xprt_info)
4002{
4003 int ret = -ENODEV;
4004 struct msm_ipc_router_xprt_info *tmp_xprt_info;
4005
4006 if (!xprt_info)
4007 return 0;
4008
4009 down_read(&xprt_info_list_lock_lha5);
4010 list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
4011 if (tmp_xprt_info == xprt_info) {
4012 kref_get(&xprt_info->ref);
4013 ret = 0;
4014 break;
4015 }
4016 }
4017 up_read(&xprt_info_list_lock_lha5);
4018
4019 return ret;
4020}
4021
4022/**
4023 * ipc_router_put_xprt_info_ref() - Put a reference to the xprt_info structure
4024 * @xprt_info: pointer to the xprt_info.
4025 *
4026 * This function is used to put the reference to the xprt_info structure
4027 * corresponding to the requested @xprt_info pointer.
4028 */
4029static void ipc_router_put_xprt_info_ref(
4030 struct msm_ipc_router_xprt_info *xprt_info)
4031{
4032 if (xprt_info)
4033 kref_put(&xprt_info->ref, ipc_router_release_xprt_info_ref);
4034}
4035
4036/**
4037 * ipc_router_release_xprt_info_ref() - release the xprt_info last reference
4038 * @ref: Reference to the xprt_info structure.
4039 *
4040 * This function is called when all references to the xprt_info structure
4041 * are released.
4042 */
4043static void ipc_router_release_xprt_info_ref(struct kref *ref)
4044{
4045 struct msm_ipc_router_xprt_info *xprt_info =
4046 container_of(ref, struct msm_ipc_router_xprt_info, ref);
4047
4048 complete_all(&xprt_info->ref_complete);
4049}
4050
4051static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt)
4052{
4053 struct msm_ipc_router_xprt_info *xprt_info;
4054
4055 xprt_info = kmalloc(sizeof(*xprt_info), GFP_KERNEL);
4056 if (!xprt_info)
4057 return -ENOMEM;
4058
4059 xprt_info->xprt = xprt;
4060 xprt_info->initialized = 0;
4061 xprt_info->remote_node_id = -1;
4062 INIT_LIST_HEAD(&xprt_info->pkt_list);
4063 mutex_init(&xprt_info->rx_lock_lhb2);
4064 mutex_init(&xprt_info->tx_lock_lhb2);
4065 wakeup_source_init(&xprt_info->ws, xprt->name);
4066 xprt_info->need_len = 0;
4067 xprt_info->abort_data_read = 0;
4068 INIT_WORK(&xprt_info->read_data, do_read_data);
4069 INIT_LIST_HEAD(&xprt_info->list);
4070 kref_init(&xprt_info->ref);
4071 init_completion(&xprt_info->ref_complete);
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +05304072 xprt_info->dynamic_ws = 0;
4073 if (xprt->get_ws_info)
4074 xprt_info->dynamic_ws = xprt->get_ws_info(xprt);
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004075
4076 xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
4077 if (!xprt_info->workqueue) {
4078 kfree(xprt_info);
4079 return -ENOMEM;
4080 }
4081
4082 xprt_info->log_ctx = ipc_router_get_log_ctx(xprt->name);
4083
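	/* The loopback transport terminates locally, so mark it initialized
	 * right away with the local node ID as its remote node.
	 */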
4084 if (!strcmp(xprt->name, "msm_ipc_router_loopback_xprt")) {
4085 xprt_info->remote_node_id = IPC_ROUTER_NID_LOCAL;
4086 xprt_info->initialized = 1;
4087 }
4088
4089 IPC_RTR_INFO(xprt_info->log_ctx, "Adding xprt: [%s]\n", xprt->name);
4090 down_write(&xprt_info_list_lock_lha5);
4091 list_add_tail(&xprt_info->list, &xprt_info_list);
4092 up_write(&xprt_info_list_lock_lha5);
4093
4094 down_write(&routing_table_lock_lha3);
4095 if (!routing_table_inited) {
4096 init_routing_table();
4097 routing_table_inited = 1;
4098 }
4099 up_write(&routing_table_lock_lha3);
4100
4101 xprt->priv = xprt_info;
4102
4103 return 0;
4104}
4105
4106static void msm_ipc_router_remove_xprt(struct msm_ipc_router_xprt *xprt)
4107{
4108 struct msm_ipc_router_xprt_info *xprt_info;
4109 struct rr_packet *temp_pkt, *pkt;
4110
4111 if (xprt && xprt->priv) {
4112 xprt_info = xprt->priv;
4113
4114 IPC_RTR_INFO(xprt_info->log_ctx, "Removing xprt: [%s]\n",
4115 xprt->name);
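		/* Stop further packet processing: flag the abort, drain the
		 * read workqueue, then free any packets still queued on this
		 * transport.
		 */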
4116 mutex_lock(&xprt_info->rx_lock_lhb2);
4117 xprt_info->abort_data_read = 1;
4118 mutex_unlock(&xprt_info->rx_lock_lhb2);
4119 flush_workqueue(xprt_info->workqueue);
4120 destroy_workqueue(xprt_info->workqueue);
4121 mutex_lock(&xprt_info->rx_lock_lhb2);
4122 list_for_each_entry_safe(pkt, temp_pkt,
4123 &xprt_info->pkt_list, list) {
4124 list_del(&pkt->list);
4125 release_pkt(pkt);
4126 }
4127 mutex_unlock(&xprt_info->rx_lock_lhb2);
4128
4129 down_write(&xprt_info_list_lock_lha5);
4130 list_del(&xprt_info->list);
4131 up_write(&xprt_info_list_lock_lha5);
4132
4133 msm_ipc_cleanup_routing_table(xprt_info);
4134
4135 wakeup_source_trash(&xprt_info->ws);
4136
4137 ipc_router_put_xprt_info_ref(xprt_info);
4138 wait_for_completion(&xprt_info->ref_complete);
4139
4140 xprt->priv = 0;
4141 kfree(xprt_info);
4142 }
4143}
4144
4145struct msm_ipc_router_xprt_work {
4146 struct msm_ipc_router_xprt *xprt;
4147 struct work_struct work;
4148};
4149
4150static void xprt_open_worker(struct work_struct *work)
4151{
4152 struct msm_ipc_router_xprt_work *xprt_work =
4153 container_of(work, struct msm_ipc_router_xprt_work, work);
4154
4155 msm_ipc_router_add_xprt(xprt_work->xprt);
4156 kfree(xprt_work);
4157}
4158
4159static void xprt_close_worker(struct work_struct *work)
4160{
4161 struct msm_ipc_router_xprt_work *xprt_work =
4162 container_of(work, struct msm_ipc_router_xprt_work, work);
4163
4164 msm_ipc_router_remove_xprt(xprt_work->xprt);
4165 xprt_work->xprt->sft_close_done(xprt_work->xprt);
4166 kfree(xprt_work);
4167}
4168
4169void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
4170 unsigned int event,
4171 void *data)
4172{
4173 struct msm_ipc_router_xprt_info *xprt_info = xprt->priv;
4174 struct msm_ipc_router_xprt_work *xprt_work;
4175 struct rr_packet *pkt;
4176 int ret;
4177
4178 ret = ipc_router_core_init();
4179 if (ret < 0) {
4180 IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
4181 __func__, ret);
4182 return;
4183 }
4184
4185 switch (event) {
4186 case IPC_ROUTER_XPRT_EVENT_OPEN:
4187 xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
4188 if (xprt_work) {
4189 xprt_work->xprt = xprt;
4190 INIT_WORK(&xprt_work->work, xprt_open_worker);
4191 queue_work(msm_ipc_router_workqueue, &xprt_work->work);
4192 } else {
4193 IPC_RTR_ERR(
4194 "%s: malloc failure - Couldn't notify OPEN event",
4195 __func__);
4196 }
4197 break;
4198
4199 case IPC_ROUTER_XPRT_EVENT_CLOSE:
4200 xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
4201 if (xprt_work) {
4202 xprt_work->xprt = xprt;
4203 INIT_WORK(&xprt_work->work, xprt_close_worker);
4204 queue_work(msm_ipc_router_workqueue, &xprt_work->work);
4205 } else {
4206 IPC_RTR_ERR(
4207 "%s: malloc failure - Couldn't notify CLOSE event",
4208 __func__);
4209 }
4210 break;
4211 }
4212
4213 if (!data)
4214 return;
4215
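	/* Data indication: wait for the OPEN worker to finish adding the
	 * transport (xprt->priv is set at the end of msm_ipc_router_add_xprt())
	 * before queueing the packet for the read worker.
	 */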
4216 while (!xprt_info) {
4217 msleep(100);
4218 xprt_info = xprt->priv;
4219 }
4220
4221 pkt = clone_pkt((struct rr_packet *)data);
4222 if (!pkt)
4223 return;
4224
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +05304225 pkt->ws_need = false;
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004226 mutex_lock(&xprt_info->rx_lock_lhb2);
4227 list_add_tail(&pkt->list, &xprt_info->pkt_list);
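	/* Keep the system awake for this packet: always on transports without
	 * dynamic wakeup-source support, and only while wakeup sources are
	 * allowed on transports that do support it.
	 */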
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +05304228 if (!xprt_info->dynamic_ws) {
4229 __pm_stay_awake(&xprt_info->ws);
4230 pkt->ws_need = true;
4231 } else {
4232 if (is_wakeup_source_allowed) {
4233 __pm_stay_awake(&xprt_info->ws);
4234 pkt->ws_need = true;
4235 }
4236 }
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004237 mutex_unlock(&xprt_info->rx_lock_lhb2);
4238 queue_work(xprt_info->workqueue, &xprt_info->read_data);
4239}
4240
4241/**
4242 * parse_devicetree() - parse device tree binding
4243 *
4244 * @node: pointer to device tree node
4245 *
4246 * @return: 0 on success; the default-peripheral property is optional.
4247 */
4248static int parse_devicetree(struct device_node *node)
4249{
4250 char *key;
4251 const char *peripheral = NULL;
4252
4253 key = "qcom,default-peripheral";
4254 peripheral = of_get_property(node, key, NULL);
4255 if (peripheral)
4256 strlcpy(default_peripheral, peripheral, PIL_SUBSYSTEM_NAME_LEN);
4257
4258 return 0;
4259}
4260
4261/**
4262 * ipc_router_probe() - Probe the IPC Router
4263 *
4264 * @pdev: Platform device corresponding to IPC Router.
4265 *
4266 * @return: 0 on success, standard Linux error codes on error.
4267 *
4268 * This function is called when the underlying device tree driver registers
4269 * a platform device, mapped to IPC Router.
4270 */
4271static int ipc_router_probe(struct platform_device *pdev)
4272{
4273 int ret = 0;
4274
4275 if (pdev && pdev->dev.of_node) {
4276 ret = parse_devicetree(pdev->dev.of_node);
4277 if (ret)
4278 IPC_RTR_ERR("%s: Failed to parse device tree\n",
4279 __func__);
4280 }
4281 return ret;
4282}
4283
4284static const struct of_device_id ipc_router_match_table[] = {
4285 { .compatible = "qcom,ipc_router" },
4286 {},
4287};
4288
4289static struct platform_driver ipc_router_driver = {
4290 .probe = ipc_router_probe,
4291 .driver = {
4292 .name = MODULE_NAME,
4293 .owner = THIS_MODULE,
4294 .of_match_table = ipc_router_match_table,
4295 },
4296};
4297
4298/**
4299 * ipc_router_core_init() - Initialize all IPC Router core data structures
4300 *
4301 * Return: 0 on success or a standard error code otherwise.
4302 *
4303 * This function initializes only the core data structures of the IPC Router
4304 * module. The remaining initialization is done inside msm_ipc_router_init().
4305 */
4306static int ipc_router_core_init(void)
4307{
4308 int i;
4309 int ret;
4310 struct msm_ipc_routing_table_entry *rt_entry;
4311
4312 mutex_lock(&ipc_router_init_lock);
4313 if (likely(is_ipc_router_inited)) {
4314 mutex_unlock(&ipc_router_init_lock);
4315 return 0;
4316 }
4317
4318 debugfs_init();
4319
4320 for (i = 0; i < SRV_HASH_SIZE; i++)
4321 INIT_LIST_HEAD(&server_list[i]);
4322
4323 for (i = 0; i < LP_HASH_SIZE; i++)
4324 INIT_LIST_HEAD(&local_ports[i]);
4325
4326 down_write(&routing_table_lock_lha3);
4327 if (!routing_table_inited) {
4328 init_routing_table();
4329 routing_table_inited = 1;
4330 }
4331 up_write(&routing_table_lock_lha3);
4332 rt_entry = create_routing_table_entry(IPC_ROUTER_NID_LOCAL, NULL);
4333 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
4334
4335 msm_ipc_router_workqueue =
4336 create_singlethread_workqueue("msm_ipc_router");
4337 if (!msm_ipc_router_workqueue) {
4338 mutex_unlock(&ipc_router_init_lock);
4339 return -ENOMEM;
4340 }
4341
4342 ret = msm_ipc_router_security_init();
4343 if (ret < 0)
4344 IPC_RTR_ERR("%s: Security Init failed\n", __func__);
4345 else
4346 is_ipc_router_inited = true;
4347 mutex_unlock(&ipc_router_init_lock);
4348
4349 return ret;
4350}
4351
4352static int msm_ipc_router_init(void)
4353{
4354 int ret;
4355
4356 ret = ipc_router_core_init();
4357 if (ret < 0)
4358 return ret;
4359
4360 ret = platform_driver_register(&ipc_router_driver);
4361 if (ret)
4362 IPC_RTR_ERR(
4363 "%s: ipc_router_driver register failed %d\n", __func__, ret);
4364
4365 ret = msm_ipc_router_init_sockets();
4366 if (ret < 0)
4367 IPC_RTR_ERR("%s: Init sockets failed\n", __func__);
4368
4369 ipc_router_log_ctx_init();
4370 return ret;
4371}
4372
4373module_init(msm_ipc_router_init);
4374MODULE_DESCRIPTION("MSM IPC Router");
4375MODULE_LICENSE("GPL v2");