/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/string.h>
17#include <linux/errno.h>
18#include <linux/init.h>
19#include <linux/types.h>
20#include <linux/delay.h>
21#include <linux/err.h>
22#include <linux/sched.h>
23#include <linux/poll.h>
24#include <linux/pm.h>
25#include <linux/platform_device.h>
26#include <linux/uaccess.h>
27#include <linux/debugfs.h>
28#include <linux/rwsem.h>
29#include <linux/ipc_logging.h>
31#include <linux/ipc_router.h>
32#include <linux/ipc_router_xprt.h>
33#include <linux/kref.h>
34#include <soc/qcom/subsystem_notif.h>
35#include <soc/qcom/subsystem_restart.h>
36
37#include <asm/byteorder.h>
38
39#include "ipc_router_private.h"
40#include "ipc_router_security.h"
41
42enum {
43 SMEM_LOG = 1U << 0,
44 RTR_DBG = 1U << 1,
45};
46
47static int msm_ipc_router_debug_mask;
48module_param_named(debug_mask, msm_ipc_router_debug_mask,
49 int, 0664);
50#define MODULE_NAME "ipc_router"
51
52#define IPC_RTR_INFO_PAGES 6
53
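/* Log to the per-transport IPC logging context and, when RTR_DBG is set in
 * the debug mask, also to the kernel log. Typical usage (illustrative only):
 *	IPC_RTR_INFO(xprt_info->log_ctx, "%s: link up\n", xprt->name);
 */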
54#define IPC_RTR_INFO(log_ctx, x...) do { \
55typeof(log_ctx) _log_ctx = (log_ctx); \
56if (_log_ctx) \
57 ipc_log_string(_log_ctx, x); \
58if (msm_ipc_router_debug_mask & RTR_DBG) \
59 pr_info("[IPCRTR] "x); \
60} while (0)
61
62#define IPC_ROUTER_LOG_EVENT_TX 0x01
63#define IPC_ROUTER_LOG_EVENT_RX 0x02
64#define IPC_ROUTER_LOG_EVENT_TX_ERR 0x03
65#define IPC_ROUTER_LOG_EVENT_RX_ERR 0x04
66#define IPC_ROUTER_DUMMY_DEST_NODE 0xFFFFFFFF
67
68#define ipc_port_sk(port) ((struct sock *)(port))
69
70static LIST_HEAD(control_ports);
71static DECLARE_RWSEM(control_ports_lock_lha5);
72
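/* Local ports are hashed into LP_HASH_SIZE buckets using the low bits of
 * the port ID.
 */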
73#define LP_HASH_SIZE 32
74static struct list_head local_ports[LP_HASH_SIZE];
75static DECLARE_RWSEM(local_ports_lock_lhc2);
76
/* Server info is organized as a hash table. The server's service ID is
 * used to index into the hash table. The instance IDs of most servers
 * are 1 or 2, whereas the service IDs are well distributed; hence using
 * the service ID as the hash key optimizes the hash table operations
 * such as add, lookup and destroy.
 */
83#define SRV_HASH_SIZE 32
84static struct list_head server_list[SRV_HASH_SIZE];
85static DECLARE_RWSEM(server_list_lock_lha2);
86
87struct msm_ipc_server {
88 struct list_head list;
89 struct kref ref;
90 struct msm_ipc_port_name name;
91 char pdev_name[32];
92 int next_pdev_id;
93 int synced_sec_rule;
94 struct list_head server_port_list;
95};
96
97struct msm_ipc_server_port {
98 struct list_head list;
99 struct platform_device *pdev;
100 struct msm_ipc_port_addr server_addr;
101 struct msm_ipc_router_xprt_info *xprt_info;
102};
103
104struct msm_ipc_resume_tx_port {
105 struct list_head list;
106 u32 port_id;
107 u32 node_id;
108};
109
110struct ipc_router_conn_info {
111 struct list_head list;
112 u32 port_id;
113};
114
115enum {
116 RESET = 0,
117 VALID = 1,
118};
119
120#define RP_HASH_SIZE 32
121struct msm_ipc_router_remote_port {
122 struct list_head list;
123 struct kref ref;
124 struct mutex rport_lock_lhb2; /* lock for remote port state access */
125 u32 node_id;
126 u32 port_id;
127 int status;
128 u32 tx_quota_cnt;
129 struct list_head resume_tx_port_list;
130 struct list_head conn_info_list;
131 void *sec_rule;
132 struct msm_ipc_server *server;
133};
134
135struct msm_ipc_router_xprt_info {
136 struct list_head list;
137 struct msm_ipc_router_xprt *xprt;
138 u32 remote_node_id;
139 u32 initialized;
140 struct list_head pkt_list;
141 struct wakeup_source ws;
142 struct mutex rx_lock_lhb2; /* lock for xprt rx operations */
143 struct mutex tx_lock_lhb2; /* lock for xprt tx operations */
144 u32 need_len;
145 u32 abort_data_read;
146 struct work_struct read_data;
147 struct workqueue_struct *workqueue;
148 void *log_ctx;
149 struct kref ref;
150 struct completion ref_complete;
	bool dynamic_ws;
};
153
154#define RT_HASH_SIZE 4
155struct msm_ipc_routing_table_entry {
156 struct list_head list;
157 struct kref ref;
158 u32 node_id;
159 u32 neighbor_node_id;
160 struct list_head remote_port_list[RP_HASH_SIZE];
161 struct msm_ipc_router_xprt_info *xprt_info;
162 struct rw_semaphore lock_lha4;
163 unsigned long num_tx_bytes;
164 unsigned long num_rx_bytes;
165};
166
167#define LOG_CTX_NAME_LEN 32
168struct ipc_rtr_log_ctx {
169 struct list_head list;
170 char log_ctx_name[LOG_CTX_NAME_LEN];
171 void *log_ctx;
172};
173
174static struct list_head routing_table[RT_HASH_SIZE];
175static DECLARE_RWSEM(routing_table_lock_lha3);
176static int routing_table_inited;
177
178static void do_read_data(struct work_struct *work);
179
180static LIST_HEAD(xprt_info_list);
181static DECLARE_RWSEM(xprt_info_list_lock_lha5);
182
183static DEFINE_MUTEX(log_ctx_list_lock_lha0);
184static LIST_HEAD(log_ctx_list);
185static DEFINE_MUTEX(ipc_router_init_lock);
186static bool is_ipc_router_inited;
187static int ipc_router_core_init(void);
188#define IPC_ROUTER_INIT_TIMEOUT (10 * HZ)
189
190static u32 next_port_id;
191static DEFINE_MUTEX(next_port_id_lock_lhc1);
192static struct workqueue_struct *msm_ipc_router_workqueue;
193
194static void *local_log_ctx;
195static void *ipc_router_get_log_ctx(char *sub_name);
196static int process_resume_tx_msg(union rr_control_msg *msg,
197 struct rr_packet *pkt);
198static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr);
199static int ipc_router_get_xprt_info_ref(
200 struct msm_ipc_router_xprt_info *xprt_info);
201static void ipc_router_put_xprt_info_ref(
202 struct msm_ipc_router_xprt_info *xprt_info);
203static void ipc_router_release_xprt_info_ref(struct kref *ref);
204
205struct pil_vote_info {
206 void *pil_handle;
207 struct work_struct load_work;
208 struct work_struct unload_work;
209};
210
211#define PIL_SUBSYSTEM_NAME_LEN 32
212static char default_peripheral[PIL_SUBSYSTEM_NAME_LEN];
213
214enum {
215 DOWN,
216 UP,
217};
218
static bool is_wakeup_source_allowed;
220
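/**
 * msm_ipc_router_set_ws_allowed() - Set whether wakeup sources may be used
 * @flag: Value stored in the module-wide is_wakeup_source_allowed flag.
 */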
221void msm_ipc_router_set_ws_allowed(bool flag)
222{
223 is_wakeup_source_allowed = flag;
224}
225
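/**
 * init_routing_table() - Initialize each bucket of the routing table hash
 */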
static void init_routing_table(void)
227{
228 int i;
229
230 for (i = 0; i < RT_HASH_SIZE; i++)
231 INIT_LIST_HEAD(&routing_table[i]);
232}
233
234/**
235 * ipc_router_calc_checksum() - compute the checksum for extended HELLO message
236 * @msg: Reference to the IPC Router HELLO message.
237 *
238 * Return: Computed checksum value, 0 if msg is NULL.
239 */
240static u32 ipc_router_calc_checksum(union rr_control_msg *msg)
241{
242 u32 checksum = 0;
243 int i, len;
244 u16 upper_nb;
245 u16 lower_nb;
246 void *hello;
247
248 if (!msg)
249 return checksum;
250 hello = msg;
251 len = sizeof(*msg);
252
253 for (i = 0; i < len / IPCR_WORD_SIZE; i++) {
254 lower_nb = (*((u32 *)hello)) & IPC_ROUTER_CHECKSUM_MASK;
255 upper_nb = ((*((u32 *)hello)) >> 16) &
256 IPC_ROUTER_CHECKSUM_MASK;
257 checksum = checksum + upper_nb + lower_nb;
258 hello = ((u32 *)hello) + 1;
259 }
260 while (checksum > 0xFFFF)
261 checksum = (checksum & IPC_ROUTER_CHECKSUM_MASK) +
262 ((checksum >> 16) & IPC_ROUTER_CHECKSUM_MASK);
263
264 checksum = ~checksum & IPC_ROUTER_CHECKSUM_MASK;
265 return checksum;
266}
267
/**
 * skb_copy_to_log_buf() - Copy the required number of bytes from an skb_queue
 * @skb_head: skb_queue head that contains the data.
 * @pl_len: Length of the payload to be copied.
 * @hdr_offset: Length of the header present in the first skb.
 * @log_buf: Output buffer into which the payload bytes are copied.
 *
 * This function copies the first @pl_len payload bytes from the skb_queue
 * into @log_buf so that they can be included in a log message.
 */
278static void skb_copy_to_log_buf(struct sk_buff_head *skb_head,
279 unsigned int pl_len, unsigned int hdr_offset,
280 u64 *log_buf)
281{
282 struct sk_buff *temp_skb;
283 unsigned int copied_len = 0, copy_len = 0;
284 int remaining;
285
286 if (!skb_head) {
287 IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
288 return;
289 }
290 temp_skb = skb_peek(skb_head);
291 if (unlikely(!temp_skb || !temp_skb->data)) {
292 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
293 return;
294 }
295
296 remaining = temp_skb->len - hdr_offset;
297 skb_queue_walk(skb_head, temp_skb) {
298 copy_len = remaining < pl_len ? remaining : pl_len;
		memcpy((void *)log_buf + copied_len,
		       temp_skb->data + hdr_offset, copy_len);
301 copied_len += copy_len;
302 hdr_offset = 0;
303 if (copied_len == pl_len)
304 break;
305 remaining = pl_len - remaining;
306 }
307}
308
309/**
310 * ipc_router_log_msg() - log all data messages exchanged
311 * @log_ctx: IPC Logging context specific to each transport
 * @xchng_type: Identifies whether the data is received or transmitted.
 * @data: IPC Router data packet or control message received or to be sent.
314 * @hdr: Reference to the router header
315 * @port_ptr: Local IPC Router port.
316 * @rport_ptr: Remote IPC Router port
317 *
 * This function builds the log message that is passed on to the IPC
 * logging framework. The logged data corresponds to the information
 * exchanged between the IPC Router and its clients.
321 */
322static void ipc_router_log_msg(void *log_ctx, u32 xchng_type,
323 void *data, struct rr_header_v1 *hdr,
324 struct msm_ipc_port *port_ptr,
325 struct msm_ipc_router_remote_port *rport_ptr)
326{
327 struct sk_buff_head *skb_head = NULL;
328 union rr_control_msg *msg = NULL;
329 struct rr_packet *pkt = NULL;
330 u64 pl_buf = 0;
331 struct sk_buff *skb;
332 u32 buf_len = 8;
333 u32 svc_id = 0;
334 u32 svc_ins = 0;
335 unsigned int hdr_offset = 0;
336 u32 port_type = 0;
337
338 if (!log_ctx || !hdr || !data)
339 return;
340
341 if (hdr->type == IPC_ROUTER_CTRL_CMD_DATA) {
342 pkt = (struct rr_packet *)data;
343 skb_head = pkt->pkt_fragment_q;
344 skb = skb_peek(skb_head);
345 if (!skb || !skb->data) {
346 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
347 return;
348 }
349
350 if (skb_queue_len(skb_head) == 1 && skb->len < 8)
351 buf_len = skb->len;
352 if (xchng_type == IPC_ROUTER_LOG_EVENT_TX && hdr->dst_node_id
353 != IPC_ROUTER_NID_LOCAL) {
354 if (hdr->version == IPC_ROUTER_V1)
355 hdr_offset = sizeof(struct rr_header_v1);
356 else if (hdr->version == IPC_ROUTER_V2)
357 hdr_offset = sizeof(struct rr_header_v2);
358 }
359 skb_copy_to_log_buf(skb_head, buf_len, hdr_offset, &pl_buf);
360
361 if (port_ptr && rport_ptr && (port_ptr->type == CLIENT_PORT) &&
362 rport_ptr->server) {
363 svc_id = rport_ptr->server->name.service;
364 svc_ins = rport_ptr->server->name.instance;
365 port_type = CLIENT_PORT;
366 } else if (port_ptr && (port_ptr->type == SERVER_PORT)) {
367 svc_id = port_ptr->port_name.service;
368 svc_ins = port_ptr->port_name.instance;
369 port_type = SERVER_PORT;
370 }
371 IPC_RTR_INFO(log_ctx,
372 "%s %s %s Len:0x%x T:0x%x CF:0x%x SVC:<0x%x:0x%x> SRC:<0x%x:0x%x> DST:<0x%x:0x%x> DATA: %08x %08x",
373 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "" :
374 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ?
375 current->comm : "")),
376 (port_type == CLIENT_PORT ? "CLI" : "SRV"),
377 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
378 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
379 (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
380 (xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
381 "UNKNOWN")))),
382 hdr->size, hdr->type, hdr->control_flag,
383 svc_id, svc_ins, hdr->src_node_id, hdr->src_port_id,
384 hdr->dst_node_id, hdr->dst_port_id,
385 (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));
386
387 } else {
388 msg = (union rr_control_msg *)data;
389 if (msg->cmd == IPC_ROUTER_CTRL_CMD_NEW_SERVER ||
390 msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
391 IPC_RTR_INFO(log_ctx,
392 "CTL MSG: %s cmd:0x%x SVC:<0x%x:0x%x> ADDR:<0x%x:0x%x>",
393 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
394 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
395 (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
396 (xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
397 "UNKNOWN")))),
398 msg->cmd, msg->srv.service, msg->srv.instance,
399 msg->srv.node_id, msg->srv.port_id);
400 else if (msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT ||
401 msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX)
402 IPC_RTR_INFO(log_ctx,
403 "CTL MSG: %s cmd:0x%x ADDR: <0x%x:0x%x>",
404 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
405 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
406 msg->cmd, msg->cli.node_id, msg->cli.port_id);
407 else if (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO && hdr)
408 IPC_RTR_INFO(log_ctx,
409 "CTL MSG %s cmd:0x%x ADDR:0x%x",
410 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
411 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
412 msg->cmd, hdr->src_node_id);
413 else
414 IPC_RTR_INFO(log_ctx,
415 "%s UNKNOWN cmd:0x%x",
416 (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
417 (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
418 msg->cmd);
419 }
420}
421
422/* Must be called with routing_table_lock_lha3 locked. */
423static struct msm_ipc_routing_table_entry *lookup_routing_table(
424 u32 node_id)
425{
426 u32 key = (node_id % RT_HASH_SIZE);
427 struct msm_ipc_routing_table_entry *rt_entry;
428
429 list_for_each_entry(rt_entry, &routing_table[key], list) {
430 if (rt_entry->node_id == node_id)
431 return rt_entry;
432 }
433 return NULL;
434}
435
436/**
437 * create_routing_table_entry() - Lookup and create a routing table entry
438 * @node_id: Node ID of the routing table entry to be created.
439 * @xprt_info: XPRT through which the node ID is reachable.
440 *
441 * @return: a reference to the routing table entry on success, NULL on failure.
442 */
443static struct msm_ipc_routing_table_entry *create_routing_table_entry(
444 u32 node_id, struct msm_ipc_router_xprt_info *xprt_info)
445{
446 int i;
447 struct msm_ipc_routing_table_entry *rt_entry;
448 u32 key;
449
450 down_write(&routing_table_lock_lha3);
451 rt_entry = lookup_routing_table(node_id);
452 if (rt_entry)
453 goto out_create_rtentry1;
454
455 rt_entry = kmalloc(sizeof(*rt_entry), GFP_KERNEL);
456 if (!rt_entry) {
457 IPC_RTR_ERR("%s: rt_entry allocation failed for %d\n",
458 __func__, node_id);
459 goto out_create_rtentry2;
460 }
461
462 for (i = 0; i < RP_HASH_SIZE; i++)
463 INIT_LIST_HEAD(&rt_entry->remote_port_list[i]);
464 init_rwsem(&rt_entry->lock_lha4);
465 kref_init(&rt_entry->ref);
466 rt_entry->node_id = node_id;
467 rt_entry->xprt_info = xprt_info;
468 if (xprt_info)
469 rt_entry->neighbor_node_id = xprt_info->remote_node_id;
470
471 key = (node_id % RT_HASH_SIZE);
472 list_add_tail(&rt_entry->list, &routing_table[key]);
473out_create_rtentry1:
474 kref_get(&rt_entry->ref);
475out_create_rtentry2:
476 up_write(&routing_table_lock_lha3);
477 return rt_entry;
478}
479
480/**
481 * ipc_router_get_rtentry_ref() - Get a reference to the routing table entry
482 * @node_id: Node ID of the routing table entry.
483 *
484 * @return: a reference to the routing table entry on success, NULL on failure.
485 *
 * This function is used to obtain a reference to the routing table entry
487 * corresponding to a node id.
488 */
489static struct msm_ipc_routing_table_entry *ipc_router_get_rtentry_ref(
490 u32 node_id)
491{
492 struct msm_ipc_routing_table_entry *rt_entry;
493
494 down_read(&routing_table_lock_lha3);
495 rt_entry = lookup_routing_table(node_id);
496 if (rt_entry)
497 kref_get(&rt_entry->ref);
498 up_read(&routing_table_lock_lha3);
499 return rt_entry;
500}
501
502/**
503 * ipc_router_release_rtentry() - Cleanup and release the routing table entry
504 * @ref: Reference to the entry.
505 *
506 * This function is called when all references to the routing table entry are
507 * released.
508 */
509void ipc_router_release_rtentry(struct kref *ref)
510{
511 struct msm_ipc_routing_table_entry *rt_entry =
512 container_of(ref, struct msm_ipc_routing_table_entry, ref);
513
514 /* All references to a routing entry will be put only under SSR.
515 * As part of SSR, all the internals of the routing table entry
516 * are cleaned. So just free the routing table entry.
517 */
518 kfree(rt_entry);
519}
520
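/**
 * rr_read() - Dequeue the next received packet from an XPRT
 * @xprt_info: XPRT from which the packet has to be read.
 *
 * @return: Reference to the dequeued packet on success, NULL if the packet
 *          list is empty, the read is aborted due to SSR, or @xprt_info
 *          is NULL.
 *
 * The wakeup source associated with the XPRT is relaxed once its packet
 * list becomes empty.
 */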
521struct rr_packet *rr_read(struct msm_ipc_router_xprt_info *xprt_info)
522{
523 struct rr_packet *temp_pkt;
524
525 if (!xprt_info)
526 return NULL;
527
528 mutex_lock(&xprt_info->rx_lock_lhb2);
529 if (xprt_info->abort_data_read) {
530 mutex_unlock(&xprt_info->rx_lock_lhb2);
531 IPC_RTR_ERR("%s detected SSR & exiting now\n",
532 xprt_info->xprt->name);
533 return NULL;
534 }
535
536 if (list_empty(&xprt_info->pkt_list)) {
537 mutex_unlock(&xprt_info->rx_lock_lhb2);
538 return NULL;
539 }
540
541 temp_pkt = list_first_entry(&xprt_info->pkt_list,
542 struct rr_packet, list);
543 list_del(&temp_pkt->list);
544 if (list_empty(&xprt_info->pkt_list))
545 __pm_relax(&xprt_info->ws);
546 mutex_unlock(&xprt_info->rx_lock_lhb2);
547 return temp_pkt;
548}
549
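/**
 * clone_pkt() - Clone a router packet along with its SKB fragments
 * @pkt: Packet to be cloned.
 *
 * @return: Pointer to the cloned packet on success, NULL on failure.
 *
 * The header, optional header and every SKB in the fragment queue are
 * duplicated so that the clone can be queued independently of the original.
 */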
550struct rr_packet *clone_pkt(struct rr_packet *pkt)
551{
552 struct rr_packet *cloned_pkt;
553 struct sk_buff *temp_skb, *cloned_skb;
554 struct sk_buff_head *pkt_fragment_q;
555
556 cloned_pkt = kzalloc(sizeof(*cloned_pkt), GFP_KERNEL);
557 if (!cloned_pkt) {
558 IPC_RTR_ERR("%s: failure\n", __func__);
559 return NULL;
560 }
561 memcpy(&cloned_pkt->hdr, &pkt->hdr, sizeof(struct rr_header_v1));
562 if (pkt->opt_hdr.len > 0) {
563 cloned_pkt->opt_hdr.data = kmalloc(pkt->opt_hdr.len,
564 GFP_KERNEL);
565 if (!cloned_pkt->opt_hdr.data) {
566 IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
567 } else {
568 cloned_pkt->opt_hdr.len = pkt->opt_hdr.len;
569 memcpy(cloned_pkt->opt_hdr.data, pkt->opt_hdr.data,
570 pkt->opt_hdr.len);
571 }
572 }
573
574 pkt_fragment_q = kmalloc(sizeof(*pkt_fragment_q), GFP_KERNEL);
575 if (!pkt_fragment_q) {
576 IPC_RTR_ERR("%s: pkt_frag_q alloc failure\n", __func__);
577 kfree(cloned_pkt);
578 return NULL;
579 }
580 skb_queue_head_init(pkt_fragment_q);
581 kref_init(&cloned_pkt->ref);
582
583 skb_queue_walk(pkt->pkt_fragment_q, temp_skb) {
584 cloned_skb = skb_clone(temp_skb, GFP_KERNEL);
585 if (!cloned_skb)
586 goto fail_clone;
587 skb_queue_tail(pkt_fragment_q, cloned_skb);
588 }
589 cloned_pkt->pkt_fragment_q = pkt_fragment_q;
590 cloned_pkt->length = pkt->length;
	cloned_pkt->ws_need = pkt->ws_need;
	return cloned_pkt;
593
594fail_clone:
595 while (!skb_queue_empty(pkt_fragment_q)) {
596 temp_skb = skb_dequeue(pkt_fragment_q);
597 kfree_skb(temp_skb);
598 }
599 kfree(pkt_fragment_q);
600 if (cloned_pkt->opt_hdr.len > 0)
601 kfree(cloned_pkt->opt_hdr.data);
602 kfree(cloned_pkt);
603 return NULL;
604}
605
606/**
607 * create_pkt() - Create a Router packet
608 * @data: SKB queue to be contained inside the packet.
609 *
610 * @return: pointer to packet on success, NULL on failure.
611 */
612struct rr_packet *create_pkt(struct sk_buff_head *data)
613{
614 struct rr_packet *pkt;
615 struct sk_buff *temp_skb;
616
617 pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
618 if (!pkt) {
619 IPC_RTR_ERR("%s: failure\n", __func__);
620 return NULL;
621 }
622
623 if (data) {
624 pkt->pkt_fragment_q = data;
625 skb_queue_walk(pkt->pkt_fragment_q, temp_skb)
626 pkt->length += temp_skb->len;
627 } else {
628 pkt->pkt_fragment_q = kmalloc(sizeof(*pkt->pkt_fragment_q),
629 GFP_KERNEL);
630 if (!pkt->pkt_fragment_q) {
631 IPC_RTR_ERR("%s: Couldn't alloc pkt_fragment_q\n",
632 __func__);
633 kfree(pkt);
634 return NULL;
635 }
636 skb_queue_head_init(pkt->pkt_fragment_q);
637 }
638 kref_init(&pkt->ref);
639 return pkt;
640}
641
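/**
 * release_pkt() - Free a router packet and its SKB fragment queue
 * @pkt: Packet to be freed.
 */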
642void release_pkt(struct rr_packet *pkt)
643{
644 struct sk_buff *temp_skb;
645
646 if (!pkt)
647 return;
648
649 if (!pkt->pkt_fragment_q) {
650 kfree(pkt);
651 return;
652 }
653
654 while (!skb_queue_empty(pkt->pkt_fragment_q)) {
655 temp_skb = skb_dequeue(pkt->pkt_fragment_q);
656 kfree_skb(temp_skb);
657 }
658 kfree(pkt->pkt_fragment_q);
659 if (pkt->opt_hdr.len > 0)
660 kfree(pkt->opt_hdr.data);
661 kfree(pkt);
662}
663
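/**
 * msm_ipc_router_buf_to_skb() - Copy a linear buffer into a queue of SKBs
 * @buf: Linear buffer to be copied.
 * @buf_len: Length of the buffer in bytes.
 *
 * @return: Pointer to the SKB queue on success, NULL on failure.
 *
 * Headroom for the IPC Router header is reserved in the first SKB. If an
 * SKB allocation fails, the chunk size is halved and retried until it is
 * no larger than half a page.
 */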
664static struct sk_buff_head *msm_ipc_router_buf_to_skb(void *buf,
665 unsigned int buf_len)
666{
667 struct sk_buff_head *skb_head;
668 struct sk_buff *skb;
669 int first = 1, offset = 0;
670 int skb_size, data_size;
671 void *data;
672 int last = 1;
673 int align_size;
674
675 skb_head = kmalloc(sizeof(*skb_head), GFP_KERNEL);
676 if (!skb_head) {
		IPC_RTR_ERR("%s: Could not allocate skb_head\n", __func__);
678 return NULL;
679 }
680 skb_queue_head_init(skb_head);
681
682 data_size = buf_len;
683 align_size = ALIGN_SIZE(data_size);
684 while (offset != buf_len) {
685 skb_size = data_size;
686 if (first)
687 skb_size += IPC_ROUTER_HDR_SIZE;
688 if (last)
689 skb_size += align_size;
690
691 skb = alloc_skb(skb_size, GFP_KERNEL);
692 if (!skb) {
693 if (skb_size <= (PAGE_SIZE / 2)) {
694 IPC_RTR_ERR("%s: cannot allocate skb\n",
695 __func__);
696 goto buf_to_skb_error;
697 }
698 data_size = data_size / 2;
699 last = 0;
700 continue;
701 }
702
703 if (first) {
704 skb_reserve(skb, IPC_ROUTER_HDR_SIZE);
705 first = 0;
706 }
707
708 data = skb_put(skb, data_size);
709 memcpy(skb->data, buf + offset, data_size);
710 skb_queue_tail(skb_head, skb);
711 offset += data_size;
712 data_size = buf_len - offset;
713 last = 1;
714 }
715 return skb_head;
716
717buf_to_skb_error:
718 while (!skb_queue_empty(skb_head)) {
719 skb = skb_dequeue(skb_head);
720 kfree_skb(skb);
721 }
722 kfree(skb_head);
723 return NULL;
724}
725
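/**
 * msm_ipc_router_skb_to_buf() - Copy an SKB queue into a newly allocated buffer
 * @skb_head: SKB queue that contains the data.
 * @len: Number of bytes to copy.
 *
 * @return: Pointer to the allocated buffer on success, NULL on failure.
 * The caller is responsible for freeing the returned buffer.
 */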
726static void *msm_ipc_router_skb_to_buf(struct sk_buff_head *skb_head,
727 unsigned int len)
728{
729 struct sk_buff *temp;
730 unsigned int offset = 0, buf_len = 0, copy_len;
731 void *buf;
732
733 if (!skb_head) {
734 IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
735 return NULL;
736 }
737
738 temp = skb_peek(skb_head);
739 buf_len = len;
740 buf = kmalloc(buf_len, GFP_KERNEL);
741 if (!buf) {
742 IPC_RTR_ERR("%s: cannot allocate buf\n", __func__);
743 return NULL;
744 }
745 skb_queue_walk(skb_head, temp) {
746 copy_len = buf_len < temp->len ? buf_len : temp->len;
747 memcpy(buf + offset, temp->data, copy_len);
748 offset += copy_len;
749 buf_len -= copy_len;
750 }
751 return buf;
752}
753
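/**
 * msm_ipc_router_free_skb() - Free an SKB queue and all SKBs queued on it
 * @skb_head: SKB queue to be freed.
 */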
754void msm_ipc_router_free_skb(struct sk_buff_head *skb_head)
755{
756 struct sk_buff *temp_skb;
757
758 if (!skb_head)
759 return;
760
761 while (!skb_queue_empty(skb_head)) {
762 temp_skb = skb_dequeue(skb_head);
763 kfree_skb(temp_skb);
764 }
765 kfree(skb_head);
766}
767
768/**
769 * extract_optional_header() - Extract the optional header from skb
770 * @pkt: Packet structure into which the header has to be extracted.
 * @opt_len: Length of the optional header, in IPCR_WORD_SIZE units.
772 *
773 * @return: Length of optional header in bytes if success, zero otherwise.
774 */
775static int extract_optional_header(struct rr_packet *pkt, u8 opt_len)
776{
777 size_t offset = 0, buf_len = 0, copy_len, opt_hdr_len;
778 struct sk_buff *temp;
779 struct sk_buff_head *skb_head;
780
781 opt_hdr_len = opt_len * IPCR_WORD_SIZE;
782 pkt->opt_hdr.data = kmalloc(opt_hdr_len, GFP_KERNEL);
783 if (!pkt->opt_hdr.data) {
784 IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
785 return 0;
786 }
787 skb_head = pkt->pkt_fragment_q;
788 buf_len = opt_hdr_len;
789 skb_queue_walk(skb_head, temp) {
790 copy_len = buf_len < temp->len ? buf_len : temp->len;
791 memcpy(pkt->opt_hdr.data + offset, temp->data, copy_len);
792 offset += copy_len;
793 buf_len -= copy_len;
794 skb_pull(temp, copy_len);
795 if (temp->len == 0) {
796 skb_dequeue(skb_head);
797 kfree_skb(temp);
798 }
799 }
800 pkt->opt_hdr.len = opt_hdr_len;
801 return opt_hdr_len;
802}
803
804/**
805 * extract_header_v1() - Extract IPC Router header of version 1
 * @pkt: Packet structure into which the header has to be extracted.
807 * @skb: SKB from which the header has to be extracted.
808 *
809 * @return: 0 on success, standard Linux error codes on failure.
810 */
811static int extract_header_v1(struct rr_packet *pkt, struct sk_buff *skb)
812{
813 if (!pkt || !skb) {
814 IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
815 return -EINVAL;
816 }
817
818 memcpy(&pkt->hdr, skb->data, sizeof(struct rr_header_v1));
819 skb_pull(skb, sizeof(struct rr_header_v1));
820 pkt->length -= sizeof(struct rr_header_v1);
821 return 0;
822}
823
824/**
825 * extract_header_v2() - Extract IPC Router header of version 2
 * @pkt: Packet structure into which the header has to be extracted.
827 * @skb: SKB from which the header has to be extracted.
828 *
829 * @return: 0 on success, standard Linux error codes on failure.
830 */
831static int extract_header_v2(struct rr_packet *pkt, struct sk_buff *skb)
832{
833 struct rr_header_v2 *hdr;
834 u8 opt_len;
835 size_t opt_hdr_len;
836 size_t total_hdr_size = sizeof(*hdr);
837
838 if (!pkt || !skb) {
839 IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
840 return -EINVAL;
841 }
842
843 hdr = (struct rr_header_v2 *)skb->data;
844 pkt->hdr.version = (u32)hdr->version;
845 pkt->hdr.type = (u32)hdr->type;
846 pkt->hdr.src_node_id = (u32)hdr->src_node_id;
847 pkt->hdr.src_port_id = (u32)hdr->src_port_id;
848 pkt->hdr.size = (u32)hdr->size;
849 pkt->hdr.control_flag = (u32)hdr->control_flag;
850 pkt->hdr.dst_node_id = (u32)hdr->dst_node_id;
851 pkt->hdr.dst_port_id = (u32)hdr->dst_port_id;
852 opt_len = hdr->opt_len;
853 skb_pull(skb, total_hdr_size);
854 if (opt_len > 0) {
855 opt_hdr_len = extract_optional_header(pkt, opt_len);
856 total_hdr_size += opt_hdr_len;
857 }
858 pkt->length -= total_hdr_size;
859 return 0;
860}
861
862/**
863 * extract_header() - Extract IPC Router header
 * @pkt: Packet from which the header has to be extracted.
865 *
866 * @return: 0 on success, standard Linux error codes on failure.
867 *
868 * This function will check if the header version is v1 or v2 and invoke
869 * the corresponding helper function to extract the IPC Router header.
870 */
871static int extract_header(struct rr_packet *pkt)
872{
873 struct sk_buff *temp_skb;
874 int ret;
875
876 if (!pkt) {
877 IPC_RTR_ERR("%s: NULL PKT\n", __func__);
878 return -EINVAL;
879 }
880
881 temp_skb = skb_peek(pkt->pkt_fragment_q);
882 if (!temp_skb || !temp_skb->data) {
883 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
884 return -EINVAL;
885 }
886
887 if (temp_skb->data[0] == IPC_ROUTER_V1) {
888 ret = extract_header_v1(pkt, temp_skb);
889 } else if (temp_skb->data[0] == IPC_ROUTER_V2) {
890 ret = extract_header_v2(pkt, temp_skb);
891 } else {
892 IPC_RTR_ERR("%s: Invalid Header version %02x\n",
893 __func__, temp_skb->data[0]);
894 print_hex_dump(KERN_ERR, "Header: ", DUMP_PREFIX_ADDRESS,
895 16, 1, temp_skb->data, pkt->length, true);
896 return -EINVAL;
897 }
898 return ret;
899}
900
901/**
902 * calc_tx_header_size() - Calculate header size to be reserved in SKB
903 * @pkt: Packet in which the space for header has to be reserved.
904 * @dst_xprt_info: XPRT through which the destination is reachable.
905 *
906 * @return: required header size on success,
 * standard Linux error codes on failure.
908 *
909 * This function is used to calculate the header size that has to be reserved
910 * in a transmit SKB. The header size is calculated based on the XPRT through
911 * which the destination node is reachable.
912 */
913static int calc_tx_header_size(struct rr_packet *pkt,
914 struct msm_ipc_router_xprt_info *dst_xprt_info)
915{
916 int hdr_size = 0;
917 int xprt_version = 0;
918 struct msm_ipc_router_xprt_info *xprt_info = dst_xprt_info;
919
920 if (!pkt) {
921 IPC_RTR_ERR("%s: NULL PKT\n", __func__);
922 return -EINVAL;
923 }
924
925 if (xprt_info)
926 xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
927
928 if (xprt_version == IPC_ROUTER_V1) {
929 pkt->hdr.version = IPC_ROUTER_V1;
930 hdr_size = sizeof(struct rr_header_v1);
931 } else if (xprt_version == IPC_ROUTER_V2) {
932 pkt->hdr.version = IPC_ROUTER_V2;
933 hdr_size = sizeof(struct rr_header_v2) + pkt->opt_hdr.len;
934 } else {
935 IPC_RTR_ERR("%s: Invalid xprt_version %d\n",
936 __func__, xprt_version);
937 hdr_size = -EINVAL;
938 }
939
940 return hdr_size;
941}
942
943/**
944 * calc_rx_header_size() - Calculate the RX header size
945 * @xprt_info: XPRT info of the received message.
946 *
947 * @return: valid header size on success, INT_MAX on failure.
948 */
949static int calc_rx_header_size(struct msm_ipc_router_xprt_info *xprt_info)
950{
951 int xprt_version = 0;
952 int hdr_size = INT_MAX;
953
954 if (xprt_info)
955 xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
956
957 if (xprt_version == IPC_ROUTER_V1)
958 hdr_size = sizeof(struct rr_header_v1);
959 else if (xprt_version == IPC_ROUTER_V2)
960 hdr_size = sizeof(struct rr_header_v2);
961 return hdr_size;
962}
963
964/**
965 * prepend_header_v1() - Prepend IPC Router header of version 1
966 * @pkt: Packet structure which contains the header info to be prepended.
967 * @hdr_size: Size of the header
968 *
969 * @return: 0 on success, standard Linux error codes on failure.
970 */
971static int prepend_header_v1(struct rr_packet *pkt, int hdr_size)
972{
973 struct sk_buff *temp_skb;
974 struct rr_header_v1 *hdr;
975
976 if (!pkt || hdr_size <= 0) {
977 IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
978 return -EINVAL;
979 }
980
981 temp_skb = skb_peek(pkt->pkt_fragment_q);
982 if (!temp_skb || !temp_skb->data) {
983 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
984 return -EINVAL;
985 }
986
987 if (skb_headroom(temp_skb) < hdr_size) {
988 temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
989 if (!temp_skb) {
990 IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
991 __func__, hdr_size);
992 return -ENOMEM;
993 }
994 skb_reserve(temp_skb, hdr_size);
995 }
996
997 hdr = (struct rr_header_v1 *)skb_push(temp_skb, hdr_size);
998 memcpy(hdr, &pkt->hdr, hdr_size);
999 if (temp_skb != skb_peek(pkt->pkt_fragment_q))
1000 skb_queue_head(pkt->pkt_fragment_q, temp_skb);
1001 pkt->length += hdr_size;
1002 return 0;
1003}
1004
1005/**
1006 * prepend_header_v2() - Prepend IPC Router header of version 2
1007 * @pkt: Packet structure which contains the header info to be prepended.
1008 * @hdr_size: Size of the header
1009 *
1010 * @return: 0 on success, standard Linux error codes on failure.
1011 */
1012static int prepend_header_v2(struct rr_packet *pkt, int hdr_size)
1013{
1014 struct sk_buff *temp_skb;
1015 struct rr_header_v2 *hdr;
1016
1017 if (!pkt || hdr_size <= 0) {
1018 IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
1019 return -EINVAL;
1020 }
1021
1022 temp_skb = skb_peek(pkt->pkt_fragment_q);
1023 if (!temp_skb || !temp_skb->data) {
1024 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
1025 return -EINVAL;
1026 }
1027
1028 if (skb_headroom(temp_skb) < hdr_size) {
1029 temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
1030 if (!temp_skb) {
1031 IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
1032 __func__, hdr_size);
1033 return -ENOMEM;
1034 }
1035 skb_reserve(temp_skb, hdr_size);
1036 }
1037
1038 hdr = (struct rr_header_v2 *)skb_push(temp_skb, hdr_size);
1039 hdr->version = (u8)pkt->hdr.version;
1040 hdr->type = (u8)pkt->hdr.type;
1041 hdr->control_flag = (u8)pkt->hdr.control_flag;
1042 hdr->size = (u32)pkt->hdr.size;
1043 hdr->src_node_id = (u16)pkt->hdr.src_node_id;
1044 hdr->src_port_id = (u16)pkt->hdr.src_port_id;
1045 hdr->dst_node_id = (u16)pkt->hdr.dst_node_id;
1046 hdr->dst_port_id = (u16)pkt->hdr.dst_port_id;
1047 if (pkt->opt_hdr.len > 0) {
1048 hdr->opt_len = pkt->opt_hdr.len / IPCR_WORD_SIZE;
		/* The optional header immediately follows the fixed header */
		memcpy(hdr + 1, pkt->opt_hdr.data, pkt->opt_hdr.len);
1050 } else {
1051 hdr->opt_len = 0;
1052 }
1053 if (temp_skb != skb_peek(pkt->pkt_fragment_q))
1054 skb_queue_head(pkt->pkt_fragment_q, temp_skb);
1055 pkt->length += hdr_size;
1056 return 0;
1057}
1058
1059/**
1060 * prepend_header() - Prepend IPC Router header
1061 * @pkt: Packet structure which contains the header info to be prepended.
1062 * @xprt_info: XPRT through which the packet is transmitted.
1063 *
1064 * @return: 0 on success, standard Linux error codes on failure.
1065 *
1066 * This function prepends the header to the packet to be transmitted. The
1067 * IPC Router header version to be prepended depends on the XPRT through
1068 * which the destination is reachable.
1069 */
1070static int prepend_header(struct rr_packet *pkt,
1071 struct msm_ipc_router_xprt_info *xprt_info)
1072{
1073 int hdr_size;
1074 struct sk_buff *temp_skb;
1075
1076 if (!pkt) {
1077 IPC_RTR_ERR("%s: NULL PKT\n", __func__);
1078 return -EINVAL;
1079 }
1080
1081 temp_skb = skb_peek(pkt->pkt_fragment_q);
1082 if (!temp_skb || !temp_skb->data) {
1083 IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
1084 return -EINVAL;
1085 }
1086
1087 hdr_size = calc_tx_header_size(pkt, xprt_info);
1088 if (hdr_size <= 0)
1089 return hdr_size;
1090
1091 if (pkt->hdr.version == IPC_ROUTER_V1)
1092 return prepend_header_v1(pkt, hdr_size);
1093 else if (pkt->hdr.version == IPC_ROUTER_V2)
1094 return prepend_header_v2(pkt, hdr_size);
1095 else
1096 return -EINVAL;
1097}
1098
1099/**
1100 * defragment_pkt() - Defragment and linearize the packet
1101 * @pkt: Packet to be linearized.
1102 *
1103 * @return: 0 on success, standard Linux error codes on failure.
1104 *
1105 * Some packets contain fragments of data over multiple SKBs. If an XPRT
 * does not support fragmented writes, linearize the multiple SKBs into one
1107 * single SKB.
1108 */
1109static int defragment_pkt(struct rr_packet *pkt)
1110{
1111 struct sk_buff *dst_skb, *src_skb, *temp_skb;
1112 int offset = 0, buf_len = 0, copy_len;
1113 void *buf;
1114 int align_size;
1115
1116 if (!pkt || pkt->length <= 0) {
1117 IPC_RTR_ERR("%s: Invalid PKT\n", __func__);
1118 return -EINVAL;
1119 }
1120
1121 if (skb_queue_len(pkt->pkt_fragment_q) == 1)
1122 return 0;
1123
1124 align_size = ALIGN_SIZE(pkt->length);
1125 dst_skb = alloc_skb(pkt->length + align_size, GFP_KERNEL);
1126 if (!dst_skb) {
1127 IPC_RTR_ERR("%s: could not allocate one skb of size %d\n",
1128 __func__, pkt->length);
1129 return -ENOMEM;
1130 }
1131 buf = skb_put(dst_skb, pkt->length);
1132 buf_len = pkt->length;
1133
1134 skb_queue_walk(pkt->pkt_fragment_q, src_skb) {
1135 copy_len = buf_len < src_skb->len ? buf_len : src_skb->len;
1136 memcpy(buf + offset, src_skb->data, copy_len);
1137 offset += copy_len;
1138 buf_len -= copy_len;
1139 }
1140
1141 while (!skb_queue_empty(pkt->pkt_fragment_q)) {
1142 temp_skb = skb_dequeue(pkt->pkt_fragment_q);
1143 kfree_skb(temp_skb);
1144 }
1145 skb_queue_tail(pkt->pkt_fragment_q, dst_skb);
1146 return 0;
1147}
1148
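/**
 * post_pkt_to_port() - Post a packet to a local port's receive queue
 * @port_ptr: Local port to which the packet has to be posted.
 * @pkt: Packet to be posted.
 * @clone: If non-zero, a clone of @pkt is queued so the caller retains
 *         ownership of @pkt.
 *
 * @return: 0 on success, standard Linux error codes on failure.
 *
 * Wakes up any reader blocked on the port and notifies the client through
 * its notify callback or the socket's data_ready callback.
 */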
1149static int post_pkt_to_port(struct msm_ipc_port *port_ptr,
1150 struct rr_packet *pkt, int clone)
1151{
1152 struct rr_packet *temp_pkt = pkt;
1153 void (*notify)(unsigned int event, void *oob_data,
1154 size_t oob_data_len, void *priv);
1155 void (*data_ready)(struct sock *sk) = NULL;
1156 struct sock *sk;
1157 u32 pkt_type;
1158
1159 if (unlikely(!port_ptr || !pkt))
1160 return -EINVAL;
1161
1162 if (clone) {
1163 temp_pkt = clone_pkt(pkt);
1164 if (!temp_pkt) {
1165 IPC_RTR_ERR(
1166 "%s: Error cloning packet for port %08x:%08x\n",
1167 __func__, port_ptr->this_port.node_id,
1168 port_ptr->this_port.port_id);
1169 return -ENOMEM;
1170 }
1171 }
1172
1173 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
	if (pkt->ws_need)
		__pm_stay_awake(port_ptr->port_rx_ws);
	list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q);
1177 wake_up(&port_ptr->port_rx_wait_q);
1178 notify = port_ptr->notify;
1179 pkt_type = temp_pkt->hdr.type;
1180 sk = (struct sock *)port_ptr->endpoint;
1181 if (sk) {
1182 read_lock(&sk->sk_callback_lock);
1183 data_ready = sk->sk_data_ready;
1184 read_unlock(&sk->sk_callback_lock);
1185 }
1186 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
1187 if (notify)
1188 notify(pkt_type, NULL, 0, port_ptr->priv);
1189 else if (sk && data_ready)
1190 data_ready(sk);
1191
1192 return 0;
1193}
1194
1195/**
1196 * ipc_router_peek_pkt_size() - Peek into the packet header to get potential
1197 * packet size
1198 * @data: Starting address of the packet which points to router header.
1199 *
1200 * @returns: potential packet size on success, < 0 on error.
1201 *
1202 * This function is used by the underlying transport abstraction layer to
1203 * peek into the potential packet size of an incoming packet. This information
1204 * is used to perform link layer fragmentation and re-assembly
1205 */
1206int ipc_router_peek_pkt_size(char *data)
1207{
1208 int size;
1209
1210 if (!data) {
1211 pr_err("%s: NULL PKT\n", __func__);
1212 return -EINVAL;
1213 }
1214
1215 if (data[0] == IPC_ROUTER_V1)
1216 size = ((struct rr_header_v1 *)data)->size +
1217 sizeof(struct rr_header_v1);
1218 else if (data[0] == IPC_ROUTER_V2)
1219 size = ((struct rr_header_v2 *)data)->size +
1220 ((struct rr_header_v2 *)data)->opt_len * IPCR_WORD_SIZE
1221 + sizeof(struct rr_header_v2);
1222 else
1223 return -EINVAL;
1224
1225 size += ALIGN_SIZE(size);
1226 return size;
1227}
1228
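/**
 * post_control_ports() - Post a control packet to all registered control ports
 * @pkt: Control packet to be posted. A clone is queued to each port.
 *
 * @return: 0 on success, -EINVAL if @pkt is NULL.
 */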
1229static int post_control_ports(struct rr_packet *pkt)
1230{
1231 struct msm_ipc_port *port_ptr;
1232
1233 if (!pkt)
1234 return -EINVAL;
1235
1236 down_read(&control_ports_lock_lha5);
1237 list_for_each_entry(port_ptr, &control_ports, list)
1238 post_pkt_to_port(port_ptr, pkt, 1);
1239 up_read(&control_ports_lock_lha5);
1240 return 0;
1241}
1242
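/**
 * allocate_port_id() - Allocate an unused local port ID
 *
 * @return: A non-zero port ID on success, 0 if all port IDs are in use.
 *
 * Port IDs are assigned sequentially starting from next_port_id, skipping
 * IDs that are already present in the local port hash table, and wrap
 * around before reaching the IPC_ROUTER_ADDRESS marker value.
 */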
1243static u32 allocate_port_id(void)
1244{
1245 u32 port_id = 0, prev_port_id, key;
1246 struct msm_ipc_port *port_ptr;
1247
1248 mutex_lock(&next_port_id_lock_lhc1);
1249 prev_port_id = next_port_id;
1250 down_read(&local_ports_lock_lhc2);
1251 do {
1252 next_port_id++;
1253 if ((next_port_id & IPC_ROUTER_ADDRESS) == IPC_ROUTER_ADDRESS)
1254 next_port_id = 1;
1255
1256 key = (next_port_id & (LP_HASH_SIZE - 1));
1257 if (list_empty(&local_ports[key])) {
1258 port_id = next_port_id;
1259 break;
1260 }
1261 list_for_each_entry(port_ptr, &local_ports[key], list) {
1262 if (port_ptr->this_port.port_id == next_port_id) {
1263 port_id = next_port_id;
1264 break;
1265 }
1266 }
1267 if (!port_id) {
1268 port_id = next_port_id;
1269 break;
1270 }
1271 port_id = 0;
1272 } while (next_port_id != prev_port_id);
1273 up_read(&local_ports_lock_lhc2);
1274 mutex_unlock(&next_port_id_lock_lhc1);
1275
1276 return port_id;
1277}
1278
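/**
 * msm_ipc_router_add_local_port() - Add a port to the local port hash table
 * @port_ptr: Port to be added. It is hashed by its port ID.
 */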
1279void msm_ipc_router_add_local_port(struct msm_ipc_port *port_ptr)
1280{
1281 u32 key;
1282
1283 if (!port_ptr)
1284 return;
1285
1286 key = (port_ptr->this_port.port_id & (LP_HASH_SIZE - 1));
1287 down_write(&local_ports_lock_lhc2);
1288 list_add_tail(&port_ptr->list, &local_ports[key]);
1289 up_write(&local_ports_lock_lhc2);
1290}
1291
1292/**
1293 * msm_ipc_router_create_raw_port() - Create an IPC Router port
 * @endpoint: User-space socket information to be cached.
 * @notify: Function to notify incoming events on the port. Its arguments
 *          are the event ID, any out-of-band data associated with the
 *          event, the size of that out-of-band data, and the private data
 *          registered at port creation.
 * @priv: Private data to be passed during the event notification.
1301 *
1302 * @return: Valid pointer to port on success, NULL on failure.
1303 *
1304 * This function is used to create an IPC Router port. The port is used for
1305 * communication locally or outside the subsystem.
1306 */
1307struct msm_ipc_port *
1308msm_ipc_router_create_raw_port(void *endpoint,
1309 void (*notify)(unsigned int event,
1310 void *oob_data,
1311 size_t oob_data_len, void *priv),
1312 void *priv)
1313{
1314 struct msm_ipc_port *port_ptr;
1315
1316 port_ptr = kzalloc(sizeof(*port_ptr), GFP_KERNEL);
1317 if (!port_ptr)
1318 return NULL;
1319
1320 port_ptr->this_port.node_id = IPC_ROUTER_NID_LOCAL;
1321 port_ptr->this_port.port_id = allocate_port_id();
1322 if (!port_ptr->this_port.port_id) {
1323 IPC_RTR_ERR("%s: All port ids are in use\n", __func__);
1324 kfree(port_ptr);
1325 return NULL;
1326 }
1327
1328 mutex_init(&port_ptr->port_lock_lhc3);
1329 INIT_LIST_HEAD(&port_ptr->port_rx_q);
1330 mutex_init(&port_ptr->port_rx_q_lock_lhc3);
1331 init_waitqueue_head(&port_ptr->port_rx_wait_q);
1332 snprintf(port_ptr->rx_ws_name, MAX_WS_NAME_SZ,
1333 "ipc%08x_%s",
1334 port_ptr->this_port.port_id,
1335 current->comm);
1336 port_ptr->port_rx_ws = wakeup_source_register(port_ptr->rx_ws_name);
1337 if (!port_ptr->port_rx_ws) {
1338 kfree(port_ptr);
1339 return NULL;
1340 }
1341 init_waitqueue_head(&port_ptr->port_tx_wait_q);
1342 kref_init(&port_ptr->ref);
1343
1344 port_ptr->endpoint = endpoint;
1345 port_ptr->notify = notify;
1346 port_ptr->priv = priv;
1347
1348 msm_ipc_router_add_local_port(port_ptr);
1349 if (endpoint)
1350 sock_hold(ipc_port_sk(endpoint));
1351 return port_ptr;
1352}
1353
1354/**
1355 * ipc_router_get_port_ref() - Get a reference to the local port
 * @port_id: Port ID of the local port for which a reference is required.
1357 *
1358 * @return: If port is found, a reference to the port is returned.
1359 * Else NULL is returned.
1360 */
1361static struct msm_ipc_port *ipc_router_get_port_ref(u32 port_id)
1362{
1363 int key = (port_id & (LP_HASH_SIZE - 1));
1364 struct msm_ipc_port *port_ptr;
1365
1366 down_read(&local_ports_lock_lhc2);
1367 list_for_each_entry(port_ptr, &local_ports[key], list) {
1368 if (port_ptr->this_port.port_id == port_id) {
1369 kref_get(&port_ptr->ref);
1370 up_read(&local_ports_lock_lhc2);
1371 return port_ptr;
1372 }
1373 }
1374 up_read(&local_ports_lock_lhc2);
1375 return NULL;
1376}
1377
1378/**
1379 * ipc_router_release_port() - Cleanup and release the port
1380 * @ref: Reference to the port.
1381 *
1382 * This function is called when all references to the port are released.
1383 */
1384void ipc_router_release_port(struct kref *ref)
1385{
1386 struct rr_packet *pkt, *temp_pkt;
1387 struct msm_ipc_port *port_ptr =
1388 container_of(ref, struct msm_ipc_port, ref);
1389
1390 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
1391 list_for_each_entry_safe(pkt, temp_pkt, &port_ptr->port_rx_q, list) {
1392 list_del(&pkt->list);
1393 release_pkt(pkt);
1394 }
1395 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
1396 wakeup_source_unregister(port_ptr->port_rx_ws);
1397 if (port_ptr->endpoint)
1398 sock_put(ipc_port_sk(port_ptr->endpoint));
1399 kfree(port_ptr);
1400}
1401
1402/**
1403 * ipc_router_get_rport_ref()- Get reference to the remote port
1404 * @node_id: Node ID corresponding to the remote port.
1405 * @port_id: Port ID corresponding to the remote port.
1406 *
1407 * @return: a reference to the remote port on success, NULL on failure.
1408 */
1409static struct msm_ipc_router_remote_port *ipc_router_get_rport_ref(
1410 u32 node_id, u32 port_id)
1411{
1412 struct msm_ipc_router_remote_port *rport_ptr;
1413 struct msm_ipc_routing_table_entry *rt_entry;
1414 int key = (port_id & (RP_HASH_SIZE - 1));
1415
1416 rt_entry = ipc_router_get_rtentry_ref(node_id);
1417 if (!rt_entry) {
1418 IPC_RTR_ERR("%s: Node is not up\n", __func__);
1419 return NULL;
1420 }
1421
1422 down_read(&rt_entry->lock_lha4);
1423 list_for_each_entry(rport_ptr,
1424 &rt_entry->remote_port_list[key], list) {
1425 if (rport_ptr->port_id == port_id) {
1426 kref_get(&rport_ptr->ref);
1427 goto out_lookup_rmt_port1;
1428 }
1429 }
1430 rport_ptr = NULL;
1431out_lookup_rmt_port1:
1432 up_read(&rt_entry->lock_lha4);
1433 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
1434 return rport_ptr;
1435}
1436
1437/**
1438 * ipc_router_create_rport() - Create a remote port
1439 * @node_id: Node ID corresponding to the remote port.
1440 * @port_id: Port ID corresponding to the remote port.
1441 * @xprt_info: XPRT through which the concerned node is reachable.
1442 *
1443 * @return: a reference to the remote port on success, NULL on failure.
1444 */
1445static struct msm_ipc_router_remote_port *ipc_router_create_rport(
1446 u32 node_id, u32 port_id,
1447 struct msm_ipc_router_xprt_info *xprt_info)
1448{
1449 struct msm_ipc_router_remote_port *rport_ptr;
1450 struct msm_ipc_routing_table_entry *rt_entry;
1451 int key = (port_id & (RP_HASH_SIZE - 1));
1452
1453 rt_entry = create_routing_table_entry(node_id, xprt_info);
1454 if (!rt_entry) {
1455 IPC_RTR_ERR("%s: Node cannot be created\n", __func__);
1456 return NULL;
1457 }
1458
1459 down_write(&rt_entry->lock_lha4);
1460 list_for_each_entry(rport_ptr,
1461 &rt_entry->remote_port_list[key], list) {
1462 if (rport_ptr->port_id == port_id)
1463 goto out_create_rmt_port1;
1464 }
1465
1466 rport_ptr = kmalloc(sizeof(*rport_ptr), GFP_KERNEL);
1467 if (!rport_ptr) {
1468 IPC_RTR_ERR("%s: Remote port alloc failed\n", __func__);
1469 goto out_create_rmt_port2;
1470 }
1471 rport_ptr->port_id = port_id;
1472 rport_ptr->node_id = node_id;
1473 rport_ptr->status = VALID;
1474 rport_ptr->sec_rule = NULL;
1475 rport_ptr->server = NULL;
1476 rport_ptr->tx_quota_cnt = 0;
1477 kref_init(&rport_ptr->ref);
1478 mutex_init(&rport_ptr->rport_lock_lhb2);
1479 INIT_LIST_HEAD(&rport_ptr->resume_tx_port_list);
1480 INIT_LIST_HEAD(&rport_ptr->conn_info_list);
1481 list_add_tail(&rport_ptr->list,
1482 &rt_entry->remote_port_list[key]);
1483out_create_rmt_port1:
1484 kref_get(&rport_ptr->ref);
1485out_create_rmt_port2:
1486 up_write(&rt_entry->lock_lha4);
1487 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
1488 return rport_ptr;
1489}
1490
1491/**
1492 * msm_ipc_router_free_resume_tx_port() - Free the resume_tx ports
1493 * @rport_ptr: Pointer to the remote port.
1494 *
1495 * This function deletes all the resume_tx ports associated with a remote port
1496 * and frees the memory allocated to each resume_tx port.
1497 *
1498 * Must be called with rport_ptr->rport_lock_lhb2 locked.
1499 */
1500static void msm_ipc_router_free_resume_tx_port(
1501 struct msm_ipc_router_remote_port *rport_ptr)
1502{
1503 struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
1504
1505 list_for_each_entry_safe(rtx_port, tmp_rtx_port,
1506 &rport_ptr->resume_tx_port_list, list) {
1507 list_del(&rtx_port->list);
1508 kfree(rtx_port);
1509 }
1510}
1511
1512/**
1513 * msm_ipc_router_lookup_resume_tx_port() - Lookup resume_tx port list
1514 * @rport_ptr: Remote port whose resume_tx port list needs to be looked.
1515 * @port_id: Port ID which needs to be looked from the list.
1516 *
1517 * return 1 if the port_id is found in the list, else 0.
1518 *
1519 * This function is used to lookup the existence of a local port in
1520 * remote port's resume_tx list. This function is used to ensure that
1521 * the same port is not added to the remote_port's resume_tx list repeatedly.
1522 *
1523 * Must be called with rport_ptr->rport_lock_lhb2 locked.
1524 */
1525static int msm_ipc_router_lookup_resume_tx_port(
1526 struct msm_ipc_router_remote_port *rport_ptr, u32 port_id)
1527{
1528 struct msm_ipc_resume_tx_port *rtx_port;
1529
1530 list_for_each_entry(rtx_port, &rport_ptr->resume_tx_port_list, list) {
1531 if (port_id == rtx_port->port_id)
1532 return 1;
1533 }
1534 return 0;
1535}
1536
1537/**
1538 * ipc_router_dummy_write_space() - Dummy write space available callback
1539 * @sk: Socket pointer for which the callback is called.
1540 */
1541void ipc_router_dummy_write_space(struct sock *sk)
1542{
1543}
1544
1545/**
1546 * post_resume_tx() - Post the resume_tx event
1547 * @rport_ptr: Pointer to the remote port
1548 * @pkt : The data packet that is received on a resume_tx event
1549 * @msg: Out of band data to be passed to kernel drivers
1550 *
1551 * This function informs about the reception of the resume_tx message from a
1552 * remote port pointed by rport_ptr to all the local ports that are in the
1553 * resume_tx_ports_list of this remote port. On posting the information, this
1554 * function sequentially deletes each entry in the resume_tx_port_list of the
1555 * remote port.
1556 *
1557 * Must be called with rport_ptr->rport_lock_lhb2 locked.
1558 */
1559static void post_resume_tx(struct msm_ipc_router_remote_port *rport_ptr,
1560 struct rr_packet *pkt, union rr_control_msg *msg)
1561{
1562 struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
1563 struct msm_ipc_port *local_port;
1564 struct sock *sk;
1565 void (*write_space)(struct sock *sk) = NULL;
1566
1567 list_for_each_entry_safe(rtx_port, tmp_rtx_port,
1568 &rport_ptr->resume_tx_port_list, list) {
1569 local_port = ipc_router_get_port_ref(rtx_port->port_id);
1570 if (local_port && local_port->notify) {
1571 wake_up(&local_port->port_tx_wait_q);
1572 local_port->notify(IPC_ROUTER_CTRL_CMD_RESUME_TX, msg,
1573 sizeof(*msg), local_port->priv);
1574 } else if (local_port) {
1575 wake_up(&local_port->port_tx_wait_q);
1576 sk = ipc_port_sk(local_port->endpoint);
1577 if (sk) {
1578 read_lock(&sk->sk_callback_lock);
1579 write_space = sk->sk_write_space;
1580 read_unlock(&sk->sk_callback_lock);
1581 }
1582 if (write_space &&
1583 write_space != ipc_router_dummy_write_space)
1584 write_space(sk);
1585 else
1586 post_pkt_to_port(local_port, pkt, 1);
1587 } else {
1588 IPC_RTR_ERR("%s: Local Port %d not Found",
1589 __func__, rtx_port->port_id);
1590 }
1591 if (local_port)
1592 kref_put(&local_port->ref, ipc_router_release_port);
1593 list_del(&rtx_port->list);
1594 kfree(rtx_port);
1595 }
1596}
1597
1598/**
1599 * signal_rport_exit() - Signal the local ports of remote port exit
1600 * @rport_ptr: Remote port that is exiting.
1601 *
1602 * This function is used to signal the local ports that are waiting
1603 * to resume transmission to a remote port that is exiting.
1604 */
1605static void signal_rport_exit(struct msm_ipc_router_remote_port *rport_ptr)
1606{
1607 struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
1608 struct msm_ipc_port *local_port;
1609
1610 mutex_lock(&rport_ptr->rport_lock_lhb2);
1611 rport_ptr->status = RESET;
1612 list_for_each_entry_safe(rtx_port, tmp_rtx_port,
1613 &rport_ptr->resume_tx_port_list, list) {
1614 local_port = ipc_router_get_port_ref(rtx_port->port_id);
1615 if (local_port) {
1616 wake_up(&local_port->port_tx_wait_q);
1617 kref_put(&local_port->ref, ipc_router_release_port);
1618 }
1619 list_del(&rtx_port->list);
1620 kfree(rtx_port);
1621 }
1622 mutex_unlock(&rport_ptr->rport_lock_lhb2);
1623}
1624
1625/**
1626 * ipc_router_release_rport() - Cleanup and release the remote port
1627 * @ref: Reference to the remote port.
1628 *
1629 * This function is called when all references to the remote port are released.
1630 */
1631static void ipc_router_release_rport(struct kref *ref)
1632{
1633 struct msm_ipc_router_remote_port *rport_ptr =
1634 container_of(ref, struct msm_ipc_router_remote_port, ref);
1635
1636 mutex_lock(&rport_ptr->rport_lock_lhb2);
1637 msm_ipc_router_free_resume_tx_port(rport_ptr);
1638 mutex_unlock(&rport_ptr->rport_lock_lhb2);
1639 kfree(rport_ptr);
1640}
1641
1642/**
1643 * ipc_router_destroy_rport() - Destroy the remote port
1644 * @rport_ptr: Pointer to the remote port to be destroyed.
1645 */
1646static void ipc_router_destroy_rport(
1647 struct msm_ipc_router_remote_port *rport_ptr)
1648{
1649 u32 node_id;
1650 struct msm_ipc_routing_table_entry *rt_entry;
1651
1652 if (!rport_ptr)
1653 return;
1654
1655 node_id = rport_ptr->node_id;
1656 rt_entry = ipc_router_get_rtentry_ref(node_id);
1657 if (!rt_entry) {
1658 IPC_RTR_ERR("%s: Node %d is not up\n", __func__, node_id);
1659 return;
1660 }
1661 down_write(&rt_entry->lock_lha4);
1662 list_del(&rport_ptr->list);
1663 up_write(&rt_entry->lock_lha4);
1664 signal_rport_exit(rport_ptr);
1665 kref_put(&rport_ptr->ref, ipc_router_release_rport);
1666 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
1667}
1668
1669/**
1670 * msm_ipc_router_lookup_server() - Lookup server information
1671 * @service: Service ID of the server info to be looked up.
1672 * @instance: Instance ID of the server info to be looked up.
1673 * @node_id: Node/Processor ID in which the server is hosted.
1674 * @port_id: Port ID within the node in which the server is hosted.
1675 *
1676 * @return: If found Pointer to server structure, else NULL.
1677 *
1678 * Note1: Lock the server_list_lock_lha2 before accessing this function.
1679 * Note2: If the <node_id:port_id> are <0:0>, then the lookup is restricted
1680 * to <service:instance>. Used only when a client wants to send a
1681 * message to any QMI server.
1682 */
1683static struct msm_ipc_server *msm_ipc_router_lookup_server(
1684 u32 service,
1685 u32 instance,
1686 u32 node_id,
1687 u32 port_id)
1688{
1689 struct msm_ipc_server *server;
1690 struct msm_ipc_server_port *server_port;
1691 int key = (service & (SRV_HASH_SIZE - 1));
1692
1693 list_for_each_entry(server, &server_list[key], list) {
1694 if ((server->name.service != service) ||
1695 (server->name.instance != instance))
1696 continue;
1697 if ((node_id == 0) && (port_id == 0))
1698 return server;
1699 list_for_each_entry(server_port, &server->server_port_list,
1700 list) {
1701 if ((server_port->server_addr.node_id == node_id) &&
1702 (server_port->server_addr.port_id == port_id))
1703 return server;
1704 }
1705 }
1706 return NULL;
1707}
1708
1709/**
1710 * ipc_router_get_server_ref() - Get reference to the server
1711 * @svc: Service ID for which the reference is required.
1712 * @ins: Instance ID for which the reference is required.
1713 * @node_id: Node/Processor ID in which the server is hosted.
1714 * @port_id: Port ID within the node in which the server is hosted.
1715 *
1716 * @return: If found return reference to server, else NULL.
1717 */
1718static struct msm_ipc_server *ipc_router_get_server_ref(
1719 u32 svc, u32 ins, u32 node_id, u32 port_id)
1720{
1721 struct msm_ipc_server *server;
1722
1723 down_read(&server_list_lock_lha2);
1724 server = msm_ipc_router_lookup_server(svc, ins, node_id, port_id);
1725 if (server)
1726 kref_get(&server->ref);
1727 up_read(&server_list_lock_lha2);
1728 return server;
1729}
1730
1731/**
1732 * ipc_router_release_server() - Cleanup and release the server
1733 * @ref: Reference to the server.
1734 *
1735 * This function is called when all references to the server are released.
1736 */
1737static void ipc_router_release_server(struct kref *ref)
1738{
1739 struct msm_ipc_server *server =
1740 container_of(ref, struct msm_ipc_server, ref);
1741
1742 kfree(server);
1743}
1744
1745/**
1746 * msm_ipc_router_create_server() - Add server info to hash table
1747 * @service: Service ID of the server info to be created.
1748 * @instance: Instance ID of the server info to be created.
1749 * @node_id: Node/Processor ID in which the server is hosted.
1750 * @port_id: Port ID within the node in which the server is hosted.
1751 * @xprt_info: XPRT through which the node hosting the server is reached.
1752 *
1753 * @return: Pointer to server structure on success, else NULL.
1754 *
1755 * This function adds the server info to the hash table. If the same
1756 * server(i.e. <service_id:instance_id>) is hosted in different nodes,
1757 * they are maintained as list of "server_port" under "server" structure.
1758 */
1759static struct msm_ipc_server *msm_ipc_router_create_server(
1760 u32 service,
1761 u32 instance,
1762 u32 node_id,
1763 u32 port_id,
1764 struct msm_ipc_router_xprt_info *xprt_info)
1765{
1766 struct msm_ipc_server *server = NULL;
1767 struct msm_ipc_server_port *server_port;
1768 struct platform_device *pdev;
1769 int key = (service & (SRV_HASH_SIZE - 1));
1770
1771 down_write(&server_list_lock_lha2);
1772 server = msm_ipc_router_lookup_server(service, instance, 0, 0);
1773 if (server) {
1774 list_for_each_entry(server_port, &server->server_port_list,
1775 list) {
1776 if ((server_port->server_addr.node_id == node_id) &&
1777 (server_port->server_addr.port_id == port_id))
1778 goto return_server;
1779 }
1780 goto create_srv_port;
1781 }
1782
1783 server = kzalloc(sizeof(*server), GFP_KERNEL);
1784 if (!server) {
1785 up_write(&server_list_lock_lha2);
1786 IPC_RTR_ERR("%s: Server allocation failed\n", __func__);
1787 return NULL;
1788 }
1789 server->name.service = service;
1790 server->name.instance = instance;
1791 server->synced_sec_rule = 0;
1792 INIT_LIST_HEAD(&server->server_port_list);
1793 kref_init(&server->ref);
1794 list_add_tail(&server->list, &server_list[key]);
1795 scnprintf(server->pdev_name, sizeof(server->pdev_name),
1796 "SVC%08x:%08x", service, instance);
1797 server->next_pdev_id = 1;
1798
1799create_srv_port:
1800 server_port = kzalloc(sizeof(*server_port), GFP_KERNEL);
1801 pdev = platform_device_alloc(server->pdev_name, server->next_pdev_id);
1802 if (!server_port || !pdev) {
1803 kfree(server_port);
1804 if (pdev)
1805 platform_device_put(pdev);
1806 if (list_empty(&server->server_port_list)) {
1807 list_del(&server->list);
1808 kfree(server);
1809 }
1810 up_write(&server_list_lock_lha2);
1811 IPC_RTR_ERR("%s: Server Port allocation failed\n", __func__);
1812 return NULL;
1813 }
1814 server_port->pdev = pdev;
1815 server_port->server_addr.node_id = node_id;
1816 server_port->server_addr.port_id = port_id;
1817 server_port->xprt_info = xprt_info;
1818 list_add_tail(&server_port->list, &server->server_port_list);
1819 server->next_pdev_id++;
1820 platform_device_add(server_port->pdev);
1821
1822return_server:
1823 /* Add a reference so that the caller can put it back */
1824 kref_get(&server->ref);
1825 up_write(&server_list_lock_lha2);
1826 return server;
1827}
1828
1829/**
1830 * ipc_router_destroy_server_nolock() - Remove server info from hash table
1831 * @server: Server info to be removed.
1832 * @node_id: Node/Processor ID in which the server is hosted.
1833 * @port_id: Port ID within the node in which the server is hosted.
1834 *
1835 * This function removes the server_port identified using <node_id:port_id>
1836 * from the server structure. If the server_port list under server structure
1837 * is empty after removal, then remove the server structure from the server
1838 * hash table. This function must be called with server_list_lock_lha2 locked.
1839 */
1840static void ipc_router_destroy_server_nolock(struct msm_ipc_server *server,
1841 u32 node_id, u32 port_id)
1842{
1843 struct msm_ipc_server_port *server_port;
1844 bool server_port_found = false;
1845
1846 if (!server)
1847 return;
1848
1849 list_for_each_entry(server_port, &server->server_port_list, list) {
1850 if ((server_port->server_addr.node_id == node_id) &&
1851 (server_port->server_addr.port_id == port_id)) {
1852 server_port_found = true;
1853 break;
1854 }
1855 }
1856 if (server_port_found && server_port) {
1857 platform_device_unregister(server_port->pdev);
1858 list_del(&server_port->list);
1859 kfree(server_port);
1860 }
1861 if (list_empty(&server->server_port_list)) {
1862 list_del(&server->list);
1863 kref_put(&server->ref, ipc_router_release_server);
1864 }
1865}
1866
1867/**
1868 * ipc_router_destroy_server() - Remove server info from hash table
1869 * @server: Server info to be removed.
1870 * @node_id: Node/Processor ID in which the server is hosted.
1871 * @port_id: Port ID within the node in which the server is hosted.
1872 *
1873 * This function removes the server_port identified using <node_id:port_id>
1874 * from the server structure. If the server_port list under server structure
1875 * is empty after removal, then remove the server structure from the server
1876 * hash table.
1877 */
1878static void ipc_router_destroy_server(struct msm_ipc_server *server,
1879 u32 node_id, u32 port_id)
1880{
1881 down_write(&server_list_lock_lha2);
1882 ipc_router_destroy_server_nolock(server, node_id, port_id);
1883 up_write(&server_list_lock_lha2);
1884}
1885
1886static int ipc_router_send_ctl_msg(
1887 struct msm_ipc_router_xprt_info *xprt_info,
1888 union rr_control_msg *msg,
1889 u32 dst_node_id)
1890{
1891 struct rr_packet *pkt;
1892 struct sk_buff *ipc_rtr_pkt;
1893 struct rr_header_v1 *hdr;
1894 int pkt_size;
1895 void *data;
1896 int ret = -EINVAL;
1897
1898 pkt = create_pkt(NULL);
1899 if (!pkt) {
1900 IPC_RTR_ERR("%s: pkt alloc failed\n", __func__);
1901 return -ENOMEM;
1902 }
1903
1904 pkt_size = IPC_ROUTER_HDR_SIZE + sizeof(*msg);
1905 ipc_rtr_pkt = alloc_skb(pkt_size, GFP_KERNEL);
1906 if (!ipc_rtr_pkt) {
1907 IPC_RTR_ERR("%s: ipc_rtr_pkt alloc failed\n", __func__);
1908 release_pkt(pkt);
1909 return -ENOMEM;
1910 }
1911
1912 skb_reserve(ipc_rtr_pkt, IPC_ROUTER_HDR_SIZE);
1913 data = skb_put(ipc_rtr_pkt, sizeof(*msg));
1914 memcpy(data, msg, sizeof(*msg));
1915 skb_queue_tail(pkt->pkt_fragment_q, ipc_rtr_pkt);
1916 pkt->length = sizeof(*msg);
1917
1918 hdr = &pkt->hdr;
1919 hdr->version = IPC_ROUTER_V1;
1920 hdr->type = msg->cmd;
1921 hdr->src_node_id = IPC_ROUTER_NID_LOCAL;
1922 hdr->src_port_id = IPC_ROUTER_ADDRESS;
1923 hdr->control_flag = 0;
1924 hdr->size = sizeof(*msg);
1925 if (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX ||
1926 (!xprt_info && dst_node_id == IPC_ROUTER_NID_LOCAL))
1927 hdr->dst_node_id = dst_node_id;
1928 else if (xprt_info)
1929 hdr->dst_node_id = xprt_info->remote_node_id;
1930 hdr->dst_port_id = IPC_ROUTER_ADDRESS;
1931
1932 if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
1933 msg->cmd != IPC_ROUTER_CTRL_CMD_RESUME_TX) {
1934 ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
1935 hdr, NULL, NULL);
1936 ret = post_control_ports(pkt);
1937 } else if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
1938 msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX) {
1939 ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
1940 hdr, NULL, NULL);
1941 ret = process_resume_tx_msg(msg, pkt);
1942 } else if (xprt_info && (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO ||
1943 xprt_info->initialized)) {
1944 mutex_lock(&xprt_info->tx_lock_lhb2);
1945 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_TX,
1946 msg, hdr, NULL, NULL);
1947 ret = prepend_header(pkt, xprt_info);
1948 if (ret < 0) {
1949 mutex_unlock(&xprt_info->tx_lock_lhb2);
1950 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
1951 release_pkt(pkt);
1952 return ret;
1953 }
1954
1955 ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
1956 mutex_unlock(&xprt_info->tx_lock_lhb2);
1957 }
1958
1959 release_pkt(pkt);
1960 return ret;
1961}
1962
1963static int
1964msm_ipc_router_send_server_list(u32 node_id,
1965 struct msm_ipc_router_xprt_info *xprt_info)
1966{
1967 union rr_control_msg ctl;
1968 struct msm_ipc_server *server;
1969 struct msm_ipc_server_port *server_port;
1970 int i;
1971
1972 if (!xprt_info || !xprt_info->initialized) {
1973 IPC_RTR_ERR("%s: Xprt info not initialized\n", __func__);
1974 return -EINVAL;
1975 }
1976
1977 memset(&ctl, 0, sizeof(ctl));
1978 ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
1979
1980 for (i = 0; i < SRV_HASH_SIZE; i++) {
1981 list_for_each_entry(server, &server_list[i], list) {
1982 ctl.srv.service = server->name.service;
1983 ctl.srv.instance = server->name.instance;
1984 list_for_each_entry(server_port,
1985 &server->server_port_list, list) {
1986 if (server_port->server_addr.node_id !=
1987 node_id)
1988 continue;
1989
1990 ctl.srv.node_id =
1991 server_port->server_addr.node_id;
1992 ctl.srv.port_id =
1993 server_port->server_addr.port_id;
1994 ipc_router_send_ctl_msg
1995 (xprt_info, &ctl,
1996 IPC_ROUTER_DUMMY_DEST_NODE);
1997 }
1998 }
1999 }
2000
2001 return 0;
2002}
2003
2004static int broadcast_ctl_msg_locally(union rr_control_msg *msg)
2005{
2006 return ipc_router_send_ctl_msg(NULL, msg, IPC_ROUTER_NID_LOCAL);
2007}
2008
2009static int broadcast_ctl_msg(union rr_control_msg *ctl)
2010{
2011 struct msm_ipc_router_xprt_info *xprt_info;
2012
2013 down_read(&xprt_info_list_lock_lha5);
2014 list_for_each_entry(xprt_info, &xprt_info_list, list) {
2015 ipc_router_send_ctl_msg(xprt_info, ctl,
2016 IPC_ROUTER_DUMMY_DEST_NODE);
2017 }
2018 up_read(&xprt_info_list_lock_lha5);
2019 broadcast_ctl_msg_locally(ctl);
2020
2021 return 0;
2022}
2023
2024static int relay_ctl_msg(struct msm_ipc_router_xprt_info *xprt_info,
2025 union rr_control_msg *ctl)
2026{
2027 struct msm_ipc_router_xprt_info *fwd_xprt_info;
2028
2029 if (!xprt_info || !ctl)
2030 return -EINVAL;
2031
2032 down_read(&xprt_info_list_lock_lha5);
2033 list_for_each_entry(fwd_xprt_info, &xprt_info_list, list) {
2034 if (xprt_info->xprt->link_id != fwd_xprt_info->xprt->link_id)
2035 ipc_router_send_ctl_msg(fwd_xprt_info, ctl,
2036 IPC_ROUTER_DUMMY_DEST_NODE);
2037 }
2038 up_read(&xprt_info_list_lock_lha5);
2039
2040 return 0;
2041}
2042
2043static int forward_msg(struct msm_ipc_router_xprt_info *xprt_info,
2044 struct rr_packet *pkt)
2045{
2046 struct rr_header_v1 *hdr;
2047 struct msm_ipc_router_xprt_info *fwd_xprt_info;
2048 struct msm_ipc_routing_table_entry *rt_entry;
2049 int ret = 0;
2050 int fwd_xprt_option;
2051
2052 if (!xprt_info || !pkt)
2053 return -EINVAL;
2054
2055 hdr = &pkt->hdr;
2056 rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
2057 if (!(rt_entry) || !(rt_entry->xprt_info)) {
2058 IPC_RTR_ERR("%s: Routing table not initialized\n", __func__);
2059 ret = -ENODEV;
2060 goto fm_error1;
2061 }
2062
2063 down_read(&rt_entry->lock_lha4);
2064 fwd_xprt_info = rt_entry->xprt_info;
2065 ret = ipc_router_get_xprt_info_ref(fwd_xprt_info);
2066 if (ret < 0) {
2067 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
2068 goto fm_error_xprt;
2069 }
2070 ret = prepend_header(pkt, fwd_xprt_info);
2071 if (ret < 0) {
2072 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
2073 goto fm_error2;
2074 }
2075 fwd_xprt_option = fwd_xprt_info->xprt->get_option(fwd_xprt_info->xprt);
2076 if (!(fwd_xprt_option & FRAG_PKT_WRITE_ENABLE)) {
2077 ret = defragment_pkt(pkt);
2078 if (ret < 0)
2079 goto fm_error2;
2080 }
2081
2082 mutex_lock(&fwd_xprt_info->tx_lock_lhb2);
2083 if (xprt_info->remote_node_id == fwd_xprt_info->remote_node_id) {
2084 IPC_RTR_ERR("%s: Discarding Command to route back\n", __func__);
2085 ret = -EINVAL;
2086 goto fm_error3;
2087 }
2088
2089 if (xprt_info->xprt->link_id == fwd_xprt_info->xprt->link_id) {
2090 IPC_RTR_ERR("%s: DST in the same cluster\n", __func__);
2091 ret = 0;
2092 goto fm_error3;
2093 }
2094 fwd_xprt_info->xprt->write(pkt, pkt->length, fwd_xprt_info->xprt);
2095 IPC_RTR_INFO(fwd_xprt_info->log_ctx,
2096 "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
2097 "FWD", "TX", hdr->size, hdr->type, hdr->control_flag,
2098 hdr->src_node_id, hdr->src_port_id,
2099 hdr->dst_node_id, hdr->dst_port_id);
2100
2101fm_error3:
2102 mutex_unlock(&fwd_xprt_info->tx_lock_lhb2);
2103fm_error2:
2104 ipc_router_put_xprt_info_ref(fwd_xprt_info);
2105fm_error_xprt:
2106 up_read(&rt_entry->lock_lha4);
2107fm_error1:
2108 if (rt_entry)
2109 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2110 return ret;
2111}
2112
2113static int msm_ipc_router_send_remove_client(struct comm_mode_info *mode_info,
2114 u32 node_id, u32 port_id)
2115{
2116 union rr_control_msg msg;
2117 struct msm_ipc_router_xprt_info *tmp_xprt_info;
2118 int mode;
2119 void *xprt_info;
2120 int rc = 0;
2121
2122 if (!mode_info) {
2123 IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
2124 return -EINVAL;
2125 }
2126 mode = mode_info->mode;
2127 xprt_info = mode_info->xprt_info;
2128
2129 memset(&msg, 0, sizeof(msg));
2130 msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
2131 msg.cli.node_id = node_id;
2132 msg.cli.port_id = port_id;
2133
2134 if ((mode == SINGLE_LINK_MODE) && xprt_info) {
2135 down_read(&xprt_info_list_lock_lha5);
2136 list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
2137 if (tmp_xprt_info != xprt_info)
2138 continue;
2139 ipc_router_send_ctl_msg(tmp_xprt_info, &msg,
2140 IPC_ROUTER_DUMMY_DEST_NODE);
2141 break;
2142 }
2143 up_read(&xprt_info_list_lock_lha5);
2144 } else if ((mode == SINGLE_LINK_MODE) && !xprt_info) {
2145 broadcast_ctl_msg_locally(&msg);
2146 } else if (mode == MULTI_LINK_MODE) {
2147 broadcast_ctl_msg(&msg);
2148 } else if (mode != NULL_MODE) {
2149 IPC_RTR_ERR(
2150 "%s: Invalid mode(%d) + xprt_inf(%p) for %08x:%08x\n",
2151 __func__, mode, xprt_info, node_id, port_id);
2152 rc = -EINVAL;
2153 }
2154 return rc;
2155}
2156
2157static void update_comm_mode_info(struct comm_mode_info *mode_info,
2158 struct msm_ipc_router_xprt_info *xprt_info)
2159{
2160 if (!mode_info) {
2161 IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
2162 return;
2163 }
2164
2165 if (mode_info->mode == NULL_MODE) {
2166 mode_info->xprt_info = xprt_info;
2167 mode_info->mode = SINGLE_LINK_MODE;
2168 } else if (mode_info->mode == SINGLE_LINK_MODE &&
2169 mode_info->xprt_info != xprt_info) {
2170 mode_info->mode = MULTI_LINK_MODE;
2171 }
2172}
2173
2174/**
2175 * cleanup_rmt_server() - Clean up the server hosted in the remote port
2176 * @xprt_info: XPRT through which this cleanup event is handled.
2177 * @rport_ptr: Remote port that is being cleaned up.
2178 * @server: Server that is hosted in the remote port.
2179 */
2180static void cleanup_rmt_server(struct msm_ipc_router_xprt_info *xprt_info,
2181 struct msm_ipc_router_remote_port *rport_ptr,
2182 struct msm_ipc_server *server)
2183{
2184 union rr_control_msg ctl;
2185
2186 memset(&ctl, 0, sizeof(ctl));
2187 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
2188 ctl.srv.service = server->name.service;
2189 ctl.srv.instance = server->name.instance;
2190 ctl.srv.node_id = rport_ptr->node_id;
2191 ctl.srv.port_id = rport_ptr->port_id;
2192 if (xprt_info)
2193 relay_ctl_msg(xprt_info, &ctl);
2194 broadcast_ctl_msg_locally(&ctl);
2195 ipc_router_destroy_server_nolock(server, rport_ptr->node_id,
2196 rport_ptr->port_id);
2197}
2198
2199static void cleanup_rmt_ports(struct msm_ipc_router_xprt_info *xprt_info,
2200 struct msm_ipc_routing_table_entry *rt_entry)
2201{
2202 struct msm_ipc_router_remote_port *rport_ptr, *tmp_rport_ptr;
2203 struct msm_ipc_server *server;
2204 union rr_control_msg ctl;
2205 int j;
2206
2207 memset(&ctl, 0, sizeof(ctl));
2208 for (j = 0; j < RP_HASH_SIZE; j++) {
2209 list_for_each_entry_safe(rport_ptr, tmp_rport_ptr,
2210 &rt_entry->remote_port_list[j], list) {
2211 list_del(&rport_ptr->list);
2212 mutex_lock(&rport_ptr->rport_lock_lhb2);
2213 server = rport_ptr->server;
2214 rport_ptr->server = NULL;
2215 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2216 ipc_router_reset_conn(rport_ptr);
2217 if (server) {
2218 cleanup_rmt_server(xprt_info, rport_ptr,
2219 server);
2220 server = NULL;
2221 }
2222
2223 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
2224 ctl.cli.node_id = rport_ptr->node_id;
2225 ctl.cli.port_id = rport_ptr->port_id;
2226 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2227
2228 relay_ctl_msg(xprt_info, &ctl);
2229 broadcast_ctl_msg_locally(&ctl);
2230 }
2231 }
2232}
2233
2234static void msm_ipc_cleanup_routing_table(
2235 struct msm_ipc_router_xprt_info *xprt_info)
2236{
2237 int i;
2238 struct msm_ipc_routing_table_entry *rt_entry, *tmp_rt_entry;
2239
2240 if (!xprt_info) {
2241 IPC_RTR_ERR("%s: Invalid xprt_info\n", __func__);
2242 return;
2243 }
2244
2245 down_write(&server_list_lock_lha2);
2246 down_write(&routing_table_lock_lha3);
2247 for (i = 0; i < RT_HASH_SIZE; i++) {
2248 list_for_each_entry_safe(rt_entry, tmp_rt_entry,
2249 &routing_table[i], list) {
2250 down_write(&rt_entry->lock_lha4);
2251 if (rt_entry->xprt_info != xprt_info) {
2252 up_write(&rt_entry->lock_lha4);
2253 continue;
2254 }
2255 cleanup_rmt_ports(xprt_info, rt_entry);
2256 rt_entry->xprt_info = NULL;
2257 up_write(&rt_entry->lock_lha4);
2258 list_del(&rt_entry->list);
2259 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2260 }
2261 }
2262 up_write(&routing_table_lock_lha3);
2263 up_write(&server_list_lock_lha2);
2264}
2265
2266/**
2267 * sync_sec_rule() - Synchronize the security rule into the server structure
2268 * @server: Server structure where the rule has to be synchronized.
2269 * @rule: Security rule to be synchronized.
2270 *
2271 * This function is used to update the server structure with the security
2272 * rule configured for the <service:instance> corresponding to that server.
2273 */
2274static void sync_sec_rule(struct msm_ipc_server *server, void *rule)
2275{
2276 struct msm_ipc_server_port *server_port;
2277 struct msm_ipc_router_remote_port *rport_ptr = NULL;
2278
2279 list_for_each_entry(server_port, &server->server_port_list, list) {
2280 rport_ptr = ipc_router_get_rport_ref(
2281 server_port->server_addr.node_id,
2282 server_port->server_addr.port_id);
2283 if (!rport_ptr)
2284 continue;
2285 rport_ptr->sec_rule = rule;
2286 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2287 }
2288 server->synced_sec_rule = 1;
2289}
2290
2291/**
2292 * msm_ipc_sync_sec_rule() - Sync the security rule to the service
2293 * @service: Service for which the rule has to be synchronized.
2294 * @instance: Instance for which the rule has to be synchronized.
2295 * @rule: Security rule to be synchronized.
2296 *
2297 * This function synchronizes the security rule with the server hash table
2298 * when the user-space script configures the rule after the service has come
2299 * up. The rule is applied to a specific service and, optionally, a specific
2300 * instance.
2301 */
2302void msm_ipc_sync_sec_rule(u32 service, u32 instance, void *rule)
2303{
2304 int key = (service & (SRV_HASH_SIZE - 1));
2305 struct msm_ipc_server *server;
2306
2307 down_write(&server_list_lock_lha2);
2308 list_for_each_entry(server, &server_list[key], list) {
2309 if (server->name.service != service)
2310 continue;
2311
2312 if (server->name.instance != instance &&
2313 instance != ALL_INSTANCE)
2314 continue;
2315
2316 /* If the rule applies to all instances and if the specific
2317 * instance of a service has a rule synchronized already,
2318 * do not apply the rule for that specific instance.
2319 */
2320 if (instance == ALL_INSTANCE && server->synced_sec_rule)
2321 continue;
2322
2323 sync_sec_rule(server, rule);
2324 }
2325 up_write(&server_list_lock_lha2);
2326}
2327
2328/**
2329 * msm_ipc_sync_default_sec_rule() - Default security rule to all services
2330 * @rule: Security rule to be synchronized.
2331 *
2332 * This function synchronizes the security rule with the server hash table
2333 * when the user-space script configures the rule after the services have
2334 * come up. It is used to synchronize the security rule that applies to all
2335 * services, provided the concerned service does not already have a rule
2336 * defined.
2337 */
2338void msm_ipc_sync_default_sec_rule(void *rule)
2339{
2340 int key;
2341 struct msm_ipc_server *server;
2342
2343 down_write(&server_list_lock_lha2);
2344 for (key = 0; key < SRV_HASH_SIZE; key++) {
2345 list_for_each_entry(server, &server_list[key], list) {
2346 if (server->synced_sec_rule)
2347 continue;
2348
2349 sync_sec_rule(server, rule);
2350 }
2351 }
2352 up_write(&server_list_lock_lha2);
2353}
2354
2355/**
2356 * ipc_router_reset_conn() - Reset the connection to remote port
2357 * @rport_ptr: Pointer to the remote port to be disconnected.
2358 *
2359 * This function is used to reset all the local ports that are connected to
2360 * the remote port being passed.
2361 */
2362static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr)
2363{
2364 struct msm_ipc_port *port_ptr;
2365 struct ipc_router_conn_info *conn_info, *tmp_conn_info;
2366
2367 mutex_lock(&rport_ptr->rport_lock_lhb2);
2368 list_for_each_entry_safe(conn_info, tmp_conn_info,
2369 &rport_ptr->conn_info_list, list) {
2370 port_ptr = ipc_router_get_port_ref(conn_info->port_id);
2371 if (port_ptr) {
2372 mutex_lock(&port_ptr->port_lock_lhc3);
2373 port_ptr->conn_status = CONNECTION_RESET;
2374 mutex_unlock(&port_ptr->port_lock_lhc3);
2375 wake_up(&port_ptr->port_rx_wait_q);
2376 kref_put(&port_ptr->ref, ipc_router_release_port);
2377 }
2378
2379 list_del(&conn_info->list);
2380 kfree(conn_info);
2381 }
2382 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2383}
2384
2385/**
2386 * ipc_router_set_conn() - Set the connection by initializing dest address
2387 * @port_ptr: Local port in which the connection has to be set.
2388 * @addr: Destination address of the connection.
2389 *
2390 * @return: 0 on success, standard Linux error codes on failure.
2391 */
2392int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
2393 struct msm_ipc_addr *addr)
2394{
2395 struct msm_ipc_router_remote_port *rport_ptr;
2396 struct ipc_router_conn_info *conn_info;
2397
2398 if (unlikely(!port_ptr || !addr))
2399 return -EINVAL;
2400
2401 if (addr->addrtype != MSM_IPC_ADDR_ID) {
2402 IPC_RTR_ERR("%s: Invalid Address type\n", __func__);
2403 return -EINVAL;
2404 }
2405
2406 if (port_ptr->type == SERVER_PORT) {
2407 IPC_RTR_ERR("%s: Connection refused on a server port\n",
2408 __func__);
2409 return -ECONNREFUSED;
2410 }
2411
2412 if (port_ptr->conn_status == CONNECTED) {
2413 IPC_RTR_ERR("%s: Port %08x already connected\n",
2414 __func__, port_ptr->this_port.port_id);
2415 return -EISCONN;
2416 }
2417
2418 conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
2419 if (!conn_info) {
2420 IPC_RTR_ERR("%s: Error allocating conn_info\n", __func__);
2421 return -ENOMEM;
2422 }
2423 INIT_LIST_HEAD(&conn_info->list);
2424 conn_info->port_id = port_ptr->this_port.port_id;
2425
2426 rport_ptr = ipc_router_get_rport_ref(addr->addr.port_addr.node_id,
2427 addr->addr.port_addr.port_id);
2428 if (!rport_ptr) {
2429 IPC_RTR_ERR("%s: Invalid remote endpoint\n", __func__);
2430 kfree(conn_info);
2431 return -ENODEV;
2432 }
2433 mutex_lock(&rport_ptr->rport_lock_lhb2);
2434 list_add_tail(&conn_info->list, &rport_ptr->conn_info_list);
2435 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2436
2437 mutex_lock(&port_ptr->port_lock_lhc3);
2438 memcpy(&port_ptr->dest_addr, &addr->addr.port_addr,
2439 sizeof(struct msm_ipc_port_addr));
2440 port_ptr->conn_status = CONNECTED;
2441 mutex_unlock(&port_ptr->port_lock_lhc3);
2442 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2443 return 0;
2444}
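
/*
 * Example usage (illustrative sketch only, not part of this driver): a
 * kernel client connects a local port to a remote endpoint it has already
 * discovered. The <node:port> values below are hypothetical and would
 * normally come from a server lookup.
 *
 *	struct msm_ipc_addr dest = {
 *		.addrtype = MSM_IPC_ADDR_ID,
 *		.addr.port_addr.node_id = 1,
 *		.addr.port_addr.port_id = 0x4000,
 *	};
 *	int rc = ipc_router_set_conn(client_port, &dest);
 *
 *	if (rc)		/* e.g. -EISCONN if already connected */
 *		pr_err("ipc_router connect failed: %d\n", rc);
 */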
2445
2446/**
2447 * do_version_negotiation() - perform a version negotiation and set the version
2448 * @xprt_info: Pointer to the IPC Router transport info structure.
2449 * @msg: Pointer to the IPC Router HELLO message.
2450 *
2451 * This function performs the version negotiation by first verifying the
2452 * computed checksum. If the checksum matches the magic number, it sets the
2453 * negotiated IPC Router version on the transport.
2454 */
2455static void do_version_negotiation(struct msm_ipc_router_xprt_info *xprt_info,
2456 union rr_control_msg *msg)
2457{
2458 u32 magic;
2459 unsigned int version;
2460
2461 if (!xprt_info)
2462 return;
2463 magic = ipc_router_calc_checksum(msg);
2464 if (magic == IPC_ROUTER_HELLO_MAGIC) {
2465 version = fls(msg->hello.versions & IPC_ROUTER_VER_BITMASK) - 1;
2466 /* Bits 0 & 31 are reserved for future usage */
2467 if ((version > 0) &&
2468 (version != (sizeof(version) * BITS_PER_BYTE - 1)) &&
2469 xprt_info->xprt->set_version)
2470 xprt_info->xprt->set_version(xprt_info->xprt, version);
2471 }
2472}
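
/*
 * Illustration of the selection above (hypothetical values): if the peer's
 * HELLO carries hello.versions = 0x0000000E (bits 1-3 set) and those bits
 * survive the IPC_ROUTER_VER_BITMASK mask, then
 * fls(0x0000000E) - 1 = 4 - 1 = 3, so version 3, the highest commonly
 * supported version, is passed to set_version(). Values that would select
 * only the reserved bits 0 or 31 are rejected by the checks above.
 */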
2473
2474static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
2475 union rr_control_msg *msg,
2476 struct rr_header_v1 *hdr)
2477{
2478 int i, rc = 0;
2479 union rr_control_msg ctl;
2480 struct msm_ipc_routing_table_entry *rt_entry;
2481
2482 if (!hdr)
2483 return -EINVAL;
2484
2485 xprt_info->remote_node_id = hdr->src_node_id;
2486 rt_entry = create_routing_table_entry(hdr->src_node_id, xprt_info);
2487 if (!rt_entry) {
2488 IPC_RTR_ERR("%s: rt_entry allocation failed\n", __func__);
2489 return -ENOMEM;
2490 }
2491 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2492
2493 do_version_negotiation(xprt_info, msg);
2494 /* Send a reply HELLO message */
2495 memset(&ctl, 0, sizeof(ctl));
2496 ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
2497 ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
2498 ctl.hello.versions = (u32)IPC_ROUTER_VER_BITMASK;
2499 ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
2500 rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
2501 IPC_ROUTER_DUMMY_DEST_NODE);
2502 if (rc < 0) {
2503 IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
2504 __func__);
2505 return rc;
2506 }
2507 xprt_info->initialized = 1;
2508
2509 /* Send list of servers from the local node and from nodes
2510 * outside the mesh network of which this XPRT is a part.
2511 */
2512 down_read(&server_list_lock_lha2);
2513 down_read(&routing_table_lock_lha3);
2514 for (i = 0; i < RT_HASH_SIZE; i++) {
2515 list_for_each_entry(rt_entry, &routing_table[i], list) {
2516 if ((rt_entry->node_id != IPC_ROUTER_NID_LOCAL) &&
2517 (!rt_entry->xprt_info ||
2518 (rt_entry->xprt_info->xprt->link_id ==
2519 xprt_info->xprt->link_id)))
2520 continue;
2521 rc = msm_ipc_router_send_server_list(rt_entry->node_id,
2522 xprt_info);
2523 if (rc < 0) {
2524 up_read(&routing_table_lock_lha3);
2525 up_read(&server_list_lock_lha2);
2526 return rc;
2527 }
2528 }
2529 }
2530 up_read(&routing_table_lock_lha3);
2531 up_read(&server_list_lock_lha2);
2532 return rc;
2533}
2534
2535static int process_resume_tx_msg(union rr_control_msg *msg,
2536 struct rr_packet *pkt)
2537{
2538 struct msm_ipc_router_remote_port *rport_ptr;
2539
2540 rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
2541 msg->cli.port_id);
2542 if (!rport_ptr) {
2543 IPC_RTR_ERR("%s: Unable to resume client\n", __func__);
2544 return -ENODEV;
2545 }
2546 mutex_lock(&rport_ptr->rport_lock_lhb2);
2547 rport_ptr->tx_quota_cnt = 0;
2548 post_resume_tx(rport_ptr, pkt, msg);
2549 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2550 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2551 return 0;
2552}
2553
2554static int process_new_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
2555 union rr_control_msg *msg,
2556 struct rr_packet *pkt)
2557{
2558 struct msm_ipc_routing_table_entry *rt_entry;
2559 struct msm_ipc_server *server;
2560 struct msm_ipc_router_remote_port *rport_ptr;
2561
2562 if (msg->srv.instance == 0) {
2563 IPC_RTR_ERR("%s: Server %08x create rejected, version = 0\n",
2564 __func__, msg->srv.service);
2565 return -EINVAL;
2566 }
2567
2568 rt_entry = ipc_router_get_rtentry_ref(msg->srv.node_id);
2569 if (!rt_entry) {
2570 rt_entry = create_routing_table_entry(msg->srv.node_id,
2571 xprt_info);
2572 if (!rt_entry) {
2573 IPC_RTR_ERR("%s: rt_entry allocation failed\n",
2574 __func__);
2575 return -ENOMEM;
2576 }
2577 }
2578 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2579
2580 /* If the service already exists in the table, create_server returns
2581 * a reference to it.
2582 */
2583 rport_ptr = ipc_router_create_rport(msg->srv.node_id,
2584 msg->srv.port_id, xprt_info);
2585 if (!rport_ptr)
2586 return -ENOMEM;
2587
2588 server = msm_ipc_router_create_server(
2589 msg->srv.service, msg->srv.instance,
2590 msg->srv.node_id, msg->srv.port_id, xprt_info);
2591 if (!server) {
2592 IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
2593 __func__, msg->srv.service, msg->srv.instance);
2594 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2595 ipc_router_destroy_rport(rport_ptr);
2596 return -ENOMEM;
2597 }
2598 mutex_lock(&rport_ptr->rport_lock_lhb2);
2599 rport_ptr->server = server;
2600 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2601 rport_ptr->sec_rule = msm_ipc_get_security_rule(
2602 msg->srv.service, msg->srv.instance);
2603 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2604 kref_put(&server->ref, ipc_router_release_server);
2605
2606 /* Relay the new server message to other subsystems that do not belong
2607 * to the cluster from which this message is received. Notify the
2608 * local clients waiting for this service.
2609 */
2610 relay_ctl_msg(xprt_info, msg);
2611 post_control_ports(pkt);
2612 return 0;
2613}
2614
2615static int process_rmv_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
2616 union rr_control_msg *msg,
2617 struct rr_packet *pkt)
2618{
2619 struct msm_ipc_server *server;
2620 struct msm_ipc_router_remote_port *rport_ptr;
2621
2622 server = ipc_router_get_server_ref(msg->srv.service, msg->srv.instance,
2623 msg->srv.node_id, msg->srv.port_id);
2624 rport_ptr = ipc_router_get_rport_ref(msg->srv.node_id,
2625 msg->srv.port_id);
2626 if (rport_ptr) {
2627 mutex_lock(&rport_ptr->rport_lock_lhb2);
2628 if (rport_ptr->server == server)
2629 rport_ptr->server = NULL;
2630 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2631 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2632 }
2633
2634 if (server) {
2635 kref_put(&server->ref, ipc_router_release_server);
2636 ipc_router_destroy_server(server, msg->srv.node_id,
2637 msg->srv.port_id);
2638 /* Relay the new server message to other subsystems that do not
2639 * belong to the cluster from which this message is received.
2640 * Notify the local clients communicating with the service.
2641 */
2642 relay_ctl_msg(xprt_info, msg);
2643 post_control_ports(pkt);
2644 }
2645 return 0;
2646}
2647
2648static int process_rmv_client_msg(struct msm_ipc_router_xprt_info *xprt_info,
2649 union rr_control_msg *msg,
2650 struct rr_packet *pkt)
2651{
2652 struct msm_ipc_router_remote_port *rport_ptr;
2653 struct msm_ipc_server *server;
2654
2655 rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
2656 msg->cli.port_id);
2657 if (rport_ptr) {
2658 mutex_lock(&rport_ptr->rport_lock_lhb2);
2659 server = rport_ptr->server;
2660 rport_ptr->server = NULL;
2661 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2662 ipc_router_reset_conn(rport_ptr);
2663 down_write(&server_list_lock_lha2);
2664 if (server)
2665 cleanup_rmt_server(NULL, rport_ptr, server);
2666 up_write(&server_list_lock_lha2);
2667 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2668 ipc_router_destroy_rport(rport_ptr);
2669 }
2670
2671 relay_ctl_msg(xprt_info, msg);
2672 post_control_ports(pkt);
2673 return 0;
2674}
2675
2676static int process_control_msg(struct msm_ipc_router_xprt_info *xprt_info,
2677 struct rr_packet *pkt)
2678{
2679 union rr_control_msg *msg;
2680 int rc = 0;
2681 struct rr_header_v1 *hdr;
2682
2683 if (pkt->length != sizeof(*msg)) {
2684 IPC_RTR_ERR("%s: r2r msg size %d != %zu\n", __func__,
2685 pkt->length, sizeof(*msg));
2686 return -EINVAL;
2687 }
2688
2689 hdr = &pkt->hdr;
2690 msg = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, sizeof(*msg));
2691 if (!msg) {
2692 IPC_RTR_ERR("%s: Error extracting control msg\n", __func__);
2693 return -ENOMEM;
2694 }
2695
2696 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX, msg,
2697 hdr, NULL, NULL);
2698
2699 switch (msg->cmd) {
2700 case IPC_ROUTER_CTRL_CMD_HELLO:
2701 rc = process_hello_msg(xprt_info, msg, hdr);
2702 break;
2703 case IPC_ROUTER_CTRL_CMD_RESUME_TX:
2704 rc = process_resume_tx_msg(msg, pkt);
2705 break;
2706 case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
2707 rc = process_new_server_msg(xprt_info, msg, pkt);
2708 break;
2709 case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
2710 rc = process_rmv_server_msg(xprt_info, msg, pkt);
2711 break;
2712 case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
2713 rc = process_rmv_client_msg(xprt_info, msg, pkt);
2714 break;
2715 default:
2716 rc = -EINVAL;
2717 }
2718 kfree(msg);
2719 return rc;
2720}
2721
2722static void do_read_data(struct work_struct *work)
2723{
2724 struct rr_header_v1 *hdr;
2725 struct rr_packet *pkt = NULL;
2726 struct msm_ipc_port *port_ptr;
2727 struct msm_ipc_router_remote_port *rport_ptr;
2728 int ret;
2729
2730 struct msm_ipc_router_xprt_info *xprt_info =
2731 container_of(work,
2732 struct msm_ipc_router_xprt_info,
2733 read_data);
2734
2735 while ((pkt = rr_read(xprt_info)) != NULL) {
2736 if (pkt->length < calc_rx_header_size(xprt_info) ||
2737 pkt->length > MAX_IPC_PKT_SIZE) {
2738 IPC_RTR_ERR("%s: Invalid pkt length %d\n", __func__,
2739 pkt->length);
2740 goto read_next_pkt1;
2741 }
2742
2743 ret = extract_header(pkt);
2744 if (ret < 0)
2745 goto read_next_pkt1;
2746 hdr = &pkt->hdr;
2747
2748 if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
2749 ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
2750 (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
2751 IPC_RTR_INFO(xprt_info->log_ctx,
2752 "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
2753 "FWD", "RX", hdr->size, hdr->type,
2754 hdr->control_flag, hdr->src_node_id,
2755 hdr->src_port_id, hdr->dst_node_id,
2756 hdr->dst_port_id);
2757 forward_msg(xprt_info, pkt);
2758 goto read_next_pkt1;
2759 }
2760
2761 if (hdr->type != IPC_ROUTER_CTRL_CMD_DATA) {
2762 process_control_msg(xprt_info, pkt);
2763 goto read_next_pkt1;
2764 }
2765
2766 port_ptr = ipc_router_get_port_ref(hdr->dst_port_id);
2767 if (!port_ptr) {
2768 IPC_RTR_ERR("%s: No local port id %08x\n", __func__,
2769 hdr->dst_port_id);
2770 goto read_next_pkt1;
2771 }
2772
2773 rport_ptr = ipc_router_get_rport_ref(hdr->src_node_id,
2774 hdr->src_port_id);
2775 if (!rport_ptr) {
2776 rport_ptr = ipc_router_create_rport(hdr->src_node_id,
2777 hdr->src_port_id,
2778 xprt_info);
2779 if (!rport_ptr) {
2780 IPC_RTR_ERR(
2781 "%s: Rmt Prt %08x:%08x create failed\n",
2782 __func__, hdr->src_node_id,
2783 hdr->src_port_id);
2784 goto read_next_pkt2;
2785 }
2786 }
2787
2788 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX,
2789 pkt, hdr, port_ptr, rport_ptr);
2790 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2791 post_pkt_to_port(port_ptr, pkt, 0);
2792 kref_put(&port_ptr->ref, ipc_router_release_port);
2793 continue;
2794read_next_pkt2:
2795 kref_put(&port_ptr->ref, ipc_router_release_port);
2796read_next_pkt1:
2797 release_pkt(pkt);
2798 }
2799}
2800
2801int msm_ipc_router_register_server(struct msm_ipc_port *port_ptr,
2802 struct msm_ipc_addr *name)
2803{
2804 struct msm_ipc_server *server;
2805 union rr_control_msg ctl;
2806 struct msm_ipc_router_remote_port *rport_ptr;
2807
2808 if (!port_ptr || !name)
2809 return -EINVAL;
2810
2811 if (port_ptr->type != CLIENT_PORT)
2812 return -EINVAL;
2813
2814 if (name->addrtype != MSM_IPC_ADDR_NAME)
2815 return -EINVAL;
2816
2817 rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
2818 port_ptr->this_port.port_id, NULL);
2819 if (!rport_ptr) {
2820 IPC_RTR_ERR("%s: RPort %08x:%08x creation failed\n", __func__,
2821 IPC_ROUTER_NID_LOCAL, port_ptr->this_port.port_id);
2822 return -ENOMEM;
2823 }
2824
2825 server = msm_ipc_router_create_server(name->addr.port_name.service,
2826 name->addr.port_name.instance,
2827 IPC_ROUTER_NID_LOCAL,
2828 port_ptr->this_port.port_id,
2829 NULL);
2830 if (!server) {
2831 IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
2832 __func__, name->addr.port_name.service,
2833 name->addr.port_name.instance);
2834 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2835 ipc_router_destroy_rport(rport_ptr);
2836 return -ENOMEM;
2837 }
2838
2839 memset(&ctl, 0, sizeof(ctl));
2840 ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
2841 ctl.srv.service = server->name.service;
2842 ctl.srv.instance = server->name.instance;
2843 ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
2844 ctl.srv.port_id = port_ptr->this_port.port_id;
2845 broadcast_ctl_msg(&ctl);
2846 mutex_lock(&port_ptr->port_lock_lhc3);
2847 port_ptr->type = SERVER_PORT;
2848 port_ptr->mode_info.mode = MULTI_LINK_MODE;
2849 port_ptr->port_name.service = server->name.service;
2850 port_ptr->port_name.instance = server->name.instance;
2851 port_ptr->rport_info = rport_ptr;
2852 mutex_unlock(&port_ptr->port_lock_lhc3);
2853 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2854 kref_put(&server->ref, ipc_router_release_server);
2855 return 0;
2856}
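
/*
 * Example usage (illustrative sketch; the service and instance IDs are
 * hypothetical): a kernel module turns a freshly created client port into
 * a server port by registering a <service:instance> name for it.
 *
 *	struct msm_ipc_addr name = {
 *		.addrtype = MSM_IPC_ADDR_NAME,
 *		.addr.port_name.service = 0x42,
 *		.addr.port_name.instance = 1,
 *	};
 *
 *	if (msm_ipc_router_register_server(port, &name))
 *		pr_err("server registration failed\n");
 */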
2857
2858int msm_ipc_router_unregister_server(struct msm_ipc_port *port_ptr)
2859{
2860 struct msm_ipc_server *server;
2861 union rr_control_msg ctl;
2862 struct msm_ipc_router_remote_port *rport_ptr;
2863
2864 if (!port_ptr)
2865 return -EINVAL;
2866
2867 if (port_ptr->type != SERVER_PORT) {
2868 IPC_RTR_ERR("%s: Trying to unregister a non-server port\n",
2869 __func__);
2870 return -EINVAL;
2871 }
2872
2873 if (port_ptr->this_port.node_id != IPC_ROUTER_NID_LOCAL) {
2874 IPC_RTR_ERR(
2875 "%s: Trying to unregister a remote server locally\n",
2876 __func__);
2877 return -EINVAL;
2878 }
2879
2880 server = ipc_router_get_server_ref(port_ptr->port_name.service,
2881 port_ptr->port_name.instance,
2882 port_ptr->this_port.node_id,
2883 port_ptr->this_port.port_id);
2884 if (!server) {
2885 IPC_RTR_ERR("%s: Server lookup failed\n", __func__);
2886 return -ENODEV;
2887 }
2888
2889 mutex_lock(&port_ptr->port_lock_lhc3);
2890 port_ptr->type = CLIENT_PORT;
2891 rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
2892 mutex_unlock(&port_ptr->port_lock_lhc3);
2893 if (rport_ptr)
2894 ipc_router_reset_conn(rport_ptr);
2895 memset(&ctl, 0, sizeof(ctl));
2896 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
2897 ctl.srv.service = server->name.service;
2898 ctl.srv.instance = server->name.instance;
2899 ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
2900 ctl.srv.port_id = port_ptr->this_port.port_id;
2901 kref_put(&server->ref, ipc_router_release_server);
2902 ipc_router_destroy_server(server, port_ptr->this_port.node_id,
2903 port_ptr->this_port.port_id);
2904 broadcast_ctl_msg(&ctl);
2905 mutex_lock(&port_ptr->port_lock_lhc3);
2906 port_ptr->type = CLIENT_PORT;
2907 mutex_unlock(&port_ptr->port_lock_lhc3);
2908 return 0;
2909}
2910
2911static int loopback_data(struct msm_ipc_port *src,
2912 u32 port_id,
2913 struct rr_packet *pkt)
2914{
2915 struct msm_ipc_port *port_ptr;
2916 struct sk_buff *temp_skb;
2917 int align_size;
2918
2919 if (!pkt) {
2920 IPC_RTR_ERR("%s: Invalid pkt pointer\n", __func__);
2921 return -EINVAL;
2922 }
2923
2924 temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
2925 align_size = ALIGN_SIZE(pkt->length);
2926 skb_put(temp_skb, align_size);
2927 pkt->length += align_size;
2928
2929 port_ptr = ipc_router_get_port_ref(port_id);
2930 if (!port_ptr) {
2931 IPC_RTR_ERR("%s: Local port %d not present\n", __func__,
2932 port_id);
2933 return -ENODEV;
2934 }
2935 post_pkt_to_port(port_ptr, pkt, 1);
2936 update_comm_mode_info(&src->mode_info, NULL);
2937 kref_put(&port_ptr->ref, ipc_router_release_port);
2938
2939 return pkt->hdr.size;
2940}
2941
2942static int ipc_router_tx_wait(struct msm_ipc_port *src,
2943 struct msm_ipc_router_remote_port *rport_ptr,
2944 u32 *set_confirm_rx,
2945 long timeout)
2946{
2947 struct msm_ipc_resume_tx_port *resume_tx_port;
2948 int ret;
2949
2950 if (unlikely(!src || !rport_ptr))
2951 return -EINVAL;
2952
2953 for (;;) {
2954 mutex_lock(&rport_ptr->rport_lock_lhb2);
2955 if (rport_ptr->status == RESET) {
2956 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2957 IPC_RTR_ERR("%s: RPort %08x:%08x is in reset state\n",
2958 __func__, rport_ptr->node_id,
2959 rport_ptr->port_id);
2960 return -ENETRESET;
2961 }
2962
2963 if (rport_ptr->tx_quota_cnt < IPC_ROUTER_HIGH_RX_QUOTA)
2964 break;
2965
2966 if (msm_ipc_router_lookup_resume_tx_port(
2967 rport_ptr, src->this_port.port_id))
2968 goto check_timeo;
2969
2970 resume_tx_port =
2971 kzalloc(sizeof(struct msm_ipc_resume_tx_port),
2972 GFP_KERNEL);
2973 if (!resume_tx_port) {
2974 IPC_RTR_ERR("%s: Resume_Tx port allocation failed\n",
2975 __func__);
2976 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2977 return -ENOMEM;
2978 }
2979 INIT_LIST_HEAD(&resume_tx_port->list);
2980 resume_tx_port->port_id = src->this_port.port_id;
2981 resume_tx_port->node_id = src->this_port.node_id;
2982 list_add_tail(&resume_tx_port->list,
2983 &rport_ptr->resume_tx_port_list);
2984check_timeo:
2985 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2986 if (!timeout) {
2987 return -EAGAIN;
2988 } else if (timeout < 0) {
2989 ret =
2990 wait_event_interruptible(src->port_tx_wait_q,
2991 (rport_ptr->tx_quota_cnt !=
2992 IPC_ROUTER_HIGH_RX_QUOTA ||
2993 rport_ptr->status == RESET));
2994 if (ret)
2995 return ret;
2996 } else {
2997 ret = wait_event_interruptible_timeout(
2998 src->port_tx_wait_q,
2999 (rport_ptr->tx_quota_cnt !=
3000 IPC_ROUTER_HIGH_RX_QUOTA ||
3001 rport_ptr->status == RESET),
3002 msecs_to_jiffies(timeout));
3003 if (ret < 0) {
3004 return ret;
3005 } else if (ret == 0) {
3006 IPC_RTR_ERR("%s: Resume_tx Timeout %08x:%08x\n",
3007 __func__, rport_ptr->node_id,
3008 rport_ptr->port_id);
3009 return -ETIMEDOUT;
3010 }
3011 }
3012 }
3013 rport_ptr->tx_quota_cnt++;
3014 if (rport_ptr->tx_quota_cnt == IPC_ROUTER_LOW_RX_QUOTA)
3015 *set_confirm_rx = 1;
3016 mutex_unlock(&rport_ptr->rport_lock_lhb2);
3017 return 0;
3018}
3019
3020static int
3021msm_ipc_router_write_pkt(struct msm_ipc_port *src,
3022 struct msm_ipc_router_remote_port *rport_ptr,
3023 struct rr_packet *pkt, long timeout)
3024{
3025 struct rr_header_v1 *hdr;
3026 struct msm_ipc_router_xprt_info *xprt_info;
3027 struct msm_ipc_routing_table_entry *rt_entry;
3028 struct sk_buff *temp_skb;
3029 int xprt_option;
3030 int ret;
3031 int align_size;
3032 u32 set_confirm_rx = 0;
3033
3034 if (!rport_ptr || !src || !pkt)
3035 return -EINVAL;
3036
3037 hdr = &pkt->hdr;
3038 hdr->version = IPC_ROUTER_V1;
3039 hdr->type = IPC_ROUTER_CTRL_CMD_DATA;
3040 hdr->src_node_id = src->this_port.node_id;
3041 hdr->src_port_id = src->this_port.port_id;
3042 hdr->size = pkt->length;
3043 hdr->control_flag = 0;
3044 hdr->dst_node_id = rport_ptr->node_id;
3045 hdr->dst_port_id = rport_ptr->port_id;
3046
3047 ret = ipc_router_tx_wait(src, rport_ptr, &set_confirm_rx, timeout);
3048 if (ret < 0)
3049 return ret;
3050 if (set_confirm_rx)
3051 hdr->control_flag |= CONTROL_FLAG_CONFIRM_RX;
3052
3053 if (hdr->dst_node_id == IPC_ROUTER_NID_LOCAL) {
3054 ipc_router_log_msg(local_log_ctx,
3055 IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src,
3056 rport_ptr);
3057 ret = loopback_data(src, hdr->dst_port_id, pkt);
3058 return ret;
3059 }
3060
3061 rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
3062 if (!rt_entry) {
3063 IPC_RTR_ERR("%s: Remote node %d not up\n",
3064 __func__, hdr->dst_node_id);
3065 return -ENODEV;
3066 }
3067 down_read(&rt_entry->lock_lha4);
3068 xprt_info = rt_entry->xprt_info;
3069 ret = ipc_router_get_xprt_info_ref(xprt_info);
3070 if (ret < 0) {
3071 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
3072 up_read(&rt_entry->lock_lha4);
3073 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3074 return ret;
3075 }
3076 ret = prepend_header(pkt, xprt_info);
3077 if (ret < 0) {
3078 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
3079 goto out_write_pkt;
3080 }
3081 xprt_option = xprt_info->xprt->get_option(xprt_info->xprt);
3082 if (!(xprt_option & FRAG_PKT_WRITE_ENABLE)) {
3083 ret = defragment_pkt(pkt);
3084 if (ret < 0)
3085 goto out_write_pkt;
3086 }
3087
3088 temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
3089 align_size = ALIGN_SIZE(pkt->length);
3090 skb_put(temp_skb, align_size);
3091 pkt->length += align_size;
3092 mutex_lock(&xprt_info->tx_lock_lhb2);
3093 ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
3094 mutex_unlock(&xprt_info->tx_lock_lhb2);
3095out_write_pkt:
3096 up_read(&rt_entry->lock_lha4);
3097 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3098
3099 if (ret < 0) {
3100 IPC_RTR_ERR("%s: Write on XPRT failed\n", __func__);
3101 ipc_router_log_msg(xprt_info->log_ctx,
3102 IPC_ROUTER_LOG_EVENT_TX_ERR, pkt, hdr, src,
3103 rport_ptr);
3104
3105 ipc_router_put_xprt_info_ref(xprt_info);
3106 return ret;
3107 }
3108 update_comm_mode_info(&src->mode_info, xprt_info);
3109 ipc_router_log_msg(xprt_info->log_ctx,
3110 IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src, rport_ptr);
3111
3112 ipc_router_put_xprt_info_ref(xprt_info);
3113 return hdr->size;
3114}
3115
3116int msm_ipc_router_send_to(struct msm_ipc_port *src,
3117 struct sk_buff_head *data,
3118 struct msm_ipc_addr *dest,
3119 long timeout)
3120{
3121 u32 dst_node_id = 0, dst_port_id = 0;
3122 struct msm_ipc_server *server;
3123 struct msm_ipc_server_port *server_port;
3124 struct msm_ipc_router_remote_port *rport_ptr = NULL;
3125 struct msm_ipc_router_remote_port *src_rport_ptr = NULL;
3126 struct rr_packet *pkt;
3127 int ret;
3128
3129 if (!src || !data || !dest) {
3130 IPC_RTR_ERR("%s: Invalid Parameters\n", __func__);
3131 return -EINVAL;
3132 }
3133
3134 /* Resolve address */
3135 if (dest->addrtype == MSM_IPC_ADDR_ID) {
3136 dst_node_id = dest->addr.port_addr.node_id;
3137 dst_port_id = dest->addr.port_addr.port_id;
3138 } else if (dest->addrtype == MSM_IPC_ADDR_NAME) {
3139 server =
3140 ipc_router_get_server_ref(dest->addr.port_name.service,
3141 dest->addr.port_name.instance,
3142 0, 0);
3143 if (!server) {
3144 IPC_RTR_ERR("%s: Destination not reachable\n",
3145 __func__);
3146 return -ENODEV;
3147 }
3148 server_port = list_first_entry(&server->server_port_list,
3149 struct msm_ipc_server_port,
3150 list);
3151 dst_node_id = server_port->server_addr.node_id;
3152 dst_port_id = server_port->server_addr.port_id;
3153 kref_put(&server->ref, ipc_router_release_server);
3154 }
3155
3156 rport_ptr = ipc_router_get_rport_ref(dst_node_id, dst_port_id);
3157 if (!rport_ptr) {
3158 IPC_RTR_ERR("%s: Remote port not found\n", __func__);
3159 return -ENODEV;
3160 }
3161
3162 if (src->check_send_permissions) {
3163 ret = src->check_send_permissions(rport_ptr->sec_rule);
3164 if (ret <= 0) {
3165 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3166 IPC_RTR_ERR("%s: permission failure for %s\n",
3167 __func__, current->comm);
3168 return -EPERM;
3169 }
3170 }
3171
3172 if (dst_node_id == IPC_ROUTER_NID_LOCAL && !src->rport_info) {
3173 src_rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
3174 src->this_port.port_id,
3175 NULL);
3176 if (!src_rport_ptr) {
3177 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3178 IPC_RTR_ERR("%s: RPort creation failed\n", __func__);
3179 return -ENOMEM;
3180 }
3181 mutex_lock(&src->port_lock_lhc3);
3182 src->rport_info = src_rport_ptr;
3183 mutex_unlock(&src->port_lock_lhc3);
3184 kref_put(&src_rport_ptr->ref, ipc_router_release_rport);
3185 }
3186
3187 pkt = create_pkt(data);
3188 if (!pkt) {
3189 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3190 IPC_RTR_ERR("%s: Pkt creation failed\n", __func__);
3191 return -ENOMEM;
3192 }
3193
3194 ret = msm_ipc_router_write_pkt(src, rport_ptr, pkt, timeout);
3195 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3196 if (ret < 0)
3197 pkt->pkt_fragment_q = NULL;
3198 release_pkt(pkt);
3199
3200 return ret;
3201}
3202
3203int msm_ipc_router_send_msg(struct msm_ipc_port *src,
3204 struct msm_ipc_addr *dest,
3205 void *data, unsigned int data_len)
3206{
3207 struct sk_buff_head *out_skb_head;
3208 int ret;
3209
3210 out_skb_head = msm_ipc_router_buf_to_skb(data, data_len);
3211 if (!out_skb_head) {
3212 IPC_RTR_ERR("%s: SKB conversion failed\n", __func__);
3213 return -EFAULT;
3214 }
3215
3216 ret = msm_ipc_router_send_to(src, out_skb_head, dest, 0);
3217 if (ret < 0) {
3218 if (ret != -EAGAIN)
3219 IPC_RTR_ERR(
3220 "%s: msm_ipc_router_send_to failed - ret: %d\n",
3221 __func__, ret);
3222 msm_ipc_router_free_skb(out_skb_head);
3223 return ret;
3224 }
3225 return 0;
3226}
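
/*
 * Example usage (illustrative sketch; the destination name and payload are
 * hypothetical): send a small buffer to a named service. The call does not
 * wait for flow control (timeout 0 is used internally), so -EAGAIN may be
 * returned while the receiver is throttled.
 *
 *	char req[] = "ping";
 *	struct msm_ipc_addr dest = {
 *		.addrtype = MSM_IPC_ADDR_NAME,
 *		.addr.port_name.service = 0x42,
 *		.addr.port_name.instance = 1,
 *	};
 *
 *	ret = msm_ipc_router_send_msg(port, &dest, req, sizeof(req));
 */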
3227
3228/**
3229 * msm_ipc_router_send_resume_tx() - Send Resume_Tx message
3230 * @data: Pointer to received data packet that has confirm_rx bit set
3231 *
3232 * @return: On success, the number of bytes transferred is returned, else a
3233 * standard Linux error code is returned.
3234 *
3235 * This function sends the Resume_Tx event to the remote node that
3236 * sent the data with the confirm_rx field set. In a multi-hop
3237 * scenario, it also makes sure that the resume_tx event is routed to
3238 * the correct destination node_id.
3239 */
3240static int msm_ipc_router_send_resume_tx(void *data)
3241{
3242 union rr_control_msg msg;
3243 struct rr_header_v1 *hdr = (struct rr_header_v1 *)data;
3244 struct msm_ipc_routing_table_entry *rt_entry;
3245 int ret;
3246
3247 memset(&msg, 0, sizeof(msg));
3248 msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
3249 msg.cli.node_id = hdr->dst_node_id;
3250 msg.cli.port_id = hdr->dst_port_id;
3251 rt_entry = ipc_router_get_rtentry_ref(hdr->src_node_id);
3252 if (!rt_entry) {
3253 IPC_RTR_ERR("%s: %d Node is not present", __func__,
3254 hdr->src_node_id);
3255 return -ENODEV;
3256 }
3257 ret = ipc_router_get_xprt_info_ref(rt_entry->xprt_info);
3258 if (ret < 0) {
3259 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
3260 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3261 return ret;
3262 }
3263 ret = ipc_router_send_ctl_msg(rt_entry->xprt_info, &msg,
3264 hdr->src_node_id);
3265 ipc_router_put_xprt_info_ref(rt_entry->xprt_info);
3266 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3267 if (ret < 0)
3268 IPC_RTR_ERR(
3269 "%s: Send Resume_Tx Failed SRC_NODE: %d SRC_PORT: %d DEST_NODE: %d",
3270 __func__, hdr->dst_node_id, hdr->dst_port_id,
3271 hdr->src_node_id);
3272
3273 return ret;
3274}
3275
3276int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
3277 struct rr_packet **read_pkt,
3278 size_t buf_len)
3279{
3280 struct rr_packet *pkt;
3281
3282 if (!port_ptr || !read_pkt)
3283 return -EINVAL;
3284
3285 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3286 if (list_empty(&port_ptr->port_rx_q)) {
3287 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3288 return -EAGAIN;
3289 }
3290
3291 pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet, list);
3292 if ((buf_len) && (pkt->hdr.size > buf_len)) {
3293 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3294 return -ETOOSMALL;
3295 }
3296 list_del(&pkt->list);
3297 if (list_empty(&port_ptr->port_rx_q))
3298 __pm_relax(port_ptr->port_rx_ws);
3299 *read_pkt = pkt;
3300 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3301 if (pkt->hdr.control_flag & CONTROL_FLAG_CONFIRM_RX)
3302 msm_ipc_router_send_resume_tx(&pkt->hdr);
3303
3304 return pkt->length;
3305}
3306
3307/**
3308 * msm_ipc_router_rx_data_wait() - Wait for a new message destined for a
3309 * local port.
3310 * @port_ptr: Pointer to the local port
3311 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
3312 * > 0 timeout indicates the wait time.
3313 * 0 indicates that we do not wait.
3314 * @return: 0 if there are pending messages to read,
3315 * standard Linux error code otherwise.
3316 *
3317 * Checks for the availability of messages destined for the local port.
3318 * If no messages are present, it waits as per @timeout.
3319 */
3320int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout)
3321{
3322 int ret = 0;
3323
3324 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3325 while (list_empty(&port_ptr->port_rx_q)) {
3326 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3327 if (timeout < 0) {
3328 ret = wait_event_interruptible(
3329 port_ptr->port_rx_wait_q,
3330 !list_empty(&port_ptr->port_rx_q));
3331 if (ret)
3332 return ret;
3333 } else if (timeout > 0) {
3334 timeout = wait_event_interruptible_timeout(
3335 port_ptr->port_rx_wait_q,
3336 !list_empty(&port_ptr->port_rx_q),
3337 timeout);
3338 if (timeout < 0)
3339 return -EFAULT;
3340 }
3341 if (timeout == 0)
3342 return -ENOMSG;
3343 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3344 }
3345 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3346
3347 return ret;
3348}
3349
3350/**
3351 * msm_ipc_router_recv_from() - Receive messages destined for a local port.
3352 * @port_ptr: Pointer to the local port
3353 * @pkt : Pointer to the router-to-router packet
3354 * @src: Pointer to local port address
3355 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
3356 * > 0 timeout indicates the wait time.
3357 * 0 indicates that we do not wait.
3358 * @return: = Number of bytes read(On successful read operation).
3359 * = -ENOMSG (If there are no pending messages and timeout is 0).
3360 * = -EINVAL (If either of the arguments, port_ptr or pkt, is invalid)
3361 * = -EFAULT (If there are no pending messages when timeout is > 0
3362 * and wait_event_interruptible_timeout has returned a value < 0)
3363 * = -ERESTARTSYS (If there are no pending messages when timeout
3364 * is < 0 and wait_event_interruptible was interrupted by a signal)
3365 *
3366 * This function reads the messages that are destined for a local port. It
3367 * is used by modules that exist within the kernel and use IPC Router for
3368 * transport. The function checks if there are any messages that are already
3369 * received. If yes, it reads them, else it waits as per the timeout value.
3370 * On a successful read, the return value of the function indicates the number
3371 * of bytes that are read.
3372 */
3373int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
3374 struct rr_packet **pkt,
3375 struct msm_ipc_addr *src,
3376 long timeout)
3377{
3378 int ret, data_len, align_size;
3379 struct sk_buff *temp_skb;
3380 struct rr_header_v1 *hdr = NULL;
3381
3382 if (!port_ptr || !pkt) {
3383 IPC_RTR_ERR("%s: Invalid pointers being passed\n", __func__);
3384 return -EINVAL;
3385 }
3386
3387 *pkt = NULL;
3388
3389 ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
3390 if (ret)
3391 return ret;
3392
3393 ret = msm_ipc_router_read(port_ptr, pkt, 0);
3394 if (ret <= 0 || !(*pkt))
3395 return ret;
3396
3397 hdr = &((*pkt)->hdr);
3398 if (src) {
3399 src->addrtype = MSM_IPC_ADDR_ID;
3400 src->addr.port_addr.node_id = hdr->src_node_id;
3401 src->addr.port_addr.port_id = hdr->src_port_id;
3402 }
3403
3404 data_len = hdr->size;
3405 align_size = ALIGN_SIZE(data_len);
3406 if (align_size) {
3407 temp_skb = skb_peek_tail((*pkt)->pkt_fragment_q);
3408 skb_trim(temp_skb, (temp_skb->len - align_size));
3409 }
3410 return data_len;
3411}
3412
3413int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
3414 struct msm_ipc_addr *src,
3415 unsigned char **data,
3416 unsigned int *len)
3417{
3418 struct rr_packet *pkt;
3419 int ret;
3420
3421 ret = msm_ipc_router_recv_from(port_ptr, &pkt, src, 0);
3422 if (ret < 0) {
3423 if (ret != -ENOMSG)
3424 IPC_RTR_ERR(
3425 "%s: msm_ipc_router_recv_from failed - ret: %d\n",
3426 __func__, ret);
3427 return ret;
3428 }
3429
3430 *data = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, ret);
3431 if (!(*data)) {
3432 IPC_RTR_ERR("%s: Buf conversion failed\n", __func__);
3433 release_pkt(pkt);
3434 return -ENOMEM;
3435 }
3436
3437 *len = ret;
3438 release_pkt(pkt);
3439 return 0;
3440}
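
/*
 * Example usage (illustrative sketch): pick up one queued message from a
 * local port without blocking. On success *data points to a copy of the
 * payload (allocated by msm_ipc_router_skb_to_buf(), so presumably freed
 * with kfree()) and @src identifies the sender.
 *
 *	struct msm_ipc_addr src;
 *	unsigned char *data;
 *	unsigned int len;
 *
 *	ret = msm_ipc_router_read_msg(port, &src, &data, &len);
 *	if (!ret) {
 *		pr_info("got %u bytes from %08x:%08x\n", len,
 *			src.addr.port_addr.node_id,
 *			src.addr.port_addr.port_id);
 *		kfree(data);
 *	}
 */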
3441
3442/**
3443 * msm_ipc_router_create_port() - Create an IPC Router port/endpoint
3444 * @notify: Callback function to notify any event on the port. The callback
3445 *          receives the event ID to be handled, any out-of-band data
3446 *          associated with the event, the size of that out-of-band data
3447 *          (if valid), and the private data registered during the port
3448 *          creation.
3449 * @priv: Private info to be passed while the notification is generated.
3450 *
3451 * @return: Pointer to the port on success, NULL on error.
3452 */
3453struct msm_ipc_port *msm_ipc_router_create_port(
3454 void (*notify)(unsigned int event, void *oob_data,
3455 size_t oob_data_len, void *priv),
3456 void *priv)
3457{
3458 struct msm_ipc_port *port_ptr;
3459 int ret;
3460
3461 ret = ipc_router_core_init();
3462 if (ret < 0) {
3463 IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
3464 __func__, ret);
3465 return NULL;
3466 }
3467
3468 port_ptr = msm_ipc_router_create_raw_port(NULL, notify, priv);
3469 if (!port_ptr)
3470 IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
3471
3472 return port_ptr;
3473}
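
/*
 * Example usage (illustrative sketch; the callback and private pointer are
 * hypothetical): create an endpoint whose notify callback receives the
 * event ID, any out-of-band data and the private pointer supplied here.
 *
 *	static void my_notify(unsigned int event, void *oob_data,
 *			      size_t oob_data_len, void *priv)
 *	{
 *		pr_info("ipc_router event %u\n", event);
 *	}
 *
 *	port = msm_ipc_router_create_port(my_notify, my_priv);
 *	if (!port)
 *		return -ENOMEM;
 */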
3474
3475int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
3476{
3477 union rr_control_msg msg;
3478 struct msm_ipc_server *server;
3479 struct msm_ipc_router_remote_port *rport_ptr;
3480
3481 if (!port_ptr)
3482 return -EINVAL;
3483
3484 if (port_ptr->type == SERVER_PORT || port_ptr->type == CLIENT_PORT) {
3485 down_write(&local_ports_lock_lhc2);
3486 list_del(&port_ptr->list);
3487 up_write(&local_ports_lock_lhc2);
3488
3489 mutex_lock(&port_ptr->port_lock_lhc3);
3490 rport_ptr = (struct msm_ipc_router_remote_port *)
3491 port_ptr->rport_info;
3492 port_ptr->rport_info = NULL;
3493 mutex_unlock(&port_ptr->port_lock_lhc3);
3494 if (rport_ptr) {
3495 ipc_router_reset_conn(rport_ptr);
3496 ipc_router_destroy_rport(rport_ptr);
3497 }
3498
3499 if (port_ptr->type == SERVER_PORT) {
3500 memset(&msg, 0, sizeof(msg));
3501 msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
3502 msg.srv.service = port_ptr->port_name.service;
3503 msg.srv.instance = port_ptr->port_name.instance;
3504 msg.srv.node_id = port_ptr->this_port.node_id;
3505 msg.srv.port_id = port_ptr->this_port.port_id;
3506 broadcast_ctl_msg(&msg);
3507 }
3508
3509 /* Server port could have been a client port earlier.
3510 * Send REMOVE_CLIENT message in either case.
3511 */
3512 msm_ipc_router_send_remove_client(&port_ptr->mode_info,
3513 port_ptr->this_port.node_id,
3514 port_ptr->this_port.port_id);
3515 } else if (port_ptr->type == CONTROL_PORT) {
3516 down_write(&control_ports_lock_lha5);
3517 list_del(&port_ptr->list);
3518 up_write(&control_ports_lock_lha5);
3519 } else if (port_ptr->type == IRSC_PORT) {
3520 down_write(&local_ports_lock_lhc2);
3521 list_del(&port_ptr->list);
3522 up_write(&local_ports_lock_lhc2);
3523 signal_irsc_completion();
3524 }
3525
3526 if (port_ptr->type == SERVER_PORT) {
3527 server = ipc_router_get_server_ref(
3528 port_ptr->port_name.service,
3529 port_ptr->port_name.instance,
3530 port_ptr->this_port.node_id,
3531 port_ptr->this_port.port_id);
3532 if (server) {
3533 kref_put(&server->ref, ipc_router_release_server);
3534 ipc_router_destroy_server(server,
3535 port_ptr->this_port.node_id,
3536 port_ptr->this_port.port_id);
3537 }
3538 }
3539
3540 mutex_lock(&port_ptr->port_lock_lhc3);
3541 rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
3542 port_ptr->rport_info = NULL;
3543 mutex_unlock(&port_ptr->port_lock_lhc3);
3544 if (rport_ptr)
3545 ipc_router_destroy_rport(rport_ptr);
3546
3547 kref_put(&port_ptr->ref, ipc_router_release_port);
3548 return 0;
3549}
3550
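/**
 * msm_ipc_router_get_curr_pkt_size() - Peek at the size of the next packet
 * @port_ptr: Port whose receive queue is inspected.
 *
 * @return: Size of the packet at the head of the receive queue, 0 if the
 *          queue is empty, -EINVAL if @port_ptr is NULL.
 */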
3551int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr)
3552{
3553 struct rr_packet *pkt;
3554 int rc = 0;
3555
3556 if (!port_ptr)
3557 return -EINVAL;
3558
3559 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3560 if (!list_empty(&port_ptr->port_rx_q)) {
3561 pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet,
3562 list);
3563 rc = pkt->hdr.size;
3564 }
3565 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3566
3567 return rc;
3568}
3569
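/**
 * msm_ipc_router_bind_control_port() - Turn a client port into a control port
 * @port_ptr: Client port to be bound as a control port.
 *
 * @return: 0 on success, -EINVAL if the port is not a client port.
 *
 * The port is moved from the local port hash table to the global list of
 * control ports so that it receives broadcast control messages.
 */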
3570int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr)
3571{
3572 if (unlikely(!port_ptr || port_ptr->type != CLIENT_PORT))
3573 return -EINVAL;
3574
3575 down_write(&local_ports_lock_lhc2);
3576 list_del(&port_ptr->list);
3577 up_write(&local_ports_lock_lhc2);
3578 port_ptr->type = CONTROL_PORT;
3579 down_write(&control_ports_lock_lha5);
3580 list_add_tail(&port_ptr->list, &control_ports);
3581 up_write(&control_ports_lock_lha5);
3582
3583 return 0;
3584}
3585
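/**
 * msm_ipc_router_lookup_server_name() - Look up servers matching a name
 * @srv_name: Service/instance name to look for.
 * @srv_info: Array to be filled with the addresses of matching servers.
 * @num_entries_in_array: Capacity of @srv_info.
 * @lookup_mask: Mask applied to the instance ID during comparison.
 *
 * @return: Total number of matching server ports (which may exceed
 *          @num_entries_in_array), or -EINVAL on invalid input.
 */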
3586int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
3587 struct msm_ipc_server_info *srv_info,
3588 int num_entries_in_array, u32 lookup_mask)
3589{
3590 struct msm_ipc_server *server;
3591 struct msm_ipc_server_port *server_port;
3592 int key, i = 0; /* i counts the matching entries found */
3593
3594 if (!srv_name) {
3595 IPC_RTR_ERR("%s: Invalid srv_name\n", __func__);
3596 return -EINVAL;
3597 }
3598
3599 if (num_entries_in_array && !srv_info) {
3600 IPC_RTR_ERR("%s: srv_info NULL\n", __func__);
3601 return -EINVAL;
3602 }
3603
3604 down_read(&server_list_lock_lha2);
3605 key = (srv_name->service & (SRV_HASH_SIZE - 1));
3606 list_for_each_entry(server, &server_list[key], list) {
3607 if ((server->name.service != srv_name->service) ||
3608 ((server->name.instance & lookup_mask) !=
3609 srv_name->instance))
3610 continue;
3611
3612 list_for_each_entry(server_port, &server->server_port_list,
3613 list) {
3614 if (i < num_entries_in_array) {
3615 srv_info[i].node_id =
3616 server_port->server_addr.node_id;
3617 srv_info[i].port_id =
3618 server_port->server_addr.port_id;
3619 srv_info[i].service = server->name.service;
3620 srv_info[i].instance = server->name.instance;
3621 }
3622 i++;
3623 }
3624 }
3625 up_read(&server_list_lock_lha2);
3626
3627 return i;
3628}
3629
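/**
 * msm_ipc_router_close() - Close and free all registered transports
 *
 * @return: 0 always.
 */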
3630int msm_ipc_router_close(void)
3631{
3632 struct msm_ipc_router_xprt_info *xprt_info, *tmp_xprt_info;
3633
3634 down_write(&xprt_info_list_lock_lha5);
3635 list_for_each_entry_safe(xprt_info, tmp_xprt_info,
3636 &xprt_info_list, list) {
3637 xprt_info->xprt->close(xprt_info->xprt);
3638 list_del(&xprt_info->list);
3639 kfree(xprt_info);
3640 }
3641 up_write(&xprt_info_list_lock_lha5);
3642 return 0;
3643}
3644
3645/**
3646 * pil_vote_load_worker() - Process vote to load the modem
3647 *
3648 * @work: Work item to process
3649 *
3650 * This function is called to process votes to load the modem that have been
3651 * queued by msm_ipc_load_default_node().
3652 */
3653static void pil_vote_load_worker(struct work_struct *work)
3654{
3655 struct pil_vote_info *vote_info;
3656
3657 vote_info = container_of(work, struct pil_vote_info, load_work);
3658 if (strlen(default_peripheral)) {
3659 vote_info->pil_handle = subsystem_get(default_peripheral);
3660 if (IS_ERR(vote_info->pil_handle)) {
3661 IPC_RTR_ERR("%s: Failed to load %s\n",
3662 __func__, default_peripheral);
3663 vote_info->pil_handle = NULL;
3664 }
3665 } else {
3666 vote_info->pil_handle = NULL;
3667 }
3668}
3669
3670/**
3671 * pil_vote_unload_worker() - Process vote to unload the modem
3672 *
3673 * @work: Work item to process
3674 *
3675 * This function is called to process votes to unload the modem that have been
3676 * queued by msm_ipc_unload_default_node().
3677 */
3678static void pil_vote_unload_worker(struct work_struct *work)
3679{
3680 struct pil_vote_info *vote_info;
3681
3682 vote_info = container_of(work, struct pil_vote_info, unload_work);
3683
3684 if (vote_info->pil_handle) {
3685 subsystem_put(vote_info->pil_handle);
3686 vote_info->pil_handle = NULL;
3687 }
3688 kfree(vote_info);
3689}
3690
3691/**
3692 * msm_ipc_load_default_node() - Queue a vote to load the modem.
3693 *
3694 * @return: PIL vote info structure on success, NULL on failure.
3695 *
3696 * This function places a work item that loads the modem on the
3697 * single-threaded workqueue used for processing PIL votes to load
3698 * or unload the modem.
3699 */
3700void *msm_ipc_load_default_node(void)
3701{
3702 struct pil_vote_info *vote_info;
3703
3704 vote_info = kmalloc(sizeof(*vote_info), GFP_KERNEL);
3705 if (!vote_info)
3706 return vote_info;
3707
3708 INIT_WORK(&vote_info->load_work, pil_vote_load_worker);
3709 queue_work(msm_ipc_router_workqueue, &vote_info->load_work);
3710
3711 return vote_info;
3712}
3713
3714/**
3715 * msm_ipc_unload_default_node() - Queue a vote to unload the modem.
3716 *
3717 * @pil_vote: PIL vote info structure, containing the PIL handle
3718 * and work structure.
3719 *
3720 * This function places a work item that unloads the modem on the
3721 * single-threaded workqueue used for processing PIL votes to load
3722 * or unload the modem.
3723 */
3724void msm_ipc_unload_default_node(void *pil_vote)
3725{
3726 struct pil_vote_info *vote_info;
3727
3728 if (pil_vote) {
3729 vote_info = (struct pil_vote_info *)pil_vote;
3730 INIT_WORK(&vote_info->unload_work, pil_vote_unload_worker);
3731 queue_work(msm_ipc_router_workqueue, &vote_info->unload_work);
3732 }
3733}
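/*
 * Illustrative usage sketch (not part of this driver): the load and unload
 * votes are meant to be used as a pair, with the handle returned by
 * msm_ipc_load_default_node() passed back unmodified:
 *
 *	void *pil_vote = msm_ipc_load_default_node();
 *	...
 *	msm_ipc_unload_default_node(pil_vote);
 *
 * Passing NULL to msm_ipc_unload_default_node() is a no-op.
 */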
3734
3735#if defined(CONFIG_DEBUG_FS)
3736static void dump_routing_table(struct seq_file *s)
3737{
3738 int j;
3739 struct msm_ipc_routing_table_entry *rt_entry;
3740
3741 seq_printf(s, "%-10s|%-20s|%-10s|\n", "Node Id", "XPRT Name",
3742 "Next Hop");
3743 seq_puts(s, "----------------------------------------------\n");
3744 for (j = 0; j < RT_HASH_SIZE; j++) {
3745 down_read(&routing_table_lock_lha3);
3746 list_for_each_entry(rt_entry, &routing_table[j], list) {
3747 down_read(&rt_entry->lock_lha4);
3748 seq_printf(s, "0x%08x|", rt_entry->node_id);
3749 if (rt_entry->node_id == IPC_ROUTER_NID_LOCAL)
3750 seq_printf(s, "%-20s|0x%08x|\n", "Loopback",
3751 rt_entry->node_id);
3752 else
3753 seq_printf(s, "%-20s|0x%08x|\n",
3754 rt_entry->xprt_info->xprt->name,
3755 rt_entry->node_id);
3756 up_read(&rt_entry->lock_lha4);
3757 }
3758 up_read(&routing_table_lock_lha3);
3759 }
3760}
3761
3762static void dump_xprt_info(struct seq_file *s)
3763{
3764 struct msm_ipc_router_xprt_info *xprt_info;
3765
3766 seq_printf(s, "%-20s|%-10s|%-12s|%-15s|\n", "XPRT Name", "Link ID",
3767 "Initialized", "Remote Node Id");
3768 seq_puts(s, "------------------------------------------------------------\n");
3769 down_read(&xprt_info_list_lock_lha5);
3770 list_for_each_entry(xprt_info, &xprt_info_list, list)
3771 seq_printf(s, "%-20s|0x%08x|%-12s|0x%08x|\n",
3772 xprt_info->xprt->name, xprt_info->xprt->link_id,
3773 (xprt_info->initialized ? "Y" : "N"),
3774 xprt_info->remote_node_id);
3775 up_read(&xprt_info_list_lock_lha5);
3776}
3777
3778static void dump_servers(struct seq_file *s)
3779{
3780 int j;
3781 struct msm_ipc_server *server;
3782 struct msm_ipc_server_port *server_port;
3783
3784 seq_printf(s, "%-11s|%-11s|%-11s|%-11s|\n", "Service", "Instance",
3785 "Node_id", "Port_id");
3786 seq_puts(s, "------------------------------------------------------------\n");
3787 down_read(&server_list_lock_lha2);
3788 for (j = 0; j < SRV_HASH_SIZE; j++) {
3789 list_for_each_entry(server, &server_list[j], list) {
3790 list_for_each_entry(server_port,
3791 &server->server_port_list,
3792 list)
3793 seq_printf(s, "0x%08x |0x%08x |0x%08x |0x%08x |\n",
3794 server->name.service,
3795 server->name.instance,
3796 server_port->server_addr.node_id,
3797 server_port->server_addr.port_id);
3798 }
3799 }
3800 up_read(&server_list_lock_lha2);
3801}
3802
3803static void dump_remote_ports(struct seq_file *s)
3804{
3805 int j, k;
3806 struct msm_ipc_router_remote_port *rport_ptr;
3807 struct msm_ipc_routing_table_entry *rt_entry;
3808
3809 seq_printf(s, "%-11s|%-11s|%-10s|\n", "Node_id", "Port_id",
3810 "Quota_cnt");
3811 seq_puts(s, "------------------------------------------------------------\n");
3812 for (j = 0; j < RT_HASH_SIZE; j++) {
3813 down_read(&routing_table_lock_lha3);
3814 list_for_each_entry(rt_entry, &routing_table[j], list) {
3815 down_read(&rt_entry->lock_lha4);
3816 for (k = 0; k < RP_HASH_SIZE; k++) {
3817 list_for_each_entry
3818 (rport_ptr,
3819 &rt_entry->remote_port_list[k],
3820 list)
3821 seq_printf(s, "0x%08x |0x%08x |0x%08x|\n",
3822 rport_ptr->node_id,
3823 rport_ptr->port_id,
3824 rport_ptr->tx_quota_cnt);
3825 }
3826 up_read(&rt_entry->lock_lha4);
3827 }
3828 up_read(&routing_table_lock_lha3);
3829 }
3830}
3831
3832static void dump_control_ports(struct seq_file *s)
3833{
3834 struct msm_ipc_port *port_ptr;
3835
3836 seq_printf(s, "%-11s|%-11s|\n", "Node_id", "Port_id");
3837 seq_puts(s, "------------------------------------------------------------\n");
3838 down_read(&control_ports_lock_lha5);
3839 list_for_each_entry(port_ptr, &control_ports, list)
3840 seq_printf(s, "0x%08x |0x%08x |\n", port_ptr->this_port.node_id,
3841 port_ptr->this_port.port_id);
3842 up_read(&control_ports_lock_lha5);
3843}
3844
3845static void dump_local_ports(struct seq_file *s)
3846{
3847 int j;
3848 struct msm_ipc_port *port_ptr;
3849
3850 seq_printf(s, "%-11s|%-11s|\n", "Node_id", "Port_id");
3851 seq_puts(s, "------------------------------------------------------------\n");
3852 down_read(&local_ports_lock_lhc2);
3853 for (j = 0; j < LP_HASH_SIZE; j++) {
3854 list_for_each_entry(port_ptr, &local_ports[j], list) {
3855 mutex_lock(&port_ptr->port_lock_lhc3);
3856 seq_printf(s, "0x%08x |0x%08x |\n",
3857 port_ptr->this_port.node_id,
3858 port_ptr->this_port.port_id);
3859 mutex_unlock(&port_ptr->port_lock_lhc3);
3860 }
3861 }
3862 up_read(&local_ports_lock_lhc2);
3863}
3864
3865static int debugfs_show(struct seq_file *s, void *data)
3866{
3867 void (*show)(struct seq_file *) = s->private;
3868
3869 show(s);
3870 return 0;
3871}
3872
3873static int debug_open(struct inode *inode, struct file *file)
3874{
3875 return single_open(file, debugfs_show, inode->i_private);
3876}
3877
3878static const struct file_operations debug_ops = {
3879 .open = debug_open,
3880 .release = single_release,
3881 .read = seq_read,
3882 .llseek = seq_lseek,
3883};
3884
3885static void debug_create(const char *name, struct dentry *dent,
3886 void (*show)(struct seq_file *))
3887{
3888 debugfs_create_file(name, 0444, dent, show, &debug_ops);
3889}
3890
3891static void debugfs_init(void)
3892{
3893 struct dentry *dent;
3894
3895 dent = debugfs_create_dir("msm_ipc_router", 0);
3896 if (IS_ERR(dent))
3897 return;
3898
3899 debug_create("dump_local_ports", dent, dump_local_ports);
3900 debug_create("dump_remote_ports", dent, dump_remote_ports);
3901 debug_create("dump_control_ports", dent, dump_control_ports);
3902 debug_create("dump_servers", dent, dump_servers);
3903 debug_create("dump_xprt_info", dent, dump_xprt_info);
3904 debug_create("dump_routing_table", dent, dump_routing_table);
3905}
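/*
 * Illustrative note (assumes debugfs is mounted at /sys/kernel/debug): the
 * entries created above can be read from user space, e.g.:
 *
 *	cat /sys/kernel/debug/msm_ipc_router/dump_routing_table
 *	cat /sys/kernel/debug/msm_ipc_router/dump_servers
 */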
3906
3907#else
3908static void debugfs_init(void) {}
3909#endif
3910
3911/**
3912 * ipc_router_create_log_ctx() - Create and add the log context based on
3913 * transport
3914 * @name: subsystem name
3915 *
3916 * Return: a reference to the log context created
3917 *
3918 * This function creates an IPC log context based on the transport and adds it
3919 * to a global list. The log context can be reused from the list in case of a
3920 * subsystem restart.
3921 */
3922static void *ipc_router_create_log_ctx(char *name)
3923{
3924 struct ipc_rtr_log_ctx *sub_log_ctx;
3925
3926 sub_log_ctx = kmalloc(sizeof(*sub_log_ctx), GFP_KERNEL);
3927 if (!sub_log_ctx)
3928 return NULL;
3929 sub_log_ctx->log_ctx = ipc_log_context_create(
3930 IPC_RTR_INFO_PAGES, name, 0);
3931 if (!sub_log_ctx->log_ctx) {
3932 IPC_RTR_ERR("%s: Unable to create IPC logging for [%s]",
3933 __func__, name);
3934 kfree(sub_log_ctx);
3935 return NULL;
3936 }
3937 strlcpy(sub_log_ctx->log_ctx_name, name, LOG_CTX_NAME_LEN);
3938 INIT_LIST_HEAD(&sub_log_ctx->list);
3939 list_add_tail(&sub_log_ctx->list, &log_ctx_list);
3940 return sub_log_ctx->log_ctx;
3941}
3942
3943static void ipc_router_log_ctx_init(void)
3944{
3945 mutex_lock(&log_ctx_list_lock_lha0);
3946 local_log_ctx = ipc_router_create_log_ctx("local_IPCRTR");
3947 mutex_unlock(&log_ctx_list_lock_lha0);
3948}
3949
3950/**
3951 * ipc_router_get_log_ctx() - Retrieves the ipc log context based on subsystem
3952 * name.
3953 * @sub_name: subsystem name
3954 *
3955 * Return: a reference to the log context
3956 */
3957static void *ipc_router_get_log_ctx(char *sub_name)
3958{
3959 void *log_ctx = NULL;
3960 struct ipc_rtr_log_ctx *temp_log_ctx;
3961
3962 mutex_lock(&log_ctx_list_lock_lha0);
3963 list_for_each_entry(temp_log_ctx, &log_ctx_list, list)
3964 if (!strcmp(temp_log_ctx->log_ctx_name, sub_name)) {
3965 log_ctx = temp_log_ctx->log_ctx;
3966 mutex_unlock(&log_ctx_list_lock_lha0);
3967 return log_ctx;
3968 }
3969 log_ctx = ipc_router_create_log_ctx(sub_name);
3970 mutex_unlock(&log_ctx_list_lock_lha0);
3971
3972 return log_ctx;
3973}
3974
3975/**
3976 * ipc_router_get_xprt_info_ref() - Get a reference to the xprt_info structure
3977 * @xprt_info: pointer to the xprt_info.
3978 *
3979 * @return: Zero on success, -ENODEV on failure.
3980 *
3981 * This function is used to obtain a reference to the xprt_info structure
3982 * corresponding to the requested @xprt_info pointer.
3983 */
3984static int ipc_router_get_xprt_info_ref(
3985 struct msm_ipc_router_xprt_info *xprt_info)
3986{
3987 int ret = -ENODEV;
3988 struct msm_ipc_router_xprt_info *tmp_xprt_info;
3989
3990 if (!xprt_info)
3991 return 0;
3992
3993 down_read(&xprt_info_list_lock_lha5);
3994 list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
3995 if (tmp_xprt_info == xprt_info) {
3996 kref_get(&xprt_info->ref);
3997 ret = 0;
3998 break;
3999 }
4000 }
4001 up_read(&xprt_info_list_lock_lha5);
4002
4003 return ret;
4004}
4005
4006/**
4007 * ipc_router_put_xprt_info_ref() - Put a reference to the xprt_info structure
4008 * @xprt_info: pointer to the xprt_info.
4009 *
4010 * This function is used to put the reference to the xprt_info structure
4011 * corresponding to the requested @xprt_info pointer.
4012 */
4013static void ipc_router_put_xprt_info_ref(
4014 struct msm_ipc_router_xprt_info *xprt_info)
4015{
4016 if (xprt_info)
4017 kref_put(&xprt_info->ref, ipc_router_release_xprt_info_ref);
4018}
4019
4020/**
4021 * ipc_router_release_xprt_info_ref() - release the xprt_info last reference
4022 * @ref: Reference to the xprt_info structure.
4023 *
4024 * This function is called when all references to the xprt_info structure
4025 * are released.
4026 */
4027static void ipc_router_release_xprt_info_ref(struct kref *ref)
4028{
4029 struct msm_ipc_router_xprt_info *xprt_info =
4030 container_of(ref, struct msm_ipc_router_xprt_info, ref);
4031
4032 complete_all(&xprt_info->ref_complete);
4033}
4034
4035static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt)
4036{
4037 struct msm_ipc_router_xprt_info *xprt_info;
4038
4039 xprt_info = kmalloc(sizeof(*xprt_info), GFP_KERNEL);
4040 if (!xprt_info)
4041 return -ENOMEM;
4042
4043 xprt_info->xprt = xprt;
4044 xprt_info->initialized = 0;
4045 xprt_info->remote_node_id = -1;
4046 INIT_LIST_HEAD(&xprt_info->pkt_list);
4047 mutex_init(&xprt_info->rx_lock_lhb2);
4048 mutex_init(&xprt_info->tx_lock_lhb2);
4049 wakeup_source_init(&xprt_info->ws, xprt->name);
4050 xprt_info->need_len = 0;
4051 xprt_info->abort_data_read = 0;
4052 INIT_WORK(&xprt_info->read_data, do_read_data);
4053 INIT_LIST_HEAD(&xprt_info->list);
4054 kref_init(&xprt_info->ref);
4055 init_completion(&xprt_info->ref_complete);
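	/* Transports that implement get_ws_info() manage their wakeup
	 * sources dynamically; see msm_ipc_router_xprt_notify().
	 */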
4056 xprt_info->dynamic_ws = 0;
4057 if (xprt->get_ws_info)
4058 xprt_info->dynamic_ws = xprt->get_ws_info(xprt);
4059
4060 xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
4061 if (!xprt_info->workqueue) {
4062 kfree(xprt_info);
4063 return -ENOMEM;
4064 }
4065
4066 xprt_info->log_ctx = ipc_router_get_log_ctx(xprt->name);
4067
4068 if (!strcmp(xprt->name, "msm_ipc_router_loopback_xprt")) {
4069 xprt_info->remote_node_id = IPC_ROUTER_NID_LOCAL;
4070 xprt_info->initialized = 1;
4071 }
4072
4073 IPC_RTR_INFO(xprt_info->log_ctx, "Adding xprt: [%s]\n", xprt->name);
4074 down_write(&xprt_info_list_lock_lha5);
4075 list_add_tail(&xprt_info->list, &xprt_info_list);
4076 up_write(&xprt_info_list_lock_lha5);
4077
4078 down_write(&routing_table_lock_lha3);
4079 if (!routing_table_inited) {
4080 init_routing_table();
4081 routing_table_inited = 1;
4082 }
4083 up_write(&routing_table_lock_lha3);
4084
4085 xprt->priv = xprt_info;
4086
4087 return 0;
4088}
4089
4090static void msm_ipc_router_remove_xprt(struct msm_ipc_router_xprt *xprt)
4091{
4092 struct msm_ipc_router_xprt_info *xprt_info;
4093 struct rr_packet *temp_pkt, *pkt;
4094
4095 if (xprt && xprt->priv) {
4096 xprt_info = xprt->priv;
4097
4098 IPC_RTR_INFO(xprt_info->log_ctx, "Removing xprt: [%s]\n",
4099 xprt->name);
4100 mutex_lock(&xprt_info->rx_lock_lhb2);
4101 xprt_info->abort_data_read = 1;
4102 mutex_unlock(&xprt_info->rx_lock_lhb2);
4103 flush_workqueue(xprt_info->workqueue);
4104 destroy_workqueue(xprt_info->workqueue);
4105 mutex_lock(&xprt_info->rx_lock_lhb2);
4106 list_for_each_entry_safe(pkt, temp_pkt,
4107 &xprt_info->pkt_list, list) {
4108 list_del(&pkt->list);
4109 release_pkt(pkt);
4110 }
4111 mutex_unlock(&xprt_info->rx_lock_lhb2);
4112
4113 down_write(&xprt_info_list_lock_lha5);
4114 list_del(&xprt_info->list);
4115 up_write(&xprt_info_list_lock_lha5);
4116
4117 msm_ipc_cleanup_routing_table(xprt_info);
4118
4119 wakeup_source_trash(&xprt_info->ws);
4120
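		/* Drop our reference and wait for in-flight users to drop
		 * theirs; the release callback only completes ref_complete,
		 * so the kfree() below is safe once the wait returns.
		 */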
4121 ipc_router_put_xprt_info_ref(xprt_info);
4122 wait_for_completion(&xprt_info->ref_complete);
4123
4124 xprt->priv = 0;
4125 kfree(xprt_info);
4126 }
4127}
4128
4129struct msm_ipc_router_xprt_work {
4130 struct msm_ipc_router_xprt *xprt;
4131 struct work_struct work;
4132};
4133
4134static void xprt_open_worker(struct work_struct *work)
4135{
4136 struct msm_ipc_router_xprt_work *xprt_work =
4137 container_of(work, struct msm_ipc_router_xprt_work, work);
4138
4139 msm_ipc_router_add_xprt(xprt_work->xprt);
4140 kfree(xprt_work);
4141}
4142
4143static void xprt_close_worker(struct work_struct *work)
4144{
4145 struct msm_ipc_router_xprt_work *xprt_work =
4146 container_of(work, struct msm_ipc_router_xprt_work, work);
4147
4148 msm_ipc_router_remove_xprt(xprt_work->xprt);
4149 xprt_work->xprt->sft_close_done(xprt_work->xprt);
4150 kfree(xprt_work);
4151}
4152
4153void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
4154 unsigned int event,
4155 void *data)
4156{
4157 struct msm_ipc_router_xprt_info *xprt_info = xprt->priv;
4158 struct msm_ipc_router_xprt_work *xprt_work;
4159 struct rr_packet *pkt;
4160 int ret;
4161
4162 ret = ipc_router_core_init();
4163 if (ret < 0) {
4164 IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
4165 __func__, ret);
4166 return;
4167 }
4168
4169 switch (event) {
4170 case IPC_ROUTER_XPRT_EVENT_OPEN:
4171 xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
4172 if (xprt_work) {
4173 xprt_work->xprt = xprt;
4174 INIT_WORK(&xprt_work->work, xprt_open_worker);
4175 queue_work(msm_ipc_router_workqueue, &xprt_work->work);
4176 } else {
4177 IPC_RTR_ERR(
4178 "%s: malloc failure - Couldn't notify OPEN event",
4179 __func__);
4180 }
4181 break;
4182
4183 case IPC_ROUTER_XPRT_EVENT_CLOSE:
4184 xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
4185 if (xprt_work) {
4186 xprt_work->xprt = xprt;
4187 INIT_WORK(&xprt_work->work, xprt_close_worker);
4188 queue_work(msm_ipc_router_workqueue, &xprt_work->work);
4189 } else {
4190 IPC_RTR_ERR(
4191 "%s: malloc failure - Couldn't notify CLOSE event",
4192 __func__);
4193 }
4194 break;
4195 }
4196
4197 if (!data)
4198 return;
4199
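	/* The OPEN event is handled asynchronously on the router workqueue,
	 * so data can arrive before xprt->priv has been populated by
	 * msm_ipc_router_add_xprt(). Poll until the open worker finishes.
	 */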
4200 while (!xprt_info) {
4201 msleep(100);
4202 xprt_info = xprt->priv;
4203 }
4204
4205 pkt = clone_pkt((struct rr_packet *)data);
4206 if (!pkt)
4207 return;
4208
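	/* Hold a wakeup source while the packet sits on the receive list,
	 * unless the transport opted into dynamic wakeup-source handling,
	 * in which case it is only taken when wakeup sources are allowed.
	 */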
4209 pkt->ws_need = false;
4210 mutex_lock(&xprt_info->rx_lock_lhb2);
4211 list_add_tail(&pkt->list, &xprt_info->pkt_list);
4212 if (!xprt_info->dynamic_ws) {
4213 __pm_stay_awake(&xprt_info->ws);
4214 pkt->ws_need = true;
4215 } else {
4216 if (is_wakeup_source_allowed) {
4217 __pm_stay_awake(&xprt_info->ws);
4218 pkt->ws_need = true;
4219 }
4220 }
4221 mutex_unlock(&xprt_info->rx_lock_lhb2);
4222 queue_work(xprt_info->workqueue, &xprt_info->read_data);
4223}
4224
4225/**
4226 * parse_devicetree() - parse device tree binding
4227 *
4228 * @node: pointer to device tree node
4229 *
4230 * @return: Always 0; the qcom,default-peripheral property is optional.
4231 */
4232static int parse_devicetree(struct device_node *node)
4233{
4234 char *key;
4235 const char *peripheral = NULL;
4236
4237 key = "qcom,default-peripheral";
4238 peripheral = of_get_property(node, key, NULL);
4239 if (peripheral)
4240 strlcpy(default_peripheral, peripheral, PIL_SUBSYSTEM_NAME_LEN);
4241
4242 return 0;
4243}
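/*
 * Illustrative device tree snippet (a sketch: the node name and the "modem"
 * value are assumptions; only the compatible string and the
 * qcom,default-peripheral property come from this driver):
 *
 *	ipc_router {
 *		compatible = "qcom,ipc_router";
 *		qcom,default-peripheral = "modem";
 *	};
 */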
4244
4245/**
4246 * ipc_router_probe() - Probe the IPC Router
4247 *
4248 * @pdev: Platform device corresponding to IPC Router.
4249 *
4250 * @return: 0 on success, standard Linux error codes on error.
4251 *
4252 * This function is called when the underlying device tree driver registers
4253 * a platform device, mapped to IPC Router.
4254 */
4255static int ipc_router_probe(struct platform_device *pdev)
4256{
4257 int ret = 0;
4258
4259 if (pdev && pdev->dev.of_node) {
4260 ret = parse_devicetree(pdev->dev.of_node);
4261 if (ret)
4262 IPC_RTR_ERR("%s: Failed to parse device tree\n",
4263 __func__);
4264 }
4265 return ret;
4266}
4267
4268static const struct of_device_id ipc_router_match_table[] = {
4269 { .compatible = "qcom,ipc_router" },
4270 {},
4271};
4272
4273static struct platform_driver ipc_router_driver = {
4274 .probe = ipc_router_probe,
4275 .driver = {
4276 .name = MODULE_NAME,
4277 .owner = THIS_MODULE,
4278 .of_match_table = ipc_router_match_table,
4279 },
4280};
4281
4282/**
4283 * ipc_router_core_init() - Initialize all IPC Router core data structures
4284 *
4285 * Return: 0 on Success or Standard error code otherwise.
4286 *
4287 * This function initializes only the core data structures of the IPC Router
4288 * module. The remaining initialization is done inside msm_ipc_router_init().
4289 */
4290static int ipc_router_core_init(void)
4291{
4292 int i;
4293 int ret;
4294 struct msm_ipc_routing_table_entry *rt_entry;
4295
4296 mutex_lock(&ipc_router_init_lock);
4297 if (likely(is_ipc_router_inited)) {
4298 mutex_unlock(&ipc_router_init_lock);
4299 return 0;
4300 }
4301
4302 debugfs_init();
4303
4304 for (i = 0; i < SRV_HASH_SIZE; i++)
4305 INIT_LIST_HEAD(&server_list[i]);
4306
4307 for (i = 0; i < LP_HASH_SIZE; i++)
4308 INIT_LIST_HEAD(&local_ports[i]);
4309
4310 down_write(&routing_table_lock_lha3);
4311 if (!routing_table_inited) {
4312 init_routing_table();
4313 routing_table_inited = 1;
4314 }
4315 up_write(&routing_table_lock_lha3);
4316 rt_entry = create_routing_table_entry(IPC_ROUTER_NID_LOCAL, NULL);
4317 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
4318
4319 msm_ipc_router_workqueue =
4320 create_singlethread_workqueue("msm_ipc_router");
4321 if (!msm_ipc_router_workqueue) {
4322 mutex_unlock(&ipc_router_init_lock);
4323 return -ENOMEM;
4324 }
4325
4326 ret = msm_ipc_router_security_init();
4327 if (ret < 0)
4328 IPC_RTR_ERR("%s: Security Init failed\n", __func__);
4329 else
4330 is_ipc_router_inited = true;
4331 mutex_unlock(&ipc_router_init_lock);
4332
4333 return ret;
4334}
4335
4336static int msm_ipc_router_init(void)
4337{
4338 int ret;
4339
4340 ret = ipc_router_core_init();
4341 if (ret < 0)
4342 return ret;
4343
4344 ret = platform_driver_register(&ipc_router_driver);
4345 if (ret)
4346 IPC_RTR_ERR(
4347 "%s: ipc_router_driver register failed %d\n", __func__, ret);
4348
4349 ret = msm_ipc_router_init_sockets();
4350 if (ret < 0)
4351 IPC_RTR_ERR("%s: Init sockets failed\n", __func__);
4352
4353 ipc_router_log_ctx_init();
4354 return ret;
4355}
4356
4357module_init(msm_ipc_router_init);
4358MODULE_DESCRIPTION("MSM IPC Router");
4359MODULE_LICENSE("GPL v2");