/* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/rwsem.h>
#include <linux/ipc_logging.h>
#include <linux/ipc_router.h>
#include <linux/ipc_router_xprt.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>

#include <asm/byteorder.h>

#include "ipc_router_private.h"
#include "ipc_router_security.h"

enum {
	SMEM_LOG = 1U << 0,
	RTR_DBG = 1U << 1,
};

static int msm_ipc_router_debug_mask;
module_param_named(debug_mask, msm_ipc_router_debug_mask,
		   int, 0664);
#define MODULE_NAME "ipc_router"

#define IPC_RTR_INFO_PAGES 6

#define IPC_RTR_INFO(log_ctx, x...) do { \
typeof(log_ctx) _log_ctx = (log_ctx); \
if (_log_ctx) \
	ipc_log_string(_log_ctx, x); \
if (msm_ipc_router_debug_mask & RTR_DBG) \
	pr_info("[IPCRTR] "x); \
} while (0)
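
/*
 * Usage note (illustrative, not part of the original sources): with a log
 * context obtained via ipc_router_get_log_ctx(), a transport can emit a
 * line such as
 *
 *	IPC_RTR_INFO(xprt_info->log_ctx, "%s: link up", __func__);
 *
 * The string always lands in the IPC logging buffer; it is additionally
 * mirrored to the kernel log only when the debug_mask module parameter has
 * the RTR_DBG bit (value 2) set, e.g. via
 * /sys/module/<module name>/parameters/debug_mask, assuming the driver is
 * built as the "ipc_router" module.
 */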

#define IPC_ROUTER_LOG_EVENT_TX 0x01
#define IPC_ROUTER_LOG_EVENT_RX 0x02
#define IPC_ROUTER_LOG_EVENT_TX_ERR 0x03
#define IPC_ROUTER_LOG_EVENT_RX_ERR 0x04
#define IPC_ROUTER_DUMMY_DEST_NODE 0xFFFFFFFF

#define ipc_port_sk(port) ((struct sock *)(port))

static LIST_HEAD(control_ports);
static DECLARE_RWSEM(control_ports_lock_lha5);

#define LP_HASH_SIZE 32
static struct list_head local_ports[LP_HASH_SIZE];
static DECLARE_RWSEM(local_ports_lock_lhc2);

/* Server info is organized as a hash table. The server's service ID is
 * used to index into the hash table. The instance IDs of most servers
 * are 1 or 2. The service IDs are better distributed than the instance
 * IDs, so using the service ID as the hash key optimizes hash table
 * operations such as add, lookup and destroy.
 */
#define SRV_HASH_SIZE 32
static struct list_head server_list[SRV_HASH_SIZE];
static DECLARE_RWSEM(server_list_lock_lha2);
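
/*
 * Illustrative note (not part of the original file): since SRV_HASH_SIZE is a
 * power of two, the bucket for a server is computed with a mask rather than a
 * modulo, exactly as done in msm_ipc_router_lookup_server() below:
 *
 *	int key = (service & (SRV_HASH_SIZE - 1));
 *	list_for_each_entry(server, &server_list[key], list)
 *		...;
 *
 * A service ID of 0x21 (33) therefore hashes to bucket 1, and all instances
 * of that service share the same bucket.
 */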

struct msm_ipc_server {
	struct list_head list;
	struct kref ref;
	struct msm_ipc_port_name name;
	char pdev_name[32];
	int next_pdev_id;
	int synced_sec_rule;
	struct list_head server_port_list;
};

struct msm_ipc_server_port {
	struct list_head list;
	struct platform_device *pdev;
	struct msm_ipc_port_addr server_addr;
	struct msm_ipc_router_xprt_info *xprt_info;
};

struct msm_ipc_resume_tx_port {
	struct list_head list;
	u32 port_id;
	u32 node_id;
};

struct ipc_router_conn_info {
	struct list_head list;
	u32 port_id;
};

enum {
	RESET = 0,
	VALID = 1,
};

#define RP_HASH_SIZE 32
struct msm_ipc_router_remote_port {
	struct list_head list;
	struct kref ref;
	struct mutex rport_lock_lhb2; /* lock for remote port state access */
	u32 node_id;
	u32 port_id;
	int status;
	u32 tx_quota_cnt;
	struct list_head resume_tx_port_list;
	struct list_head conn_info_list;
	void *sec_rule;
	struct msm_ipc_server *server;
};

struct msm_ipc_router_xprt_info {
	struct list_head list;
	struct msm_ipc_router_xprt *xprt;
	u32 remote_node_id;
	u32 initialized;
	u32 hello_sent;
	struct list_head pkt_list;
	struct wakeup_source ws;
	struct mutex rx_lock_lhb2; /* lock for xprt rx operations */
	struct mutex tx_lock_lhb2; /* lock for xprt tx operations */
	u32 need_len;
	u32 abort_data_read;
	void *log_ctx;
	struct kref ref;
	struct completion ref_complete;
	bool dynamic_ws;

	struct kthread_worker kworker;
	struct task_struct *task;
	struct kthread_work read_data;
};

#define RT_HASH_SIZE 4
struct msm_ipc_routing_table_entry {
	struct list_head list;
	struct kref ref;
	u32 node_id;
	u32 neighbor_node_id;
	struct list_head remote_port_list[RP_HASH_SIZE];
	struct msm_ipc_router_xprt_info *xprt_info;
	struct rw_semaphore lock_lha4;
	unsigned long num_tx_bytes;
	unsigned long num_rx_bytes;
};

#define LOG_CTX_NAME_LEN 32
struct ipc_rtr_log_ctx {
	struct list_head list;
	char log_ctx_name[LOG_CTX_NAME_LEN];
	void *log_ctx;
};

static struct list_head routing_table[RT_HASH_SIZE];
static DECLARE_RWSEM(routing_table_lock_lha3);
static int routing_table_inited;

static void do_read_data(struct kthread_work *work);

static LIST_HEAD(xprt_info_list);
static DECLARE_RWSEM(xprt_info_list_lock_lha5);

static DEFINE_MUTEX(log_ctx_list_lock_lha0);
static LIST_HEAD(log_ctx_list);
static DEFINE_MUTEX(ipc_router_init_lock);
static bool is_ipc_router_inited;
static int ipc_router_core_init(void);
#define IPC_ROUTER_INIT_TIMEOUT (10 * HZ)

static u32 next_port_id;
static DEFINE_MUTEX(next_port_id_lock_lhc1);
static struct workqueue_struct *msm_ipc_router_workqueue;

static void *local_log_ctx;
static void *ipc_router_get_log_ctx(char *sub_name);
static int process_resume_tx_msg(union rr_control_msg *msg,
				 struct rr_packet *pkt);
static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr);
static int ipc_router_get_xprt_info_ref(
		struct msm_ipc_router_xprt_info *xprt_info);
static void ipc_router_put_xprt_info_ref(
		struct msm_ipc_router_xprt_info *xprt_info);
static void ipc_router_release_xprt_info_ref(struct kref *ref);

struct pil_vote_info {
	void *pil_handle;
	struct work_struct load_work;
	struct work_struct unload_work;
};

#define PIL_SUBSYSTEM_NAME_LEN 32
static char default_peripheral[PIL_SUBSYSTEM_NAME_LEN];

enum {
	DOWN,
	UP,
};

static bool is_wakeup_source_allowed;

void msm_ipc_router_set_ws_allowed(bool flag)
{
	is_wakeup_source_allowed = flag;
}

/**
 * is_sensor_port() - Check if the remote port belongs to a sensor service
 * @rport: Pointer to the remote port.
 *
 * Return: true if the remote port is a sensor service, else false.
 */
static int is_sensor_port(struct msm_ipc_router_remote_port *rport)
{
	u32 svcid = 0;

	if (rport && rport->server) {
		svcid = rport->server->name.service;
		/* Per Qualcomm KBA-180725024109: do not hold a wakelock for
		 * the threshold (proximity) algo sensor and OEM1 services
		 * (e.g. pick-up gesture), so that a far-proximity event can
		 * still light up the screen.
		 */
		if (svcid == 277 || svcid == 287)
			return false;
		if (svcid == 400 || (svcid >= 256 && svcid <= 320))
			return true;
	}

	return false;
}

static void init_routing_table(void)
{
	int i;

	for (i = 0; i < RT_HASH_SIZE; i++)
		INIT_LIST_HEAD(&routing_table[i]);
}

/**
 * ipc_router_calc_checksum() - compute the checksum for extended HELLO message
 * @msg: Reference to the IPC Router HELLO message.
 *
 * Return: Computed checksum value, 0 if msg is NULL.
 */
static u32 ipc_router_calc_checksum(union rr_control_msg *msg)
{
	u32 checksum = 0;
	int i, len;
	u16 upper_nb;
	u16 lower_nb;
	void *hello;

	if (!msg)
		return checksum;
	hello = msg;
	len = sizeof(*msg);

	for (i = 0; i < len / IPCR_WORD_SIZE; i++) {
		lower_nb = (*((u32 *)hello)) & IPC_ROUTER_CHECKSUM_MASK;
		upper_nb = ((*((u32 *)hello)) >> 16) &
				IPC_ROUTER_CHECKSUM_MASK;
		checksum = checksum + upper_nb + lower_nb;
		hello = ((u32 *)hello) + 1;
	}
	while (checksum > 0xFFFF)
		checksum = (checksum & IPC_ROUTER_CHECKSUM_MASK) +
				((checksum >> 16) & IPC_ROUTER_CHECKSUM_MASK);

	checksum = ~checksum & IPC_ROUTER_CHECKSUM_MASK;
	return checksum;
}
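
/*
 * Worked example (illustrative only, assuming IPC_ROUTER_CHECKSUM_MASK is
 * 0xFFFF and IPCR_WORD_SIZE is 4): for a message reduced to the single
 * 32-bit word 0x00030001, the lower half-word is 0x0001 and the upper
 * half-word is 0x0003, giving a running sum of 0x0004. The sum already fits
 * in 16 bits, so no folding is needed and the returned checksum is the one's
 * complement: ~0x0004 & 0xFFFF = 0xFFFB. Sums above 0xFFFF are folded back
 * into 16 bits before the complement is taken.
 */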

/**
 * skb_copy_to_log_buf() - copy the requested number of bytes from an skb_queue
 * @skb_head: skb_queue head that contains the data.
 * @pl_len: Length of the payload to be copied.
 * @hdr_offset: Length of the header present in the first skb.
 * @log_buf: Output buffer that will contain the copied bytes.
 *
 * This function copies the first @pl_len payload bytes from the skb_queue
 * into a flat buffer so that they can be formatted into a log string.
 */
static void skb_copy_to_log_buf(struct sk_buff_head *skb_head,
				unsigned int pl_len, unsigned int hdr_offset,
				unsigned char *log_buf)
{
	struct sk_buff *temp_skb;
	unsigned int copied_len = 0, copy_len = 0;
	int remaining;

	if (!skb_head) {
		IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
		return;
	}
	temp_skb = skb_peek(skb_head);
	if (unlikely(!temp_skb || !temp_skb->data)) {
		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
		return;
	}

	remaining = temp_skb->len - hdr_offset;
	skb_queue_walk(skb_head, temp_skb) {
		copy_len = remaining < pl_len ? remaining : pl_len;
		memcpy(log_buf + copied_len, temp_skb->data + hdr_offset,
		       copy_len);
		copied_len += copy_len;
		hdr_offset = 0;
		if (copied_len == pl_len)
			break;
		remaining = pl_len - remaining;
	}
}
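
/*
 * Usage note (illustrative): ipc_router_log_msg() below calls this helper
 * with a stack u64 as the destination and a length of at most 8, e.g.
 *
 *	u64 pl_buf = 0;
 *	skb_copy_to_log_buf(skb_head, 8, hdr_offset, (unsigned char *)&pl_buf);
 *
 * so only the first eight payload bytes of a data packet ever reach the log.
 */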

/**
 * ipc_router_log_msg() - log all data messages exchanged
 * @log_ctx: IPC Logging context specific to each transport.
 * @xchng_type: Identifies whether the data is received or sent.
 * @data: IPC Router data packet or control message received or to be sent.
 * @hdr: Reference to the router header.
 * @port_ptr: Local IPC Router port.
 * @rport_ptr: Remote IPC Router port.
 *
 * This function builds the log message that is passed on to the IPC logging
 * framework. The logged messages correspond to the information exchanged
 * between the IPC Router and its clients.
 */
static void ipc_router_log_msg(void *log_ctx, u32 xchng_type,
			       void *data, struct rr_header_v1 *hdr,
			       struct msm_ipc_port *port_ptr,
			       struct msm_ipc_router_remote_port *rport_ptr)
{
	struct sk_buff_head *skb_head = NULL;
	union rr_control_msg *msg = NULL;
	struct rr_packet *pkt = NULL;
	u64 pl_buf = 0;
	struct sk_buff *skb;
	u32 buf_len = 8;
	u32 svc_id = 0;
	u32 svc_ins = 0;
	unsigned int hdr_offset = 0;
	u32 port_type = 0;

	if (!log_ctx || !hdr || !data)
		return;

	if (hdr->type == IPC_ROUTER_CTRL_CMD_DATA) {
		pkt = (struct rr_packet *)data;
		skb_head = pkt->pkt_fragment_q;
		skb = skb_peek(skb_head);
		if (!skb || !skb->data) {
			IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
			return;
		}

		if (skb_queue_len(skb_head) == 1 && skb->len < 8)
			buf_len = skb->len;
		if (xchng_type == IPC_ROUTER_LOG_EVENT_TX && hdr->dst_node_id
				!= IPC_ROUTER_NID_LOCAL) {
			if (hdr->version == IPC_ROUTER_V1)
				hdr_offset = sizeof(struct rr_header_v1);
			else if (hdr->version == IPC_ROUTER_V2)
				hdr_offset = sizeof(struct rr_header_v2);
		}
		skb_copy_to_log_buf(skb_head, buf_len, hdr_offset,
				    (unsigned char *)&pl_buf);

		if (port_ptr && rport_ptr && (port_ptr->type == CLIENT_PORT) &&
		    rport_ptr->server) {
			svc_id = rport_ptr->server->name.service;
			svc_ins = rport_ptr->server->name.instance;
			port_type = CLIENT_PORT;
			port_ptr->last_served_svc_id =
					rport_ptr->server->name.service;
		} else if (port_ptr && (port_ptr->type == SERVER_PORT)) {
			svc_id = port_ptr->port_name.service;
			svc_ins = port_ptr->port_name.instance;
			port_type = SERVER_PORT;
		}
		IPC_RTR_INFO(log_ctx,
			     "%s %s %s Len:0x%x T:0x%x CF:0x%x SVC:<0x%x:0x%x> SRC:<0x%x:0x%x> DST:<0x%x:0x%x> DATA: %08x %08x",
			     (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "" :
			      (xchng_type == IPC_ROUTER_LOG_EVENT_TX ?
			       current->comm : "")),
			     (port_type == CLIENT_PORT ? "CLI" : "SRV"),
			     (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
			      (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
			       (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
				(xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
				 "UNKNOWN")))),
			     hdr->size, hdr->type, hdr->control_flag,
			     svc_id, svc_ins, hdr->src_node_id, hdr->src_port_id,
			     hdr->dst_node_id, hdr->dst_port_id,
			     (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));

	} else {
		msg = (union rr_control_msg *)data;
		if (msg->cmd == IPC_ROUTER_CTRL_CMD_NEW_SERVER ||
		    msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
			IPC_RTR_INFO(log_ctx,
				     "CTL MSG: %s cmd:0x%x SVC:<0x%x:0x%x> ADDR:<0x%x:0x%x>",
				     (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
				      (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
				       (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
					(xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
					 "UNKNOWN")))),
				     msg->cmd, msg->srv.service, msg->srv.instance,
				     msg->srv.node_id, msg->srv.port_id);
		else if (msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT ||
			 msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX)
			IPC_RTR_INFO(log_ctx,
				     "CTL MSG: %s cmd:0x%x ADDR: <0x%x:0x%x>",
				     (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
				      (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
				     msg->cmd, msg->cli.node_id, msg->cli.port_id);
		else if (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO && hdr) {
			IPC_RTR_INFO(log_ctx,
				     "CTL MSG %s cmd:0x%x ADDR:0x%x",
				     (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
				      (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
				     msg->cmd, hdr->src_node_id);
			if (hdr->src_node_id == 0 || hdr->src_node_id == 3)
				pr_err("%s: Modem QMI Readiness %s cmd:0x%x ADDR:0x%x\n",
				       __func__,
				       (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
					(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
					 "ERR")), msg->cmd, hdr->src_node_id);
		}
		else
			IPC_RTR_INFO(log_ctx,
				     "%s UNKNOWN cmd:0x%x",
				     (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
				      (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
				     msg->cmd);
	}
}

/* Must be called with routing_table_lock_lha3 locked. */
static struct msm_ipc_routing_table_entry *lookup_routing_table(
	u32 node_id)
{
	u32 key = (node_id % RT_HASH_SIZE);
	struct msm_ipc_routing_table_entry *rt_entry;

	list_for_each_entry(rt_entry, &routing_table[key], list) {
		if (rt_entry->node_id == node_id)
			return rt_entry;
	}
	return NULL;
}

/**
 * create_routing_table_entry() - Lookup and create a routing table entry
 * @node_id: Node ID of the routing table entry to be created.
 * @xprt_info: XPRT through which the node ID is reachable.
 *
 * @return: a reference to the routing table entry on success, NULL on failure.
 */
static struct msm_ipc_routing_table_entry *create_routing_table_entry(
	u32 node_id, struct msm_ipc_router_xprt_info *xprt_info)
{
	int i;
	struct msm_ipc_routing_table_entry *rt_entry;
	u32 key;

	down_write(&routing_table_lock_lha3);
	rt_entry = lookup_routing_table(node_id);
	if (rt_entry)
		goto out_create_rtentry1;

	rt_entry = kmalloc(sizeof(*rt_entry), GFP_KERNEL);
	if (!rt_entry) {
		IPC_RTR_ERR("%s: rt_entry allocation failed for %d\n",
			    __func__, node_id);
		goto out_create_rtentry2;
	}

	for (i = 0; i < RP_HASH_SIZE; i++)
		INIT_LIST_HEAD(&rt_entry->remote_port_list[i]);
	init_rwsem(&rt_entry->lock_lha4);
	kref_init(&rt_entry->ref);
	rt_entry->node_id = node_id;
	rt_entry->xprt_info = xprt_info;
	if (xprt_info)
		rt_entry->neighbor_node_id = xprt_info->remote_node_id;

	key = (node_id % RT_HASH_SIZE);
	list_add_tail(&rt_entry->list, &routing_table[key]);
out_create_rtentry1:
	kref_get(&rt_entry->ref);
out_create_rtentry2:
	up_write(&routing_table_lock_lha3);
	return rt_entry;
}
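
/*
 * Reference-counting note (illustrative, mirrors how callers below use these
 * helpers): create_routing_table_entry() and ipc_router_get_rtentry_ref()
 * both return with an extra reference held on the entry, so every successful
 * call must be balanced with a put once the caller is done, e.g.
 *
 *	rt_entry = create_routing_table_entry(node_id, xprt_info);
 *	if (rt_entry) {
 *		... use rt_entry ...
 *		kref_put(&rt_entry->ref, ipc_router_release_rtentry);
 *	}
 */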

/**
 * ipc_router_get_rtentry_ref() - Get a reference to the routing table entry
 * @node_id: Node ID of the routing table entry.
 *
 * @return: a reference to the routing table entry on success, NULL on failure.
 *
 * This function is used to obtain a reference to the routing table entry
 * corresponding to a node id.
 */
static struct msm_ipc_routing_table_entry *ipc_router_get_rtentry_ref(
	u32 node_id)
{
	struct msm_ipc_routing_table_entry *rt_entry;

	down_read(&routing_table_lock_lha3);
	rt_entry = lookup_routing_table(node_id);
	if (rt_entry)
		kref_get(&rt_entry->ref);
	up_read(&routing_table_lock_lha3);
	return rt_entry;
}

/**
 * ipc_router_release_rtentry() - Cleanup and release the routing table entry
 * @ref: Reference to the entry.
 *
 * This function is called when all references to the routing table entry are
 * released.
 */
void ipc_router_release_rtentry(struct kref *ref)
{
	struct msm_ipc_routing_table_entry *rt_entry =
		container_of(ref, struct msm_ipc_routing_table_entry, ref);

	/* All references to a routing entry will be put only under SSR.
	 * As part of SSR, all the internals of the routing table entry
	 * are cleaned. So just free the routing table entry.
	 */
	kfree(rt_entry);
}

struct rr_packet *rr_read(struct msm_ipc_router_xprt_info *xprt_info)
{
	struct rr_packet *temp_pkt;

	if (!xprt_info)
		return NULL;

	mutex_lock(&xprt_info->rx_lock_lhb2);
	if (xprt_info->abort_data_read) {
		mutex_unlock(&xprt_info->rx_lock_lhb2);
		IPC_RTR_ERR("%s detected SSR & exiting now\n",
			    xprt_info->xprt->name);
		return NULL;
	}

	if (list_empty(&xprt_info->pkt_list)) {
		mutex_unlock(&xprt_info->rx_lock_lhb2);
		return NULL;
	}

	temp_pkt = list_first_entry(&xprt_info->pkt_list,
				    struct rr_packet, list);
	list_del(&temp_pkt->list);
	if (list_empty(&xprt_info->pkt_list))
		__pm_relax(&xprt_info->ws);
	mutex_unlock(&xprt_info->rx_lock_lhb2);
	return temp_pkt;
}

struct rr_packet *clone_pkt(struct rr_packet *pkt)
{
	struct rr_packet *cloned_pkt;
	struct sk_buff *temp_skb, *cloned_skb;
	struct sk_buff_head *pkt_fragment_q;

	cloned_pkt = kzalloc(sizeof(*cloned_pkt), GFP_KERNEL);
	if (!cloned_pkt) {
		IPC_RTR_ERR("%s: failure\n", __func__);
		return NULL;
	}
	memcpy(&cloned_pkt->hdr, &pkt->hdr, sizeof(struct rr_header_v1));
	if (pkt->opt_hdr.len > 0) {
		cloned_pkt->opt_hdr.data = kmalloc(pkt->opt_hdr.len,
						   GFP_KERNEL);
		if (!cloned_pkt->opt_hdr.data) {
			IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
		} else {
			cloned_pkt->opt_hdr.len = pkt->opt_hdr.len;
			memcpy(cloned_pkt->opt_hdr.data, pkt->opt_hdr.data,
			       pkt->opt_hdr.len);
		}
	}

	pkt_fragment_q = kmalloc(sizeof(*pkt_fragment_q), GFP_KERNEL);
	if (!pkt_fragment_q) {
		IPC_RTR_ERR("%s: pkt_frag_q alloc failure\n", __func__);
		kfree(cloned_pkt);
		return NULL;
	}
	skb_queue_head_init(pkt_fragment_q);
	kref_init(&cloned_pkt->ref);

	skb_queue_walk(pkt->pkt_fragment_q, temp_skb) {
		cloned_skb = skb_clone(temp_skb, GFP_KERNEL);
		if (!cloned_skb)
			goto fail_clone;
		skb_queue_tail(pkt_fragment_q, cloned_skb);
	}
	cloned_pkt->pkt_fragment_q = pkt_fragment_q;
	cloned_pkt->length = pkt->length;
	cloned_pkt->ws_need = pkt->ws_need;
	return cloned_pkt;

fail_clone:
	while (!skb_queue_empty(pkt_fragment_q)) {
		temp_skb = skb_dequeue(pkt_fragment_q);
		kfree_skb(temp_skb);
	}
	kfree(pkt_fragment_q);
	if (cloned_pkt->opt_hdr.len > 0)
		kfree(cloned_pkt->opt_hdr.data);
	kfree(cloned_pkt);
	return NULL;
}

/**
 * create_pkt() - Create a Router packet
 * @data: SKB queue to be contained inside the packet.
 *
 * @return: pointer to packet on success, NULL on failure.
 */
struct rr_packet *create_pkt(struct sk_buff_head *data)
{
	struct rr_packet *pkt;
	struct sk_buff *temp_skb;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt) {
		IPC_RTR_ERR("%s: failure\n", __func__);
		return NULL;
	}

	if (data) {
		pkt->pkt_fragment_q = data;
		skb_queue_walk(pkt->pkt_fragment_q, temp_skb)
			pkt->length += temp_skb->len;
	} else {
		pkt->pkt_fragment_q = kmalloc(sizeof(*pkt->pkt_fragment_q),
					      GFP_KERNEL);
		if (!pkt->pkt_fragment_q) {
			IPC_RTR_ERR("%s: Couldn't alloc pkt_fragment_q\n",
				    __func__);
			kfree(pkt);
			return NULL;
		}
		skb_queue_head_init(pkt->pkt_fragment_q);
	}
	kref_init(&pkt->ref);
	return pkt;
}

void release_pkt(struct rr_packet *pkt)
{
	struct sk_buff *temp_skb;

	if (!pkt)
		return;

	if (!pkt->pkt_fragment_q) {
		kfree(pkt);
		return;
	}

	while (!skb_queue_empty(pkt->pkt_fragment_q)) {
		temp_skb = skb_dequeue(pkt->pkt_fragment_q);
		kfree_skb(temp_skb);
	}
	kfree(pkt->pkt_fragment_q);
	if (pkt->opt_hdr.len > 0)
		kfree(pkt->opt_hdr.data);
	kfree(pkt);
}

static struct sk_buff_head *msm_ipc_router_buf_to_skb(void *buf,
						      unsigned int buf_len)
{
	struct sk_buff_head *skb_head;
	struct sk_buff *skb;
	int first = 1, offset = 0;
	int skb_size, data_size;
	void *data;
	int last = 1;
	int align_size;

	skb_head = kmalloc(sizeof(*skb_head), GFP_KERNEL);
	if (!skb_head) {
		IPC_RTR_ERR("%s: Could not allocate skb_head\n", __func__);
		return NULL;
	}
	skb_queue_head_init(skb_head);

	data_size = buf_len;
	align_size = ALIGN_SIZE(data_size);
	while (offset != buf_len) {
		skb_size = data_size;
		if (first)
			skb_size += IPC_ROUTER_HDR_SIZE;
		if (last)
			skb_size += align_size;

		skb = alloc_skb(skb_size, GFP_KERNEL);
		if (!skb) {
			if (skb_size <= (PAGE_SIZE / 2)) {
				IPC_RTR_ERR("%s: cannot allocate skb\n",
					    __func__);
				goto buf_to_skb_error;
			}
			data_size = data_size / 2;
			last = 0;
			continue;
		}

		if (first) {
			skb_reserve(skb, IPC_ROUTER_HDR_SIZE);
			first = 0;
		}

		data = skb_put(skb, data_size);
		memcpy(skb->data, buf + offset, data_size);
		skb_queue_tail(skb_head, skb);
		offset += data_size;
		data_size = buf_len - offset;
		last = 1;
	}
	return skb_head;

buf_to_skb_error:
	while (!skb_queue_empty(skb_head)) {
		skb = skb_dequeue(skb_head);
		kfree_skb(skb);
	}
	kfree(skb_head);
	return NULL;
}
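
/*
 * Allocation-fallback note (illustrative): if a single skb large enough for
 * the whole buffer cannot be allocated, msm_ipc_router_buf_to_skb() halves
 * data_size and retries, queueing multiple smaller skbs instead. For example,
 * a 16 KB payload that fails as one allocation may end up as an 8 KB skb
 * followed by further skbs covering the remainder; only when the requested
 * size has already dropped to half a page or less is the failure treated as
 * fatal.
 */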

static void *msm_ipc_router_skb_to_buf(struct sk_buff_head *skb_head,
				       unsigned int len)
{
	struct sk_buff *temp;
	unsigned int offset = 0, buf_len = 0, copy_len;
	void *buf;

	if (!skb_head) {
		IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
		return NULL;
	}

	temp = skb_peek(skb_head);
	buf_len = len;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		IPC_RTR_ERR("%s: cannot allocate buf\n", __func__);
		return NULL;
	}
	skb_queue_walk(skb_head, temp) {
		copy_len = buf_len < temp->len ? buf_len : temp->len;
		memcpy(buf + offset, temp->data, copy_len);
		offset += copy_len;
		buf_len -= copy_len;
	}
	return buf;
}

void msm_ipc_router_free_skb(struct sk_buff_head *skb_head)
{
	struct sk_buff *temp_skb;

	if (!skb_head)
		return;

	while (!skb_queue_empty(skb_head)) {
		temp_skb = skb_dequeue(skb_head);
		kfree_skb(temp_skb);
	}
	kfree(skb_head);
}

/**
 * extract_optional_header() - Extract the optional header from skb
 * @pkt: Packet structure into which the header has to be extracted.
 * @opt_len: The optional header length in words.
 *
 * @return: Length of optional header in bytes if success, zero otherwise.
 */
static int extract_optional_header(struct rr_packet *pkt, u8 opt_len)
{
	size_t offset = 0, buf_len = 0, copy_len, opt_hdr_len;
	struct sk_buff *temp;
	struct sk_buff_head *skb_head;

	opt_hdr_len = opt_len * IPCR_WORD_SIZE;
	pkt->opt_hdr.data = kmalloc(opt_hdr_len, GFP_KERNEL);
	if (!pkt->opt_hdr.data) {
		IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
		return 0;
	}
	skb_head = pkt->pkt_fragment_q;
	buf_len = opt_hdr_len;
	skb_queue_walk(skb_head, temp) {
		copy_len = buf_len < temp->len ? buf_len : temp->len;
		memcpy(pkt->opt_hdr.data + offset, temp->data, copy_len);
		offset += copy_len;
		buf_len -= copy_len;
		skb_pull(temp, copy_len);
		if (temp->len == 0) {
			skb_dequeue(skb_head);
			kfree_skb(temp);
		}
	}
	pkt->opt_hdr.len = opt_hdr_len;
	return opt_hdr_len;
}

/**
 * extract_header_v1() - Extract IPC Router header of version 1
 * @pkt: Packet structure into which the header has to be extracted.
 * @skb: SKB from which the header has to be extracted.
 *
 * @return: 0 on success, standard Linux error codes on failure.
 */
static int extract_header_v1(struct rr_packet *pkt, struct sk_buff *skb)
{
	if (!pkt || !skb) {
		IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
		return -EINVAL;
	}

	memcpy(&pkt->hdr, skb->data, sizeof(struct rr_header_v1));
	skb_pull(skb, sizeof(struct rr_header_v1));
	pkt->length -= sizeof(struct rr_header_v1);
	return 0;
}

/**
 * extract_header_v2() - Extract IPC Router header of version 2
 * @pkt: Packet structure into which the header has to be extracted.
 * @skb: SKB from which the header has to be extracted.
 *
 * @return: 0 on success, standard Linux error codes on failure.
 */
static int extract_header_v2(struct rr_packet *pkt, struct sk_buff *skb)
{
	struct rr_header_v2 *hdr;
	u8 opt_len;
	size_t opt_hdr_len;
	size_t total_hdr_size = sizeof(*hdr);

	if (!pkt || !skb) {
		IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
		return -EINVAL;
	}

	hdr = (struct rr_header_v2 *)skb->data;
	pkt->hdr.version = (u32)hdr->version;
	pkt->hdr.type = (u32)hdr->type;
	pkt->hdr.src_node_id = (u32)hdr->src_node_id;
	pkt->hdr.src_port_id = (u32)hdr->src_port_id;
	pkt->hdr.size = (u32)hdr->size;
	pkt->hdr.control_flag = (u32)hdr->control_flag;
	pkt->hdr.dst_node_id = (u32)hdr->dst_node_id;
	pkt->hdr.dst_port_id = (u32)hdr->dst_port_id;
	opt_len = hdr->opt_len;
	skb_pull(skb, total_hdr_size);
	if (opt_len > 0) {
		opt_hdr_len = extract_optional_header(pkt, opt_len);
		total_hdr_size += opt_hdr_len;
	}
	pkt->length -= total_hdr_size;
	return 0;
}

/**
 * extract_header() - Extract IPC Router header
 * @pkt: Packet from which the header has to be extracted.
 *
 * @return: 0 on success, standard Linux error codes on failure.
 *
 * This function will check if the header version is v1 or v2 and invoke
 * the corresponding helper function to extract the IPC Router header.
 */
static int extract_header(struct rr_packet *pkt)
{
	struct sk_buff *temp_skb;
	int ret;

	if (!pkt) {
		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
		return -EINVAL;
	}

	temp_skb = skb_peek(pkt->pkt_fragment_q);
	if (!temp_skb || !temp_skb->data) {
		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
		return -EINVAL;
	}

	if (temp_skb->data[0] == IPC_ROUTER_V1) {
		ret = extract_header_v1(pkt, temp_skb);
	} else if (temp_skb->data[0] == IPC_ROUTER_V2) {
		ret = extract_header_v2(pkt, temp_skb);
	} else {
		IPC_RTR_ERR("%s: Invalid Header version %02x\n",
			    __func__, temp_skb->data[0]);
		print_hex_dump(KERN_ERR, "Header: ", DUMP_PREFIX_ADDRESS,
			       16, 1, temp_skb->data, pkt->length, true);
		return -EINVAL;
	}
	return ret;
}

/**
 * calc_tx_header_size() - Calculate header size to be reserved in SKB
 * @pkt: Packet in which the space for header has to be reserved.
 * @dst_xprt_info: XPRT through which the destination is reachable.
 *
 * @return: required header size on success,
 *          standard Linux error codes on failure.
 *
 * This function is used to calculate the header size that has to be reserved
 * in a transmit SKB. The header size is calculated based on the XPRT through
 * which the destination node is reachable.
 */
static int calc_tx_header_size(struct rr_packet *pkt,
			       struct msm_ipc_router_xprt_info *dst_xprt_info)
{
	int hdr_size = 0;
	int xprt_version = 0;
	struct msm_ipc_router_xprt_info *xprt_info = dst_xprt_info;

	if (!pkt) {
		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
		return -EINVAL;
	}

	if (xprt_info)
		xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);

	if (xprt_version == IPC_ROUTER_V1) {
		pkt->hdr.version = IPC_ROUTER_V1;
		hdr_size = sizeof(struct rr_header_v1);
	} else if (xprt_version == IPC_ROUTER_V2) {
		pkt->hdr.version = IPC_ROUTER_V2;
		hdr_size = sizeof(struct rr_header_v2) + pkt->opt_hdr.len;
	} else {
		IPC_RTR_ERR("%s: Invalid xprt_version %d\n",
			    __func__, xprt_version);
		hdr_size = -EINVAL;
	}

	return hdr_size;
}

/**
 * calc_rx_header_size() - Calculate the RX header size
 * @xprt_info: XPRT info of the received message.
 *
 * @return: valid header size on success, INT_MAX on failure.
 */
static int calc_rx_header_size(struct msm_ipc_router_xprt_info *xprt_info)
{
	int xprt_version = 0;
	int hdr_size = INT_MAX;

	if (xprt_info)
		xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);

	if (xprt_version == IPC_ROUTER_V1)
		hdr_size = sizeof(struct rr_header_v1);
	else if (xprt_version == IPC_ROUTER_V2)
		hdr_size = sizeof(struct rr_header_v2);
	return hdr_size;
}

/**
 * prepend_header_v1() - Prepend IPC Router header of version 1
 * @pkt: Packet structure which contains the header info to be prepended.
 * @hdr_size: Size of the header
 *
 * @return: 0 on success, standard Linux error codes on failure.
 */
static int prepend_header_v1(struct rr_packet *pkt, int hdr_size)
{
	struct sk_buff *temp_skb;
	struct rr_header_v1 *hdr;

	if (!pkt || hdr_size <= 0) {
		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
		return -EINVAL;
	}

	temp_skb = skb_peek(pkt->pkt_fragment_q);
	if (!temp_skb || !temp_skb->data) {
		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
		return -EINVAL;
	}

	if (skb_headroom(temp_skb) < hdr_size) {
		temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
		if (!temp_skb) {
			IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
				    __func__, hdr_size);
			return -ENOMEM;
		}
		skb_reserve(temp_skb, hdr_size);
	}

	hdr = (struct rr_header_v1 *)skb_push(temp_skb, hdr_size);
	memcpy(hdr, &pkt->hdr, hdr_size);
	if (temp_skb != skb_peek(pkt->pkt_fragment_q))
		skb_queue_head(pkt->pkt_fragment_q, temp_skb);
	pkt->length += hdr_size;
	return 0;
}

/**
 * prepend_header_v2() - Prepend IPC Router header of version 2
 * @pkt: Packet structure which contains the header info to be prepended.
 * @hdr_size: Size of the header
 *
 * @return: 0 on success, standard Linux error codes on failure.
 */
static int prepend_header_v2(struct rr_packet *pkt, int hdr_size)
{
	struct sk_buff *temp_skb;
	struct rr_header_v2 *hdr;

	if (!pkt || hdr_size <= 0) {
		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
		return -EINVAL;
	}

	temp_skb = skb_peek(pkt->pkt_fragment_q);
	if (!temp_skb || !temp_skb->data) {
		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
		return -EINVAL;
	}

	if (skb_headroom(temp_skb) < hdr_size) {
		temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
		if (!temp_skb) {
			IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
				    __func__, hdr_size);
			return -ENOMEM;
		}
		skb_reserve(temp_skb, hdr_size);
	}

	hdr = (struct rr_header_v2 *)skb_push(temp_skb, hdr_size);
	hdr->version = (u8)pkt->hdr.version;
	hdr->type = (u8)pkt->hdr.type;
	hdr->control_flag = (u8)pkt->hdr.control_flag;
	hdr->size = (u32)pkt->hdr.size;
	hdr->src_node_id = (u16)pkt->hdr.src_node_id;
	hdr->src_port_id = (u16)pkt->hdr.src_port_id;
	hdr->dst_node_id = (u16)pkt->hdr.dst_node_id;
	hdr->dst_port_id = (u16)pkt->hdr.dst_port_id;
	if (pkt->opt_hdr.len > 0) {
		hdr->opt_len = pkt->opt_hdr.len / IPCR_WORD_SIZE;
		memcpy(hdr + sizeof(*hdr), pkt->opt_hdr.data, pkt->opt_hdr.len);
	} else {
		hdr->opt_len = 0;
	}
	if (temp_skb != skb_peek(pkt->pkt_fragment_q))
		skb_queue_head(pkt->pkt_fragment_q, temp_skb);
	pkt->length += hdr_size;
	return 0;
}

/**
 * prepend_header() - Prepend IPC Router header
 * @pkt: Packet structure which contains the header info to be prepended.
 * @xprt_info: XPRT through which the packet is transmitted.
 *
 * @return: 0 on success, standard Linux error codes on failure.
 *
 * This function prepends the header to the packet to be transmitted. The
 * IPC Router header version to be prepended depends on the XPRT through
 * which the destination is reachable.
 */
static int prepend_header(struct rr_packet *pkt,
			  struct msm_ipc_router_xprt_info *xprt_info)
{
	int hdr_size;
	struct sk_buff *temp_skb;

	if (!pkt) {
		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
		return -EINVAL;
	}

	temp_skb = skb_peek(pkt->pkt_fragment_q);
	if (!temp_skb || !temp_skb->data) {
		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
		return -EINVAL;
	}

	hdr_size = calc_tx_header_size(pkt, xprt_info);
	if (hdr_size <= 0)
		return hdr_size;

	if (pkt->hdr.version == IPC_ROUTER_V1)
		return prepend_header_v1(pkt, hdr_size);
	else if (pkt->hdr.version == IPC_ROUTER_V2)
		return prepend_header_v2(pkt, hdr_size);
	else
		return -EINVAL;
}

/**
 * defragment_pkt() - Defragment and linearize the packet
 * @pkt: Packet to be linearized.
 *
 * @return: 0 on success, standard Linux error codes on failure.
 *
 * Some packets contain fragments of data over multiple SKBs. If an XPRT
 * does not support fragmented writes, linearize the multiple SKBs into one
 * single SKB.
 */
static int defragment_pkt(struct rr_packet *pkt)
{
	struct sk_buff *dst_skb, *src_skb, *temp_skb;
	int offset = 0, buf_len = 0, copy_len;
	void *buf;
	int align_size;

	if (!pkt || pkt->length <= 0) {
		IPC_RTR_ERR("%s: Invalid PKT\n", __func__);
		return -EINVAL;
	}

	if (skb_queue_len(pkt->pkt_fragment_q) == 1)
		return 0;

	align_size = ALIGN_SIZE(pkt->length);
	dst_skb = alloc_skb(pkt->length + align_size, GFP_KERNEL);
	if (!dst_skb) {
		IPC_RTR_ERR("%s: could not allocate one skb of size %d\n",
			    __func__, pkt->length);
		return -ENOMEM;
	}
	buf = skb_put(dst_skb, pkt->length);
	buf_len = pkt->length;

	skb_queue_walk(pkt->pkt_fragment_q, src_skb) {
		copy_len = buf_len < src_skb->len ? buf_len : src_skb->len;
		memcpy(buf + offset, src_skb->data, copy_len);
		offset += copy_len;
		buf_len -= copy_len;
	}

	while (!skb_queue_empty(pkt->pkt_fragment_q)) {
		temp_skb = skb_dequeue(pkt->pkt_fragment_q);
		kfree_skb(temp_skb);
	}
	skb_queue_tail(pkt->pkt_fragment_q, dst_skb);
	return 0;
}
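
/*
 * Example (illustrative): a packet whose fragment queue holds three skbs of
 * 400, 400 and 224 bytes (pkt->length == 1024) is rebuilt above as a single
 * 1024-byte skb (plus ALIGN_SIZE padding in the allocation), and the original
 * fragments are freed; a queue that already contains exactly one skb is left
 * untouched.
 */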

static int post_pkt_to_port(struct msm_ipc_port *port_ptr,
			    struct rr_packet *pkt, int clone)
{
	struct rr_packet *temp_pkt = pkt;
	void (*notify)(unsigned int event, void *oob_data,
		       size_t oob_data_len, void *priv);
	void (*data_ready)(struct sock *sk) = NULL;
	struct sock *sk;
	u32 pkt_type;

	if (unlikely(!port_ptr || !pkt))
		return -EINVAL;

	if (clone) {
		temp_pkt = clone_pkt(pkt);
		if (!temp_pkt) {
			IPC_RTR_ERR(
			"%s: Error cloning packet for port %08x:%08x\n",
				__func__, port_ptr->this_port.node_id,
				port_ptr->this_port.port_id);
			return -ENOMEM;
		}
	}

	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
	if (pkt->ws_need)
		__pm_stay_awake(port_ptr->port_rx_ws);
	list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q);
	wake_up(&port_ptr->port_rx_wait_q);
	notify = port_ptr->notify;
	pkt_type = temp_pkt->hdr.type;
	sk = (struct sock *)port_ptr->endpoint;
	if (sk) {
		read_lock(&sk->sk_callback_lock);
		data_ready = sk->sk_data_ready;
		read_unlock(&sk->sk_callback_lock);
	}
	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
	if (notify)
		notify(pkt_type, NULL, 0, port_ptr->priv);
	else if (sk && data_ready)
		data_ready(sk);

	return 0;
}

/**
 * ipc_router_peek_pkt_size() - Peek into the packet header to get potential
 *				packet size
 * @data: Starting address of the packet which points to router header.
 *
 * @returns: potential packet size on success, < 0 on error.
 *
 * This function is used by the underlying transport abstraction layer to
 * peek into the potential packet size of an incoming packet. This information
 * is used to perform link layer fragmentation and re-assembly.
 */
int ipc_router_peek_pkt_size(char *data)
{
	int size;

	if (!data) {
		pr_err("%s: NULL PKT\n", __func__);
		return -EINVAL;
	}

	if (data[0] == IPC_ROUTER_V1)
		size = ((struct rr_header_v1 *)data)->size +
			sizeof(struct rr_header_v1);
	else if (data[0] == IPC_ROUTER_V2)
		size = ((struct rr_header_v2 *)data)->size +
			((struct rr_header_v2 *)data)->opt_len * IPCR_WORD_SIZE
			+ sizeof(struct rr_header_v2);
	else
		return -EINVAL;

	size += ALIGN_SIZE(size);
	return size;
}
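
/*
 * Worked example (illustrative): for a v1 packet whose header advertises a
 * 13-byte payload, the peeked size is 13 + sizeof(struct rr_header_v1).
 * Assuming ALIGN_SIZE() yields the padding needed to reach a 4-byte boundary
 * and the v1 header itself is word-aligned, 3 bytes of padding are added, so
 * the transport should expect 16 bytes beyond the header. Version 2
 * additionally accounts for opt_len words of optional header.
 */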

static int post_control_ports(struct rr_packet *pkt)
{
	struct msm_ipc_port *port_ptr;

	if (!pkt)
		return -EINVAL;

	down_read(&control_ports_lock_lha5);
	list_for_each_entry(port_ptr, &control_ports, list)
		post_pkt_to_port(port_ptr, pkt, 1);
	up_read(&control_ports_lock_lha5);
	return 0;
}

static u32 allocate_port_id(void)
{
	u32 port_id = 0, prev_port_id, key;
	struct msm_ipc_port *port_ptr;

	mutex_lock(&next_port_id_lock_lhc1);
	prev_port_id = next_port_id;
	down_read(&local_ports_lock_lhc2);
	do {
		next_port_id++;
		if ((next_port_id & IPC_ROUTER_ADDRESS) == IPC_ROUTER_ADDRESS)
			next_port_id = 1;

		key = (next_port_id & (LP_HASH_SIZE - 1));
		if (list_empty(&local_ports[key])) {
			port_id = next_port_id;
			break;
		}
		list_for_each_entry(port_ptr, &local_ports[key], list) {
			if (port_ptr->this_port.port_id == next_port_id) {
				port_id = next_port_id;
				break;
			}
		}
		if (!port_id) {
			port_id = next_port_id;
			break;
		}
		port_id = 0;
	} while (next_port_id != prev_port_id);
	up_read(&local_ports_lock_lhc2);
	mutex_unlock(&next_port_id_lock_lhc1);

	return port_id;
}
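
/*
 * Allocation note (illustrative): port IDs are handed out from a wrapping
 * counter. A candidate is accepted when its local_ports[] hash bucket is
 * empty or when no existing port in that bucket already uses the same ID;
 * otherwise the counter advances. Only after the counter wraps all the way
 * back to its starting value without finding a free ID does the function
 * return 0, which callers treat as "all port ids are in use".
 */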

void msm_ipc_router_add_local_port(struct msm_ipc_port *port_ptr)
{
	u32 key;

	if (!port_ptr)
		return;

	key = (port_ptr->this_port.port_id & (LP_HASH_SIZE - 1));
	down_write(&local_ports_lock_lhc2);
	list_add_tail(&port_ptr->list, &local_ports[key]);
	up_write(&local_ports_lock_lhc2);
}

/**
 * msm_ipc_router_create_raw_port() - Create an IPC Router port
 * @endpoint: User-space socket information to be cached.
 * @notify: Function to notify incoming events on the port. It is called with
 *          the event ID, any out-of-band data and its length, and the private
 *          data registered during port creation.
 * @priv: Private data to be passed during the event notification.
 *
 * @return: Valid pointer to port on success, NULL on failure.
 *
 * This function is used to create an IPC Router port. The port is used for
 * communication locally or outside the subsystem.
 */
struct msm_ipc_port *
msm_ipc_router_create_raw_port(void *endpoint,
			       void (*notify)(unsigned int event,
					      void *oob_data,
					      size_t oob_data_len, void *priv),
			       void *priv)
{
	struct msm_ipc_port *port_ptr;

	port_ptr = kzalloc(sizeof(*port_ptr), GFP_KERNEL);
	if (!port_ptr)
		return NULL;

	port_ptr->this_port.node_id = IPC_ROUTER_NID_LOCAL;
	port_ptr->this_port.port_id = allocate_port_id();
	if (!port_ptr->this_port.port_id) {
		IPC_RTR_ERR("%s: All port ids are in use\n", __func__);
		kfree(port_ptr);
		return NULL;
	}

	mutex_init(&port_ptr->port_lock_lhc3);
	INIT_LIST_HEAD(&port_ptr->port_rx_q);
	mutex_init(&port_ptr->port_rx_q_lock_lhc3);
	init_waitqueue_head(&port_ptr->port_rx_wait_q);
	snprintf(port_ptr->rx_ws_name, MAX_WS_NAME_SZ,
		 "ipc%08x_%d_%s",
		 port_ptr->this_port.port_id,
		 task_pid_nr(current),
		 current->comm);
	port_ptr->port_rx_ws = wakeup_source_register(port_ptr->rx_ws_name);
	if (!port_ptr->port_rx_ws) {
		kfree(port_ptr);
		return NULL;
	}
	init_waitqueue_head(&port_ptr->port_tx_wait_q);
	kref_init(&port_ptr->ref);

	port_ptr->endpoint = endpoint;
	port_ptr->notify = notify;
	port_ptr->priv = priv;

	msm_ipc_router_add_local_port(port_ptr);
	if (endpoint)
		sock_hold(ipc_port_sk(endpoint));
	return port_ptr;
}

/**
 * ipc_router_get_port_ref() - Get a reference to the local port
 * @port_id: Port ID of the local port for which the reference is taken.
 *
 * @return: If the port is found, a reference to the port is returned.
 *          Else NULL is returned.
 */
static struct msm_ipc_port *ipc_router_get_port_ref(u32 port_id)
{
	int key = (port_id & (LP_HASH_SIZE - 1));
	struct msm_ipc_port *port_ptr;

	down_read(&local_ports_lock_lhc2);
	list_for_each_entry(port_ptr, &local_ports[key], list) {
		if (port_ptr->this_port.port_id == port_id) {
			kref_get(&port_ptr->ref);
			up_read(&local_ports_lock_lhc2);
			return port_ptr;
		}
	}
	up_read(&local_ports_lock_lhc2);
	return NULL;
}

/**
 * ipc_router_release_port() - Cleanup and release the port
 * @ref: Reference to the port.
 *
 * This function is called when all references to the port are released.
 */
void ipc_router_release_port(struct kref *ref)
{
	struct rr_packet *pkt, *temp_pkt;
	struct msm_ipc_port *port_ptr =
		container_of(ref, struct msm_ipc_port, ref);

	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
	list_for_each_entry_safe(pkt, temp_pkt, &port_ptr->port_rx_q, list) {
		list_del(&pkt->list);
		release_pkt(pkt);
	}
	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
	wakeup_source_unregister(port_ptr->port_rx_ws);
	if (port_ptr->endpoint)
		sock_put(ipc_port_sk(port_ptr->endpoint));
	kfree(port_ptr);
}

/**
 * ipc_router_get_rport_ref() - Get reference to the remote port
 * @node_id: Node ID corresponding to the remote port.
 * @port_id: Port ID corresponding to the remote port.
 *
 * @return: a reference to the remote port on success, NULL on failure.
 */
static struct msm_ipc_router_remote_port *ipc_router_get_rport_ref(
		u32 node_id, u32 port_id)
{
	struct msm_ipc_router_remote_port *rport_ptr;
	struct msm_ipc_routing_table_entry *rt_entry;
	int key = (port_id & (RP_HASH_SIZE - 1));

	rt_entry = ipc_router_get_rtentry_ref(node_id);
	if (!rt_entry) {
		IPC_RTR_ERR("%s: Node is not up\n", __func__);
		return NULL;
	}

	down_read(&rt_entry->lock_lha4);
	list_for_each_entry(rport_ptr,
			    &rt_entry->remote_port_list[key], list) {
		if (rport_ptr->port_id == port_id) {
			kref_get(&rport_ptr->ref);
			goto out_lookup_rmt_port1;
		}
	}
	rport_ptr = NULL;
out_lookup_rmt_port1:
	up_read(&rt_entry->lock_lha4);
	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
	return rport_ptr;
}

/**
 * ipc_router_create_rport() - Create a remote port
 * @node_id: Node ID corresponding to the remote port.
 * @port_id: Port ID corresponding to the remote port.
 * @xprt_info: XPRT through which the concerned node is reachable.
 *
 * @return: a reference to the remote port on success, NULL on failure.
 */
static struct msm_ipc_router_remote_port *ipc_router_create_rport(
		u32 node_id, u32 port_id,
		struct msm_ipc_router_xprt_info *xprt_info)
{
	struct msm_ipc_router_remote_port *rport_ptr;
	struct msm_ipc_routing_table_entry *rt_entry;
	int key = (port_id & (RP_HASH_SIZE - 1));

	rt_entry = create_routing_table_entry(node_id, xprt_info);
	if (!rt_entry) {
		IPC_RTR_ERR("%s: Node cannot be created\n", __func__);
		return NULL;
	}

	down_write(&rt_entry->lock_lha4);
	list_for_each_entry(rport_ptr,
			    &rt_entry->remote_port_list[key], list) {
		if (rport_ptr->port_id == port_id)
			goto out_create_rmt_port1;
	}

	rport_ptr = kmalloc(sizeof(*rport_ptr), GFP_KERNEL);
	if (!rport_ptr) {
		IPC_RTR_ERR("%s: Remote port alloc failed\n", __func__);
		goto out_create_rmt_port2;
	}
	rport_ptr->port_id = port_id;
	rport_ptr->node_id = node_id;
	rport_ptr->status = VALID;
	rport_ptr->sec_rule = NULL;
	rport_ptr->server = NULL;
	rport_ptr->tx_quota_cnt = 0;
	kref_init(&rport_ptr->ref);
	mutex_init(&rport_ptr->rport_lock_lhb2);
	INIT_LIST_HEAD(&rport_ptr->resume_tx_port_list);
	INIT_LIST_HEAD(&rport_ptr->conn_info_list);
	list_add_tail(&rport_ptr->list,
		      &rt_entry->remote_port_list[key]);
out_create_rmt_port1:
	kref_get(&rport_ptr->ref);
out_create_rmt_port2:
	up_write(&rt_entry->lock_lha4);
	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
	return rport_ptr;
}

/**
 * msm_ipc_router_free_resume_tx_port() - Free the resume_tx ports
 * @rport_ptr: Pointer to the remote port.
 *
 * This function deletes all the resume_tx ports associated with a remote port
 * and frees the memory allocated to each resume_tx port.
 *
 * Must be called with rport_ptr->rport_lock_lhb2 locked.
 */
static void msm_ipc_router_free_resume_tx_port(
	struct msm_ipc_router_remote_port *rport_ptr)
{
	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;

	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
				 &rport_ptr->resume_tx_port_list, list) {
		list_del(&rtx_port->list);
		kfree(rtx_port);
	}
}

/**
 * msm_ipc_router_lookup_resume_tx_port() - Lookup the resume_tx port list
 * @rport_ptr: Remote port whose resume_tx port list needs to be searched.
 * @port_id: Port ID which needs to be looked up in the list.
 *
 * return 1 if the port_id is found in the list, else 0.
 *
 * This function is used to look up the existence of a local port in
 * a remote port's resume_tx list. It is used to ensure that the same
 * port is not added to the remote port's resume_tx list repeatedly.
 *
 * Must be called with rport_ptr->rport_lock_lhb2 locked.
 */
static int msm_ipc_router_lookup_resume_tx_port(
	struct msm_ipc_router_remote_port *rport_ptr, u32 port_id)
{
	struct msm_ipc_resume_tx_port *rtx_port;

	list_for_each_entry(rtx_port, &rport_ptr->resume_tx_port_list, list) {
		if (port_id == rtx_port->port_id)
			return 1;
	}
	return 0;
}

/**
 * ipc_router_dummy_write_space() - Dummy write space available callback
 * @sk: Socket pointer for which the callback is called.
 */
void ipc_router_dummy_write_space(struct sock *sk)
{
}

/**
 * post_resume_tx() - Post the resume_tx event
 * @rport_ptr: Pointer to the remote port
 * @pkt: The data packet that is received on a resume_tx event
 * @msg: Out of band data to be passed to kernel drivers
 *
 * This function informs all the local ports that are in the
 * resume_tx_port_list of the remote port pointed to by rport_ptr about the
 * reception of the resume_tx message from that remote port. On posting the
 * information, this function sequentially deletes each entry in the
 * resume_tx_port_list of the remote port.
 *
 * Must be called with rport_ptr->rport_lock_lhb2 locked.
 */
static void post_resume_tx(struct msm_ipc_router_remote_port *rport_ptr,
			   struct rr_packet *pkt, union rr_control_msg *msg)
{
	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
	struct msm_ipc_port *local_port;
	struct sock *sk;
	void (*write_space)(struct sock *sk) = NULL;

	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
				 &rport_ptr->resume_tx_port_list, list) {
		local_port = ipc_router_get_port_ref(rtx_port->port_id);
		if (local_port && local_port->notify) {
			wake_up(&local_port->port_tx_wait_q);
			local_port->notify(IPC_ROUTER_CTRL_CMD_RESUME_TX, msg,
					   sizeof(*msg), local_port->priv);
		} else if (local_port) {
			wake_up(&local_port->port_tx_wait_q);
			sk = ipc_port_sk(local_port->endpoint);
			if (sk) {
				read_lock(&sk->sk_callback_lock);
				write_space = sk->sk_write_space;
				read_unlock(&sk->sk_callback_lock);
			}
			if (write_space &&
			    write_space != ipc_router_dummy_write_space)
				write_space(sk);
			else
				post_pkt_to_port(local_port, pkt, 1);
		} else {
			IPC_RTR_ERR("%s: Local Port %d not Found",
				    __func__, rtx_port->port_id);
		}
		if (local_port)
			kref_put(&local_port->ref, ipc_router_release_port);
		list_del(&rtx_port->list);
		kfree(rtx_port);
	}
}

/**
 * signal_rport_exit() - Signal the local ports of remote port exit
 * @rport_ptr: Remote port that is exiting.
 *
 * This function is used to signal the local ports that are waiting
 * to resume transmission to a remote port that is exiting.
 */
static void signal_rport_exit(struct msm_ipc_router_remote_port *rport_ptr)
{
	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
	struct msm_ipc_port *local_port;

	mutex_lock(&rport_ptr->rport_lock_lhb2);
	rport_ptr->status = RESET;
	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
				 &rport_ptr->resume_tx_port_list, list) {
		local_port = ipc_router_get_port_ref(rtx_port->port_id);
		if (local_port) {
			wake_up(&local_port->port_tx_wait_q);
			kref_put(&local_port->ref, ipc_router_release_port);
		}
		list_del(&rtx_port->list);
		kfree(rtx_port);
	}
	mutex_unlock(&rport_ptr->rport_lock_lhb2);
}

/**
 * ipc_router_release_rport() - Cleanup and release the remote port
 * @ref: Reference to the remote port.
 *
 * This function is called when all references to the remote port are released.
 */
static void ipc_router_release_rport(struct kref *ref)
{
	struct msm_ipc_router_remote_port *rport_ptr =
		container_of(ref, struct msm_ipc_router_remote_port, ref);

	mutex_lock(&rport_ptr->rport_lock_lhb2);
	msm_ipc_router_free_resume_tx_port(rport_ptr);
	mutex_unlock(&rport_ptr->rport_lock_lhb2);
	kfree(rport_ptr);
}

/**
 * ipc_router_destroy_rport() - Destroy the remote port
 * @rport_ptr: Pointer to the remote port to be destroyed.
 */
static void ipc_router_destroy_rport(
	struct msm_ipc_router_remote_port *rport_ptr)
{
	u32 node_id;
	struct msm_ipc_routing_table_entry *rt_entry;

	if (!rport_ptr)
		return;

	node_id = rport_ptr->node_id;
	rt_entry = ipc_router_get_rtentry_ref(node_id);
	if (!rt_entry) {
		IPC_RTR_ERR("%s: Node %d is not up\n", __func__, node_id);
		return;
	}
	down_write(&rt_entry->lock_lha4);
	list_del(&rport_ptr->list);
	up_write(&rt_entry->lock_lha4);
	signal_rport_exit(rport_ptr);
	kref_put(&rport_ptr->ref, ipc_router_release_rport);
	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
}

/**
 * msm_ipc_router_lookup_server() - Lookup server information
 * @service: Service ID of the server info to be looked up.
 * @instance: Instance ID of the server info to be looked up.
 * @node_id: Node/Processor ID in which the server is hosted.
 * @port_id: Port ID within the node in which the server is hosted.
 *
 * @return: If found, pointer to the server structure, else NULL.
 *
 * Note1: Lock the server_list_lock_lha2 before accessing this function.
 * Note2: If the <node_id:port_id> are <0:0>, then the lookup is restricted
 *        to <service:instance>. Used only when a client wants to send a
 *        message to any QMI server.
 */
static struct msm_ipc_server *msm_ipc_router_lookup_server(
				u32 service,
				u32 instance,
				u32 node_id,
				u32 port_id)
{
	struct msm_ipc_server *server;
	struct msm_ipc_server_port *server_port;
	int key = (service & (SRV_HASH_SIZE - 1));

	list_for_each_entry(server, &server_list[key], list) {
		if ((server->name.service != service) ||
		    (server->name.instance != instance))
			continue;
		if ((node_id == 0) && (port_id == 0))
			return server;
		list_for_each_entry(server_port, &server->server_port_list,
				    list) {
			if ((server_port->server_addr.node_id == node_id) &&
			    (server_port->server_addr.port_id == port_id))
				return server;
		}
	}
	return NULL;
}

/**
 * ipc_router_get_server_ref() - Get reference to the server
 * @svc: Service ID for which the reference is required.
 * @ins: Instance ID for which the reference is required.
 * @node_id: Node/Processor ID in which the server is hosted.
 * @port_id: Port ID within the node in which the server is hosted.
 *
 * @return: If found, a reference to the server, else NULL.
 */
static struct msm_ipc_server *ipc_router_get_server_ref(
	u32 svc, u32 ins, u32 node_id, u32 port_id)
{
	struct msm_ipc_server *server;

	down_read(&server_list_lock_lha2);
	server = msm_ipc_router_lookup_server(svc, ins, node_id, port_id);
	if (server)
		kref_get(&server->ref);
	up_read(&server_list_lock_lha2);
	return server;
}

/**
 * ipc_router_release_server() - Cleanup and release the server
 * @ref: Reference to the server.
 *
 * This function is called when all references to the server are released.
 */
static void ipc_router_release_server(struct kref *ref)
{
	struct msm_ipc_server *server =
		container_of(ref, struct msm_ipc_server, ref);

	kfree(server);
}
1784
1785/**
1786 * msm_ipc_router_create_server() - Add server info to hash table
1787 * @service: Service ID of the server info to be created.
1788 * @instance: Instance ID of the server info to be created.
1789 * @node_id: Node/Processor ID in which the server is hosted.
1790 * @port_id: Port ID within the node in which the server is hosted.
1791 * @xprt_info: XPRT through which the node hosting the server is reached.
1792 *
1793 * @return: Pointer to server structure on success, else NULL.
1794 *
1795 * This function adds the server info to the hash table. If the same
1796 * server (i.e. <service_id:instance_id>) is hosted on different nodes, the
1797 * ports are maintained as a list of "server_port" entries under "server".
1798 */
1799static struct msm_ipc_server *msm_ipc_router_create_server(
1800 u32 service,
1801 u32 instance,
1802 u32 node_id,
1803 u32 port_id,
1804 struct msm_ipc_router_xprt_info *xprt_info)
1805{
1806 struct msm_ipc_server *server = NULL;
1807 struct msm_ipc_server_port *server_port;
1808 struct platform_device *pdev;
1809 int key = (service & (SRV_HASH_SIZE - 1));
1810
1811 down_write(&server_list_lock_lha2);
1812 server = msm_ipc_router_lookup_server(service, instance, 0, 0);
1813 if (server) {
1814 list_for_each_entry(server_port, &server->server_port_list,
1815 list) {
1816 if ((server_port->server_addr.node_id == node_id) &&
1817 (server_port->server_addr.port_id == port_id))
1818 goto return_server;
1819 }
1820 goto create_srv_port;
1821 }
1822
1823 server = kzalloc(sizeof(*server), GFP_KERNEL);
1824 if (!server) {
1825 up_write(&server_list_lock_lha2);
1826 IPC_RTR_ERR("%s: Server allocation failed\n", __func__);
1827 return NULL;
1828 }
1829 server->name.service = service;
1830 server->name.instance = instance;
1831 server->synced_sec_rule = 0;
1832 INIT_LIST_HEAD(&server->server_port_list);
1833 kref_init(&server->ref);
1834 list_add_tail(&server->list, &server_list[key]);
1835 scnprintf(server->pdev_name, sizeof(server->pdev_name),
1836 "SVC%08x:%08x", service, instance);
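	/*
	 * Illustrative example (hypothetical IDs): service 0x0f, instance
	 * 0x01 yields the platform device name "SVC0000000f:00000001",
	 * which a matching platform driver on the local node can bind to.
	 */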
1837 server->next_pdev_id = 1;
1838
1839create_srv_port:
1840 server_port = kzalloc(sizeof(*server_port), GFP_KERNEL);
1841 pdev = platform_device_alloc(server->pdev_name, server->next_pdev_id);
1842 if (!server_port || !pdev) {
1843 kfree(server_port);
1844 if (pdev)
1845 platform_device_put(pdev);
1846 if (list_empty(&server->server_port_list)) {
1847 list_del(&server->list);
1848 kfree(server);
1849 }
1850 up_write(&server_list_lock_lha2);
1851 IPC_RTR_ERR("%s: Server Port allocation failed\n", __func__);
1852 return NULL;
1853 }
1854 server_port->pdev = pdev;
1855 server_port->server_addr.node_id = node_id;
1856 server_port->server_addr.port_id = port_id;
1857 server_port->xprt_info = xprt_info;
1858 list_add_tail(&server_port->list, &server->server_port_list);
1859 server->next_pdev_id++;
1860 platform_device_add(server_port->pdev);
1861
1862return_server:
1863 /* Add a reference so that the caller can put it back */
1864 kref_get(&server->ref);
1865 up_write(&server_list_lock_lha2);
1866 return server;
1867}
1868
1869/**
1870 * ipc_router_destroy_server_nolock() - Remove server info from hash table
1871 * @server: Server info to be removed.
1872 * @node_id: Node/Processor ID in which the server is hosted.
1873 * @port_id: Port ID within the node in which the server is hosted.
1874 *
1875 * This function removes the server_port identified using <node_id:port_id>
1876 * from the server structure. If the server_port list under server structure
1877 * is empty after removal, then remove the server structure from the server
1878 * hash table. This function must be called with server_list_lock_lha2 locked.
1879 */
1880static void ipc_router_destroy_server_nolock(struct msm_ipc_server *server,
1881 u32 node_id, u32 port_id)
1882{
1883 struct msm_ipc_server_port *server_port;
1884 bool server_port_found = false;
1885
1886 if (!server)
1887 return;
1888
1889 list_for_each_entry(server_port, &server->server_port_list, list) {
1890 if ((server_port->server_addr.node_id == node_id) &&
1891 (server_port->server_addr.port_id == port_id)) {
1892 server_port_found = true;
1893 break;
1894 }
1895 }
1896 if (server_port_found && server_port) {
1897 platform_device_unregister(server_port->pdev);
1898 list_del(&server_port->list);
1899 kfree(server_port);
1900 }
1901 if (list_empty(&server->server_port_list)) {
1902 list_del(&server->list);
1903 kref_put(&server->ref, ipc_router_release_server);
1904 }
1905}
1906
1907/**
1908 * ipc_router_destroy_server() - Remove server info from hash table
1909 * @server: Server info to be removed.
1910 * @node_id: Node/Processor ID in which the server is hosted.
1911 * @port_id: Port ID within the node in which the server is hosted.
1912 *
1913 * This function removes the server_port identified using <node_id:port_id>
1914 * from the server structure. If the server_port list under server structure
1915 * is empty after removal, then remove the server structure from the server
1916 * hash table.
1917 */
1918static void ipc_router_destroy_server(struct msm_ipc_server *server,
1919 u32 node_id, u32 port_id)
1920{
1921 down_write(&server_list_lock_lha2);
1922 ipc_router_destroy_server_nolock(server, node_id, port_id);
1923 up_write(&server_list_lock_lha2);
1924}
1925
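/*
 * A control message takes one of three paths below: it is posted to the
 * local control ports, handled locally as a RESUME_TX, or (for HELLO, or
 * once the transport has been initialized) prepended with a transport
 * header and written out on the given transport.
 */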
1926static int ipc_router_send_ctl_msg(
1927 struct msm_ipc_router_xprt_info *xprt_info,
1928 union rr_control_msg *msg,
1929 u32 dst_node_id)
1930{
1931 struct rr_packet *pkt;
1932 struct sk_buff *ipc_rtr_pkt;
1933 struct rr_header_v1 *hdr;
1934 int pkt_size;
1935 void *data;
1936 int ret = -EINVAL;
1937
1938 pkt = create_pkt(NULL);
1939 if (!pkt) {
1940 IPC_RTR_ERR("%s: pkt alloc failed\n", __func__);
1941 return -ENOMEM;
1942 }
1943
1944 pkt_size = IPC_ROUTER_HDR_SIZE + sizeof(*msg);
1945 ipc_rtr_pkt = alloc_skb(pkt_size, GFP_KERNEL);
1946 if (!ipc_rtr_pkt) {
1947 IPC_RTR_ERR("%s: ipc_rtr_pkt alloc failed\n", __func__);
1948 release_pkt(pkt);
1949 return -ENOMEM;
1950 }
1951
1952 skb_reserve(ipc_rtr_pkt, IPC_ROUTER_HDR_SIZE);
1953 data = skb_put(ipc_rtr_pkt, sizeof(*msg));
1954 memcpy(data, msg, sizeof(*msg));
1955 skb_queue_tail(pkt->pkt_fragment_q, ipc_rtr_pkt);
1956 pkt->length = sizeof(*msg);
1957
1958 hdr = &pkt->hdr;
1959 hdr->version = IPC_ROUTER_V1;
1960 hdr->type = msg->cmd;
1961 hdr->src_node_id = IPC_ROUTER_NID_LOCAL;
1962 hdr->src_port_id = IPC_ROUTER_ADDRESS;
1963 hdr->control_flag = 0;
1964 hdr->size = sizeof(*msg);
1965 if (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX ||
1966 (!xprt_info && dst_node_id == IPC_ROUTER_NID_LOCAL))
1967 hdr->dst_node_id = dst_node_id;
1968 else if (xprt_info)
1969 hdr->dst_node_id = xprt_info->remote_node_id;
1970 hdr->dst_port_id = IPC_ROUTER_ADDRESS;
1971
1972 if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
1973 msg->cmd != IPC_ROUTER_CTRL_CMD_RESUME_TX) {
1974 ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
1975 hdr, NULL, NULL);
1976 ret = post_control_ports(pkt);
1977 } else if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
1978 msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX) {
1979 ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
1980 hdr, NULL, NULL);
1981 ret = process_resume_tx_msg(msg, pkt);
1982 } else if (xprt_info && (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO ||
1983 xprt_info->initialized)) {
1984 mutex_lock(&xprt_info->tx_lock_lhb2);
1985 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_TX,
1986 msg, hdr, NULL, NULL);
1987 ret = prepend_header(pkt, xprt_info);
1988 if (ret < 0) {
1989 mutex_unlock(&xprt_info->tx_lock_lhb2);
1990 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
1991 release_pkt(pkt);
1992 return ret;
1993 }
1994
1995 ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
1996 mutex_unlock(&xprt_info->tx_lock_lhb2);
1997 }
1998
1999 release_pkt(pkt);
2000 return ret;
2001}
2002
2003static int
2004msm_ipc_router_send_server_list(u32 node_id,
2005 struct msm_ipc_router_xprt_info *xprt_info)
2006{
2007 union rr_control_msg ctl;
2008 struct msm_ipc_server *server;
2009 struct msm_ipc_server_port *server_port;
2010 int i;
2011
2012 if (!xprt_info || !xprt_info->initialized) {
2013 IPC_RTR_ERR("%s: Xprt info not initialized\n", __func__);
2014 return -EINVAL;
2015 }
2016
2017 memset(&ctl, 0, sizeof(ctl));
2018 ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
2019
2020 for (i = 0; i < SRV_HASH_SIZE; i++) {
2021 list_for_each_entry(server, &server_list[i], list) {
2022 ctl.srv.service = server->name.service;
2023 ctl.srv.instance = server->name.instance;
2024 list_for_each_entry(server_port,
2025 &server->server_port_list, list) {
2026 if (server_port->server_addr.node_id !=
2027 node_id)
2028 continue;
2029
2030 ctl.srv.node_id =
2031 server_port->server_addr.node_id;
2032 ctl.srv.port_id =
2033 server_port->server_addr.port_id;
2034 ipc_router_send_ctl_msg
2035 (xprt_info, &ctl,
2036 IPC_ROUTER_DUMMY_DEST_NODE);
2037 }
2038 }
2039 }
2040
2041 return 0;
2042}
2043
2044static int broadcast_ctl_msg_locally(union rr_control_msg *msg)
2045{
2046 return ipc_router_send_ctl_msg(NULL, msg, IPC_ROUTER_NID_LOCAL);
2047}
2048
2049static int broadcast_ctl_msg(union rr_control_msg *ctl)
2050{
2051 struct msm_ipc_router_xprt_info *xprt_info;
2052
2053 down_read(&xprt_info_list_lock_lha5);
2054 list_for_each_entry(xprt_info, &xprt_info_list, list) {
2055 ipc_router_send_ctl_msg(xprt_info, ctl,
2056 IPC_ROUTER_DUMMY_DEST_NODE);
2057 }
2058 up_read(&xprt_info_list_lock_lha5);
2059 broadcast_ctl_msg_locally(ctl);
2060
2061 return 0;
2062}
2063
2064static int relay_ctl_msg(struct msm_ipc_router_xprt_info *xprt_info,
2065 union rr_control_msg *ctl)
2066{
2067 struct msm_ipc_router_xprt_info *fwd_xprt_info;
2068
2069 if (!xprt_info || !ctl)
2070 return -EINVAL;
2071
2072 down_read(&xprt_info_list_lock_lha5);
2073 list_for_each_entry(fwd_xprt_info, &xprt_info_list, list) {
2074 if (xprt_info->xprt->link_id != fwd_xprt_info->xprt->link_id)
2075 ipc_router_send_ctl_msg(fwd_xprt_info, ctl,
2076 IPC_ROUTER_DUMMY_DEST_NODE);
2077 }
2078 up_read(&xprt_info_list_lock_lha5);
2079
2080 return 0;
2081}
2082
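/*
 * Multi-hop forwarding: a packet whose destination node is reachable over
 * a different transport is re-headered and written out on that forwarding
 * transport, unless it would be routed back towards the node it came from
 * or both transports belong to the same cluster (same link_id).
 */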
2083static int forward_msg(struct msm_ipc_router_xprt_info *xprt_info,
2084 struct rr_packet *pkt)
2085{
2086 struct rr_header_v1 *hdr;
2087 struct msm_ipc_router_xprt_info *fwd_xprt_info;
2088 struct msm_ipc_routing_table_entry *rt_entry;
2089 int ret = 0;
2090 int fwd_xprt_option;
2091
2092 if (!xprt_info || !pkt)
2093 return -EINVAL;
2094
2095 hdr = &pkt->hdr;
2096 rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
2097 if (!(rt_entry) || !(rt_entry->xprt_info)) {
2098 IPC_RTR_ERR("%s: Routing table not initialized\n", __func__);
2099 ret = -ENODEV;
2100 goto fm_error1;
2101 }
2102
2103 down_read(&rt_entry->lock_lha4);
2104 fwd_xprt_info = rt_entry->xprt_info;
2105 ret = ipc_router_get_xprt_info_ref(fwd_xprt_info);
2106 if (ret < 0) {
2107 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
2108 goto fm_error_xprt;
2109 }
2110 ret = prepend_header(pkt, fwd_xprt_info);
2111 if (ret < 0) {
2112 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
2113 goto fm_error2;
2114 }
2115 fwd_xprt_option = fwd_xprt_info->xprt->get_option(fwd_xprt_info->xprt);
2116 if (!(fwd_xprt_option & FRAG_PKT_WRITE_ENABLE)) {
2117 ret = defragment_pkt(pkt);
2118 if (ret < 0)
2119 goto fm_error2;
2120 }
2121
2122 mutex_lock(&fwd_xprt_info->tx_lock_lhb2);
2123 if (xprt_info->remote_node_id == fwd_xprt_info->remote_node_id) {
2124 IPC_RTR_ERR("%s: Discarding Command to route back\n", __func__);
2125 ret = -EINVAL;
2126 goto fm_error3;
2127 }
2128
2129 if (xprt_info->xprt->link_id == fwd_xprt_info->xprt->link_id) {
2130 IPC_RTR_ERR("%s: DST in the same cluster\n", __func__);
2131 ret = 0;
2132 goto fm_error3;
2133 }
2134 fwd_xprt_info->xprt->write(pkt, pkt->length, fwd_xprt_info->xprt);
2135 IPC_RTR_INFO(fwd_xprt_info->log_ctx,
2136 "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
2137 "FWD", "TX", hdr->size, hdr->type, hdr->control_flag,
2138 hdr->src_node_id, hdr->src_port_id,
2139 hdr->dst_node_id, hdr->dst_port_id);
2140
2141fm_error3:
2142 mutex_unlock(&fwd_xprt_info->tx_lock_lhb2);
2143fm_error2:
2144 ipc_router_put_xprt_info_ref(fwd_xprt_info);
2145fm_error_xprt:
2146 up_read(&rt_entry->lock_lha4);
2147fm_error1:
2148 if (rt_entry)
2149 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2150 return ret;
2151}
2152
2153static int msm_ipc_router_send_remove_client(struct comm_mode_info *mode_info,
2154 u32 node_id, u32 port_id)
2155{
2156 union rr_control_msg msg;
2157 struct msm_ipc_router_xprt_info *tmp_xprt_info;
2158 int mode;
2159 void *xprt_info;
2160 int rc = 0;
2161
2162 if (!mode_info) {
2163 IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
2164 return -EINVAL;
2165 }
2166 mode = mode_info->mode;
2167 xprt_info = mode_info->xprt_info;
2168
2169 memset(&msg, 0, sizeof(msg));
2170 msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
2171 msg.cli.node_id = node_id;
2172 msg.cli.port_id = port_id;
2173
2174 if ((mode == SINGLE_LINK_MODE) && xprt_info) {
2175 down_read(&xprt_info_list_lock_lha5);
2176 list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
2177 if (tmp_xprt_info != xprt_info)
2178 continue;
2179 ipc_router_send_ctl_msg(tmp_xprt_info, &msg,
2180 IPC_ROUTER_DUMMY_DEST_NODE);
2181 break;
2182 }
2183 up_read(&xprt_info_list_lock_lha5);
2184 } else if ((mode == SINGLE_LINK_MODE) && !xprt_info) {
2185 broadcast_ctl_msg_locally(&msg);
2186 } else if (mode == MULTI_LINK_MODE) {
2187 broadcast_ctl_msg(&msg);
2188 } else if (mode != NULL_MODE) {
2189 IPC_RTR_ERR(
2190 "%s: Invalid mode(%d) + xprt_inf(%p) for %08x:%08x\n",
2191 __func__, mode, xprt_info, node_id, port_id);
2192 rc = -EINVAL;
2193 }
2194 return rc;
2195}
2196
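/*
 * Track whether a local port has communicated over one or more transports:
 * NULL_MODE -> SINGLE_LINK_MODE on the first transport seen, and
 * SINGLE_LINK_MODE -> MULTI_LINK_MODE once a different transport is used.
 * msm_ipc_router_send_remove_client() uses this mode to decide how widely
 * the REMOVE_CLIENT notification must be sent.
 */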
2197static void update_comm_mode_info(struct comm_mode_info *mode_info,
2198 struct msm_ipc_router_xprt_info *xprt_info)
2199{
2200 if (!mode_info) {
2201 IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
2202 return;
2203 }
2204
2205 if (mode_info->mode == NULL_MODE) {
2206 mode_info->xprt_info = xprt_info;
2207 mode_info->mode = SINGLE_LINK_MODE;
2208 } else if (mode_info->mode == SINGLE_LINK_MODE &&
2209 mode_info->xprt_info != xprt_info) {
2210 mode_info->mode = MULTI_LINK_MODE;
2211 }
2212}
2213
2214/**
2215 * cleanup_rmt_server() - Cleanup server hosted in the remote port
2216 * @xprt_info: XPRT through which this cleanup event is handled.
2217 * @rport_ptr: Remote port that is being cleaned up.
2218 * @server: Server that is hosted in the remote port.
2219 */
2220static void cleanup_rmt_server(struct msm_ipc_router_xprt_info *xprt_info,
2221 struct msm_ipc_router_remote_port *rport_ptr,
2222 struct msm_ipc_server *server)
2223{
2224 union rr_control_msg ctl;
2225
2226 memset(&ctl, 0, sizeof(ctl));
2227 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
2228 ctl.srv.service = server->name.service;
2229 ctl.srv.instance = server->name.instance;
2230 ctl.srv.node_id = rport_ptr->node_id;
2231 ctl.srv.port_id = rport_ptr->port_id;
2232 if (xprt_info)
2233 relay_ctl_msg(xprt_info, &ctl);
2234 broadcast_ctl_msg_locally(&ctl);
2235 ipc_router_destroy_server_nolock(server, rport_ptr->node_id,
2236 rport_ptr->port_id);
2237}
2238
2239static void cleanup_rmt_ports(struct msm_ipc_router_xprt_info *xprt_info,
2240 struct msm_ipc_routing_table_entry *rt_entry)
2241{
2242 struct msm_ipc_router_remote_port *rport_ptr, *tmp_rport_ptr;
2243 struct msm_ipc_server *server;
2244 union rr_control_msg ctl;
2245 int j;
2246
2247 memset(&ctl, 0, sizeof(ctl));
2248 for (j = 0; j < RP_HASH_SIZE; j++) {
2249 list_for_each_entry_safe(rport_ptr, tmp_rport_ptr,
2250 &rt_entry->remote_port_list[j], list) {
2251 list_del(&rport_ptr->list);
2252 mutex_lock(&rport_ptr->rport_lock_lhb2);
2253 server = rport_ptr->server;
2254 rport_ptr->server = NULL;
2255 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2256 ipc_router_reset_conn(rport_ptr);
2257 if (server) {
2258 cleanup_rmt_server(xprt_info, rport_ptr,
2259 server);
2260 server = NULL;
2261 }
2262
2263 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
2264 ctl.cli.node_id = rport_ptr->node_id;
2265 ctl.cli.port_id = rport_ptr->port_id;
2266 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2267
2268 relay_ctl_msg(xprt_info, &ctl);
2269 broadcast_ctl_msg_locally(&ctl);
2270 }
2271 }
2272}
2273
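/*
 * Detach every routing table entry reached through the given transport and
 * clean up the remote ports behind it; REMOVE_SERVER/REMOVE_CLIENT
 * notifications are relayed to the other transports and to local clients.
 */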
2274static void msm_ipc_cleanup_routing_table(
2275 struct msm_ipc_router_xprt_info *xprt_info)
2276{
2277 int i;
2278 struct msm_ipc_routing_table_entry *rt_entry, *tmp_rt_entry;
2279
2280 if (!xprt_info) {
2281 IPC_RTR_ERR("%s: Invalid xprt_info\n", __func__);
2282 return;
2283 }
2284
2285 down_write(&server_list_lock_lha2);
2286 down_write(&routing_table_lock_lha3);
2287 for (i = 0; i < RT_HASH_SIZE; i++) {
2288 list_for_each_entry_safe(rt_entry, tmp_rt_entry,
2289 &routing_table[i], list) {
2290 down_write(&rt_entry->lock_lha4);
2291 if (rt_entry->xprt_info != xprt_info) {
2292 up_write(&rt_entry->lock_lha4);
2293 continue;
2294 }
2295 cleanup_rmt_ports(xprt_info, rt_entry);
2296 rt_entry->xprt_info = NULL;
2297 up_write(&rt_entry->lock_lha4);
2298 list_del(&rt_entry->list);
2299 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2300 }
2301 }
2302 up_write(&routing_table_lock_lha3);
2303 up_write(&server_list_lock_lha2);
2304}
2305
2306/**
2307 * sync_sec_rule() - Synchronize the security rule into the server structure
2308 * @server: Server structure where the rule has to be synchronized.
2309 * @rule: Security rule to be synchronized.
2310 *
2311 * This function is used to update the server structure with the security
2312 * rule configured for the <service:instance> corresponding to that server.
2313 */
2314static void sync_sec_rule(struct msm_ipc_server *server, void *rule)
2315{
2316 struct msm_ipc_server_port *server_port;
2317 struct msm_ipc_router_remote_port *rport_ptr = NULL;
2318
2319 list_for_each_entry(server_port, &server->server_port_list, list) {
2320 rport_ptr = ipc_router_get_rport_ref(
2321 server_port->server_addr.node_id,
2322 server_port->server_addr.port_id);
2323 if (!rport_ptr)
2324 continue;
2325 rport_ptr->sec_rule = rule;
2326 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2327 }
2328 server->synced_sec_rule = 1;
2329}
2330
2331/**
2332 * msm_ipc_sync_sec_rule() - Sync the security rule to the service
2333 * @service: Service for which the rule has to be synchronized.
2334 * @instance: Instance for which the rule has to be synchronized.
2335 * @rule: Security rule to be synchronized.
2336 *
2337 * This function is used to synchronize the security rule with the server
2338 * hash table, if the user-space script configures the rule after the service
2339 * has come up. This function is used to synchronize the security rule to a
2340 * specific service and optionally a specific instance.
2341 */
2342void msm_ipc_sync_sec_rule(u32 service, u32 instance, void *rule)
2343{
2344 int key = (service & (SRV_HASH_SIZE - 1));
2345 struct msm_ipc_server *server;
2346
2347 down_write(&server_list_lock_lha2);
2348 list_for_each_entry(server, &server_list[key], list) {
2349 if (server->name.service != service)
2350 continue;
2351
2352 if (server->name.instance != instance &&
2353 instance != ALL_INSTANCE)
2354 continue;
2355
2356 /* If the rule applies to all instances and if the specific
2357 * instance of a service has a rule synchronized already,
2358 * do not apply the rule for that specific instance.
2359 */
2360 if (instance == ALL_INSTANCE && server->synced_sec_rule)
2361 continue;
2362
2363 sync_sec_rule(server, rule);
2364 }
2365 up_write(&server_list_lock_lha2);
2366}
2367
2368/**
2369 * msm_ipc_sync_default_sec_rule() - Default security rule to all services
2370 * @rule: Security rule to be synchronized.
2371 *
2372 * This function is used to synchronize the security rule with the server
2373 * hash table, if the user-space script configures the rule after the service
2374 * has come up. This function is used to synchronize the security rule that
2375 * applies to all services, if the concerned service does not have any rule
2376 * defined.
2377 */
2378void msm_ipc_sync_default_sec_rule(void *rule)
2379{
2380 int key;
2381 struct msm_ipc_server *server;
2382
2383 down_write(&server_list_lock_lha2);
2384 for (key = 0; key < SRV_HASH_SIZE; key++) {
2385 list_for_each_entry(server, &server_list[key], list) {
2386 if (server->synced_sec_rule)
2387 continue;
2388
2389 sync_sec_rule(server, rule);
2390 }
2391 }
2392 up_write(&server_list_lock_lha2);
2393}
2394
2395/**
2396 * ipc_router_reset_conn() - Reset the connection to remote port
2397 * @rport_ptr: Pointer to the remote port to be disconnected.
2398 *
2399 * This function is used to reset all the local ports that are connected to
2400 * the remote port being passed.
2401 */
2402static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr)
2403{
2404 struct msm_ipc_port *port_ptr;
2405 struct ipc_router_conn_info *conn_info, *tmp_conn_info;
2406
2407 mutex_lock(&rport_ptr->rport_lock_lhb2);
2408 list_for_each_entry_safe(conn_info, tmp_conn_info,
2409 &rport_ptr->conn_info_list, list) {
2410 port_ptr = ipc_router_get_port_ref(conn_info->port_id);
2411 if (port_ptr) {
2412 mutex_lock(&port_ptr->port_lock_lhc3);
2413 port_ptr->conn_status = CONNECTION_RESET;
2414 mutex_unlock(&port_ptr->port_lock_lhc3);
2415 wake_up(&port_ptr->port_rx_wait_q);
2416 kref_put(&port_ptr->ref, ipc_router_release_port);
2417 }
2418
2419 list_del(&conn_info->list);
2420 kfree(conn_info);
2421 }
2422 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2423}
2424
2425/**
2426 * ipc_router_set_conn() - Set the connection by initializing dest address
2427 * @port_ptr: Local port in which the connection has to be set.
2428 * @addr: Destination address of the connection.
2429 *
2430 * @return: 0 on success, standard Linux error codes on failure.
2431 */
2432int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
2433 struct msm_ipc_addr *addr)
2434{
2435 struct msm_ipc_router_remote_port *rport_ptr;
2436 struct ipc_router_conn_info *conn_info;
2437
2438 if (unlikely(!port_ptr || !addr))
2439 return -EINVAL;
2440
2441 if (addr->addrtype != MSM_IPC_ADDR_ID) {
2442 IPC_RTR_ERR("%s: Invalid Address type\n", __func__);
2443 return -EINVAL;
2444 }
2445
2446 if (port_ptr->type == SERVER_PORT) {
2447 IPC_RTR_ERR("%s: Connection refused on a server port\n",
2448 __func__);
2449 return -ECONNREFUSED;
2450 }
2451
2452 if (port_ptr->conn_status == CONNECTED) {
2453 IPC_RTR_ERR("%s: Port %08x already connected\n",
2454 __func__, port_ptr->this_port.port_id);
2455 return -EISCONN;
2456 }
2457
2458 conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
2459 if (!conn_info) {
2460 IPC_RTR_ERR("%s: Error allocating conn_info\n", __func__);
2461 return -ENOMEM;
2462 }
2463 INIT_LIST_HEAD(&conn_info->list);
2464 conn_info->port_id = port_ptr->this_port.port_id;
2465
2466 rport_ptr = ipc_router_get_rport_ref(addr->addr.port_addr.node_id,
2467 addr->addr.port_addr.port_id);
2468 if (!rport_ptr) {
2469 IPC_RTR_ERR("%s: Invalid remote endpoint\n", __func__);
2470 kfree(conn_info);
2471 return -ENODEV;
2472 }
2473 mutex_lock(&rport_ptr->rport_lock_lhb2);
2474 list_add_tail(&conn_info->list, &rport_ptr->conn_info_list);
2475 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2476
2477 mutex_lock(&port_ptr->port_lock_lhc3);
2478 memcpy(&port_ptr->dest_addr, &addr->addr.port_addr,
2479 sizeof(struct msm_ipc_port_addr));
2480 port_ptr->conn_status = CONNECTED;
2481 mutex_unlock(&port_ptr->port_lock_lhc3);
2482 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2483 return 0;
2484}
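
/*
 * Minimal usage sketch for an in-kernel caller (illustrative only; node_id
 * and port_id are assumed to have been obtained from a server lookup):
 *
 *	struct msm_ipc_addr dest = {
 *		.addrtype = MSM_IPC_ADDR_ID,
 *		.addr.port_addr = { .node_id = node_id, .port_id = port_id },
 *	};
 *	rc = ipc_router_set_conn(port_ptr, &dest);
 *
 * On success the destination address is recorded in the local port, and the
 * port is woken with CONNECTION_RESET if the remote port later goes away.
 */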
2485
2486/**
2487 * do_version_negotiation() - perform a version negotiation and set the version
2488 * @xprt_info: Pointer to the IPC Router transport info structure.
2489 * @msg: Pointer to the IPC Router HELLO message.
2490 *
2491 * This function performs the version negotiation by verifying the computed
2492 * checksum first. If the checksum matches the magic number, it sets the
2493 * negotiated IPC Router version on the transport.
2494 */
2495static void do_version_negotiation(struct msm_ipc_router_xprt_info *xprt_info,
2496 union rr_control_msg *msg)
2497{
2498 u32 magic;
2499 unsigned int version;
2500
2501 if (!xprt_info)
2502 return;
2503 magic = ipc_router_calc_checksum(msg);
2504 if (magic == IPC_ROUTER_HELLO_MAGIC) {
2505 version = fls(msg->hello.versions & IPC_ROUTER_VER_BITMASK) - 1;
2506		/* Bits 0 and 31 are reserved for future use */
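		/*
		 * Worked example (illustrative bitmask): a peer advertising
		 * versions 0x0000000e (bits 1-3 set) yields fls() == 4, so
		 * the highest mutually supported version, 3, is selected
		 * below.
		 */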
2507 if ((version > 0) &&
2508 (version != (sizeof(version) * BITS_PER_BYTE - 1)) &&
2509 xprt_info->xprt->set_version)
2510 xprt_info->xprt->set_version(xprt_info->xprt, version);
2511 }
2512}
2513
2514static int send_hello_msg(struct msm_ipc_router_xprt_info *xprt_info)
2515{
2516 int rc = 0;
2517 union rr_control_msg ctl;
2518
2519 if (!xprt_info->hello_sent) {
2520 xprt_info->hello_sent = 1;
2521 /* Send a HELLO message */
2522 memset(&ctl, 0, sizeof(ctl));
2523 ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
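		/*
		 * The checksum field is seeded with the HELLO magic and then
		 * overwritten with the checksum computed over the message, so
		 * that the peer's recomputation in do_version_negotiation()
		 * evaluates back to IPC_ROUTER_HELLO_MAGIC.
		 */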
2524 ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
2525 ctl.hello.versions = (uint32_t)IPC_ROUTER_VER_BITMASK;
2526 ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
2527 rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
2528 IPC_ROUTER_DUMMY_DEST_NODE);
2529 if (rc < 0) {
2530 xprt_info->hello_sent = 0;
2531 IPC_RTR_ERR("%s: Error sending HELLO message\n",
2532 __func__);
2533 return rc;
2534 }
2535 }
2536 return rc;
2537}
2538
2539static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
2540 union rr_control_msg *msg,
2541 struct rr_header_v1 *hdr)
2542{
2543 int i, rc = 0;
2544	struct msm_ipc_routing_table_entry *rt_entry;
2545
2546 if (!hdr)
2547 return -EINVAL;
2548
2549 xprt_info->remote_node_id = hdr->src_node_id;
2550 rt_entry = create_routing_table_entry(hdr->src_node_id, xprt_info);
2551 if (!rt_entry) {
2552 IPC_RTR_ERR("%s: rt_entry allocation failed\n", __func__);
2553 return -ENOMEM;
2554 }
2555 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2556
2557 do_version_negotiation(xprt_info, msg);
2558	rc = send_hello_msg(xprt_info);
2559	if (rc < 0)
2560		return rc;
2561
2562	xprt_info->initialized = 1;
2563
2564	/* Send the list of servers from the local node and from nodes
2565	 * outside the mesh network of which this XPRT is a part.
2566 */
2567 down_read(&server_list_lock_lha2);
2568 down_read(&routing_table_lock_lha3);
2569 for (i = 0; i < RT_HASH_SIZE; i++) {
2570 list_for_each_entry(rt_entry, &routing_table[i], list) {
2571 if ((rt_entry->node_id != IPC_ROUTER_NID_LOCAL) &&
2572 (!rt_entry->xprt_info ||
2573 (rt_entry->xprt_info->xprt->link_id ==
2574 xprt_info->xprt->link_id)))
2575 continue;
2576 rc = msm_ipc_router_send_server_list(rt_entry->node_id,
2577 xprt_info);
2578 if (rc < 0) {
2579 up_read(&routing_table_lock_lha3);
2580 up_read(&server_list_lock_lha2);
2581 return rc;
2582 }
2583 }
2584 }
2585 up_read(&routing_table_lock_lha3);
2586 up_read(&server_list_lock_lha2);
2587 return rc;
2588}
2589
2590static int process_resume_tx_msg(union rr_control_msg *msg,
2591 struct rr_packet *pkt)
2592{
2593 struct msm_ipc_router_remote_port *rport_ptr;
2594
2595 rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
2596 msg->cli.port_id);
2597 if (!rport_ptr) {
2598 IPC_RTR_ERR("%s: Unable to resume client\n", __func__);
2599 return -ENODEV;
2600 }
2601 mutex_lock(&rport_ptr->rport_lock_lhb2);
2602 rport_ptr->tx_quota_cnt = 0;
2603 post_resume_tx(rport_ptr, pkt, msg);
2604 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2605 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2606 return 0;
2607}
2608
2609static int process_new_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
2610 union rr_control_msg *msg,
2611 struct rr_packet *pkt)
2612{
2613 struct msm_ipc_routing_table_entry *rt_entry;
2614 struct msm_ipc_server *server;
2615 struct msm_ipc_router_remote_port *rport_ptr;
2616
2617 if (msg->srv.instance == 0) {
2618 IPC_RTR_ERR("%s: Server %08x create rejected, version = 0\n",
2619 __func__, msg->srv.service);
2620 return -EINVAL;
2621 }
2622
2623 rt_entry = ipc_router_get_rtentry_ref(msg->srv.node_id);
2624 if (!rt_entry) {
2625 rt_entry = create_routing_table_entry(msg->srv.node_id,
2626 xprt_info);
2627 if (!rt_entry) {
2628 IPC_RTR_ERR("%s: rt_entry allocation failed\n",
2629 __func__);
2630 return -ENOMEM;
2631 }
2632 }
2633 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
2634
2635 /* If the service already exists in the table, create_server returns
2636 * a reference to it.
2637 */
2638 rport_ptr = ipc_router_create_rport(msg->srv.node_id,
2639 msg->srv.port_id, xprt_info);
2640 if (!rport_ptr)
2641 return -ENOMEM;
2642
2643 server = msm_ipc_router_create_server(
2644 msg->srv.service, msg->srv.instance,
2645 msg->srv.node_id, msg->srv.port_id, xprt_info);
2646 if (!server) {
2647 IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
2648 __func__, msg->srv.service, msg->srv.instance);
2649 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2650 ipc_router_destroy_rport(rport_ptr);
2651 return -ENOMEM;
2652 }
2653 mutex_lock(&rport_ptr->rport_lock_lhb2);
2654 rport_ptr->server = server;
2655 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2656 rport_ptr->sec_rule = msm_ipc_get_security_rule(
2657 msg->srv.service, msg->srv.instance);
2658 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2659 kref_put(&server->ref, ipc_router_release_server);
2660
2661 /* Relay the new server message to other subsystems that do not belong
2662 * to the cluster from which this message is received. Notify the
2663 * local clients waiting for this service.
2664 */
2665 relay_ctl_msg(xprt_info, msg);
2666 post_control_ports(pkt);
2667 return 0;
2668}
2669
2670static int process_rmv_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
2671 union rr_control_msg *msg,
2672 struct rr_packet *pkt)
2673{
2674 struct msm_ipc_server *server;
2675 struct msm_ipc_router_remote_port *rport_ptr;
2676
2677 server = ipc_router_get_server_ref(msg->srv.service, msg->srv.instance,
2678 msg->srv.node_id, msg->srv.port_id);
2679 rport_ptr = ipc_router_get_rport_ref(msg->srv.node_id,
2680 msg->srv.port_id);
2681 if (rport_ptr) {
2682 mutex_lock(&rport_ptr->rport_lock_lhb2);
2683 if (rport_ptr->server == server)
2684 rport_ptr->server = NULL;
2685 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2686 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2687 }
2688
2689 if (server) {
2690 kref_put(&server->ref, ipc_router_release_server);
2691 ipc_router_destroy_server(server, msg->srv.node_id,
2692 msg->srv.port_id);
2693 /* Relay the new server message to other subsystems that do not
2694 * belong to the cluster from which this message is received.
2695 * Notify the local clients communicating with the service.
2696 */
2697 relay_ctl_msg(xprt_info, msg);
2698 post_control_ports(pkt);
2699 }
2700 return 0;
2701}
2702
2703static int process_rmv_client_msg(struct msm_ipc_router_xprt_info *xprt_info,
2704 union rr_control_msg *msg,
2705 struct rr_packet *pkt)
2706{
2707 struct msm_ipc_router_remote_port *rport_ptr;
2708 struct msm_ipc_server *server;
2709
2710 rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
2711 msg->cli.port_id);
2712 if (rport_ptr) {
2713 mutex_lock(&rport_ptr->rport_lock_lhb2);
2714 server = rport_ptr->server;
2715 rport_ptr->server = NULL;
2716 mutex_unlock(&rport_ptr->rport_lock_lhb2);
2717 ipc_router_reset_conn(rport_ptr);
2718 down_write(&server_list_lock_lha2);
2719 if (server)
2720 cleanup_rmt_server(NULL, rport_ptr, server);
2721 up_write(&server_list_lock_lha2);
2722 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2723 ipc_router_destroy_rport(rport_ptr);
2724 }
2725
2726 relay_ctl_msg(xprt_info, msg);
2727 post_control_ports(pkt);
2728 return 0;
2729}
2730
2731static int process_control_msg(struct msm_ipc_router_xprt_info *xprt_info,
2732 struct rr_packet *pkt)
2733{
2734 union rr_control_msg *msg;
2735 int rc = 0;
2736 struct rr_header_v1 *hdr;
2737
2738 if (pkt->length != sizeof(*msg)) {
2739 IPC_RTR_ERR("%s: r2r msg size %d != %zu\n", __func__,
2740 pkt->length, sizeof(*msg));
2741 return -EINVAL;
2742 }
2743
2744 hdr = &pkt->hdr;
2745 msg = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, sizeof(*msg));
2746 if (!msg) {
2747 IPC_RTR_ERR("%s: Error extracting control msg\n", __func__);
2748 return -ENOMEM;
2749 }
2750
2751 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX, msg,
2752 hdr, NULL, NULL);
2753
2754 switch (msg->cmd) {
2755 case IPC_ROUTER_CTRL_CMD_HELLO:
2756 rc = process_hello_msg(xprt_info, msg, hdr);
2757 break;
2758 case IPC_ROUTER_CTRL_CMD_RESUME_TX:
2759 rc = process_resume_tx_msg(msg, pkt);
2760 break;
2761 case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
2762 rc = process_new_server_msg(xprt_info, msg, pkt);
2763 break;
2764 case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
2765 rc = process_rmv_server_msg(xprt_info, msg, pkt);
2766 break;
2767 case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
2768 rc = process_rmv_client_msg(xprt_info, msg, pkt);
2769 break;
2770 default:
2771 rc = -EINVAL;
2772 }
2773 kfree(msg);
2774 return rc;
2775}
2776
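/*
 * Receive worker: each packet read from the transport is either forwarded
 * to another transport (destination node not local), handed to
 * process_control_msg() for router control commands, or queued on the
 * destination local port's rx queue via post_pkt_to_port().
 */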
2777static void do_read_data(struct kthread_work *work)
2778{
2779 struct rr_header_v1 *hdr;
2780 struct rr_packet *pkt = NULL;
2781 struct msm_ipc_port *port_ptr;
2782 struct msm_ipc_router_remote_port *rport_ptr;
2783
2784 struct msm_ipc_router_xprt_info *xprt_info =
2785 container_of(work,
2786 struct msm_ipc_router_xprt_info,
2787 read_data);
2788
2789 while ((pkt = rr_read(xprt_info)) != NULL) {
2790
2791		hdr = &pkt->hdr;
2792
2793 if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
2794 ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
2795 (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
2796 IPC_RTR_INFO(xprt_info->log_ctx,
2797 "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
2798 "FWD", "RX", hdr->size, hdr->type,
2799 hdr->control_flag, hdr->src_node_id,
2800 hdr->src_port_id, hdr->dst_node_id,
2801 hdr->dst_port_id);
2802			/*
2803			 * Update the forwarding port info in the routing
2804			 * table as well; this helps clean up clients/services
2805			 * running on the modem when the MSM goes down.
2806			 */
2807 rport_ptr = ipc_router_get_rport_ref(hdr->src_node_id,
2808 hdr->src_port_id);
2809 if (!rport_ptr) {
2810 rport_ptr =
2811 ipc_router_create_rport(hdr->src_node_id,
2812 hdr->src_port_id,
2813 xprt_info);
2814 if (!rport_ptr) {
2815 IPC_RTR_ERR(
2816 "%s: Rmt Prt %08x:%08x create failed\n",
2817 __func__, hdr->src_node_id,
2818 hdr->src_port_id);
2819 }
2820 }
2821			/*
2822			 * Fail-safe: even if the rport allocation above
2823			 * failed, still forward the packet to the remote
2824			 * node.
2825			 */
2826 if (rport_ptr)
2827 kref_put(&rport_ptr->ref,
2828 ipc_router_release_rport);
2829			forward_msg(xprt_info, pkt);
2830 goto read_next_pkt1;
2831 }
2832
2833 if (hdr->type != IPC_ROUTER_CTRL_CMD_DATA) {
2834 process_control_msg(xprt_info, pkt);
2835 goto read_next_pkt1;
2836 }
2837
2838 port_ptr = ipc_router_get_port_ref(hdr->dst_port_id);
2839 if (!port_ptr) {
2840 IPC_RTR_ERR("%s: No local port id %08x\n", __func__,
2841 hdr->dst_port_id);
2842 goto read_next_pkt1;
2843 }
2844
2845 rport_ptr = ipc_router_get_rport_ref(hdr->src_node_id,
2846 hdr->src_port_id);
2847 if (!rport_ptr) {
2848 rport_ptr = ipc_router_create_rport(hdr->src_node_id,
2849 hdr->src_port_id,
2850 xprt_info);
2851 if (!rport_ptr) {
2852 IPC_RTR_ERR(
2853 "%s: Rmt Prt %08x:%08x create failed\n",
2854 __func__, hdr->src_node_id,
2855 hdr->src_port_id);
2856 goto read_next_pkt2;
2857 }
2858 }
2859
2860 ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX,
2861 pkt, hdr, port_ptr, rport_ptr);
2862 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2863 post_pkt_to_port(port_ptr, pkt, 0);
2864 kref_put(&port_ptr->ref, ipc_router_release_port);
2865 continue;
2866read_next_pkt2:
2867 kref_put(&port_ptr->ref, ipc_router_release_port);
2868read_next_pkt1:
2869 release_pkt(pkt);
2870 }
2871}
2872
2873int msm_ipc_router_register_server(struct msm_ipc_port *port_ptr,
2874 struct msm_ipc_addr *name)
2875{
2876 struct msm_ipc_server *server;
2877 union rr_control_msg ctl;
2878 struct msm_ipc_router_remote_port *rport_ptr;
2879
2880 if (!port_ptr || !name)
2881 return -EINVAL;
2882
2883	if (port_ptr->type != CLIENT_PORT)
2884 return -EINVAL;
2885
2886	if (name->addrtype != MSM_IPC_ADDR_NAME)
2887 return -EINVAL;
2888
2889 rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
2890 port_ptr->this_port.port_id, NULL);
2891 if (!rport_ptr) {
2892 IPC_RTR_ERR("%s: RPort %08x:%08x creation failed\n", __func__,
2893 IPC_ROUTER_NID_LOCAL, port_ptr->this_port.port_id);
2894 return -ENOMEM;
2895 }
2896
2897 server = msm_ipc_router_create_server(name->addr.port_name.service,
2898 name->addr.port_name.instance,
2899 IPC_ROUTER_NID_LOCAL,
2900 port_ptr->this_port.port_id,
2901 NULL);
2902 if (!server) {
2903 IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
2904 __func__, name->addr.port_name.service,
2905 name->addr.port_name.instance);
2906 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2907 ipc_router_destroy_rport(rport_ptr);
2908 return -ENOMEM;
2909 }
2910
2911 memset(&ctl, 0, sizeof(ctl));
2912 ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
2913 ctl.srv.service = server->name.service;
2914 ctl.srv.instance = server->name.instance;
2915 ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
2916 ctl.srv.port_id = port_ptr->this_port.port_id;
2917 broadcast_ctl_msg(&ctl);
2918 mutex_lock(&port_ptr->port_lock_lhc3);
2919 port_ptr->type = SERVER_PORT;
2920 port_ptr->mode_info.mode = MULTI_LINK_MODE;
2921 port_ptr->port_name.service = server->name.service;
2922 port_ptr->port_name.instance = server->name.instance;
2923 port_ptr->rport_info = rport_ptr;
2924 mutex_unlock(&port_ptr->port_lock_lhc3);
2925 kref_put(&rport_ptr->ref, ipc_router_release_rport);
2926 kref_put(&server->ref, ipc_router_release_server);
2927 return 0;
2928}
2929
2930int msm_ipc_router_unregister_server(struct msm_ipc_port *port_ptr)
2931{
2932 struct msm_ipc_server *server;
2933 union rr_control_msg ctl;
2934 struct msm_ipc_router_remote_port *rport_ptr;
2935
2936 if (!port_ptr)
2937 return -EINVAL;
2938
2939 if (port_ptr->type != SERVER_PORT) {
2940 IPC_RTR_ERR("%s: Trying to unregister a non-server port\n",
2941 __func__);
2942 return -EINVAL;
2943 }
2944
2945 if (port_ptr->this_port.node_id != IPC_ROUTER_NID_LOCAL) {
2946 IPC_RTR_ERR(
2947 "%s: Trying to unregister a remote server locally\n",
2948 __func__);
2949 return -EINVAL;
2950 }
2951
2952 server = ipc_router_get_server_ref(port_ptr->port_name.service,
2953 port_ptr->port_name.instance,
2954 port_ptr->this_port.node_id,
2955 port_ptr->this_port.port_id);
2956 if (!server) {
2957 IPC_RTR_ERR("%s: Server lookup failed\n", __func__);
2958 return -ENODEV;
2959 }
2960
2961 mutex_lock(&port_ptr->port_lock_lhc3);
2962 port_ptr->type = CLIENT_PORT;
2963 rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
2964 mutex_unlock(&port_ptr->port_lock_lhc3);
2965 if (rport_ptr)
2966 ipc_router_reset_conn(rport_ptr);
2967 memset(&ctl, 0, sizeof(ctl));
2968 ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
2969 ctl.srv.service = server->name.service;
2970 ctl.srv.instance = server->name.instance;
2971 ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
2972 ctl.srv.port_id = port_ptr->this_port.port_id;
2973 kref_put(&server->ref, ipc_router_release_server);
2974 ipc_router_destroy_server(server, port_ptr->this_port.node_id,
2975 port_ptr->this_port.port_id);
2976 broadcast_ctl_msg(&ctl);
2977 mutex_lock(&port_ptr->port_lock_lhc3);
2978 port_ptr->type = CLIENT_PORT;
2979 mutex_unlock(&port_ptr->port_lock_lhc3);
2980 return 0;
2981}
2982
2983static int loopback_data(struct msm_ipc_port *src,
2984 u32 port_id,
2985 struct rr_packet *pkt)
2986{
2987 struct msm_ipc_port *port_ptr;
2988 struct sk_buff *temp_skb;
2989 int align_size;
2990
2991 if (!pkt) {
2992 IPC_RTR_ERR("%s: Invalid pkt pointer\n", __func__);
2993 return -EINVAL;
2994 }
2995
2996 temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
2997	if (!temp_skb) {
2998 IPC_RTR_ERR("%s: Empty skb\n", __func__);
2999 return -EINVAL;
3000 }
3001	align_size = ALIGN_SIZE(pkt->length);
3002 skb_put(temp_skb, align_size);
3003 pkt->length += align_size;
3004
3005 port_ptr = ipc_router_get_port_ref(port_id);
3006 if (!port_ptr) {
3007 IPC_RTR_ERR("%s: Local port %d not present\n", __func__,
3008 port_id);
3009 return -ENODEV;
3010 }
3011 post_pkt_to_port(port_ptr, pkt, 1);
3012 update_comm_mode_info(&src->mode_info, NULL);
3013 kref_put(&port_ptr->ref, ipc_router_release_port);
3014
3015 return pkt->hdr.size;
3016}
3017
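/*
 * Tx flow control: tx_quota_cnt counts packets sent to a remote port since
 * its last RESUME_TX. The packet that takes the count to
 * IPC_ROUTER_LOW_RX_QUOTA is marked CONFIRM_RX, and once the count reaches
 * IPC_ROUTER_HIGH_RX_QUOTA the sender waits (or returns -EAGAIN when the
 * timeout is 0) until the receiver reads the marked packet and the
 * resulting RESUME_TX resets the count in process_resume_tx_msg().
 */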
3018static int ipc_router_tx_wait(struct msm_ipc_port *src,
3019 struct msm_ipc_router_remote_port *rport_ptr,
3020 u32 *set_confirm_rx,
3021 long timeout)
3022{
3023 struct msm_ipc_resume_tx_port *resume_tx_port;
3024 int ret;
3025
3026 if (unlikely(!src || !rport_ptr))
3027 return -EINVAL;
3028
3029 for (;;) {
3030 mutex_lock(&rport_ptr->rport_lock_lhb2);
3031 if (rport_ptr->status == RESET) {
3032 mutex_unlock(&rport_ptr->rport_lock_lhb2);
3033 IPC_RTR_ERR("%s: RPort %08x:%08x is in reset state\n",
3034 __func__, rport_ptr->node_id,
3035 rport_ptr->port_id);
3036 return -ENETRESET;
3037 }
3038
3039 if (rport_ptr->tx_quota_cnt < IPC_ROUTER_HIGH_RX_QUOTA)
3040 break;
3041
3042 if (msm_ipc_router_lookup_resume_tx_port(
3043 rport_ptr, src->this_port.port_id))
3044 goto check_timeo;
3045
3046 resume_tx_port =
3047 kzalloc(sizeof(struct msm_ipc_resume_tx_port),
3048 GFP_KERNEL);
3049 if (!resume_tx_port) {
3050 IPC_RTR_ERR("%s: Resume_Tx port allocation failed\n",
3051 __func__);
3052 mutex_unlock(&rport_ptr->rport_lock_lhb2);
3053 return -ENOMEM;
3054 }
3055 INIT_LIST_HEAD(&resume_tx_port->list);
3056 resume_tx_port->port_id = src->this_port.port_id;
3057 resume_tx_port->node_id = src->this_port.node_id;
3058 list_add_tail(&resume_tx_port->list,
3059 &rport_ptr->resume_tx_port_list);
3060check_timeo:
3061 mutex_unlock(&rport_ptr->rport_lock_lhb2);
3062 if (!timeout) {
3063 return -EAGAIN;
3064 } else if (timeout < 0) {
3065 ret =
3066 wait_event_interruptible(src->port_tx_wait_q,
3067 (rport_ptr->tx_quota_cnt !=
3068 IPC_ROUTER_HIGH_RX_QUOTA ||
3069 rport_ptr->status == RESET));
3070 if (ret)
3071 return ret;
3072 } else {
3073 ret = wait_event_interruptible_timeout(
3074 src->port_tx_wait_q,
3075 (rport_ptr->tx_quota_cnt !=
3076 IPC_ROUTER_HIGH_RX_QUOTA ||
3077 rport_ptr->status == RESET),
3078 msecs_to_jiffies(timeout));
3079 if (ret < 0) {
3080 return ret;
3081 } else if (ret == 0) {
3082 IPC_RTR_ERR("%s: Resume_tx Timeout %08x:%08x\n",
3083 __func__, rport_ptr->node_id,
3084 rport_ptr->port_id);
3085 return -ETIMEDOUT;
3086 }
3087 }
3088 }
3089 rport_ptr->tx_quota_cnt++;
3090 if (rport_ptr->tx_quota_cnt == IPC_ROUTER_LOW_RX_QUOTA)
3091 *set_confirm_rx = 1;
3092 mutex_unlock(&rport_ptr->rport_lock_lhb2);
3093 return 0;
3094}
3095
3096static int
3097msm_ipc_router_write_pkt(struct msm_ipc_port *src,
3098 struct msm_ipc_router_remote_port *rport_ptr,
3099 struct rr_packet *pkt, long timeout)
3100{
3101 struct rr_header_v1 *hdr;
3102 struct msm_ipc_router_xprt_info *xprt_info;
3103 struct msm_ipc_routing_table_entry *rt_entry;
3104 struct sk_buff *temp_skb;
3105 int xprt_option;
3106 int ret;
3107 int align_size;
3108 u32 set_confirm_rx = 0;
3109
3110 if (!rport_ptr || !src || !pkt)
3111 return -EINVAL;
3112
3113 hdr = &pkt->hdr;
3114 hdr->version = IPC_ROUTER_V1;
3115 hdr->type = IPC_ROUTER_CTRL_CMD_DATA;
3116 hdr->src_node_id = src->this_port.node_id;
3117 hdr->src_port_id = src->this_port.port_id;
3118 hdr->size = pkt->length;
3119 hdr->control_flag = 0;
3120 hdr->dst_node_id = rport_ptr->node_id;
3121 hdr->dst_port_id = rport_ptr->port_id;
3122
3123 ret = ipc_router_tx_wait(src, rport_ptr, &set_confirm_rx, timeout);
3124 if (ret < 0)
3125 return ret;
3126 if (set_confirm_rx)
3127 hdr->control_flag |= CONTROL_FLAG_CONFIRM_RX;
3128
3129 if (hdr->dst_node_id == IPC_ROUTER_NID_LOCAL) {
3130 ipc_router_log_msg(local_log_ctx,
3131 IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src,
3132 rport_ptr);
3133 ret = loopback_data(src, hdr->dst_port_id, pkt);
3134 return ret;
3135 }
3136
3137 rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
3138 if (!rt_entry) {
3139 IPC_RTR_ERR("%s: Remote node %d not up\n",
3140 __func__, hdr->dst_node_id);
3141 return -ENODEV;
3142 }
3143 down_read(&rt_entry->lock_lha4);
3144 xprt_info = rt_entry->xprt_info;
3145 ret = ipc_router_get_xprt_info_ref(xprt_info);
3146 if (ret < 0) {
3147 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
3148 up_read(&rt_entry->lock_lha4);
3149 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3150 return ret;
3151 }
3152 ret = prepend_header(pkt, xprt_info);
3153 if (ret < 0) {
3154 IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
3155 goto out_write_pkt;
3156 }
3157 xprt_option = xprt_info->xprt->get_option(xprt_info->xprt);
3158 if (!(xprt_option & FRAG_PKT_WRITE_ENABLE)) {
3159 ret = defragment_pkt(pkt);
3160 if (ret < 0)
3161 goto out_write_pkt;
3162 }
3163
3164 temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
3165	if (!temp_skb) {
3166 IPC_RTR_ERR("%s: Abort invalid pkt\n", __func__);
3167 ret = -EINVAL;
3168 goto out_write_pkt;
3169 }
3170	align_size = ALIGN_SIZE(pkt->length);
3171 skb_put(temp_skb, align_size);
3172 pkt->length += align_size;
3173 mutex_lock(&xprt_info->tx_lock_lhb2);
3174 ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
3175 mutex_unlock(&xprt_info->tx_lock_lhb2);
3176out_write_pkt:
3177 up_read(&rt_entry->lock_lha4);
3178 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3179
3180 if (ret < 0) {
3181 IPC_RTR_ERR("%s: Write on XPRT failed\n", __func__);
3182 ipc_router_log_msg(xprt_info->log_ctx,
3183 IPC_ROUTER_LOG_EVENT_TX_ERR, pkt, hdr, src,
3184 rport_ptr);
3185
3186 ipc_router_put_xprt_info_ref(xprt_info);
3187 return ret;
3188 }
3189 update_comm_mode_info(&src->mode_info, xprt_info);
3190 ipc_router_log_msg(xprt_info->log_ctx,
3191 IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src, rport_ptr);
3192
3193 ipc_router_put_xprt_info_ref(xprt_info);
3194 return hdr->size;
3195}
3196
3197int msm_ipc_router_send_to(struct msm_ipc_port *src,
3198 struct sk_buff_head *data,
3199 struct msm_ipc_addr *dest,
3200 long timeout)
3201{
3202 u32 dst_node_id = 0, dst_port_id = 0;
3203 struct msm_ipc_server *server;
3204 struct msm_ipc_server_port *server_port;
3205 struct msm_ipc_router_remote_port *rport_ptr = NULL;
3206 struct msm_ipc_router_remote_port *src_rport_ptr = NULL;
3207 struct rr_packet *pkt;
3208 int ret;
3209
3210 if (!src || !data || !dest) {
3211 IPC_RTR_ERR("%s: Invalid Parameters\n", __func__);
3212 return -EINVAL;
3213 }
3214
3215	/* Resolve address */
3216 if (dest->addrtype == MSM_IPC_ADDR_ID) {
3217 dst_node_id = dest->addr.port_addr.node_id;
3218 dst_port_id = dest->addr.port_addr.port_id;
3219 } else if (dest->addrtype == MSM_IPC_ADDR_NAME) {
3220 server =
3221 ipc_router_get_server_ref(dest->addr.port_name.service,
3222 dest->addr.port_name.instance,
3223 0, 0);
3224 if (!server) {
3225 IPC_RTR_ERR("%s: Destination not reachable\n",
3226 __func__);
3227 return -ENODEV;
3228 }
3229 server_port = list_first_entry(&server->server_port_list,
3230 struct msm_ipc_server_port,
3231 list);
3232 dst_node_id = server_port->server_addr.node_id;
3233 dst_port_id = server_port->server_addr.port_id;
3234 kref_put(&server->ref, ipc_router_release_server);
3235 }
3236
3237 rport_ptr = ipc_router_get_rport_ref(dst_node_id, dst_port_id);
3238 if (!rport_ptr) {
3239 IPC_RTR_ERR("%s: Remote port not found\n", __func__);
3240 return -ENODEV;
3241 }
3242
3243 if (src->check_send_permissions) {
3244 ret = src->check_send_permissions(rport_ptr->sec_rule);
3245 if (ret <= 0) {
3246 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3247 IPC_RTR_ERR("%s: permission failure for %s\n",
3248 __func__, current->comm);
3249 return -EPERM;
3250 }
3251 }
3252
3253 if (dst_node_id == IPC_ROUTER_NID_LOCAL && !src->rport_info) {
3254 src_rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
3255 src->this_port.port_id,
3256 NULL);
3257 if (!src_rport_ptr) {
3258 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3259 IPC_RTR_ERR("%s: RPort creation failed\n", __func__);
3260 return -ENOMEM;
3261 }
3262 mutex_lock(&src->port_lock_lhc3);
3263 src->rport_info = src_rport_ptr;
3264 mutex_unlock(&src->port_lock_lhc3);
3265 kref_put(&src_rport_ptr->ref, ipc_router_release_rport);
3266 }
3267
3268 pkt = create_pkt(data);
3269 if (!pkt) {
3270 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3271 IPC_RTR_ERR("%s: Pkt creation failed\n", __func__);
3272 return -ENOMEM;
3273 }
3274
3275 ret = msm_ipc_router_write_pkt(src, rport_ptr, pkt, timeout);
3276 kref_put(&rport_ptr->ref, ipc_router_release_rport);
3277 if (ret < 0)
3278 pkt->pkt_fragment_q = NULL;
3279 release_pkt(pkt);
3280
3281 return ret;
3282}
3283
3284int msm_ipc_router_send_msg(struct msm_ipc_port *src,
3285 struct msm_ipc_addr *dest,
3286 void *data, unsigned int data_len)
3287{
3288 struct sk_buff_head *out_skb_head;
3289 int ret;
3290
3291 out_skb_head = msm_ipc_router_buf_to_skb(data, data_len);
3292 if (!out_skb_head) {
3293 IPC_RTR_ERR("%s: SKB conversion failed\n", __func__);
3294 return -EFAULT;
3295 }
3296
3297 ret = msm_ipc_router_send_to(src, out_skb_head, dest, 0);
3298 if (ret < 0) {
3299 if (ret != -EAGAIN)
3300 IPC_RTR_ERR(
3301 "%s: msm_ipc_router_send_to failed - ret: %d\n",
3302 __func__, ret);
3303 msm_ipc_router_free_skb(out_skb_head);
3304 return ret;
3305 }
3306 return 0;
3307}
3308
3309/**
3310 * msm_ipc_router_send_resume_tx() - Send Resume_Tx message
3311 * @data: Pointer to received data packet that has confirm_rx bit set
3312 *
3313 * @return: On success, number of bytes transferred is returned, else
3314 * standard linux error code is returned.
3315 *
3316 * This function sends the Resume_Tx event to the remote node that
3317 * sent the data with the confirm_rx field set. Even in a multi-hop
3318 * scenario, this function makes sure the resume_tx event reaches the
3319 * correct destination node_id.
3320 */
3321static int msm_ipc_router_send_resume_tx(void *data)
3322{
3323 union rr_control_msg msg;
3324 struct rr_header_v1 *hdr = (struct rr_header_v1 *)data;
3325 struct msm_ipc_routing_table_entry *rt_entry;
3326 int ret;
3327
3328 memset(&msg, 0, sizeof(msg));
3329 msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
3330 msg.cli.node_id = hdr->dst_node_id;
3331 msg.cli.port_id = hdr->dst_port_id;
3332 rt_entry = ipc_router_get_rtentry_ref(hdr->src_node_id);
3333 if (!rt_entry) {
3334 IPC_RTR_ERR("%s: %d Node is not present", __func__,
3335 hdr->src_node_id);
3336 return -ENODEV;
3337 }
3338 ret = ipc_router_get_xprt_info_ref(rt_entry->xprt_info);
3339 if (ret < 0) {
3340 IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
3341 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3342 return ret;
3343 }
3344 ret = ipc_router_send_ctl_msg(rt_entry->xprt_info, &msg,
3345 hdr->src_node_id);
3346 ipc_router_put_xprt_info_ref(rt_entry->xprt_info);
3347 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
3348 if (ret < 0)
3349 IPC_RTR_ERR(
3350 "%s: Send Resume_Tx Failed SRC_NODE: %d SRC_PORT: %d DEST_NODE: %d",
3351 __func__, hdr->dst_node_id, hdr->dst_port_id,
3352 hdr->src_node_id);
3353
3354 return ret;
3355}
3356
3357int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
3358 struct rr_packet **read_pkt,
3359 size_t buf_len)
3360{
3361 struct rr_packet *pkt;
3362
3363 if (!port_ptr || !read_pkt)
3364 return -EINVAL;
3365
3366 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3367 if (list_empty(&port_ptr->port_rx_q)) {
3368 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3369 return -EAGAIN;
3370 }
3371
3372 pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet, list);
3373 if ((buf_len) && (pkt->hdr.size > buf_len)) {
3374 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3375 return -ETOOSMALL;
3376 }
3377 list_del(&pkt->list);
3378 if (list_empty(&port_ptr->port_rx_q))
3379 __pm_relax(port_ptr->port_rx_ws);
3380 *read_pkt = pkt;
3381 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3382 if (pkt->hdr.control_flag & CONTROL_FLAG_CONFIRM_RX)
3383 msm_ipc_router_send_resume_tx(&pkt->hdr);
3384
3385 return pkt->length;
3386}
3387
3388/**
3389 * msm_ipc_router_rx_data_wait() - Wait for new message destined to a local
3390 * port.
3391 * @port_ptr: Pointer to the local port
3392 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
3393 * > 0 timeout indicates the wait time.
3394 * 0 indicates that we do not wait.
3395 * @return: 0 if there are pending messages to read,
3396 * standard Linux error code otherwise.
3397 *
3398 * Checks for the availability of messages that are destined to a local port.
3399 * If no messages are present then waits as per @timeout.
3400 */
3401int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout)
3402{
3403 int ret = 0;
3404
3405 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3406 while (list_empty(&port_ptr->port_rx_q)) {
3407 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3408 if (timeout < 0) {
3409 ret = wait_event_interruptible(
3410 port_ptr->port_rx_wait_q,
3411 !list_empty(&port_ptr->port_rx_q));
3412 if (ret)
3413 return ret;
3414 } else if (timeout > 0) {
3415 timeout = wait_event_interruptible_timeout(
3416 port_ptr->port_rx_wait_q,
3417 !list_empty(&port_ptr->port_rx_q),
3418 timeout);
3419 if (timeout < 0)
3420 return -EFAULT;
3421 }
3422 if (timeout == 0)
3423 return -ENOMSG;
3424 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3425 }
3426 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3427
3428 return ret;
3429}
3430
3431/**
3432 * msm_ipc_router_recv_from() - Receive messages destined to a local port.
3433 * @port_ptr: Pointer to the local port
3434 * @pkt : Pointer to the router-to-router packet
3435 * @src: Pointer to local port address
3436 * @timeout: < 0 timeout indicates infinite wait till a message arrives.
3437 * > 0 timeout indicates the wait time.
3438 * 0 indicates that we do not wait.
3439 * @return: = Number of bytes read(On successful read operation).
3440 * = -ENOMSG (If there are no pending messages and timeout is 0).
3441 * = -EINVAL (If either of the arguments, port_ptr or data is invalid)
3442 * = -EFAULT (If there are no pending messages when timeout is > 0
3443 * and the wait_event_interruptible_timeout has returned value > 0)
3444 * = -ERESTARTSYS (If there are no pending messages when timeout
3445 * is < 0 and wait_event_interruptible was interrupted by a signal)
3446 *
3447 * This function reads the messages that are destined for a local port. It
3448 * is used by modules that exist within the kernel and use IPC Router for
3449 * transport. The function checks if there are any messages that are already
3450 * received. If yes, it reads them, else it waits as per the timeout value.
3451 * On a successful read, the return value of the function indicates the number
3452 * of bytes that are read.
3453 */
3454int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
3455 struct rr_packet **pkt,
3456 struct msm_ipc_addr *src,
3457 long timeout)
3458{
3459 int ret, data_len, align_size;
3460 struct sk_buff *temp_skb;
3461 struct rr_header_v1 *hdr = NULL;
3462
3463 if (!port_ptr || !pkt) {
3464 IPC_RTR_ERR("%s: Invalid pointers being passed\n", __func__);
3465 return -EINVAL;
3466 }
3467
3468 *pkt = NULL;
3469
3470 ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
3471 if (ret)
3472 return ret;
3473
3474 ret = msm_ipc_router_read(port_ptr, pkt, 0);
3475 if (ret <= 0 || !(*pkt))
3476 return ret;
3477
3478 hdr = &((*pkt)->hdr);
3479 if (src) {
3480 src->addrtype = MSM_IPC_ADDR_ID;
3481 src->addr.port_addr.node_id = hdr->src_node_id;
3482 src->addr.port_addr.port_id = hdr->src_port_id;
3483 }
3484
3485 data_len = hdr->size;
3486 align_size = ALIGN_SIZE(data_len);
3487 if (align_size) {
3488 temp_skb = skb_peek_tail((*pkt)->pkt_fragment_q);
Chris Lew42ea9612017-10-04 15:58:16 -07003489 if (temp_skb)
3490 skb_trim(temp_skb, (temp_skb->len - align_size));
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06003491 }
3492 return data_len;
3493}
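
/*
 * Illustrative usage sketch (not part of this driver): a kernel client
 * receiving one message and releasing it. "my_port" is a hypothetical port
 * created earlier; the timeout is in jiffies, as consumed by
 * wait_event_interruptible_timeout() in msm_ipc_router_rx_data_wait().
 *
 *	struct rr_packet *pkt;
 *	struct msm_ipc_addr src;
 *	int bytes;
 *
 *	bytes = msm_ipc_router_recv_from(my_port, &pkt, &src,
 *					 msecs_to_jiffies(5000));
 *	if (bytes < 0)
 *		return bytes;
 *	... consume "bytes" bytes of payload from pkt->pkt_fragment_q ...
 *	release_pkt(pkt);
 */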
3494
3495int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
3496 struct msm_ipc_addr *src,
3497 unsigned char **data,
3498 unsigned int *len)
3499{
3500 struct rr_packet *pkt;
3501 int ret;
3502
3503 ret = msm_ipc_router_recv_from(port_ptr, &pkt, src, 0);
3504 if (ret < 0) {
3505 if (ret != -ENOMSG)
3506 IPC_RTR_ERR(
3507 "%s: msm_ipc_router_recv_from failed - ret: %d\n",
3508 __func__, ret);
3509 return ret;
3510 }
3511
3512 *data = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, ret);
3513 if (!(*data)) {
3514 IPC_RTR_ERR("%s: Buf conversion failed\n", __func__);
3515 release_pkt(pkt);
3516 return -ENOMEM;
3517 }
3518
3519 *len = ret;
3520 release_pkt(pkt);
3521 return 0;
3522}
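
/*
 * Illustrative usage sketch (not part of this driver): a non-blocking read
 * into a flat buffer. "my_port" is hypothetical, and the buffer returned in
 * "data" is assumed to be released by the caller (e.g. with kfree()).
 *
 *	unsigned char *data;
 *	unsigned int len;
 *	int rc;
 *
 *	rc = msm_ipc_router_read_msg(my_port, NULL, &data, &len);
 *	if (rc == -ENOMSG)
 *		return 0;
 *	if (rc < 0)
 *		return rc;
 *	... process "len" bytes at "data" ...
 *	kfree(data);
 */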
3523
3524/**
3525 * msm_ipc_router_create_port() - Create an IPC Router port/endpoint
3526 * @notify: Callback function to notify any event on the port. It is
3527 *          called with the event ID, any out-of-band data associated
3528 *          with the event, the size of that out-of-band data (if valid)
3529 *          and the private data registered during port creation.
3530 * @priv: Private data to be passed to @notify when a notification is
3531 *        generated.
3532 *
3533 * @return: Pointer to the port on success, NULL on error.
3534 */
3535struct msm_ipc_port *msm_ipc_router_create_port(
3536 void (*notify)(unsigned int event, void *oob_data,
3537 size_t oob_data_len, void *priv),
3538 void *priv)
3539{
3540 struct msm_ipc_port *port_ptr;
3541 int ret;
3542
3543 ret = ipc_router_core_init();
3544 if (ret < 0) {
3545 IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
3546 __func__, ret);
3547 return NULL;
3548 }
3549
3550 port_ptr = msm_ipc_router_create_raw_port(NULL, notify, priv);
3551 if (!port_ptr)
3552 IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
3553
3554 return port_ptr;
3555}
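
/*
 * Illustrative usage sketch (not part of this driver): creating a port with
 * an event callback and tearing it down later. The callback body, "my_ctx"
 * and the surrounding error handling are hypothetical.
 *
 *	static void my_notify(unsigned int event, void *oob_data,
 *			      size_t oob_data_len, void *priv)
 *	{
 *		... e.g. wake up a reader thread on a data event ...
 *	}
 *
 *	struct msm_ipc_port *port;
 *
 *	port = msm_ipc_router_create_port(my_notify, my_ctx);
 *	if (!port)
 *		return -ENOMEM;
 *	...
 *	msm_ipc_router_close_port(port);
 */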
3556
3557int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
3558{
3559 union rr_control_msg msg;
3560 struct msm_ipc_server *server;
3561 struct msm_ipc_router_remote_port *rport_ptr;
3562
3563 if (!port_ptr)
3564 return -EINVAL;
3565
3566 if (port_ptr->type == SERVER_PORT || port_ptr->type == CLIENT_PORT) {
3567 down_write(&local_ports_lock_lhc2);
3568 list_del(&port_ptr->list);
3569 up_write(&local_ports_lock_lhc2);
3570
3571 mutex_lock(&port_ptr->port_lock_lhc3);
3572 rport_ptr = (struct msm_ipc_router_remote_port *)
3573 port_ptr->rport_info;
3574 port_ptr->rport_info = NULL;
3575 mutex_unlock(&port_ptr->port_lock_lhc3);
3576 if (rport_ptr) {
3577 ipc_router_reset_conn(rport_ptr);
3578 ipc_router_destroy_rport(rport_ptr);
3579 }
3580
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06003581 /* Server port could have been a client port earlier.
3582 * Send REMOVE_CLIENT message in either case.
3583 */
3584 msm_ipc_router_send_remove_client(&port_ptr->mode_info,
3585 port_ptr->this_port.node_id,
3586 port_ptr->this_port.port_id);
3587 } else if (port_ptr->type == CONTROL_PORT) {
3588 down_write(&control_ports_lock_lha5);
3589 list_del(&port_ptr->list);
3590 up_write(&control_ports_lock_lha5);
3591 } else if (port_ptr->type == IRSC_PORT) {
3592 down_write(&local_ports_lock_lhc2);
3593 list_del(&port_ptr->list);
3594 up_write(&local_ports_lock_lhc2);
3595 signal_irsc_completion();
3596 }
3597
3598 if (port_ptr->type == SERVER_PORT) {
3599 server = ipc_router_get_server_ref(
3600 port_ptr->port_name.service,
3601 port_ptr->port_name.instance,
3602 port_ptr->this_port.node_id,
3603 port_ptr->this_port.port_id);
3604 if (server) {
3605 kref_put(&server->ref, ipc_router_release_server);
3606 ipc_router_destroy_server(server,
3607 port_ptr->this_port.node_id,
3608 port_ptr->this_port.port_id);
3609 }
Arun Prakash72cd4122020-04-25 21:23:11 +05303610		/* The server information has been released from the
3611		 * hash table, so it is now safe to broadcast the remove
3612		 * server message; the next call to look up the server
3613		 * will not succeed until the server opens the port
3614		 * again.
3615		 */
3616 memset(&msg, 0, sizeof(msg));
3617 msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
3618 msg.srv.service = port_ptr->port_name.service;
3619 msg.srv.instance = port_ptr->port_name.instance;
3620 msg.srv.node_id = port_ptr->this_port.node_id;
3621 msg.srv.port_id = port_ptr->this_port.port_id;
3622 broadcast_ctl_msg(&msg);
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06003623 }
3624
3625 mutex_lock(&port_ptr->port_lock_lhc3);
3626 rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
3627 port_ptr->rport_info = NULL;
3628 mutex_unlock(&port_ptr->port_lock_lhc3);
3629 if (rport_ptr)
3630 ipc_router_destroy_rport(rport_ptr);
3631
3632 kref_put(&port_ptr->ref, ipc_router_release_port);
3633 return 0;
3634}
3635
3636int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr)
3637{
3638 struct rr_packet *pkt;
3639 int rc = 0;
3640
3641 if (!port_ptr)
3642 return -EINVAL;
3643
3644 mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
3645 if (!list_empty(&port_ptr->port_rx_q)) {
3646 pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet,
3647 list);
3648 rc = pkt->hdr.size;
3649 }
3650 mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
3651
3652 return rc;
3653}
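
/*
 * Illustrative usage sketch (not part of this driver): peeking at the size
 * of the next pending message before reading it. "my_port" and MY_MAX_MSG
 * are hypothetical.
 *
 *	int next = msm_ipc_router_get_curr_pkt_size(my_port);
 *
 *	if (next > MY_MAX_MSG)
 *		return -ETOOSMALL;
 *	if (next > 0)
 *		... read it, e.g. with msm_ipc_router_read_msg() ...
 */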
3654
3655int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr)
3656{
3657 if (unlikely(!port_ptr || port_ptr->type != CLIENT_PORT))
3658 return -EINVAL;
3659
3660 down_write(&local_ports_lock_lhc2);
3661 list_del(&port_ptr->list);
3662 up_write(&local_ports_lock_lhc2);
3663 port_ptr->type = CONTROL_PORT;
3664 down_write(&control_ports_lock_lha5);
3665 list_add_tail(&port_ptr->list, &control_ports);
3666 up_write(&control_ports_lock_lha5);
3667
3668 return 0;
3669}
3670
3671int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
3672 struct msm_ipc_server_info *srv_info,
3673 int num_entries_in_array, u32 lookup_mask)
3674{
3675 struct msm_ipc_server *server;
3676 struct msm_ipc_server_port *server_port;
3677	int key, i = 0; /* num_entries_found */
3678
3679 if (!srv_name) {
3680 IPC_RTR_ERR("%s: Invalid srv_name\n", __func__);
3681 return -EINVAL;
3682 }
3683
3684 if (num_entries_in_array && !srv_info) {
3685 IPC_RTR_ERR("%s: srv_info NULL\n", __func__);
3686 return -EINVAL;
3687 }
3688
3689 down_read(&server_list_lock_lha2);
3690 key = (srv_name->service & (SRV_HASH_SIZE - 1));
3691 list_for_each_entry(server, &server_list[key], list) {
3692 if ((server->name.service != srv_name->service) ||
3693 ((server->name.instance & lookup_mask) !=
3694 srv_name->instance))
3695 continue;
3696
3697 list_for_each_entry(server_port, &server->server_port_list,
3698 list) {
3699 if (i < num_entries_in_array) {
3700 srv_info[i].node_id =
3701 server_port->server_addr.node_id;
3702 srv_info[i].port_id =
3703 server_port->server_addr.port_id;
3704 srv_info[i].service = server->name.service;
3705 srv_info[i].instance = server->name.instance;
3706 }
3707 i++;
3708 }
3709 }
3710 up_read(&server_list_lock_lha2);
3711
3712 return i;
3713}
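
/*
 * Illustrative usage sketch (not part of this driver): looking up all ports
 * of a service in two passes, first to size the array and then to fill it.
 * MY_SVC_ID/MY_INSTANCE are hypothetical, and the all-ones lookup_mask asks
 * for an exact instance match.
 *
 *	struct msm_ipc_port_name name = { .service = MY_SVC_ID,
 *					  .instance = MY_INSTANCE };
 *	struct msm_ipc_server_info *info;
 *	int n;
 *
 *	n = msm_ipc_router_lookup_server_name(&name, NULL, 0, 0xFFFFFFFF);
 *	if (n <= 0)
 *		return n;
 *	info = kcalloc(n, sizeof(*info), GFP_KERNEL);
 *	if (!info)
 *		return -ENOMEM;
 *	n = msm_ipc_router_lookup_server_name(&name, info, n, 0xFFFFFFFF);
 *	...
 *	kfree(info);
 */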
3714
3715int msm_ipc_router_close(void)
3716{
3717 struct msm_ipc_router_xprt_info *xprt_info, *tmp_xprt_info;
3718
3719 down_write(&xprt_info_list_lock_lha5);
3720 list_for_each_entry_safe(xprt_info, tmp_xprt_info,
3721 &xprt_info_list, list) {
3722 xprt_info->xprt->close(xprt_info->xprt);
3723 list_del(&xprt_info->list);
3724 kfree(xprt_info);
3725 }
3726 up_write(&xprt_info_list_lock_lha5);
3727 return 0;
3728}
3729
3730/**
3731 * pil_vote_load_worker() - Process vote to load the modem
3732 *
3733 * @work: Work item to process
3734 *
3735 * This function is called to process votes to load the modem that have been
3736 * queued by msm_ipc_load_default_node().
3737 */
3738static void pil_vote_load_worker(struct work_struct *work)
3739{
3740 struct pil_vote_info *vote_info;
3741
3742 vote_info = container_of(work, struct pil_vote_info, load_work);
3743 if (strlen(default_peripheral)) {
3744 vote_info->pil_handle = subsystem_get(default_peripheral);
3745 if (IS_ERR(vote_info->pil_handle)) {
3746 IPC_RTR_ERR("%s: Failed to load %s\n",
3747 __func__, default_peripheral);
3748 vote_info->pil_handle = NULL;
3749 }
3750 } else {
3751 vote_info->pil_handle = NULL;
3752 }
3753}
3754
3755/**
3756 * pil_vote_unload_worker() - Process vote to unload the modem
3757 *
3758 * @work: Work item to process
3759 *
3760 * This function is called to process votes to unload the modem that have been
3761 * queued by msm_ipc_unload_default_node().
3762 */
3763static void pil_vote_unload_worker(struct work_struct *work)
3764{
3765 struct pil_vote_info *vote_info;
3766
3767 vote_info = container_of(work, struct pil_vote_info, unload_work);
3768
3769 if (vote_info->pil_handle) {
3770 subsystem_put(vote_info->pil_handle);
3771 vote_info->pil_handle = NULL;
3772 }
3773 kfree(vote_info);
3774}
3775
3776/**
3777 * msm_ipc_load_default_node() - Queue a vote to load the modem.
3778 *
3779 * @return: PIL vote info structure on success, NULL on failure.
3780 *
3781 * This function places a work item that loads the modem on the
3782 * single-threaded workqueue used for processing PIL votes to load
3783 * or unload the modem.
3784 */
3785void *msm_ipc_load_default_node(void)
3786{
3787 struct pil_vote_info *vote_info;
3788
3789 vote_info = kmalloc(sizeof(*vote_info), GFP_KERNEL);
3790 if (!vote_info)
3791 return vote_info;
3792
3793 INIT_WORK(&vote_info->load_work, pil_vote_load_worker);
3794 queue_work(msm_ipc_router_workqueue, &vote_info->load_work);
3795
3796 return vote_info;
3797}
3798
3799/**
3800 * msm_ipc_unload_default_node() - Queue a vote to unload the modem.
3801 *
3802 * @pil_vote: PIL vote info structure, containing the PIL handle
3803 * and work structure.
3804 *
3805 * This function places a work item that unloads the modem on the
3806 * single-threaded workqueue used for processing PIL votes to load
3807 * or unload the modem.
3808 */
3809void msm_ipc_unload_default_node(void *pil_vote)
3810{
3811 struct pil_vote_info *vote_info;
3812
3813 if (pil_vote) {
3814 vote_info = (struct pil_vote_info *)pil_vote;
3815 INIT_WORK(&vote_info->unload_work, pil_vote_unload_worker);
3816 queue_work(msm_ipc_router_workqueue, &vote_info->unload_work);
3817 }
3818}
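
/*
 * Illustrative usage sketch (not part of this driver): pairing a load vote
 * with its matching unload vote. The call sites are hypothetical.
 *
 *	void *pil_vote;
 *
 *	pil_vote = msm_ipc_load_default_node();
 *	if (!pil_vote)
 *		return -ENOMEM;
 *	... communicate with the remote peripheral ...
 *	msm_ipc_unload_default_node(pil_vote);
 */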
3819
3820#if defined(CONFIG_DEBUG_FS)
3821static void dump_routing_table(struct seq_file *s)
3822{
3823 int j;
3824 struct msm_ipc_routing_table_entry *rt_entry;
3825
3826 seq_printf(s, "%-10s|%-20s|%-10s|\n", "Node Id", "XPRT Name",
3827 "Next Hop");
3828 seq_puts(s, "----------------------------------------------\n");
3829 for (j = 0; j < RT_HASH_SIZE; j++) {
3830 down_read(&routing_table_lock_lha3);
3831 list_for_each_entry(rt_entry, &routing_table[j], list) {
3832 down_read(&rt_entry->lock_lha4);
3833 seq_printf(s, "0x%08x|", rt_entry->node_id);
3834 if (rt_entry->node_id == IPC_ROUTER_NID_LOCAL)
3835 seq_printf(s, "%-20s|0x%08x|\n", "Loopback",
3836 rt_entry->node_id);
3837 else
3838 seq_printf(s, "%-20s|0x%08x|\n",
3839 rt_entry->xprt_info->xprt->name,
3840 rt_entry->node_id);
3841 up_read(&rt_entry->lock_lha4);
3842 }
3843 up_read(&routing_table_lock_lha3);
3844 }
3845}
3846
3847static void dump_xprt_info(struct seq_file *s)
3848{
3849 struct msm_ipc_router_xprt_info *xprt_info;
3850
3851 seq_printf(s, "%-20s|%-10s|%-12s|%-15s|\n", "XPRT Name", "Link ID",
3852 "Initialized", "Remote Node Id");
3853 seq_puts(s, "------------------------------------------------------------\n");
3854 down_read(&xprt_info_list_lock_lha5);
3855 list_for_each_entry(xprt_info, &xprt_info_list, list)
3856 seq_printf(s, "%-20s|0x%08x|%-12s|0x%08x|\n",
3857 xprt_info->xprt->name, xprt_info->xprt->link_id,
3858 (xprt_info->initialized ? "Y" : "N"),
3859 xprt_info->remote_node_id);
3860 up_read(&xprt_info_list_lock_lha5);
3861}
3862
3863static void dump_servers(struct seq_file *s)
3864{
3865 int j;
3866 struct msm_ipc_server *server;
3867 struct msm_ipc_server_port *server_port;
3868
3869 seq_printf(s, "%-11s|%-11s|%-11s|%-11s|\n", "Service", "Instance",
3870 "Node_id", "Port_id");
3871 seq_puts(s, "------------------------------------------------------------\n");
3872 down_read(&server_list_lock_lha2);
3873 for (j = 0; j < SRV_HASH_SIZE; j++) {
3874 list_for_each_entry(server, &server_list[j], list) {
3875 list_for_each_entry(server_port,
3876 &server->server_port_list,
3877 list)
3878 seq_printf(s, "0x%08x |0x%08x |0x%08x |0x%08x |\n",
3879 server->name.service,
3880 server->name.instance,
3881 server_port->server_addr.node_id,
3882 server_port->server_addr.port_id);
3883 }
3884 }
3885 up_read(&server_list_lock_lha2);
3886}
3887
3888static void dump_remote_ports(struct seq_file *s)
3889{
3890 int j, k;
3891 struct msm_ipc_router_remote_port *rport_ptr;
3892 struct msm_ipc_routing_table_entry *rt_entry;
3893
3894 seq_printf(s, "%-11s|%-11s|%-10s|\n", "Node_id", "Port_id",
3895 "Quota_cnt");
3896 seq_puts(s, "------------------------------------------------------------\n");
3897 for (j = 0; j < RT_HASH_SIZE; j++) {
3898 down_read(&routing_table_lock_lha3);
3899 list_for_each_entry(rt_entry, &routing_table[j], list) {
3900 down_read(&rt_entry->lock_lha4);
3901 for (k = 0; k < RP_HASH_SIZE; k++) {
3902 list_for_each_entry
3903 (rport_ptr,
3904 &rt_entry->remote_port_list[k],
3905 list)
3906 seq_printf(s, "0x%08x |0x%08x |0x%08x|\n",
3907 rport_ptr->node_id,
3908 rport_ptr->port_id,
3909 rport_ptr->tx_quota_cnt);
3910 }
3911 up_read(&rt_entry->lock_lha4);
3912 }
3913 up_read(&routing_table_lock_lha3);
3914 }
3915}
3916
3917static void dump_control_ports(struct seq_file *s)
3918{
3919 struct msm_ipc_port *port_ptr;
3920
3921 seq_printf(s, "%-11s|%-11s|\n", "Node_id", "Port_id");
3922 seq_puts(s, "------------------------------------------------------------\n");
3923 down_read(&control_ports_lock_lha5);
3924 list_for_each_entry(port_ptr, &control_ports, list)
3925 seq_printf(s, "0x%08x |0x%08x |\n", port_ptr->this_port.node_id,
3926 port_ptr->this_port.port_id);
3927 up_read(&control_ports_lock_lha5);
3928}
3929
3930static void dump_local_ports(struct seq_file *s)
3931{
3932 int j;
3933 struct msm_ipc_port *port_ptr;
3934
Dhoat Harpal3ad5fe32017-06-19 21:26:13 +05303935 seq_printf(s, "%-11s|%-11s|%-32s|%-11s|\n",
3936 "Node_id", "Port_id", "Wakelock", "Last SVCID");
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06003937 seq_puts(s, "------------------------------------------------------------\n");
3938 down_read(&local_ports_lock_lhc2);
3939 for (j = 0; j < LP_HASH_SIZE; j++) {
3940 list_for_each_entry(port_ptr, &local_ports[j], list) {
3941 mutex_lock(&port_ptr->port_lock_lhc3);
Dhoat Harpal3ad5fe32017-06-19 21:26:13 +05303942 seq_printf(s, "0x%08x |0x%08x |%-32s|0x%08x |\n",
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06003943 port_ptr->this_port.node_id,
Dhoat Harpal3ad5fe32017-06-19 21:26:13 +05303944 port_ptr->this_port.port_id,
3945 port_ptr->rx_ws_name,
3946 port_ptr->last_served_svc_id);
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06003947 mutex_unlock(&port_ptr->port_lock_lhc3);
3948 }
3949 }
3950 up_read(&local_ports_lock_lhc2);
3951}
3952
3953static int debugfs_show(struct seq_file *s, void *data)
3954{
3955 void (*show)(struct seq_file *) = s->private;
3956
3957 show(s);
3958 return 0;
3959}
3960
3961static int debug_open(struct inode *inode, struct file *file)
3962{
3963 return single_open(file, debugfs_show, inode->i_private);
3964}
3965
3966static const struct file_operations debug_ops = {
3967 .open = debug_open,
3968 .release = single_release,
3969 .read = seq_read,
3970 .llseek = seq_lseek,
3971};
3972
3973static void debug_create(const char *name, struct dentry *dent,
3974 void (*show)(struct seq_file *))
3975{
3976 debugfs_create_file(name, 0444, dent, show, &debug_ops);
3977}
3978
3979static void debugfs_init(void)
3980{
3981 struct dentry *dent;
3982
3983 dent = debugfs_create_dir("msm_ipc_router", 0);
3984 if (IS_ERR(dent))
3985 return;
3986
3987 debug_create("dump_local_ports", dent, dump_local_ports);
3988 debug_create("dump_remote_ports", dent, dump_remote_ports);
3989 debug_create("dump_control_ports", dent, dump_control_ports);
3990 debug_create("dump_servers", dent, dump_servers);
3991 debug_create("dump_xprt_info", dent, dump_xprt_info);
3992 debug_create("dump_routing_table", dent, dump_routing_table);
3993}
3994
3995#else
3996static void debugfs_init(void) {}
3997#endif
3998
3999/**
4000 * ipc_router_create_log_ctx() - Create and add the log context based on
4001 * transport
4002 * @name: subsystem name
4003 *
4004 * Return: a reference to the log context created
4005 *
4006 * This function creates an IPC log context based on the transport name and
4007 * adds it to a global list. The log context can be reused from the list in
4008 * case of a subsystem restart.
4009 */
4010static void *ipc_router_create_log_ctx(char *name)
4011{
4012 struct ipc_rtr_log_ctx *sub_log_ctx;
4013
4014 sub_log_ctx = kmalloc(sizeof(*sub_log_ctx), GFP_KERNEL);
4015 if (!sub_log_ctx)
4016 return NULL;
4017 sub_log_ctx->log_ctx = ipc_log_context_create(
4018 IPC_RTR_INFO_PAGES, name, 0);
4019 if (!sub_log_ctx->log_ctx) {
4020 IPC_RTR_ERR("%s: Unable to create IPC logging for [%s]",
4021 __func__, name);
4022 kfree(sub_log_ctx);
4023 return NULL;
4024 }
4025 strlcpy(sub_log_ctx->log_ctx_name, name, LOG_CTX_NAME_LEN);
4026 INIT_LIST_HEAD(&sub_log_ctx->list);
4027 list_add_tail(&sub_log_ctx->list, &log_ctx_list);
4028 return sub_log_ctx->log_ctx;
4029}
4030
4031static void ipc_router_log_ctx_init(void)
4032{
4033 mutex_lock(&log_ctx_list_lock_lha0);
4034 local_log_ctx = ipc_router_create_log_ctx("local_IPCRTR");
4035 mutex_unlock(&log_ctx_list_lock_lha0);
4036}
4037
4038/**
4039 * ipc_router_get_log_ctx() - Retrieves the ipc log context based on subsystem
4040 * name.
4041 * @sub_name: subsystem name
4042 *
4043 * Return: a reference to the log context
4044 */
4045static void *ipc_router_get_log_ctx(char *sub_name)
4046{
4047 void *log_ctx = NULL;
4048 struct ipc_rtr_log_ctx *temp_log_ctx;
4049
4050 mutex_lock(&log_ctx_list_lock_lha0);
4051 list_for_each_entry(temp_log_ctx, &log_ctx_list, list)
4052 if (!strcmp(temp_log_ctx->log_ctx_name, sub_name)) {
4053 log_ctx = temp_log_ctx->log_ctx;
4054 mutex_unlock(&log_ctx_list_lock_lha0);
4055 return log_ctx;
4056 }
4057 log_ctx = ipc_router_create_log_ctx(sub_name);
4058 mutex_unlock(&log_ctx_list_lock_lha0);
4059
4060 return log_ctx;
4061}
4062
4063/**
4064 * ipc_router_get_xprt_info_ref() - Get a reference to the xprt_info structure
4065 * @xprt_info: pointer to the xprt_info.
4066 *
4067 * @return: Zero on success, -ENODEV on failure.
4068 *
4069 * This function is used to obtain a reference to the xprt_info structure
4070 * corresponding to the requested @xprt_info pointer.
4071 */
4072static int ipc_router_get_xprt_info_ref(
4073 struct msm_ipc_router_xprt_info *xprt_info)
4074{
4075 int ret = -ENODEV;
4076 struct msm_ipc_router_xprt_info *tmp_xprt_info;
4077
4078 if (!xprt_info)
4079 return 0;
4080
4081 down_read(&xprt_info_list_lock_lha5);
4082 list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
4083 if (tmp_xprt_info == xprt_info) {
4084 kref_get(&xprt_info->ref);
4085 ret = 0;
4086 break;
4087 }
4088 }
4089 up_read(&xprt_info_list_lock_lha5);
4090
4091 return ret;
4092}
4093
4094/**
4095 * ipc_router_put_xprt_info_ref() - Put a reference to the xprt_info structure
4096 * @xprt_info: pointer to the xprt_info.
4097 *
4098 * This function is used to put the reference to the xprt_info structure
4099 * corresponding to the requested @xprt_info pointer.
4100 */
4101static void ipc_router_put_xprt_info_ref(
4102 struct msm_ipc_router_xprt_info *xprt_info)
4103{
4104 if (xprt_info)
4105 kref_put(&xprt_info->ref, ipc_router_release_xprt_info_ref);
4106}
4107
4108/**
4109 * ipc_router_release_xprt_info_ref() - release the xprt_info last reference
4110 * @ref: Reference to the xprt_info structure.
4111 *
4112 * This function is called when all references to the xprt_info structure
4113 * are released.
4114 */
4115static void ipc_router_release_xprt_info_ref(struct kref *ref)
4116{
4117 struct msm_ipc_router_xprt_info *xprt_info =
4118 container_of(ref, struct msm_ipc_router_xprt_info, ref);
4119
4120 complete_all(&xprt_info->ref_complete);
4121}
4122
4123static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt)
4124{
4125 struct msm_ipc_router_xprt_info *xprt_info;
Arun Kumar Neelakantamd5eb2732018-06-19 14:40:01 +05304126 struct sched_param param = {.sched_priority = 1};
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004127
4128 xprt_info = kmalloc(sizeof(*xprt_info), GFP_KERNEL);
4129 if (!xprt_info)
4130 return -ENOMEM;
4131
4132 xprt_info->xprt = xprt;
4133 xprt_info->initialized = 0;
Arun Kumar Neelakantamf99191d2018-06-11 18:13:43 +05304134 xprt_info->hello_sent = 0;
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004135 xprt_info->remote_node_id = -1;
4136 INIT_LIST_HEAD(&xprt_info->pkt_list);
4137 mutex_init(&xprt_info->rx_lock_lhb2);
4138 mutex_init(&xprt_info->tx_lock_lhb2);
4139 wakeup_source_init(&xprt_info->ws, xprt->name);
4140 xprt_info->need_len = 0;
4141 xprt_info->abort_data_read = 0;
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004142 INIT_LIST_HEAD(&xprt_info->list);
4143 kref_init(&xprt_info->ref);
4144 init_completion(&xprt_info->ref_complete);
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +05304145 xprt_info->dynamic_ws = 0;
4146 if (xprt->get_ws_info)
4147 xprt_info->dynamic_ws = xprt->get_ws_info(xprt);
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004148
Arun Kumar Neelakantamd5eb2732018-06-19 14:40:01 +05304149 kthread_init_work(&xprt_info->read_data, do_read_data);
4150 kthread_init_worker(&xprt_info->kworker);
4151 xprt_info->task = kthread_run(kthread_worker_fn,
4152 &xprt_info->kworker,
4153 "%s", xprt->name);
4154 if (IS_ERR(xprt_info->task)) {
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004155 kfree(xprt_info);
4156 return -ENOMEM;
4157 }
Arun Kumar Neelakantamd5eb2732018-06-19 14:40:01 +05304158 if (xprt->get_latency_info && xprt->get_latency_info(xprt))
4159 sched_setscheduler(xprt_info->task, SCHED_FIFO, &param);
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004160
4161 xprt_info->log_ctx = ipc_router_get_log_ctx(xprt->name);
4162
4163 if (!strcmp(xprt->name, "msm_ipc_router_loopback_xprt")) {
4164 xprt_info->remote_node_id = IPC_ROUTER_NID_LOCAL;
4165 xprt_info->initialized = 1;
4166 }
4167
4168 IPC_RTR_INFO(xprt_info->log_ctx, "Adding xprt: [%s]\n", xprt->name);
4169 down_write(&xprt_info_list_lock_lha5);
4170 list_add_tail(&xprt_info->list, &xprt_info_list);
4171 up_write(&xprt_info_list_lock_lha5);
4172
4173 down_write(&routing_table_lock_lha3);
4174 if (!routing_table_inited) {
4175 init_routing_table();
4176 routing_table_inited = 1;
4177 }
4178 up_write(&routing_table_lock_lha3);
4179
4180 xprt->priv = xprt_info;
Arun Kumar Neelakantamf99191d2018-06-11 18:13:43 +05304181 send_hello_msg(xprt_info);
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004182
4183 return 0;
4184}
4185
4186static void msm_ipc_router_remove_xprt(struct msm_ipc_router_xprt *xprt)
4187{
4188 struct msm_ipc_router_xprt_info *xprt_info;
4189 struct rr_packet *temp_pkt, *pkt;
4190
4191 if (xprt && xprt->priv) {
4192 xprt_info = xprt->priv;
4193
4194 IPC_RTR_INFO(xprt_info->log_ctx, "Removing xprt: [%s]\n",
4195 xprt->name);
4196 mutex_lock(&xprt_info->rx_lock_lhb2);
4197 xprt_info->abort_data_read = 1;
4198 mutex_unlock(&xprt_info->rx_lock_lhb2);
Arun Kumar Neelakantamd5eb2732018-06-19 14:40:01 +05304199 kthread_flush_worker(&xprt_info->kworker);
4200 kthread_stop(xprt_info->task);
4201 xprt_info->task = NULL;
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004202 mutex_lock(&xprt_info->rx_lock_lhb2);
4203 list_for_each_entry_safe(pkt, temp_pkt,
4204 &xprt_info->pkt_list, list) {
4205 list_del(&pkt->list);
4206 release_pkt(pkt);
4207 }
4208 mutex_unlock(&xprt_info->rx_lock_lhb2);
4209
4210 down_write(&xprt_info_list_lock_lha5);
4211 list_del(&xprt_info->list);
4212 up_write(&xprt_info_list_lock_lha5);
4213
4214 msm_ipc_cleanup_routing_table(xprt_info);
4215
4216 wakeup_source_trash(&xprt_info->ws);
4217
4218 ipc_router_put_xprt_info_ref(xprt_info);
4219 wait_for_completion(&xprt_info->ref_complete);
4220
4221 xprt->priv = 0;
4222 kfree(xprt_info);
4223 }
4224}
4225
4226struct msm_ipc_router_xprt_work {
4227 struct msm_ipc_router_xprt *xprt;
4228 struct work_struct work;
4229};
4230
4231static void xprt_open_worker(struct work_struct *work)
4232{
4233 struct msm_ipc_router_xprt_work *xprt_work =
4234 container_of(work, struct msm_ipc_router_xprt_work, work);
4235
4236 msm_ipc_router_add_xprt(xprt_work->xprt);
4237 kfree(xprt_work);
4238}
4239
4240static void xprt_close_worker(struct work_struct *work)
4241{
4242 struct msm_ipc_router_xprt_work *xprt_work =
4243 container_of(work, struct msm_ipc_router_xprt_work, work);
4244
4245 msm_ipc_router_remove_xprt(xprt_work->xprt);
4246 xprt_work->xprt->sft_close_done(xprt_work->xprt);
4247 kfree(xprt_work);
4248}
4249
4250void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
4251 unsigned int event,
4252 void *data)
4253{
4254 struct msm_ipc_router_xprt_info *xprt_info = xprt->priv;
4255 struct msm_ipc_router_xprt_work *xprt_work;
Arun Kumar Neelakantam029e8462018-04-19 18:10:47 +05304256 struct msm_ipc_router_remote_port *rport_ptr = NULL;
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004257 struct rr_packet *pkt;
4258 int ret;
4259
4260 ret = ipc_router_core_init();
4261 if (ret < 0) {
4262 IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
4263 __func__, ret);
4264 return;
4265 }
4266
4267 switch (event) {
4268 case IPC_ROUTER_XPRT_EVENT_OPEN:
4269 xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
4270 if (xprt_work) {
4271 xprt_work->xprt = xprt;
4272 INIT_WORK(&xprt_work->work, xprt_open_worker);
4273 queue_work(msm_ipc_router_workqueue, &xprt_work->work);
4274 } else {
4275 IPC_RTR_ERR(
4276 "%s: malloc failure - Couldn't notify OPEN event",
4277 __func__);
4278 }
4279 break;
4280
4281 case IPC_ROUTER_XPRT_EVENT_CLOSE:
4282 xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
4283 if (xprt_work) {
4284 xprt_work->xprt = xprt;
4285 INIT_WORK(&xprt_work->work, xprt_close_worker);
4286 queue_work(msm_ipc_router_workqueue, &xprt_work->work);
4287 } else {
4288 IPC_RTR_ERR(
4289 "%s: malloc failure - Couldn't notify CLOSE event",
4290 __func__);
4291 }
4292 break;
4293 }
4294
4295 if (!data)
4296 return;
4297
4298 while (!xprt_info) {
4299 msleep(100);
4300 xprt_info = xprt->priv;
4301 }
4302
4303 pkt = clone_pkt((struct rr_packet *)data);
4304 if (!pkt)
4305 return;
4306
Arun Kumar Neelakantam029e8462018-04-19 18:10:47 +05304307 if (pkt->length < calc_rx_header_size(xprt_info) ||
4308 pkt->length > MAX_IPC_PKT_SIZE) {
4309 IPC_RTR_ERR("%s: Invalid pkt length %d\n",
4310 __func__, pkt->length);
4311 release_pkt(pkt);
4312 return;
4313 }
4314
4315 ret = extract_header(pkt);
4316 if (ret < 0) {
4317 release_pkt(pkt);
4318 return;
4319 }
4320
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +05304321 pkt->ws_need = false;
Arun Kumar Neelakantam029e8462018-04-19 18:10:47 +05304322
4323 if (pkt->hdr.type == IPC_ROUTER_CTRL_CMD_DATA)
4324 rport_ptr = ipc_router_get_rport_ref(pkt->hdr.src_node_id,
4325 pkt->hdr.src_port_id);
4326
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004327 mutex_lock(&xprt_info->rx_lock_lhb2);
4328 list_add_tail(&pkt->list, &xprt_info->pkt_list);
Arun Kumar Neelakantam029e8462018-04-19 18:10:47 +05304329	/* Check whether the packet is from a SENSOR service and, if so,
4330	 * avoid holding both the edge and port specific wake-up sources.
4331	 */
4332 if (!is_sensor_port(rport_ptr)) {
4333 if (!xprt_info->dynamic_ws) {
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +05304334 __pm_stay_awake(&xprt_info->ws);
4335 pkt->ws_need = true;
Arun Kumar Neelakantam029e8462018-04-19 18:10:47 +05304336 } else {
4337 if (is_wakeup_source_allowed) {
4338 __pm_stay_awake(&xprt_info->ws);
4339 pkt->ws_need = true;
4340 }
Arun Kumar Neelakantam74ff8562017-05-26 17:57:52 +05304341 }
4342 }
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004343 mutex_unlock(&xprt_info->rx_lock_lhb2);
Deepak Kumar Singh77d41ac2018-09-19 17:15:17 +05304344 if (rport_ptr)
4345 kref_put(&rport_ptr->ref, ipc_router_release_rport);
Arun Kumar Neelakantamd5eb2732018-06-19 14:40:01 +05304346 kthread_queue_work(&xprt_info->kworker, &xprt_info->read_data);
Karthikeyan Ramasubramanian6a116d62016-09-16 16:05:32 -06004347}
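
/*
 * Illustrative usage sketch (not part of this driver): an XPRT driver
 * reporting link state changes. "my_xprt" is hypothetical and is assumed to
 * have its callbacks (close, sft_close_done, etc.) populated; passing NULL
 * data only queues the OPEN/CLOSE work without enqueuing a packet.
 *
 *	msm_ipc_router_xprt_notify(&my_xprt, IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
 *	...
 *	msm_ipc_router_xprt_notify(&my_xprt, IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
 */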
4348
4349/**
4350 * parse_devicetree() - parse device tree binding
4351 *
4352 * @node: pointer to device tree node
4353 *
4354 * @return: 0 on success, -ENODEV on failure.
4355 */
4356static int parse_devicetree(struct device_node *node)
4357{
4358 char *key;
4359 const char *peripheral = NULL;
4360
4361 key = "qcom,default-peripheral";
4362 peripheral = of_get_property(node, key, NULL);
4363 if (peripheral)
4364 strlcpy(default_peripheral, peripheral, PIL_SUBSYSTEM_NAME_LEN);
4365
4366 return 0;
4367}
4368
4369/**
4370 * ipc_router_probe() - Probe the IPC Router
4371 *
4372 * @pdev: Platform device corresponding to IPC Router.
4373 *
4374 * @return: 0 on success, standard Linux error codes on error.
4375 *
4376 * This function is called when the underlying device tree driver registers
4377 * a platform device, mapped to IPC Router.
4378 */
4379static int ipc_router_probe(struct platform_device *pdev)
4380{
4381 int ret = 0;
4382
4383 if (pdev && pdev->dev.of_node) {
4384 ret = parse_devicetree(pdev->dev.of_node);
4385 if (ret)
4386 IPC_RTR_ERR("%s: Failed to parse device tree\n",
4387 __func__);
4388 }
4389 return ret;
4390}
4391
4392static const struct of_device_id ipc_router_match_table[] = {
4393 { .compatible = "qcom,ipc_router" },
4394 {},
4395};
4396
4397static struct platform_driver ipc_router_driver = {
4398 .probe = ipc_router_probe,
4399 .driver = {
4400 .name = MODULE_NAME,
4401 .owner = THIS_MODULE,
4402 .of_match_table = ipc_router_match_table,
4403 },
4404};
4405
4406/**
4407 * ipc_router_core_init() - Initialize all IPC Router core data structures
4408 *
4409 * Return: 0 on success or a standard error code otherwise.
4410 *
4411 * This function initializes only the core data structures of the IPC Router
4412 * module. The remaining initialization is done in msm_ipc_router_init().
4413 */
4414static int ipc_router_core_init(void)
4415{
4416 int i;
4417 int ret;
4418 struct msm_ipc_routing_table_entry *rt_entry;
4419
4420 mutex_lock(&ipc_router_init_lock);
4421 if (likely(is_ipc_router_inited)) {
4422 mutex_unlock(&ipc_router_init_lock);
4423 return 0;
4424 }
4425
4426 debugfs_init();
4427
4428 for (i = 0; i < SRV_HASH_SIZE; i++)
4429 INIT_LIST_HEAD(&server_list[i]);
4430
4431 for (i = 0; i < LP_HASH_SIZE; i++)
4432 INIT_LIST_HEAD(&local_ports[i]);
4433
4434 down_write(&routing_table_lock_lha3);
4435 if (!routing_table_inited) {
4436 init_routing_table();
4437 routing_table_inited = 1;
4438 }
4439 up_write(&routing_table_lock_lha3);
4440 rt_entry = create_routing_table_entry(IPC_ROUTER_NID_LOCAL, NULL);
4441 kref_put(&rt_entry->ref, ipc_router_release_rtentry);
4442
4443 msm_ipc_router_workqueue =
4444 create_singlethread_workqueue("msm_ipc_router");
4445 if (!msm_ipc_router_workqueue) {
4446 mutex_unlock(&ipc_router_init_lock);
4447 return -ENOMEM;
4448 }
4449
4450 ret = msm_ipc_router_security_init();
4451 if (ret < 0)
4452 IPC_RTR_ERR("%s: Security Init failed\n", __func__);
4453 else
4454 is_ipc_router_inited = true;
4455 mutex_unlock(&ipc_router_init_lock);
4456
4457 return ret;
4458}
4459
4460static int msm_ipc_router_init(void)
4461{
4462 int ret;
4463
4464 ret = ipc_router_core_init();
4465 if (ret < 0)
4466 return ret;
4467
4468 ret = platform_driver_register(&ipc_router_driver);
4469 if (ret)
4470 IPC_RTR_ERR(
4471 "%s: ipc_router_driver register failed %d\n", __func__, ret);
4472
4473 ret = msm_ipc_router_init_sockets();
4474 if (ret < 0)
4475 IPC_RTR_ERR("%s: Init sockets failed\n", __func__);
4476
4477 ipc_router_log_ctx_init();
4478 return ret;
4479}
4480
4481module_init(msm_ipc_router_init);
4482MODULE_DESCRIPTION("MSM IPC Router");
4483MODULE_LICENSE("GPL v2");