/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/ipc_logging.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/uio.h>
#include <soc/qcom/glink.h>
#include <soc/qcom/tracer_pkt.h>
#include "glink_loopback_commands.h"


/* Number of internal IPC Logging log pages */
#define GLINK_LBSRV_NUM_LOG_PAGES	3

static void *glink_lbsrv_log_ctx;

#define GLINK_LBSRV_IPC_LOG_STR(x...) do { \
	if (glink_lbsrv_log_ctx) \
		ipc_log_string(glink_lbsrv_log_ctx, x); \
} while (0)

#define LBSRV_INFO(x...) GLINK_LBSRV_IPC_LOG_STR("<LBSRV> " x)

#define LBSRV_ERR(x...) do { \
	pr_err("<LBSRV> " x); \
	GLINK_LBSRV_IPC_LOG_STR("<LBSRV> " x); \
} while (0)

enum ch_type {
	CTL,
	DATA,
};

enum buf_type {
	LINEAR,
	VECTOR,
};

struct tx_config_info {
	uint32_t random_delay;
	uint32_t delay_ms;
	uint32_t echo_count;
	uint32_t transform_type;
};

struct rx_done_config_info {
	uint32_t random_delay;
	uint32_t delay_ms;
};

struct rmt_rx_intent_req_work_info {
	size_t req_intent_size;
	struct delayed_work work;
	struct ch_info *work_ch_info;
};

struct queue_rx_intent_work_info {
	uint32_t req_id;
	bool deferred;
	struct ch_info *req_ch_info;
	uint32_t num_intents;
	uint32_t intent_size;
	uint32_t random_delay;
	uint32_t delay_ms;
	struct delayed_work work;
	struct ch_info *work_ch_info;
};

struct lbsrv_vec {
	uint32_t num_bufs;
	struct kvec vec[0];
};

struct tx_work_info {
	struct tx_config_info tx_config;
	struct delayed_work work;
	struct ch_info *tx_ch_info;
	void *data;
	bool tracer_pkt;
	uint32_t buf_type;
	size_t size;
	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
};

struct rx_done_work_info {
	struct delayed_work work;
	struct ch_info *rx_done_ch_info;
	void *ptr;
};

struct rx_work_info {
	struct ch_info *rx_ch_info;
	void *pkt_priv;
	void *ptr;
	bool tracer_pkt;
	uint32_t buf_type;
	size_t size;
	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
	struct delayed_work work;
};

struct ch_info {
	struct list_head list;
	struct mutex ch_info_lock;
	char name[MAX_NAME_LEN];
	char edge[GLINK_NAME_SIZE];
	char transport[GLINK_NAME_SIZE];
	void *handle;
	bool fully_opened;
	uint32_t type;
	struct delayed_work open_work;
	struct delayed_work close_work;
	struct tx_config_info tx_config;
	struct rx_done_config_info rx_done_config;
	struct queue_rx_intent_work_info *queue_rx_intent_work_info;
};

struct ctl_ch_info {
	char name[MAX_NAME_LEN];
	char edge[GLINK_NAME_SIZE];
	char transport[GLINK_NAME_SIZE];
};

static struct ctl_ch_info ctl_ch_tbl[] = {
	{"LOCAL_LOOPBACK_SRV", "local", "lloop"},
	{"LOOPBACK_CTL_APSS", "mpss", "smem"},
	{"LOOPBACK_CTL_APSS", "lpass", "smem"},
	{"LOOPBACK_CTL_APSS", "dsps", "smem"},
	{"LOOPBACK_CTL_APPS", "cdsp", "smem"},
	{"LOOPBACK_CTL_APSS", "spss", "mailbox"},
	{"LOOPBACK_CTL_APSS", "wdsp", "spi"},
};

static DEFINE_MUTEX(ctl_ch_list_lock);
static LIST_HEAD(ctl_ch_list);
static DEFINE_MUTEX(data_ch_list_lock);
static LIST_HEAD(data_ch_list);

struct workqueue_struct *glink_lbsrv_wq;

/**
 * link_state_work_info - Information about work handling link state updates
 * edge: Remote subsystem name in the link.
 * transport: Name of the transport/link.
 * link_state: State of the transport/link.
 * work: Reference to the work item.
 */
struct link_state_work_info {
	char edge[GLINK_NAME_SIZE];
	char transport[GLINK_NAME_SIZE];
	enum glink_link_state link_state;
	struct delayed_work work;
};

static void glink_lbsrv_link_state_cb(struct glink_link_state_cb_info *cb_info,
				      void *priv);
static struct glink_link_info glink_lbsrv_link_info = {
	NULL, NULL, glink_lbsrv_link_state_cb};
static void *glink_lbsrv_link_state_notif_handle;

static void glink_lbsrv_open_worker(struct work_struct *work);
static void glink_lbsrv_close_worker(struct work_struct *work);
static void glink_lbsrv_rmt_rx_intent_req_worker(struct work_struct *work);
static void glink_lbsrv_queue_rx_intent_worker(struct work_struct *work);
static void glink_lbsrv_rx_worker(struct work_struct *work);
static void glink_lbsrv_rx_done_worker(struct work_struct *work);
static void glink_lbsrv_tx_worker(struct work_struct *work);

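/**
 * glink_lbsrv_send_response() - Send a response to a client request
 * handle: Channel handle on which the response is transmitted.
 * req_id: Identifier of the request being responded to.
 * req_type: Type of the request being responded to.
 * response: Result code for the request.
 *
 * Return: 0 on success, standard Linux error code otherwise.
 */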
int glink_lbsrv_send_response(void *handle, uint32_t req_id, uint32_t req_type,
			      uint32_t response)
{
	struct resp *resp_pkt = kzalloc(sizeof(struct resp), GFP_KERNEL);

	if (!resp_pkt) {
		LBSRV_ERR("%s: Error allocating response packet\n", __func__);
		return -ENOMEM;
	}

	resp_pkt->req_id = req_id;
	resp_pkt->req_type = req_type;
	resp_pkt->response = response;

	return glink_tx(handle, (void *)LINEAR, (void *)resp_pkt,
			sizeof(struct resp), 0);
}

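/**
 * calc_delay_ms() - Compute the delay to apply to a loopback operation
 * random_delay: Non-zero if a random delay is requested.
 * delay_ms: Fixed delay in milliseconds, 0 if none.
 *
 * When both parameters are set, the delay is a random value bounded by
 * delay_ms. Return: Delay to be applied, in milliseconds.
 */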
static uint32_t calc_delay_ms(uint32_t random_delay, uint32_t delay_ms)
{
	uint32_t tmp_delay_ms;

	if (random_delay && delay_ms)
		tmp_delay_ms = prandom_u32() % delay_ms;
	else if (random_delay)
		tmp_delay_ms = prandom_u32();
	else
		tmp_delay_ms = delay_ms;

	return tmp_delay_ms;
}

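/**
 * create_ch_info() - Allocate and register a new channel info structure
 * name: Name of the channel.
 * edge: Remote subsystem name for the channel.
 * transport: Name of the transport the channel runs over.
 * type: Channel type, CTL or DATA.
 * ret_ch_info: Output parameter that receives the new channel info.
 *
 * The new channel info is added to the control or data channel list based
 * on its type. Return: 0 on success, standard Linux error code otherwise.
 */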
static int create_ch_info(char *name, char *edge, char *transport,
			  uint32_t type, struct ch_info **ret_ch_info)
{
	struct ch_info *tmp_ch_info;

	tmp_ch_info = kzalloc(sizeof(struct ch_info), GFP_KERNEL);
	if (!tmp_ch_info) {
		LBSRV_ERR("%s: Error allocating ch_info\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&tmp_ch_info->list);
	mutex_init(&tmp_ch_info->ch_info_lock);
	strlcpy(tmp_ch_info->name, name, MAX_NAME_LEN);
	strlcpy(tmp_ch_info->edge, edge, GLINK_NAME_SIZE);
	strlcpy(tmp_ch_info->transport, transport, GLINK_NAME_SIZE);
	tmp_ch_info->type = type;
	INIT_DELAYED_WORK(&tmp_ch_info->open_work, glink_lbsrv_open_worker);
	INIT_DELAYED_WORK(&tmp_ch_info->close_work, glink_lbsrv_close_worker);
	tmp_ch_info->tx_config.echo_count = 1;

	if (type == CTL) {
		mutex_lock(&ctl_ch_list_lock);
		list_add_tail(&tmp_ch_info->list, &ctl_ch_list);
		mutex_unlock(&ctl_ch_list_lock);
	} else if (type == DATA) {
		mutex_lock(&data_ch_list_lock);
		list_add_tail(&tmp_ch_info->list, &data_ch_list);
		mutex_unlock(&data_ch_list_lock);
	} else {
		LBSRV_ERR("%s:%s:%s %s: Invalid ch type %d\n", transport,
			  edge, name, __func__, type);
		kfree(tmp_ch_info);
		return -EINVAL;
	}
	*ret_ch_info = tmp_ch_info;
	return 0;
}

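/**
 * lookup_ch_list() - Find a channel info by name, edge and transport
 * name: Name of the channel.
 * edge: Remote subsystem name for the channel.
 * transport: Name of the transport the channel runs over.
 * type: Channel type, CTL or DATA, selecting the list to search.
 *
 * Return: Pointer to the matching channel info, NULL if none is found.
 */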
struct ch_info *lookup_ch_list(char *name, char *edge, char *transport,
			       uint32_t type)
{
	struct list_head *ch_list;
	struct mutex *lock;
	struct ch_info *tmp_ch_info;

	if (type == DATA) {
		ch_list = &data_ch_list;
		lock = &data_ch_list_lock;
	} else if (type == CTL) {
		ch_list = &ctl_ch_list;
		lock = &ctl_ch_list_lock;
	} else {
		LBSRV_ERR("%s:%s:%s %s: Invalid ch type %d\n", transport,
			  edge, name, __func__, type);
		return NULL;
	}

	mutex_lock(lock);
	list_for_each_entry(tmp_ch_info, ch_list, list) {
		if (!strcmp(name, tmp_ch_info->name) &&
		    !strcmp(edge, tmp_ch_info->edge) &&
		    !strcmp(transport, tmp_ch_info->transport)) {
			mutex_unlock(lock);
			return tmp_ch_info;
		}
	}
	mutex_unlock(lock);
	return NULL;
}

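/**
 * glink_lbsrv_handle_open_req() - Handle an OPEN request from a client
 * rx_ch_info: Control channel on which the request was received.
 * req: The open request payload.
 *
 * On the local loopback transport, the "_CLNT" suffix of the requested
 * channel name is replaced with "_SRV". The matching data channel info is
 * looked up, or created if it does not exist, and its open work is queued
 * after the requested delay. Return: 0 on success, standard Linux error
 * code otherwise.
 */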
int glink_lbsrv_handle_open_req(struct ch_info *rx_ch_info,
				struct open_req req)
{
	struct ch_info *tmp_ch_info;
	int ret;
	char name[MAX_NAME_LEN];
	char *temp;

	strlcpy(name, req.ch_name, MAX_NAME_LEN);
	if (!strcmp(rx_ch_info->transport, "lloop")) {
		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
		if (temp)
			*temp = '\0';
		strlcat(name, "_SRV", MAX_NAME_LEN);
	}
	LBSRV_INFO("%s:%s:%s %s: delay_ms[%d]\n",
		   rx_ch_info->transport, rx_ch_info->edge,
		   name, __func__, req.delay_ms);
	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
				     rx_ch_info->transport, DATA);
	if (tmp_ch_info)
		goto queue_open_work;

	ret = create_ch_info(name, rx_ch_info->edge, rx_ch_info->transport,
			     DATA, &tmp_ch_info);
	if (ret)
		return ret;
queue_open_work:
	queue_delayed_work(glink_lbsrv_wq, &tmp_ch_info->open_work,
			   msecs_to_jiffies(req.delay_ms));
	return 0;
}

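/**
 * glink_lbsrv_handle_close_req() - Handle a CLOSE request from a client
 * rx_ch_info: Control channel on which the request was received.
 * req: The close request payload.
 *
 * Queues the close work of the matching data channel, if one exists, after
 * the requested delay. Return: 0 in all cases.
 */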
int glink_lbsrv_handle_close_req(struct ch_info *rx_ch_info,
				 struct close_req req)
{
	struct ch_info *tmp_ch_info;
	char name[MAX_NAME_LEN];
	char *temp;

	strlcpy(name, req.ch_name, MAX_NAME_LEN);
	if (!strcmp(rx_ch_info->transport, "lloop")) {
		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
		if (temp)
			*temp = '\0';
		strlcat(name, "_SRV", MAX_NAME_LEN);
	}
	LBSRV_INFO("%s:%s:%s %s: delay_ms[%d]\n",
		   rx_ch_info->transport, rx_ch_info->edge,
		   name, __func__, req.delay_ms);
	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
				     rx_ch_info->transport, DATA);
	if (tmp_ch_info)
		queue_delayed_work(glink_lbsrv_wq, &tmp_ch_info->close_work,
				   msecs_to_jiffies(req.delay_ms));
	return 0;
}

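/**
 * glink_lbsrv_handle_queue_rx_intent_config_req() - Handle a
 *	QUEUE_RX_INTENT_CONFIG request from a client
 * rx_ch_info: Control channel on which the request was received.
 * req: The queue rx intent config request payload.
 * req_id: Identifier of the request, used in the response.
 *
 * If the data channel is fully opened, the intent queueing work is started
 * immediately; otherwise it is deferred until the channel connects. The
 * response is sent from here when the work is deferred or delayed, and
 * from the worker once all intents are queued otherwise. Return: 0 on
 * success, standard Linux error code otherwise.
 */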
int glink_lbsrv_handle_queue_rx_intent_config_req(struct ch_info *rx_ch_info,
		struct queue_rx_intent_config_req req, uint32_t req_id)
{
	struct ch_info *tmp_ch_info;
	struct queue_rx_intent_work_info *tmp_work_info;
	char name[MAX_NAME_LEN];
	char *temp;
	uint32_t delay_ms;

	strlcpy(name, req.ch_name, MAX_NAME_LEN);
	if (!strcmp(rx_ch_info->transport, "lloop")) {
		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
		if (temp)
			*temp = '\0';
		strlcat(name, "_SRV", MAX_NAME_LEN);
	}
	LBSRV_INFO("%s:%s:%s %s: num_intents[%d] size[%d]\n",
		   rx_ch_info->transport, rx_ch_info->edge, name, __func__,
		   req.num_intents, req.intent_size);
	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
				     rx_ch_info->transport, DATA);
	if (!tmp_ch_info) {
		LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  name, __func__);
		return -EINVAL;
	}

	tmp_work_info = kzalloc(sizeof(struct queue_rx_intent_work_info),
				GFP_KERNEL);
	if (!tmp_work_info) {
		LBSRV_ERR("%s: Error allocating work_info\n", __func__);
		return -ENOMEM;
	}

	tmp_work_info->req_id = req_id;
	tmp_work_info->req_ch_info = rx_ch_info;
	tmp_work_info->num_intents = req.num_intents;
	tmp_work_info->intent_size = req.intent_size;
	tmp_work_info->random_delay = req.random_delay;
	tmp_work_info->delay_ms = req.delay_ms;
	INIT_DELAYED_WORK(&tmp_work_info->work,
			  glink_lbsrv_queue_rx_intent_worker);
	tmp_work_info->work_ch_info = tmp_ch_info;

	mutex_lock(&tmp_ch_info->ch_info_lock);
	if (tmp_ch_info->fully_opened) {
		mutex_unlock(&tmp_ch_info->ch_info_lock);
		delay_ms = calc_delay_ms(tmp_work_info->random_delay,
					 tmp_work_info->delay_ms);
		queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
				   msecs_to_jiffies(delay_ms));

		if (tmp_work_info->random_delay || tmp_work_info->delay_ms)
			glink_lbsrv_send_response(rx_ch_info->handle, req_id,
						  QUEUE_RX_INTENT_CONFIG, 0);
	} else {
		tmp_work_info->deferred = true;
		tmp_ch_info->queue_rx_intent_work_info = tmp_work_info;
		mutex_unlock(&tmp_ch_info->ch_info_lock);

		glink_lbsrv_send_response(rx_ch_info->handle, req_id,
					  QUEUE_RX_INTENT_CONFIG, 0);
	}

	return 0;
}

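/**
 * glink_lbsrv_handle_tx_config_req() - Handle a TX_CONFIG request
 * rx_ch_info: Control channel on which the request was received.
 * req: The tx config request payload.
 *
 * Updates the delay, echo count and transform type used when echoing data
 * back on the matching data channel. Return: 0 on success, standard Linux
 * error code otherwise.
 */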
int glink_lbsrv_handle_tx_config_req(struct ch_info *rx_ch_info,
				     struct tx_config_req req)
{
	struct ch_info *tmp_ch_info;
	char name[MAX_NAME_LEN];
	char *temp;

	strlcpy(name, req.ch_name, MAX_NAME_LEN);
	if (!strcmp(rx_ch_info->transport, "lloop")) {
		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
		if (temp)
			*temp = '\0';
		strlcat(name, "_SRV", MAX_NAME_LEN);
	}
	LBSRV_INFO("%s:%s:%s %s: echo_count[%d] transform[%d]\n",
		   rx_ch_info->transport, rx_ch_info->edge, name, __func__,
		   req.echo_count, req.transform_type);
	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
				     rx_ch_info->transport, DATA);
	if (!tmp_ch_info) {
		LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  name, __func__);
		return -EINVAL;
	}

	mutex_lock(&tmp_ch_info->ch_info_lock);
	tmp_ch_info->tx_config.random_delay = req.random_delay;
	tmp_ch_info->tx_config.delay_ms = req.delay_ms;
	tmp_ch_info->tx_config.echo_count = req.echo_count;
	tmp_ch_info->tx_config.transform_type = req.transform_type;
	mutex_unlock(&tmp_ch_info->ch_info_lock);
	return 0;
}

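/**
 * glink_lbsrv_handle_rx_done_config_req() - Handle an RX_DONE_CONFIG request
 * rx_ch_info: Control channel on which the request was received.
 * req: The rx done config request payload.
 *
 * Updates the delay applied before glink_rx_done() is called for packets
 * received on the matching data channel. Return: 0 on success, standard
 * Linux error code otherwise.
 */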
int glink_lbsrv_handle_rx_done_config_req(struct ch_info *rx_ch_info,
					  struct rx_done_config_req req)
{
	struct ch_info *tmp_ch_info;
	char name[MAX_NAME_LEN];
	char *temp;

	strlcpy(name, req.ch_name, MAX_NAME_LEN);
	if (!strcmp(rx_ch_info->transport, "lloop")) {
		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
		if (temp)
			*temp = '\0';
		strlcat(name, "_SRV", MAX_NAME_LEN);
	}
	LBSRV_INFO("%s:%s:%s %s: delay_ms[%d] random_delay[%d]\n",
		   rx_ch_info->transport, rx_ch_info->edge, name,
		   __func__, req.delay_ms, req.random_delay);
	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
				     rx_ch_info->transport, DATA);
	if (!tmp_ch_info) {
		LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  name, __func__);
		return -EINVAL;
	}

	mutex_lock(&tmp_ch_info->ch_info_lock);
	tmp_ch_info->rx_done_config.random_delay = req.random_delay;
	tmp_ch_info->rx_done_config.delay_ms = req.delay_ms;
	mutex_unlock(&tmp_ch_info->ch_info_lock);
	return 0;
}

/**
 * glink_lbsrv_handle_req() - Handle the request commands received from clients
 *
 * rx_ch_info: Channel info on which the request is received.
 * pkt: Request structure received from a client.
 *
 * This function handles all supported request types received from a client
 * and sends the response back to the client.
 */
void glink_lbsrv_handle_req(struct ch_info *rx_ch_info, struct req pkt)
{
	int ret;

	LBSRV_INFO("%s:%s:%s %s: Request packet type[%d]:id[%d]\n",
		   rx_ch_info->transport, rx_ch_info->edge,
		   rx_ch_info->name, __func__, pkt.hdr.req_type,
		   pkt.hdr.req_id);
	switch (pkt.hdr.req_type) {
	case OPEN:
		ret = glink_lbsrv_handle_open_req(rx_ch_info,
						  pkt.payload.open);
		break;
	case CLOSE:
		ret = glink_lbsrv_handle_close_req(rx_ch_info,
						   pkt.payload.close);
		break;
	case QUEUE_RX_INTENT_CONFIG:
		ret = glink_lbsrv_handle_queue_rx_intent_config_req(
			rx_ch_info, pkt.payload.q_rx_int_conf, pkt.hdr.req_id);
		break;
	case TX_CONFIG:
		ret = glink_lbsrv_handle_tx_config_req(rx_ch_info,
						       pkt.payload.tx_conf);
		break;
	case RX_DONE_CONFIG:
		ret = glink_lbsrv_handle_rx_done_config_req(rx_ch_info,
						pkt.payload.rx_done_conf);
		break;
	default:
		LBSRV_ERR("%s:%s:%s %s: Invalid Request type [%d]\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  rx_ch_info->name, __func__, pkt.hdr.req_type);
		ret = -1;
		break;
	}

	if (pkt.hdr.req_type != QUEUE_RX_INTENT_CONFIG)
		glink_lbsrv_send_response(rx_ch_info->handle, pkt.hdr.req_id,
					  pkt.hdr.req_type, ret);
}

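/**
 * glink_lbsrv_vbuf_provider() - Virtual buffer provider for vector transmits
 * iovec: Pointer to a struct lbsrv_vec holding the vector of buffers.
 * offset: Offset into the logical packet.
 * buf_size: Output parameter for the number of contiguous bytes available.
 *
 * Walks the kvec array to find the buffer containing the requested offset.
 * Return: Virtual address of the data at offset, NULL when offset is
 * beyond the end of the vector.
 */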
static void *glink_lbsrv_vbuf_provider(void *iovec, size_t offset,
				       size_t *buf_size)
{
	struct lbsrv_vec *tmp_vec_info = (struct lbsrv_vec *)iovec;
	uint32_t i;
	size_t temp_size = 0;

	for (i = 0; i < tmp_vec_info->num_bufs; i++) {
		temp_size += tmp_vec_info->vec[i].iov_len;
		if (offset >= temp_size)
			continue;
		*buf_size = temp_size - offset;
		return (void *)tmp_vec_info->vec[i].iov_base +
			tmp_vec_info->vec[i].iov_len - *buf_size;
	}
	*buf_size = 0;
	return NULL;
}

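/**
 * glink_lbsrv_free_data() - Free a linear or vector data buffer
 * data: Buffer to free, a linear buffer or a struct lbsrv_vec.
 * buf_type: LINEAR or VECTOR, identifying the buffer layout.
 */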
static void glink_lbsrv_free_data(void *data, uint32_t buf_type)
{
	struct lbsrv_vec *tmp_vec_info;
	uint32_t i;

	if (buf_type == LINEAR) {
		kfree(data);
	} else {
		tmp_vec_info = (struct lbsrv_vec *)data;
		for (i = 0; i < tmp_vec_info->num_bufs; i++) {
			kfree(tmp_vec_info->vec[i].iov_base);
			tmp_vec_info->vec[i].iov_base = NULL;
		}
		kfree(tmp_vec_info);
	}
}

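/**
 * copy_linear_data() - Copy a received linear buffer for echoing
 * tmp_rx_work_info: RX work item describing the received packet.
 *
 * Return: Newly allocated copy of the received data, NULL on allocation
 * failure.
 */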
static void *copy_linear_data(struct rx_work_info *tmp_rx_work_info)
{
	char *data;
	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;

	data = kmalloc(tmp_rx_work_info->size, GFP_KERNEL);
	if (data)
		memcpy(data, tmp_rx_work_info->ptr, tmp_rx_work_info->size);
	else
		LBSRV_ERR("%s:%s:%s %s: Error allocating the data\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  rx_ch_info->name, __func__);
	return data;
}

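/**
 * copy_vector_data() - Copy a received vector buffer for echoing
 * tmp_rx_work_info: RX work item describing the received packet.
 *
 * The vector is walked once using the buffer provider to count the
 * buffers, then walked again to copy each buffer into a newly allocated
 * struct lbsrv_vec. Return: The new vector info, NULL on failure.
 */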
static void *copy_vector_data(struct rx_work_info *tmp_rx_work_info)
{
	uint32_t num_bufs = 0;
	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
	struct lbsrv_vec *tmp_vec_info;
	void *buf, *pbuf, *dest_buf;
	size_t offset = 0;
	size_t buf_size;
	uint32_t i;

	do {
		if (tmp_rx_work_info->vbuf_provider)
			buf = tmp_rx_work_info->vbuf_provider(
				tmp_rx_work_info->ptr, offset, &buf_size);
		else
			buf = tmp_rx_work_info->pbuf_provider(
				tmp_rx_work_info->ptr, offset, &buf_size);
		if (!buf)
			break;
		offset += buf_size;
		num_bufs++;
	} while (buf);

	tmp_vec_info = kzalloc(sizeof(*tmp_vec_info) +
			       num_bufs * sizeof(struct kvec), GFP_KERNEL);
	if (!tmp_vec_info) {
		LBSRV_ERR("%s:%s:%s %s: Error allocating vector info\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  rx_ch_info->name, __func__);
		return NULL;
	}
	tmp_vec_info->num_bufs = num_bufs;

	offset = 0;
	for (i = 0; i < num_bufs; i++) {
		if (tmp_rx_work_info->vbuf_provider) {
			buf = tmp_rx_work_info->vbuf_provider(
				tmp_rx_work_info->ptr, offset, &buf_size);
		} else {
			pbuf = tmp_rx_work_info->pbuf_provider(
				tmp_rx_work_info->ptr, offset, &buf_size);
			buf = phys_to_virt((unsigned long)pbuf);
		}
		dest_buf = kmalloc(buf_size, GFP_KERNEL);
		if (!dest_buf) {
			LBSRV_ERR("%s:%s:%s %s: Error allocating data\n",
				  rx_ch_info->transport, rx_ch_info->edge,
				  rx_ch_info->name, __func__);
			goto out_copy_vector_data;
		}
		memcpy(dest_buf, buf, buf_size);
		tmp_vec_info->vec[i].iov_base = dest_buf;
		tmp_vec_info->vec[i].iov_len = buf_size;
		offset += buf_size;
	}
	return tmp_vec_info;
out_copy_vector_data:
	glink_lbsrv_free_data((void *)tmp_vec_info, VECTOR);
	return NULL;
}

static void *glink_lbsrv_copy_data(struct rx_work_info *tmp_rx_work_info)
{
	if (tmp_rx_work_info->buf_type == LINEAR)
		return copy_linear_data(tmp_rx_work_info);
	else
		return copy_vector_data(tmp_rx_work_info);
}

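/**
 * glink_lbsrv_handle_data() - Echo a packet received on a data channel
 * tmp_rx_work_info: RX work item describing the received packet.
 *
 * Copies the received data, schedules rx done work to release the receive
 * buffer after the configured delay, and schedules tx work to echo the
 * copy back to the client according to the channel's tx configuration.
 * Return: 0 on success, standard Linux error code otherwise.
 */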
static int glink_lbsrv_handle_data(struct rx_work_info *tmp_rx_work_info)
{
	void *data;
	int ret;
	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
	struct tx_work_info *tmp_tx_work_info;
	struct rx_done_work_info *tmp_rx_done_work_info;
	uint32_t delay_ms;

	data = glink_lbsrv_copy_data(tmp_rx_work_info);
	if (!data) {
		ret = -ENOMEM;
		goto out_handle_data;
	}

	tmp_rx_done_work_info = kmalloc(sizeof(struct rx_done_work_info),
					GFP_KERNEL);
	if (!tmp_rx_done_work_info) {
		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_done_work_info\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  rx_ch_info->name, __func__);
		glink_lbsrv_free_data(data, tmp_rx_work_info->buf_type);
		ret = -ENOMEM;
		goto out_handle_data;
	}
	INIT_DELAYED_WORK(&tmp_rx_done_work_info->work,
			  glink_lbsrv_rx_done_worker);
	tmp_rx_done_work_info->rx_done_ch_info = rx_ch_info;
	tmp_rx_done_work_info->ptr = tmp_rx_work_info->ptr;
	delay_ms = calc_delay_ms(rx_ch_info->rx_done_config.random_delay,
				 rx_ch_info->rx_done_config.delay_ms);
	queue_delayed_work(glink_lbsrv_wq, &tmp_rx_done_work_info->work,
			   msecs_to_jiffies(delay_ms));

	tmp_tx_work_info = kmalloc(sizeof(struct tx_work_info), GFP_KERNEL);
	if (!tmp_tx_work_info) {
		LBSRV_ERR("%s:%s:%s %s: Error allocating tx_work_info\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  rx_ch_info->name, __func__);
		glink_lbsrv_free_data(data, tmp_rx_work_info->buf_type);
		return -ENOMEM;
	}
	mutex_lock(&rx_ch_info->ch_info_lock);
	tmp_tx_work_info->tx_config.random_delay =
				rx_ch_info->tx_config.random_delay;
	tmp_tx_work_info->tx_config.delay_ms = rx_ch_info->tx_config.delay_ms;
	tmp_tx_work_info->tx_config.echo_count =
				rx_ch_info->tx_config.echo_count;
	tmp_tx_work_info->tx_config.transform_type =
				rx_ch_info->tx_config.transform_type;
	mutex_unlock(&rx_ch_info->ch_info_lock);
	INIT_DELAYED_WORK(&tmp_tx_work_info->work, glink_lbsrv_tx_worker);
	tmp_tx_work_info->tx_ch_info = rx_ch_info;
	tmp_tx_work_info->data = data;
	tmp_tx_work_info->tracer_pkt = tmp_rx_work_info->tracer_pkt;
	tmp_tx_work_info->buf_type = tmp_rx_work_info->buf_type;
	tmp_tx_work_info->size = tmp_rx_work_info->size;
	if (tmp_tx_work_info->buf_type == VECTOR)
		tmp_tx_work_info->vbuf_provider = glink_lbsrv_vbuf_provider;
	else
		tmp_tx_work_info->vbuf_provider = NULL;
	tmp_tx_work_info->pbuf_provider = NULL;
	delay_ms = calc_delay_ms(tmp_tx_work_info->tx_config.random_delay,
				 tmp_tx_work_info->tx_config.delay_ms);
	queue_delayed_work(glink_lbsrv_wq, &tmp_tx_work_info->work,
			   msecs_to_jiffies(delay_ms));
	return 0;
out_handle_data:
	glink_rx_done(rx_ch_info->handle, tmp_rx_work_info->ptr, false);
	return ret;
}

void glink_lpbsrv_notify_rx(void *handle, const void *priv,
			    const void *pkt_priv, const void *ptr, size_t size)
{
	struct rx_work_info *tmp_work_info;
	struct ch_info *rx_ch_info = (struct ch_info *)priv;

	LBSRV_INFO(
		"%s:%s:%s %s: end (Success) RX priv[%p] data[%p] size[%zu]\n",
		rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
		__func__, pkt_priv, (char *)ptr, size);
	tmp_work_info = kzalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
	if (!tmp_work_info) {
		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  rx_ch_info->name, __func__);
		return;
	}

	tmp_work_info->rx_ch_info = rx_ch_info;
	tmp_work_info->pkt_priv = (void *)pkt_priv;
	tmp_work_info->ptr = (void *)ptr;
	tmp_work_info->buf_type = LINEAR;
	tmp_work_info->size = size;
	INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
}

void glink_lpbsrv_notify_rxv(void *handle, const void *priv,
	const void *pkt_priv, void *ptr, size_t size,
	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size))
{
	struct rx_work_info *tmp_work_info;
	struct ch_info *rx_ch_info = (struct ch_info *)priv;

	LBSRV_INFO("%s:%s:%s %s: priv[%p] data[%p] size[%zu]\n",
		   rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
		   __func__, pkt_priv, (char *)ptr, size);
	tmp_work_info = kzalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
	if (!tmp_work_info) {
		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  rx_ch_info->name, __func__);
		return;
	}

	tmp_work_info->rx_ch_info = rx_ch_info;
	tmp_work_info->pkt_priv = (void *)pkt_priv;
	tmp_work_info->ptr = (void *)ptr;
	tmp_work_info->buf_type = VECTOR;
	tmp_work_info->size = size;
	tmp_work_info->vbuf_provider = vbuf_provider;
	tmp_work_info->pbuf_provider = pbuf_provider;
	INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
}

void glink_lpbsrv_notify_rx_tp(void *handle, const void *priv,
			       const void *pkt_priv, const void *ptr,
			       size_t size)
{
	struct rx_work_info *tmp_work_info;
	struct ch_info *rx_ch_info = (struct ch_info *)priv;

	LBSRV_INFO(
		"%s:%s:%s %s: end (Success) RX priv[%p] data[%p] size[%zu]\n",
		rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
		__func__, pkt_priv, (char *)ptr, size);
	tracer_pkt_log_event((void *)ptr, LOOPBACK_SRV_RX);
	tmp_work_info = kmalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
	if (!tmp_work_info) {
		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
			  rx_ch_info->transport, rx_ch_info->edge,
			  rx_ch_info->name, __func__);
		return;
	}

	tmp_work_info->rx_ch_info = rx_ch_info;
	tmp_work_info->pkt_priv = (void *)pkt_priv;
	tmp_work_info->ptr = (void *)ptr;
	tmp_work_info->tracer_pkt = true;
	tmp_work_info->buf_type = LINEAR;
	tmp_work_info->size = size;
	INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
}

void glink_lpbsrv_notify_tx_done(void *handle, const void *priv,
				 const void *pkt_priv, const void *ptr)
{
	struct ch_info *tx_done_ch_info = (struct ch_info *)priv;

	LBSRV_INFO("%s:%s:%s %s: end (Success) TX_DONE ptr[%p]\n",
		   tx_done_ch_info->transport, tx_done_ch_info->edge,
		   tx_done_ch_info->name, __func__, ptr);

	if (pkt_priv != (const void *)0xFFFFFFFF)
		glink_lbsrv_free_data((void *)ptr,
				      (uint32_t)(uintptr_t)pkt_priv);
}

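/**
 * glink_lpbsrv_notify_state() - Handle channel state change notifications
 * handle: Channel handle on which the event occurred.
 * priv: Private data, the channel's ch_info.
 * event: The channel state event.
 *
 * On a control channel, connection queues an rx intent for the next request
 * and disconnection triggers a re-open or close. On a data channel,
 * connection marks the channel fully opened and kicks off any deferred
 * intent queueing work, while disconnection clears the fully opened state
 * and requeues the open or close work as needed.
 */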
void glink_lpbsrv_notify_state(void *handle, const void *priv,
			       unsigned int event)
{
	int ret;
	uint32_t delay_ms;
	struct ch_info *tmp_ch_info = (struct ch_info *)priv;
	struct queue_rx_intent_work_info *tmp_work_info = NULL;

	LBSRV_INFO("%s:%s:%s %s: event[%d]\n",
		   tmp_ch_info->transport, tmp_ch_info->edge,
		   tmp_ch_info->name, __func__, event);
	if (tmp_ch_info->type == CTL) {
		if (event == GLINK_CONNECTED) {
			ret = glink_queue_rx_intent(handle,
						    priv, sizeof(struct req));
			LBSRV_INFO(
				"%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
				tmp_ch_info->transport,
				tmp_ch_info->edge,
				tmp_ch_info->name,
				__func__, sizeof(struct req), ret);
		} else if (event == GLINK_LOCAL_DISCONNECTED) {
			queue_delayed_work(glink_lbsrv_wq,
					   &tmp_ch_info->open_work,
					   msecs_to_jiffies(0));
		} else if (event == GLINK_REMOTE_DISCONNECTED)
			if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
				queue_delayed_work(glink_lbsrv_wq,
						   &tmp_ch_info->close_work,
						   0);
	} else if (tmp_ch_info->type == DATA) {
		if (event == GLINK_CONNECTED) {
			mutex_lock(&tmp_ch_info->ch_info_lock);
			tmp_ch_info->fully_opened = true;
			tmp_work_info = tmp_ch_info->queue_rx_intent_work_info;
			tmp_ch_info->queue_rx_intent_work_info = NULL;
			mutex_unlock(&tmp_ch_info->ch_info_lock);

			if (tmp_work_info) {
				delay_ms = calc_delay_ms(
						tmp_work_info->random_delay,
						tmp_work_info->delay_ms);
				queue_delayed_work(glink_lbsrv_wq,
						   &tmp_work_info->work,
						   msecs_to_jiffies(delay_ms));
			}
		} else if (event == GLINK_LOCAL_DISCONNECTED ||
			   event == GLINK_REMOTE_DISCONNECTED) {
			mutex_lock(&tmp_ch_info->ch_info_lock);
			tmp_ch_info->fully_opened = false;
			/*
			 * If the state has changed to LOCAL_DISCONNECTED,
			 * the channel has been fully closed and can now be
			 * re-opened. If the handle value is -EBUSY, an earlier
			 * open request failed because the channel was in the
			 * process of closing. Requeue the work from the open
			 * request.
			 */
			if (event == GLINK_LOCAL_DISCONNECTED &&
			    tmp_ch_info->handle == ERR_PTR(-EBUSY)) {
				queue_delayed_work(glink_lbsrv_wq,
						   &tmp_ch_info->open_work,
						   msecs_to_jiffies(0));
			}
			if (event == GLINK_REMOTE_DISCONNECTED)
				if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
					queue_delayed_work(
						glink_lbsrv_wq,
						&tmp_ch_info->close_work, 0);
			mutex_unlock(&tmp_ch_info->ch_info_lock);
		}
	}
}

bool glink_lpbsrv_rmt_rx_intent_req_cb(void *handle, const void *priv,
				       size_t sz)
{
	struct rmt_rx_intent_req_work_info *tmp_work_info;
	struct ch_info *tmp_ch_info = (struct ch_info *)priv;

	LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT to receive size[%zu]\n",
		   tmp_ch_info->transport, tmp_ch_info->edge,
		   tmp_ch_info->name, __func__, sz);

	tmp_work_info = kmalloc(sizeof(struct rmt_rx_intent_req_work_info),
				GFP_ATOMIC);
	if (!tmp_work_info) {
		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
			  tmp_ch_info->transport, tmp_ch_info->edge,
			  tmp_ch_info->name, __func__);
		return false;
	}
	tmp_work_info->req_intent_size = sz;
	tmp_work_info->work_ch_info = tmp_ch_info;

	INIT_DELAYED_WORK(&tmp_work_info->work,
			  glink_lbsrv_rmt_rx_intent_req_worker);
	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
	return true;
}

void glink_lpbsrv_notify_rx_sigs(void *handle, const void *priv,
				 uint32_t old_sigs, uint32_t new_sigs)
{
	LBSRV_INFO("%s: old_sigs[0x%x] new_sigs[0x%x]\n",
		   __func__, old_sigs, new_sigs);
	glink_sigs_set(handle, new_sigs);
}

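/**
 * glink_lbsrv_rx_worker() - Process a received packet
 * work: Pointer to the work item in the rx_work_info.
 *
 * On a control channel the request packet is copied out, the receive
 * buffer is returned, a fresh rx intent is queued for the next request and
 * the request is handled. On a data channel the packet is echoed back via
 * glink_lbsrv_handle_data().
 */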
static void glink_lbsrv_rx_worker(struct work_struct *work)
{
	struct delayed_work *rx_work = to_delayed_work(work);
	struct rx_work_info *tmp_rx_work_info =
		container_of(rx_work, struct rx_work_info, work);
	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
	struct req request_pkt;
	int ret;

	if (rx_ch_info->type == CTL) {
		request_pkt = *((struct req *)tmp_rx_work_info->ptr);
		glink_rx_done(rx_ch_info->handle, tmp_rx_work_info->ptr,
			      false);
		ret = glink_queue_rx_intent(rx_ch_info->handle, rx_ch_info,
					    sizeof(struct req));
		LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
			   rx_ch_info->transport, rx_ch_info->edge,
			   rx_ch_info->name, __func__,
			   sizeof(struct req), ret);
		glink_lbsrv_handle_req(rx_ch_info, request_pkt);
	} else {
		ret = glink_lbsrv_handle_data(tmp_rx_work_info);
	}
	kfree(tmp_rx_work_info);
}

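/**
 * glink_lbsrv_open_worker() - Open a loopback channel
 * work: Pointer to the open work item in the ch_info.
 *
 * Opens the channel with the loopback server's notification callbacks if
 * it is not already open. The result of glink_open() is stored in the
 * channel's handle.
 */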
static void glink_lbsrv_open_worker(struct work_struct *work)
{
	struct delayed_work *open_work = to_delayed_work(work);
	struct ch_info *tmp_ch_info =
		container_of(open_work, struct ch_info, open_work);
	struct glink_open_config open_cfg;

	LBSRV_INFO("%s: Open worker started\n", __func__);
	mutex_lock(&tmp_ch_info->ch_info_lock);
	if (!IS_ERR_OR_NULL(tmp_ch_info->handle)) {
		mutex_unlock(&tmp_ch_info->ch_info_lock);
		return;
	}

	memset(&open_cfg, 0, sizeof(struct glink_open_config));
	open_cfg.transport = tmp_ch_info->transport;
	open_cfg.edge = tmp_ch_info->edge;
	open_cfg.name = tmp_ch_info->name;

	open_cfg.notify_rx = glink_lpbsrv_notify_rx;
	if (tmp_ch_info->type == DATA)
		open_cfg.notify_rxv = glink_lpbsrv_notify_rxv;
	open_cfg.notify_tx_done = glink_lpbsrv_notify_tx_done;
	open_cfg.notify_state = glink_lpbsrv_notify_state;
	open_cfg.notify_rx_intent_req = glink_lpbsrv_rmt_rx_intent_req_cb;
	open_cfg.notify_rx_sigs = glink_lpbsrv_notify_rx_sigs;
	open_cfg.notify_rx_abort = NULL;
	open_cfg.notify_tx_abort = NULL;
	open_cfg.notify_rx_tracer_pkt = glink_lpbsrv_notify_rx_tp;
	open_cfg.priv = tmp_ch_info;

	tmp_ch_info->handle = glink_open(&open_cfg);
	if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
		LBSRV_ERR("%s:%s:%s %s: unable to open channel\n",
			  open_cfg.transport, open_cfg.edge, open_cfg.name,
			  __func__);
		mutex_unlock(&tmp_ch_info->ch_info_lock);
		return;
	}
	mutex_unlock(&tmp_ch_info->ch_info_lock);
	LBSRV_INFO("%s:%s:%s %s: Open complete\n", open_cfg.transport,
		   open_cfg.edge, open_cfg.name, __func__);
}

static void glink_lbsrv_close_worker(struct work_struct *work)
{
	struct delayed_work *close_work = to_delayed_work(work);
	struct ch_info *tmp_ch_info =
		container_of(close_work, struct ch_info, close_work);

	mutex_lock(&tmp_ch_info->ch_info_lock);
	if (!IS_ERR_OR_NULL(tmp_ch_info->handle)) {
		glink_close(tmp_ch_info->handle);
		tmp_ch_info->handle = NULL;
	}
	mutex_unlock(&tmp_ch_info->ch_info_lock);
	LBSRV_INFO("%s:%s:%s %s: Close complete\n", tmp_ch_info->transport,
		   tmp_ch_info->edge, tmp_ch_info->name, __func__);
}

static void glink_lbsrv_rmt_rx_intent_req_worker(struct work_struct *work)
{
	struct delayed_work *rmt_rx_intent_req_work = to_delayed_work(work);
	struct rmt_rx_intent_req_work_info *tmp_work_info =
		container_of(rmt_rx_intent_req_work,
			     struct rmt_rx_intent_req_work_info, work);
	struct ch_info *tmp_ch_info = tmp_work_info->work_ch_info;
	int ret;

	mutex_lock(&tmp_ch_info->ch_info_lock);
	if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
		mutex_unlock(&tmp_ch_info->ch_info_lock);
		LBSRV_ERR("%s:%s:%s %s: Invalid CH handle\n",
			  tmp_ch_info->transport,
			  tmp_ch_info->edge,
			  tmp_ch_info->name, __func__);
		kfree(tmp_work_info);
		return;
	}
	ret = glink_queue_rx_intent(tmp_ch_info->handle,
			(void *)tmp_ch_info, tmp_work_info->req_intent_size);
	mutex_unlock(&tmp_ch_info->ch_info_lock);
	LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
		   tmp_ch_info->transport, tmp_ch_info->edge,
		   tmp_ch_info->name, __func__,
		   tmp_work_info->req_intent_size, ret);
	if (ret < 0) {
		LBSRV_ERR("%s:%s:%s %s: Err %d q'ing intent size %zu\n",
			  tmp_ch_info->transport, tmp_ch_info->edge,
			  tmp_ch_info->name, __func__, ret,
			  tmp_work_info->req_intent_size);
	}
	kfree(tmp_work_info);
}

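/**
 * glink_lbsrv_queue_rx_intent_worker() - Queue the configured rx intents
 * work: Pointer to the work item in the queue_rx_intent_work_info.
 *
 * Queues rx intents of the configured size until the requested count is
 * reached, re-queueing itself when a non-zero delay is configured between
 * intents. Sends the response to the client when no response was sent at
 * request time.
 */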
static void glink_lbsrv_queue_rx_intent_worker(struct work_struct *work)
{
	struct delayed_work *queue_rx_intent_work = to_delayed_work(work);
	struct queue_rx_intent_work_info *tmp_work_info =
		container_of(queue_rx_intent_work,
			     struct queue_rx_intent_work_info, work);
	struct ch_info *tmp_ch_info = tmp_work_info->work_ch_info;
	int ret;
	uint32_t delay_ms;

	while (1) {
		mutex_lock(&tmp_ch_info->ch_info_lock);
		if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
			mutex_unlock(&tmp_ch_info->ch_info_lock);
			return;
		}

		ret = glink_queue_rx_intent(tmp_ch_info->handle,
				(void *)tmp_ch_info,
				tmp_work_info->intent_size);
		mutex_unlock(&tmp_ch_info->ch_info_lock);
		if (ret < 0) {
			LBSRV_ERR("%s:%s:%s %s: Err %d q'ing intent size %d\n",
				  tmp_ch_info->transport, tmp_ch_info->edge,
				  tmp_ch_info->name, __func__, ret,
				  tmp_work_info->intent_size);
			kfree(tmp_work_info);
			return;
		}
		LBSRV_INFO("%s:%s:%s %s: Queued rx intent of size %d\n",
			   tmp_ch_info->transport, tmp_ch_info->edge,
			   tmp_ch_info->name, __func__,
			   tmp_work_info->intent_size);
		tmp_work_info->num_intents--;
		if (!tmp_work_info->num_intents)
			break;

		delay_ms = calc_delay_ms(tmp_work_info->random_delay,
					 tmp_work_info->delay_ms);
		if (delay_ms) {
			queue_delayed_work(glink_lbsrv_wq,
					   &tmp_work_info->work,
					   msecs_to_jiffies(delay_ms));
			return;
		}
	}
	LBSRV_INFO("%s:%s:%s %s: Queued all intents. size:%d\n",
		   tmp_ch_info->transport, tmp_ch_info->edge,
		   tmp_ch_info->name, __func__, tmp_work_info->intent_size);

	if (!tmp_work_info->deferred && !tmp_work_info->random_delay &&
	    !tmp_work_info->delay_ms)
		glink_lbsrv_send_response(tmp_work_info->req_ch_info->handle,
					  tmp_work_info->req_id,
					  QUEUE_RX_INTENT_CONFIG, 0);
	kfree(tmp_work_info);
}

static void glink_lbsrv_rx_done_worker(struct work_struct *work)
{
	struct delayed_work *rx_done_work = to_delayed_work(work);
	struct rx_done_work_info *tmp_work_info =
		container_of(rx_done_work, struct rx_done_work_info, work);
	struct ch_info *tmp_ch_info = tmp_work_info->rx_done_ch_info;

	mutex_lock(&tmp_ch_info->ch_info_lock);
	if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
		glink_rx_done(tmp_ch_info->handle, tmp_work_info->ptr, false);
	mutex_unlock(&tmp_ch_info->ch_info_lock);
	kfree(tmp_work_info);
}

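/**
 * glink_lbsrv_tx_worker() - Transmit echoed data back to the client
 * work: Pointer to the work item in the tx_work_info.
 *
 * Transmits the copied data echo_count times with glink_tx() or
 * glink_txv() depending on the buffer type. A pkt_priv of 0xFFFFFFFF marks
 * all but the last echo so that the tx done callback does not free the
 * data early. An -EAGAIN return is retried; other errors abort the echo.
 */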
static void glink_lbsrv_tx_worker(struct work_struct *work)
{
	struct delayed_work *tx_work = to_delayed_work(work);
	struct tx_work_info *tmp_work_info =
		container_of(tx_work, struct tx_work_info, work);
	struct ch_info *tmp_ch_info = tmp_work_info->tx_ch_info;
	int ret;
	uint32_t delay_ms;
	uint32_t flags;

	LBSRV_INFO("%s:%s:%s %s: start TX data[%p] size[%zu]\n",
		   tmp_ch_info->transport, tmp_ch_info->edge,
		   tmp_ch_info->name, __func__, tmp_work_info->data,
		   tmp_work_info->size);
	while (1) {
		mutex_lock(&tmp_ch_info->ch_info_lock);
		if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
			mutex_unlock(&tmp_ch_info->ch_info_lock);
			return;
		}

		flags = 0;
		if (tmp_work_info->tracer_pkt) {
			flags |= GLINK_TX_TRACER_PKT;
			tracer_pkt_log_event(tmp_work_info->data,
					     LOOPBACK_SRV_TX);
		}
		if (tmp_work_info->buf_type == LINEAR)
			ret = glink_tx(tmp_ch_info->handle,
				(tmp_work_info->tx_config.echo_count > 1 ?
					(void *)0xFFFFFFFF :
					(void *)(uintptr_t)
						tmp_work_info->buf_type),
				(void *)tmp_work_info->data,
				tmp_work_info->size, flags);
		else
			ret = glink_txv(tmp_ch_info->handle,
				(tmp_work_info->tx_config.echo_count > 1 ?
					(void *)0xFFFFFFFF :
					(void *)(uintptr_t)
						tmp_work_info->buf_type),
				(void *)tmp_work_info->data,
				tmp_work_info->size,
				tmp_work_info->vbuf_provider,
				tmp_work_info->pbuf_provider,
				flags);
		mutex_unlock(&tmp_ch_info->ch_info_lock);
		if (ret < 0 && ret != -EAGAIN) {
			LBSRV_ERR("%s:%s:%s %s: TX Error %d\n",
				  tmp_ch_info->transport,
				  tmp_ch_info->edge,
				  tmp_ch_info->name, __func__, ret);
			glink_lbsrv_free_data(tmp_work_info->data,
					      tmp_work_info->buf_type);
			kfree(tmp_work_info);
			return;
		}
		if (ret != -EAGAIN)
			tmp_work_info->tx_config.echo_count--;
		if (!tmp_work_info->tx_config.echo_count)
			break;

		delay_ms = calc_delay_ms(tmp_work_info->tx_config.random_delay,
					 tmp_work_info->tx_config.delay_ms);
		if (delay_ms) {
			queue_delayed_work(glink_lbsrv_wq,
					   &tmp_work_info->work,
					   msecs_to_jiffies(delay_ms));
			return;
		}
	}
	kfree(tmp_work_info);
}

/**
 * glink_lbsrv_link_state_worker() - Function to handle link state updates
 * work: Pointer to the work item in the link_state_work_info.
 *
 * This worker function is scheduled when there is a link state update. Since
 * the loopback server registers for all transports, it receives all link state
 * updates about all transports that get registered in the system.
 */
static void glink_lbsrv_link_state_worker(struct work_struct *work)
{
	struct delayed_work *ls_work = to_delayed_work(work);
	struct link_state_work_info *ls_info =
		container_of(ls_work, struct link_state_work_info, work);
	struct ch_info *tmp_ch_info;

	if (ls_info->link_state == GLINK_LINK_STATE_UP) {
		LBSRV_INFO("%s: LINK_STATE_UP %s:%s\n",
			   __func__, ls_info->edge, ls_info->transport);
		mutex_lock(&ctl_ch_list_lock);
		list_for_each_entry(tmp_ch_info, &ctl_ch_list, list) {
			if (strcmp(tmp_ch_info->edge, ls_info->edge) ||
			    strcmp(tmp_ch_info->transport,
				   ls_info->transport))
				continue;
			queue_delayed_work(glink_lbsrv_wq,
					   &tmp_ch_info->open_work, 0);
		}
		mutex_unlock(&ctl_ch_list_lock);
	} else if (ls_info->link_state == GLINK_LINK_STATE_DOWN) {
		LBSRV_INFO("%s: LINK_STATE_DOWN %s:%s\n",
			   __func__, ls_info->edge, ls_info->transport);
	}
	kfree(ls_info);
}

/**
 * glink_lbsrv_link_state_cb() - Callback to receive link state updates
 * cb_info: Information containing the link & its state.
 * priv: Private data passed during the link state registration.
 *
 * This function is called by the G-Link core to notify the loopback server
 * of link state updates. It is registered with the G-Link core by the
 * loopback server through glink_register_link_state_cb().
 */
static void glink_lbsrv_link_state_cb(struct glink_link_state_cb_info *cb_info,
				      void *priv)
{
	struct link_state_work_info *ls_info;

	if (!cb_info)
		return;

	LBSRV_INFO("%s: %s:%s\n", __func__, cb_info->edge,
		   cb_info->transport);
	ls_info = kmalloc(sizeof(*ls_info), GFP_KERNEL);
	if (!ls_info) {
		LBSRV_ERR("%s: Error allocating link state info\n", __func__);
		return;
	}

	strlcpy(ls_info->edge, cb_info->edge, GLINK_NAME_SIZE);
	strlcpy(ls_info->transport, cb_info->transport, GLINK_NAME_SIZE);
	ls_info->link_state = cb_info->link_state;
	INIT_DELAYED_WORK(&ls_info->work, glink_lbsrv_link_state_worker);
	queue_delayed_work(glink_lbsrv_wq, &ls_info->work, 0);
}

static int glink_loopback_server_init(void)
{
	int i;
	int ret;
	struct ch_info *tmp_ch_info;

	glink_lbsrv_log_ctx = ipc_log_context_create(GLINK_LBSRV_NUM_LOG_PAGES,
						     "glink_lbsrv", 0);
	if (!glink_lbsrv_log_ctx)
		pr_err("%s: unable to create log context\n", __func__);

	glink_lbsrv_wq = create_singlethread_workqueue("glink_lbsrv");
	if (!glink_lbsrv_wq) {
		LBSRV_ERR("%s: Error creating glink_lbsrv_wq\n", __func__);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(ctl_ch_tbl); i++) {
		ret = create_ch_info(ctl_ch_tbl[i].name, ctl_ch_tbl[i].edge,
				     ctl_ch_tbl[i].transport, CTL,
				     &tmp_ch_info);
		if (ret < 0)
			LBSRV_ERR("%s: Error creating ctl ch index %d\n",
				  __func__, i);
	}
	glink_lbsrv_link_state_notif_handle = glink_register_link_state_cb(
						&glink_lbsrv_link_info, NULL);
	return 0;
}

module_init(glink_loopback_server_init);

MODULE_DESCRIPTION("MSM Generic Link (G-Link) Loopback Server");
MODULE_LICENSE("GPL v2");