/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/diagchar.h>
#include <linux/of.h>
#include <linux/kmemleak.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "diagchar.h"
#include "diagchar_hdlc.h"
#include "diagfwd_peripheral.h"
#include "diagfwd_cntl.h"
#include "diag_masks.h"
#include "diag_dci.h"
#include "diagfwd.h"
#include "diagfwd_socket.h"
#include "diag_mux.h"
#include "diag_ipc_logging.h"
#include "diagfwd_glink.h"

/*
 * Per-packet framing header used by peripherals that support apps-side
 * HDLC encoding. Field order and widths mirror the wire format consumed
 * by diag_add_hdlc_encoding(); do not reorder or pad.
 */
struct data_header {
	uint8_t control_char;	/* must equal CONTROL_CHAR (start-of-packet) */
	uint8_t version;	/* header version; only version 1 is accepted */
	uint16_t length;	/* payload byte count, excluding this header */
};
39
/*
 * Per-transport control-channel state used before the peripheral's
 * preferred transport is known; allocated in diagfwd_peripheral_init().
 */
static struct diagfwd_info *early_init_info[NUM_TRANSPORT];

/* Forward declarations for channel helpers defined later in this file */
static void diagfwd_queue_read(struct diagfwd_info *fwd_info);
static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info);
static void diagfwd_cntl_open(struct diagfwd_info *fwd_info);
static void diagfwd_cntl_close(struct diagfwd_info *fwd_info);
static void diagfwd_dci_open(struct diagfwd_info *fwd_info);
static void diagfwd_dci_close(struct diagfwd_info *fwd_info);
static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
				   unsigned char *buf, int len);
static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
				   unsigned char *buf, int len);
static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
				  unsigned char *buf, int len);
static void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info);
static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info);

/* Forwarding state for every channel, indexed as [type][peripheral] */
struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
57
/*
 * Channel callback tables bound in diagfwd_register()/
 * diagfwd_cntl_register(). DATA/CMD channels need no open/close hooks;
 * control and DCI channels notify their respective layers on state change.
 */
static struct diag_channel_ops data_ch_ops = {
	.open = NULL,
	.close = NULL,
	.read_done = diagfwd_data_read_done
};

static struct diag_channel_ops cntl_ch_ops = {
	.open = diagfwd_cntl_open,
	.close = diagfwd_cntl_close,
	.read_done = diagfwd_cntl_read_done
};

static struct diag_channel_ops dci_ch_ops = {
	.open = diagfwd_dci_open,
	.close = diagfwd_dci_close,
	.read_done = diagfwd_dci_read_done
};
75
/* Notify the control layer that this control channel is now open. */
static void diagfwd_cntl_open(struct diagfwd_info *fwd_info)
{
	if (fwd_info)
		diag_cntl_channel_open(fwd_info);
}
82
/* Notify the control layer that this control channel has closed. */
static void diagfwd_cntl_close(struct diagfwd_info *fwd_info)
{
	if (fwd_info)
		diag_cntl_channel_close(fwd_info);
}
89
90static void diagfwd_dci_open(struct diagfwd_info *fwd_info)
91{
92 if (!fwd_info)
93 return;
94
95 diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
96 DIAG_STATUS_OPEN, DCI_LOCAL_PROC);
97}
98
99static void diagfwd_dci_close(struct diagfwd_info *fwd_info)
100{
101 if (!fwd_info)
102 return;
103
104 diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
105 DIAG_STATUS_CLOSED, DCI_LOCAL_PROC);
106}
107
108static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
109 unsigned char *buf, int len)
110{
111 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
112 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
113 struct data_header *header;
114 int header_size = sizeof(struct data_header);
115 uint8_t *end_control_char = NULL;
116 uint8_t *payload = NULL;
117 uint8_t *temp_buf = NULL;
118 uint8_t *temp_encode_buf = NULL;
119 int src_pkt_len;
120 int encoded_pkt_length;
121 int max_size;
122 int total_processed = 0;
123 int bytes_remaining;
124 int err = 0;
125 uint8_t loop_count = 0;
126
127 if (!dest_buf || !dest_len || !buf)
128 return -EIO;
129
130 temp_buf = buf;
131 temp_encode_buf = dest_buf;
132 bytes_remaining = *dest_len;
133
134 while (total_processed < len) {
135 loop_count++;
136 header = (struct data_header *)temp_buf;
137 /* Perform initial error checking */
138 if (header->control_char != CONTROL_CHAR ||
139 header->version != 1) {
140 err = -EINVAL;
141 break;
142 }
143
144 if (header->length >= bytes_remaining)
145 break;
146
147 payload = temp_buf + header_size;
148 end_control_char = payload + header->length;
149 if (*end_control_char != CONTROL_CHAR) {
150 err = -EINVAL;
151 break;
152 }
153
154 max_size = 2 * header->length + 3;
155 if (bytes_remaining < max_size) {
156 err = -EINVAL;
157 break;
158 }
159
160 /* Prepare for encoding the data */
161 send.state = DIAG_STATE_START;
162 send.pkt = payload;
163 send.last = (void *)(payload + header->length - 1);
164 send.terminate = 1;
165
166 enc.dest = temp_encode_buf;
167 enc.dest_last = (void *)(temp_encode_buf + max_size);
168 enc.crc = 0;
169 diag_hdlc_encode(&send, &enc);
170
171 /* Prepare for next packet */
172 src_pkt_len = (header_size + header->length + 1);
173 total_processed += src_pkt_len;
174 temp_buf += src_pkt_len;
175
176 encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
177 bytes_remaining -= encoded_pkt_length;
178 temp_encode_buf = enc.dest;
179 }
180
181 *dest_len = (int)(temp_encode_buf - dest_buf);
182
183 return err;
184}
185
186static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
187{
188 uint32_t max_size = 0;
189 unsigned char *temp_buf = NULL;
190
191 if (!buf || len == 0)
192 return -EINVAL;
193
194 max_size = (2 * len) + 3;
195 if (max_size > PERIPHERAL_BUF_SZ) {
196 if (max_size > MAX_PERIPHERAL_HDLC_BUF_SZ) {
197 pr_err("diag: In %s, max_size is going beyond limit %d\n",
198 __func__, max_size);
199 max_size = MAX_PERIPHERAL_HDLC_BUF_SZ;
200 }
201
202 if (buf->len < max_size) {
203 temp_buf = krealloc(buf->data, max_size +
204 APF_DIAG_PADDING,
205 GFP_KERNEL);
206 if (!temp_buf)
207 return -ENOMEM;
208 buf->data = temp_buf;
209 buf->len = max_size;
210 }
211 }
212
213 return buf->len;
214}
215
/*
 * diagfwd_data_read_done - completion handler for DATA/CMD channel reads.
 *
 * Matches @buf back to the owning diagfwd buffer, HDLC-encodes it if the
 * apps side needs that, and pushes the result to the mux. On any failure
 * path the wakeup source is released, the buffer is returned via
 * diagfwd_write_done(), and a fresh read is queued.
 *
 * Locking: takes driver->hdlc_disable_mutex then fwd_info->data_mutex for
 * the whole match/encode/write sequence; the two goto-end unlock orders
 * must stay consistent with the lock order here.
 */
static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
				   unsigned char *buf, int len)
{
	int err = 0;
	int write_len = 0;
	unsigned char *write_buf = NULL;
	struct diagfwd_buf_t *temp_buf = NULL;
	struct diag_md_session_t *session_info = NULL;
	uint8_t hdlc_disabled = 0;

	if (!fwd_info || !buf || len <= 0) {
		diag_ws_release();
		return;
	}

	/* Only DATA and CMD channels are routed through this handler */
	switch (fwd_info->type) {
	case TYPE_DATA:
	case TYPE_CMD:
		break;
	default:
		pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
				   __func__, fwd_info->type,
				   fwd_info->peripheral);
		diag_ws_release();
		return;
	}

	mutex_lock(&driver->hdlc_disable_mutex);
	mutex_lock(&fwd_info->data_mutex);
	/* A memory-device session may override the global HDLC setting */
	session_info = diag_md_session_get_peripheral(fwd_info->peripheral);
	if (session_info)
		hdlc_disabled = session_info->hdlc_disabled;
	else
		hdlc_disabled = driver->hdlc_disabled;

	if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
		/* Peripheral already sent HDLC data: forward as-is */
		if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
			temp_buf = fwd_info->buf_1;
			write_buf = fwd_info->buf_1->data;
		} else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf) {
			temp_buf = fwd_info->buf_2;
			write_buf = fwd_info->buf_2->data;
		} else {
			pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
			       __func__, buf, fwd_info->peripheral,
			       fwd_info->type);
			goto end;
		}
		write_len = len;
	} else if (hdlc_disabled) {
		/* The data is raw and and on APPS side HDLC is disabled */
		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
			temp_buf = fwd_info->buf_1;
		} else if (fwd_info->buf_2 &&
			   fwd_info->buf_2->data_raw == buf) {
			temp_buf = fwd_info->buf_2;
		} else {
			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
			       __func__, buf, fwd_info->peripheral,
			       fwd_info->type);
			goto end;
		}
		if (len > PERIPHERAL_BUF_SZ) {
			pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
			       __func__, len, fwd_info->peripheral,
			       fwd_info->type);
			goto end;
		}
		write_len = len;
		write_buf = buf;
	} else {
		/* Raw data, apps must add HDLC encoding before muxing */
		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
			temp_buf = fwd_info->buf_1;
		} else if (fwd_info->buf_2 &&
			   fwd_info->buf_2->data_raw == buf) {
			temp_buf = fwd_info->buf_2;
		} else {
			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
			       __func__, buf, fwd_info->peripheral,
			       fwd_info->type);
			goto end;
		}
		/* Grow the encode buffer for worst-case HDLC expansion */
		write_len = check_bufsize_for_encoding(temp_buf, len);
		if (write_len <= 0) {
			pr_err("diag: error in checking buf for encoding\n");
			goto end;
		}
		write_buf = temp_buf->data;
		err = diag_add_hdlc_encoding(write_buf, &write_len, buf, len);
		if (err) {
			pr_err("diag: error in adding hdlc encoding\n");
			goto end;
		}
	}

	if (write_len > 0) {
		/* temp_buf->ctxt lets the mux hand the buffer back later */
		err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
				     temp_buf->ctxt);
		if (err) {
			pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
					   __func__, err);
			goto end;
		}
	}
	mutex_unlock(&fwd_info->data_mutex);
	mutex_unlock(&driver->hdlc_disable_mutex);
	diagfwd_queue_read(fwd_info);
	return;

end:
	diag_ws_release();
	mutex_unlock(&fwd_info->data_mutex);
	mutex_unlock(&driver->hdlc_disable_mutex);
	if (temp_buf) {
		/* Return the matched buffer so the channel isn't starved */
		diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
				   GET_BUF_NUM(temp_buf->ctxt));
	}
	diagfwd_queue_read(fwd_info);
}
335
336static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
337 unsigned char *buf, int len)
338{
339 if (!fwd_info) {
340 diag_ws_release();
341 return;
342 }
343
344 if (fwd_info->type != TYPE_CNTL) {
345 pr_err("diag: In %s, invalid type %d for peripheral %d\n",
346 __func__, fwd_info->type, fwd_info->peripheral);
347 diag_ws_release();
348 return;
349 }
350
351 diag_ws_on_read(DIAG_WS_MUX, len);
352 diag_cntl_process_read_data(fwd_info, buf, len);
353 /*
354 * Control packets are not consumed by the clients. Mimic
355 * consumption by setting and clearing the wakeup source copy_count
356 * explicitly.
357 */
358 diag_ws_on_copy_fail(DIAG_WS_MUX);
359 /* Reset the buffer in_busy value after processing the data */
360 if (fwd_info->buf_1)
361 atomic_set(&fwd_info->buf_1->in_busy, 0);
362
363 diagfwd_queue_read(fwd_info);
364 diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]);
365 diagfwd_queue_read(&peripheral_info[TYPE_CMD][fwd_info->peripheral]);
366}
367
368static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
369 unsigned char *buf, int len)
370{
371 if (!fwd_info)
372 return;
373
374 switch (fwd_info->type) {
375 case TYPE_DCI:
376 case TYPE_DCI_CMD:
377 break;
378 default:
379 pr_err("diag: In %s, invalid type %d for peripheral %d\n",
380 __func__, fwd_info->type, fwd_info->peripheral);
381 return;
382 }
383
384 diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
385 /* Reset the buffer in_busy value after processing the data */
386 if (fwd_info->buf_1)
387 atomic_set(&fwd_info->buf_1->in_busy, 0);
388
389 diagfwd_queue_read(fwd_info);
390}
391
392static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
393 unsigned char *buf)
394{
395 if (!fwd_info || !buf)
396 return;
397
398 if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
399 if (fwd_info->buf_1 && fwd_info->buf_1->data == buf)
400 atomic_set(&fwd_info->buf_1->in_busy, 0);
401 else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf)
402 atomic_set(&fwd_info->buf_2->in_busy, 0);
403 } else {
404 if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf)
405 atomic_set(&fwd_info->buf_1->in_busy, 0);
406 else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
407 atomic_set(&fwd_info->buf_2->in_busy, 0);
408 }
409}
410
411int diagfwd_peripheral_init(void)
412{
413 uint8_t peripheral;
414 uint8_t transport;
415 uint8_t type;
416 struct diagfwd_info *fwd_info = NULL;
417
418 for (transport = 0; transport < NUM_TRANSPORT; transport++) {
419 early_init_info[transport] = kzalloc(
420 sizeof(struct diagfwd_info) * NUM_PERIPHERALS,
421 GFP_KERNEL);
422 if (!early_init_info[transport])
423 return -ENOMEM;
424 kmemleak_not_leak(early_init_info[transport]);
425 }
426
427 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
428 for (transport = 0; transport < NUM_TRANSPORT; transport++) {
429 fwd_info = &early_init_info[transport][peripheral];
430 fwd_info->peripheral = peripheral;
431 fwd_info->type = TYPE_CNTL;
432 fwd_info->transport = transport;
433 fwd_info->ctxt = NULL;
434 fwd_info->p_ops = NULL;
435 fwd_info->ch_open = 0;
436 fwd_info->inited = 1;
437 fwd_info->read_bytes = 0;
438 fwd_info->write_bytes = 0;
439 spin_lock_init(&fwd_info->buf_lock);
440 spin_lock_init(&fwd_info->write_buf_lock);
441 mutex_init(&fwd_info->data_mutex);
442 }
443 }
444
445 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
446 for (type = 0; type < NUM_TYPES; type++) {
447 fwd_info = &peripheral_info[type][peripheral];
448 fwd_info->peripheral = peripheral;
449 fwd_info->type = type;
450 fwd_info->ctxt = NULL;
451 fwd_info->p_ops = NULL;
452 fwd_info->ch_open = 0;
453 fwd_info->read_bytes = 0;
454 fwd_info->write_bytes = 0;
455 spin_lock_init(&fwd_info->buf_lock);
456 spin_lock_init(&fwd_info->write_buf_lock);
457 mutex_init(&fwd_info->data_mutex);
458 /*
459 * This state shouldn't be set for Control channels
460 * during initialization. This is set when the feature
461 * mask is received for the first time.
462 */
463 if (type != TYPE_CNTL)
464 fwd_info->inited = 1;
465 }
466 driver->diagfwd_data[peripheral] =
467 &peripheral_info[TYPE_DATA][peripheral];
468 driver->diagfwd_cntl[peripheral] =
469 &peripheral_info[TYPE_CNTL][peripheral];
470 driver->diagfwd_dci[peripheral] =
471 &peripheral_info[TYPE_DCI][peripheral];
472 driver->diagfwd_cmd[peripheral] =
473 &peripheral_info[TYPE_CMD][peripheral];
474 driver->diagfwd_dci_cmd[peripheral] =
475 &peripheral_info[TYPE_DCI_CMD][peripheral];
476 }
477
478 if (driver->supports_sockets)
479 diag_socket_init();
480 diag_glink_init();
481
482 return 0;
483}
484
485void diagfwd_peripheral_exit(void)
486{
487 uint8_t peripheral;
488 uint8_t type;
489 struct diagfwd_info *fwd_info = NULL;
490
491 diag_socket_exit();
492
493 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
494 for (type = 0; type < NUM_TYPES; type++) {
495 fwd_info = &peripheral_info[type][peripheral];
496 fwd_info->ctxt = NULL;
497 fwd_info->p_ops = NULL;
498 fwd_info->ch_open = 0;
499 diagfwd_buffers_exit(fwd_info);
500 }
501 }
502
503 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
504 driver->diagfwd_data[peripheral] = NULL;
505 driver->diagfwd_cntl[peripheral] = NULL;
506 driver->diagfwd_dci[peripheral] = NULL;
507 driver->diagfwd_cmd[peripheral] = NULL;
508 driver->diagfwd_dci_cmd[peripheral] = NULL;
509 }
510
511 kfree(early_init_info);
512}
513
514int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
515 struct diag_peripheral_ops *ops,
516 struct diagfwd_info **fwd_ctxt)
517{
518 struct diagfwd_info *fwd_info = NULL;
519
520 if (!ctxt || !ops)
521 return -EIO;
522
523 if (transport >= NUM_TRANSPORT || peripheral >= NUM_PERIPHERALS)
524 return -EINVAL;
525
526 fwd_info = &early_init_info[transport][peripheral];
527 *fwd_ctxt = &early_init_info[transport][peripheral];
528 fwd_info->ctxt = ctxt;
529 fwd_info->p_ops = ops;
530 fwd_info->c_ops = &cntl_ch_ops;
531
532 return 0;
533}
534
535int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
536 void *ctxt, struct diag_peripheral_ops *ops,
537 struct diagfwd_info **fwd_ctxt)
538{
539 struct diagfwd_info *fwd_info = NULL;
540
541 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES ||
542 !ctxt || !ops || transport >= NUM_TRANSPORT) {
543 pr_err("diag: In %s, returning error\n", __func__);
544 return -EIO;
545 }
546
547 fwd_info = &peripheral_info[type][peripheral];
548 *fwd_ctxt = &peripheral_info[type][peripheral];
549 fwd_info->ctxt = ctxt;
550 fwd_info->p_ops = ops;
551 fwd_info->transport = transport;
552 fwd_info->ch_open = 0;
553
554 switch (type) {
555 case TYPE_DATA:
556 case TYPE_CMD:
557 fwd_info->c_ops = &data_ch_ops;
558 break;
559 case TYPE_DCI:
560 case TYPE_DCI_CMD:
561 fwd_info->c_ops = &dci_ch_ops;
562 break;
563 default:
564 pr_err("diag: In %s, invalid type: %d\n", __func__, type);
565 return -EINVAL;
566 }
567
568 if (atomic_read(&fwd_info->opened) &&
569 fwd_info->p_ops && fwd_info->p_ops->open) {
570 /*
571 * The registration can happen late, like in the case of
572 * sockets. fwd_info->opened reflects diag_state. Propagate the
573 * state to the peipherals.
574 */
575 fwd_info->p_ops->open(fwd_info->ctxt);
576 }
577
578 return 0;
579}
580
581void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt)
582{
583 struct diagfwd_info *fwd_info = NULL;
584
585 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !ctxt)
586 return;
587
588 fwd_info = &peripheral_info[type][peripheral];
589 if (fwd_info->ctxt != ctxt) {
590 pr_err("diag: In %s, unable to find a match for p: %d t: %d\n",
591 __func__, peripheral, type);
592 return;
593 }
594 fwd_info->ctxt = NULL;
595 fwd_info->p_ops = NULL;
596 fwd_info->ch_open = 0;
597 diagfwd_buffers_exit(fwd_info);
598
599 switch (type) {
600 case TYPE_DATA:
601 driver->diagfwd_data[peripheral] = NULL;
602 break;
603 case TYPE_CNTL:
604 driver->diagfwd_cntl[peripheral] = NULL;
605 break;
606 case TYPE_DCI:
607 driver->diagfwd_dci[peripheral] = NULL;
608 break;
609 case TYPE_CMD:
610 driver->diagfwd_cmd[peripheral] = NULL;
611 break;
612 case TYPE_DCI_CMD:
613 driver->diagfwd_dci_cmd[peripheral] = NULL;
614 break;
615 }
616}
617
/*
 * diagfwd_close_transport - a peripheral has picked the other transport;
 * close @transport's early-init control channel and migrate its state to
 * the regular control channel backed by the surviving transport, then
 * re-initialize the closed transport for potential future use.
 *
 * Runs under driver->diagfwd_channel_mutex so the copy of fwd_info into
 * dest_info is atomic with respect to channel open/close.
 */
void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
{
	struct diagfwd_info *fwd_info = NULL;
	struct diagfwd_info *dest_info = NULL;
	int (*init_fn)(uint8_t) = NULL;
	void (*invalidate_fn)(void *, struct diagfwd_info *) = NULL;
	int (*check_channel_state)(void *) = NULL;
	uint8_t transport_open = 0;
	int i = 0;

	if (peripheral >= NUM_PERIPHERALS)
		return;

	/* The transport being closed determines which one survives */
	switch (transport) {
	case TRANSPORT_GLINK:
		transport_open = TRANSPORT_SOCKET;
		init_fn = diag_socket_init_peripheral;
		invalidate_fn = diag_socket_invalidate;
		check_channel_state = diag_socket_check_state;
		break;
	case TRANSPORT_SOCKET:
		transport_open = TRANSPORT_GLINK;
		init_fn = diag_glink_init_peripheral;
		invalidate_fn = diag_glink_invalidate;
		check_channel_state = diag_glink_check_state;
		break;
	default:
		return;

	}

	mutex_lock(&driver->diagfwd_channel_mutex);
	/* Close the early-init channel on the losing transport */
	fwd_info = &early_init_info[transport][peripheral];
	if (fwd_info->p_ops && fwd_info->p_ops->close)
		fwd_info->p_ops->close(fwd_info->ctxt);
	/* Copy the winner's early-init state into the real CNTL channel */
	fwd_info = &early_init_info[transport_open][peripheral];
	dest_info = &peripheral_info[TYPE_CNTL][peripheral];
	dest_info->inited = 1;
	dest_info->ctxt = fwd_info->ctxt;
	dest_info->p_ops = fwd_info->p_ops;
	dest_info->c_ops = fwd_info->c_ops;
	dest_info->ch_open = fwd_info->ch_open;
	dest_info->read_bytes = fwd_info->read_bytes;
	dest_info->write_bytes = fwd_info->write_bytes;
	dest_info->inited = fwd_info->inited;
	dest_info->buf_1 = fwd_info->buf_1;
	dest_info->buf_2 = fwd_info->buf_2;
	dest_info->transport = fwd_info->transport;
	/* Let the transport re-point its context at the new channel */
	invalidate_fn(dest_info->ctxt, dest_info);
	for (i = 0; i < NUM_WRITE_BUFFERS; i++)
		dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
	if (!check_channel_state(dest_info->ctxt))
		diagfwd_late_open(dest_info);
	diagfwd_cntl_open(dest_info);
	/* Re-arm the closed transport so it can be used again later */
	init_fn(peripheral);
	mutex_unlock(&driver->diagfwd_channel_mutex);
	diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
	diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
}
677
678void *diagfwd_request_write_buf(struct diagfwd_info *fwd_info)
679{
680 void *buf = NULL;
681 int index;
682 unsigned long flags;
683
684 spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
685 for (index = 0 ; index < NUM_WRITE_BUFFERS; index++) {
686 if (!atomic_read(&(fwd_info->buf_ptr[index]->in_busy))) {
687 atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 1);
688 buf = fwd_info->buf_ptr[index]->data;
689 if (!buf)
690 return NULL;
691 break;
692 }
693 }
694 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
695 return buf;
696}
697
/*
 * diagfwd_write - send @len bytes to a peripheral channel, retrying
 * transient failures up to three times.
 *
 * CMD traffic is folded onto the DATA channel when the peripheral has not
 * advertised a separate command/response channel. GLINK transports need
 * the data copied into a driver-owned write buffer that stays valid until
 * the transport completes; that buffer is released here on failure.
 *
 * Returns 0 on success, a negative errno otherwise (0 is also returned
 * when the feature mask handshake has not completed yet — the write is
 * silently dropped in that case).
 */
int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
{
	struct diagfwd_info *fwd_info = NULL;
	int err = 0;
	uint8_t retry_count = 0;
	uint8_t max_retries = 3;
	void *buf_ptr = NULL;

	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
		return -EINVAL;

	if (type == TYPE_CMD || type == TYPE_DCI_CMD) {
		/* Command routing depends on the negotiated feature mask */
		if (!driver->feature[peripheral].rcvd_feature_mask ||
		    !driver->feature[peripheral].sent_feature_mask) {
			pr_debug_ratelimited("diag: In %s, feature mask for peripheral: %d not received or sent yet\n",
					     __func__, peripheral);
			return 0;
		}
		if (!driver->feature[peripheral].separate_cmd_rsp)
			type = (type == TYPE_CMD) ? TYPE_DATA : TYPE_DCI;
	}

	fwd_info = &peripheral_info[type][peripheral];
	if (!fwd_info->inited || !atomic_read(&fwd_info->opened))
		return -ENODEV;

	if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
		return -EIO;

	if (fwd_info->transport == TRANSPORT_GLINK) {
		/* GLINK needs a buffer that outlives this call */
		buf_ptr = diagfwd_request_write_buf(fwd_info);
		if (buf_ptr)
			memcpy(buf_ptr, buf, len);
		else {
			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
				 "diag: buffer not found for writing\n");
			return -EIO;
		}
	} else
		buf_ptr = buf;

	/* Retry transient errors; -ENODEV means the channel is gone */
	while (retry_count < max_retries) {
		err = 0;
		err = fwd_info->p_ops->write(fwd_info->ctxt, buf_ptr, len);
		if (err && err != -ENODEV) {
			usleep_range(100000, 101000);
			retry_count++;
			continue;
		}
		break;
	}

	if (!err)
		fwd_info->write_bytes += len;
	else
		/* Give the claimed GLINK write buffer back on failure */
		if (fwd_info->transport == TRANSPORT_GLINK)
			diagfwd_write_buffer_done(fwd_info, buf_ptr);
	return err;
}
757
758static void __diag_fwd_open(struct diagfwd_info *fwd_info)
759{
760 if (!fwd_info)
761 return;
762
763 atomic_set(&fwd_info->opened, 1);
764 if (!fwd_info->inited)
765 return;
766
767 if (fwd_info->buf_1)
768 atomic_set(&fwd_info->buf_1->in_busy, 0);
769 if (fwd_info->buf_2)
770 atomic_set(&fwd_info->buf_2->in_busy, 0);
771
772 if (fwd_info->p_ops && fwd_info->p_ops->open)
773 fwd_info->p_ops->open(fwd_info->ctxt);
774
775 diagfwd_queue_read(fwd_info);
776}
777
778void diagfwd_early_open(uint8_t peripheral)
779{
780 uint8_t transport = 0;
781 struct diagfwd_info *fwd_info = NULL;
782
783 if (peripheral >= NUM_PERIPHERALS)
784 return;
785
786 for (transport = 0; transport < NUM_TRANSPORT; transport++) {
787 fwd_info = &early_init_info[transport][peripheral];
788 __diag_fwd_open(fwd_info);
789 }
790}
791
792void diagfwd_open(uint8_t peripheral, uint8_t type)
793{
794 struct diagfwd_info *fwd_info = NULL;
795
796 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
797 return;
798
799 fwd_info = &peripheral_info[type][peripheral];
800 __diag_fwd_open(fwd_info);
801}
802
/* Same as diagfwd_open(), but for an already-resolved channel pointer. */
void diagfwd_late_open(struct diagfwd_info *fwd_info)
{
	__diag_fwd_open(fwd_info);
}
807
808void diagfwd_close(uint8_t peripheral, uint8_t type)
809{
810 struct diagfwd_info *fwd_info = NULL;
811
812 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
813 return;
814
815 fwd_info = &peripheral_info[type][peripheral];
816 atomic_set(&fwd_info->opened, 0);
817 if (!fwd_info->inited)
818 return;
819
820 if (fwd_info->p_ops && fwd_info->p_ops->close)
821 fwd_info->p_ops->close(fwd_info->ctxt);
822
823 if (fwd_info->buf_1)
824 atomic_set(&fwd_info->buf_1->in_busy, 1);
825 /*
826 * Only Data channels have two buffers. Set both the buffers
827 * to busy on close.
828 */
829 if (fwd_info->buf_2)
830 atomic_set(&fwd_info->buf_2->in_busy, 1);
831}
832
833int diagfwd_channel_open(struct diagfwd_info *fwd_info)
834{
835 int i;
836
837 if (!fwd_info)
838 return -EIO;
839
840 if (!fwd_info->inited) {
841 pr_debug("diag: In %s, channel is not inited, p: %d, t: %d\n",
842 __func__, fwd_info->peripheral, fwd_info->type);
843 return -EINVAL;
844 }
845
846 if (fwd_info->ch_open) {
847 pr_debug("diag: In %s, channel is already open, p: %d, t: %d\n",
848 __func__, fwd_info->peripheral, fwd_info->type);
849 return 0;
850 }
851
852 fwd_info->ch_open = 1;
853 diagfwd_buffers_init(fwd_info);
854 diagfwd_write_buffers_init(fwd_info);
855 if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
856 fwd_info->c_ops->open(fwd_info);
857 for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
858 if (fwd_info->buf_ptr[i])
859 atomic_set(&fwd_info->buf_ptr[i]->in_busy, 0);
860 }
861 diagfwd_queue_read(fwd_info);
862 DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered opened\n",
863 fwd_info->peripheral, fwd_info->type);
864
865 if (atomic_read(&fwd_info->opened)) {
866 if (fwd_info->p_ops && fwd_info->p_ops->open)
867 fwd_info->p_ops->open(fwd_info->ctxt);
868 }
869
870 return 0;
871}
872
873int diagfwd_channel_close(struct diagfwd_info *fwd_info)
874{
875 int i;
876
877 if (!fwd_info)
878 return -EIO;
879
880 fwd_info->ch_open = 0;
881 if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
882 fwd_info->c_ops->close(fwd_info);
883
884 if (fwd_info->buf_1 && fwd_info->buf_1->data)
885 atomic_set(&fwd_info->buf_1->in_busy, 0);
886 if (fwd_info->buf_2 && fwd_info->buf_2->data)
887 atomic_set(&fwd_info->buf_2->in_busy, 0);
888
889 for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
890 if (fwd_info->buf_ptr[i])
891 atomic_set(&fwd_info->buf_ptr[i]->in_busy, 1);
892 }
893 DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
894 fwd_info->peripheral, fwd_info->type);
895
896 return 0;
897}
898
899int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
900 unsigned char *buf, uint32_t len)
901{
902 if (!fwd_info) {
903 diag_ws_release();
904 return -EIO;
905 }
906
907 /*
908 * Diag peripheral layers should send len as 0 if there is any error
909 * in reading data from the transport. Use this information to reset the
910 * in_busy flags. No need to queue read in this case.
911 */
912 if (len == 0) {
913 diagfwd_reset_buffers(fwd_info, buf);
914 diag_ws_release();
915 return 0;
916 }
917
918 if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->read_done)
919 fwd_info->c_ops->read_done(fwd_info, buf, len);
920 fwd_info->read_bytes += len;
921
922 return 0;
923}
924
925void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
926{
927 struct diagfwd_info *fwd_info = NULL;
928
929 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
930 return;
931
932 fwd_info = &peripheral_info[type][peripheral];
933 if (ctxt == 1 && fwd_info->buf_1)
934 atomic_set(&fwd_info->buf_1->in_busy, 0);
935 else if (ctxt == 2 && fwd_info->buf_2)
936 atomic_set(&fwd_info->buf_2->in_busy, 0);
937 else
938 pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
939
940 diagfwd_queue_read(fwd_info);
941}
942
943int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr)
944{
945
946 int found = 0;
947 int index = 0;
948 unsigned long flags;
949
950 if (!fwd_info || !ptr)
951 return found;
952 spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
953 for (index = 0; index < NUM_WRITE_BUFFERS; index++) {
954 if (fwd_info->buf_ptr[index]->data == ptr) {
955 atomic_set(&fwd_info->buf_ptr[index]->in_busy, 0);
956 found = 1;
957 break;
958 }
959 }
960 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
961 return found;
962}
963
/*
 * diagfwd_channel_read - issue a read on the first free buffer of a
 * channel. Claims the buffer (in_busy = 1) before handing it to the
 * transport; releases the claim and the wakeup source if the read cannot
 * be issued. For DATA/CMD channels of peripherals that support apps-side
 * HDLC encoding, the raw (pre-encode) buffer is used.
 */
void diagfwd_channel_read(struct diagfwd_info *fwd_info)
{
	int err = 0;
	uint32_t read_len = 0;
	unsigned char *read_buf = NULL;
	struct diagfwd_buf_t *temp_buf = NULL;

	if (!fwd_info) {
		diag_ws_release();
		return;
	}

	if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
		pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n",
			 __func__, fwd_info->peripheral, fwd_info->type,
			 fwd_info->inited, atomic_read(&fwd_info->opened),
			 fwd_info->ch_open);
		diag_ws_release();
		return;
	}

	/* Claim whichever buffer is free; prefer buf_1 */
	if (fwd_info->buf_1 && !atomic_read(&fwd_info->buf_1->in_busy)) {
		temp_buf = fwd_info->buf_1;
		atomic_set(&temp_buf->in_busy, 1);
		if (driver->feature[fwd_info->peripheral].encode_hdlc &&
		    (fwd_info->type == TYPE_DATA ||
		     fwd_info->type == TYPE_CMD)) {
			read_buf = fwd_info->buf_1->data_raw;
			read_len = fwd_info->buf_1->len_raw;
		} else {
			read_buf = fwd_info->buf_1->data;
			read_len = fwd_info->buf_1->len;
		}
	} else if (fwd_info->buf_2 && !atomic_read(&fwd_info->buf_2->in_busy)) {
		temp_buf = fwd_info->buf_2;
		atomic_set(&temp_buf->in_busy, 1);
		if (driver->feature[fwd_info->peripheral].encode_hdlc &&
		    (fwd_info->type == TYPE_DATA ||
		     fwd_info->type == TYPE_CMD)) {
			read_buf = fwd_info->buf_2->data_raw;
			read_len = fwd_info->buf_2->len_raw;
		} else {
			read_buf = fwd_info->buf_2->data;
			read_len = fwd_info->buf_2->len;
		}
	} else {
		pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
			 __func__, fwd_info->peripheral, fwd_info->type);
	}

	if (!read_buf) {
		diag_ws_release();
		return;
	}

	if (!(fwd_info->p_ops && fwd_info->p_ops->read && fwd_info->ctxt))
		goto fail_return;

	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "issued a read p: %d t: %d buf: %pK\n",
		 fwd_info->peripheral, fwd_info->type, read_buf);
	err = fwd_info->p_ops->read(fwd_info->ctxt, read_buf, read_len);
	if (err)
		goto fail_return;

	return;

fail_return:
	/* read_buf != NULL implies temp_buf was claimed above */
	diag_ws_release();
	atomic_set(&temp_buf->in_busy, 0);
}
1034
1035static void diagfwd_queue_read(struct diagfwd_info *fwd_info)
1036{
1037 if (!fwd_info)
1038 return;
1039
1040 if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
1041 pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n",
1042 __func__, fwd_info->peripheral, fwd_info->type,
1043 fwd_info->inited, atomic_read(&fwd_info->opened),
1044 fwd_info->ch_open);
1045 return;
1046 }
1047
1048 /*
1049 * Don't queue a read on the data and command channels before receiving
1050 * the feature mask from the peripheral. We won't know which buffer to
1051 * use - HDLC or non HDLC buffer for reading.
1052 */
1053 if ((!driver->feature[fwd_info->peripheral].rcvd_feature_mask) &&
1054 (fwd_info->type != TYPE_CNTL)) {
1055 return;
1056 }
1057
1058 if (fwd_info->p_ops && fwd_info->p_ops->queue_read && fwd_info->ctxt)
1059 fwd_info->p_ops->queue_read(fwd_info->ctxt);
1060}
1061
/*
 * diagfwd_buffers_init - lazily allocate the channel's read buffers.
 *
 * buf_1 is allocated for every channel type; buf_2 only for TYPE_DATA.
 * When the apps side performs HDLC encoding, DATA (both buffers) and CMD
 * (buf_1 only) additionally get raw staging buffers (data_raw).
 * Allocations use GFP_ATOMIC because they happen under buf_lock. On any
 * failure all of the channel's buffers are torn down via
 * diagfwd_buffers_exit(). Idempotent: existing buffers are kept.
 */
void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
{
	unsigned long flags;

	if (!fwd_info)
		return;

	if (!fwd_info->inited) {
		pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
		       __func__, fwd_info->peripheral, fwd_info->type);
		return;
	}

	spin_lock_irqsave(&fwd_info->buf_lock, flags);
	if (!fwd_info->buf_1) {
		fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
					  GFP_ATOMIC);
		if (!fwd_info->buf_1)
			goto err;
		kmemleak_not_leak(fwd_info->buf_1);
	}
	if (!fwd_info->buf_1->data) {
		fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
						APF_DIAG_PADDING,
						GFP_ATOMIC);
		if (!fwd_info->buf_1->data)
			goto err;
		fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
		kmemleak_not_leak(fwd_info->buf_1->data);
		/* ctxt encodes (peripheral, type, buffer number) for the mux */
		fwd_info->buf_1->ctxt = SET_BUF_CTXT(fwd_info->peripheral,
						     fwd_info->type, 1);
	}

	if (fwd_info->type == TYPE_DATA) {
		/* Data channels double-buffer their reads */
		if (!fwd_info->buf_2) {
			fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
						  GFP_ATOMIC);
			if (!fwd_info->buf_2)
				goto err;
			kmemleak_not_leak(fwd_info->buf_2);
		}

		if (!fwd_info->buf_2->data) {
			fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
							APF_DIAG_PADDING,
							GFP_ATOMIC);
			if (!fwd_info->buf_2->data)
				goto err;
			fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
			kmemleak_not_leak(fwd_info->buf_2->data);
			fwd_info->buf_2->ctxt = SET_BUF_CTXT(
							fwd_info->peripheral,
							fwd_info->type, 2);
		}

		if (driver->supports_apps_hdlc_encoding) {
			/* In support of hdlc encoding */
			if (!fwd_info->buf_1->data_raw) {
				fwd_info->buf_1->data_raw =
					kzalloc(PERIPHERAL_BUF_SZ +
						APF_DIAG_PADDING,
						GFP_ATOMIC);
				if (!fwd_info->buf_1->data_raw)
					goto err;
				fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
				kmemleak_not_leak(fwd_info->buf_1->data_raw);
			}
			if (!fwd_info->buf_2->data_raw) {
				fwd_info->buf_2->data_raw =
					kzalloc(PERIPHERAL_BUF_SZ +
						APF_DIAG_PADDING,
						GFP_ATOMIC);
				if (!fwd_info->buf_2->data_raw)
					goto err;
				fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ;
				kmemleak_not_leak(fwd_info->buf_2->data_raw);
			}
		}
	}

	if (fwd_info->type == TYPE_CMD && driver->supports_apps_hdlc_encoding) {
		/* In support of hdlc encoding */
		if (!fwd_info->buf_1->data_raw) {
			fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
							APF_DIAG_PADDING,
							    GFP_ATOMIC);
			if (!fwd_info->buf_1->data_raw)
				goto err;
			fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
			kmemleak_not_leak(fwd_info->buf_1->data_raw);
		}
	}

	spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
	return;

err:
	/* Partial allocation: release everything this channel holds */
	spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
	diagfwd_buffers_exit(fwd_info);
}
1162
1163static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
1164{
1165 unsigned long flags;
1166
1167 if (!fwd_info)
1168 return;
1169
1170 spin_lock_irqsave(&fwd_info->buf_lock, flags);
1171 if (fwd_info->buf_1) {
1172 kfree(fwd_info->buf_1->data);
1173 fwd_info->buf_1->data = NULL;
1174 kfree(fwd_info->buf_1->data_raw);
1175 fwd_info->buf_1->data_raw = NULL;
1176 kfree(fwd_info->buf_1);
1177 fwd_info->buf_1 = NULL;
1178 }
1179 if (fwd_info->buf_2) {
1180 kfree(fwd_info->buf_2->data);
1181 fwd_info->buf_2->data = NULL;
1182 kfree(fwd_info->buf_2->data_raw);
1183 fwd_info->buf_2->data_raw = NULL;
1184 kfree(fwd_info->buf_2);
1185 fwd_info->buf_2 = NULL;
1186 }
1187 spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
1188}
1189
1190void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
1191{
1192 unsigned long flags;
1193 int i;
1194
1195 if (!fwd_info)
1196 return;
1197
1198 if (!fwd_info->inited) {
1199 pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
1200 __func__, fwd_info->peripheral, fwd_info->type);
1201 return;
1202 }
1203
1204 spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
1205 for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
1206 if (!fwd_info->buf_ptr[i])
1207 fwd_info->buf_ptr[i] =
1208 kzalloc(sizeof(struct diagfwd_buf_t),
1209 GFP_ATOMIC);
1210 if (!fwd_info->buf_ptr[i])
1211 goto err;
1212 kmemleak_not_leak(fwd_info->buf_ptr[i]);
1213 if (!fwd_info->buf_ptr[i]->data) {
1214 fwd_info->buf_ptr[i]->data = kzalloc(PERIPHERAL_BUF_SZ,
1215 GFP_ATOMIC);
1216 if (!fwd_info->buf_ptr[i]->data)
1217 goto err;
1218 fwd_info->buf_ptr[i]->len = PERIPHERAL_BUF_SZ;
1219 kmemleak_not_leak(fwd_info->buf_ptr[i]->data);
1220 }
1221 }
1222 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
1223 return;
1224
1225err:
1226 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
1227 pr_err("diag:unable to allocate write buffers\n");
1228 diagfwd_write_buffers_exit(fwd_info);
1229
1230}
1231
1232static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info)
1233{
1234 unsigned long flags;
1235 int i;
1236
1237 if (!fwd_info)
1238 return;
1239
1240 spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
1241 for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
1242 if (fwd_info->buf_ptr[i]) {
1243 kfree(fwd_info->buf_ptr[i]->data);
1244 fwd_info->buf_ptr[i]->data = NULL;
1245 kfree(fwd_info->buf_ptr[i]);
1246 fwd_info->buf_ptr[i] = NULL;
1247 }
1248 }
1249 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
1250}