1/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12#include <linux/slab.h>
13#include <linux/err.h>
14#include <linux/sched.h>
15#include <linux/ratelimit.h>
16#include <linux/workqueue.h>
17#include <linux/diagchar.h>
18#include <linux/of.h>
19#include <linux/kmemleak.h>
20#include <linux/delay.h>
21#include <linux/atomic.h>
22#include "diagchar.h"
23#include "diagchar_hdlc.h"
24#include "diagfwd_peripheral.h"
25#include "diagfwd_cntl.h"
26#include "diag_masks.h"
27#include "diag_dci.h"
28#include "diagfwd.h"
29#include "diagfwd_socket.h"
30#include "diag_mux.h"
31#include "diag_ipc_logging.h"
32#include "diagfwd_glink.h"
33
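/*
 * Framing used on the data/cmd channels when the apps processor performs the
 * HDLC encoding: each packet read from the peripheral starts with this header
 * (control character, version byte expected to be 1, 16-bit payload length),
 * followed by the payload and a trailing control character.
 * diag_add_hdlc_encoding() below validates and consumes this layout:
 *
 *   | control_char | version | length | payload[length] | control_char |
 */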
34struct data_header {
35 uint8_t control_char;
36 uint8_t version;
37 uint16_t length;
38};
39
40static struct diagfwd_info *early_init_info[NUM_TRANSPORT];
41
42static void diagfwd_queue_read(struct diagfwd_info *fwd_info);
43static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info);
44static void diagfwd_cntl_open(struct diagfwd_info *fwd_info);
45static void diagfwd_cntl_close(struct diagfwd_info *fwd_info);
46static void diagfwd_dci_open(struct diagfwd_info *fwd_info);
47static void diagfwd_dci_close(struct diagfwd_info *fwd_info);
48static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
49 unsigned char *buf, int len);
50static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
51 unsigned char *buf, int len);
52static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
53 unsigned char *buf, int len);
54static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
55 unsigned char *buf, int len);
56static void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info);
57static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info);
58struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
59
60static struct diag_channel_ops data_ch_ops = {
61 .open = NULL,
62 .close = NULL,
63 .read_done = diagfwd_data_read_untag_done
64};
65
66static struct diag_channel_ops cntl_ch_ops = {
67 .open = diagfwd_cntl_open,
68 .close = diagfwd_cntl_close,
69 .read_done = diagfwd_cntl_read_done
70};
71
72static struct diag_channel_ops dci_ch_ops = {
73 .open = diagfwd_dci_open,
74 .close = diagfwd_dci_close,
75 .read_done = diagfwd_dci_read_done
76};
77
78static void diagfwd_cntl_open(struct diagfwd_info *fwd_info)
79{
80 if (!fwd_info)
81 return;
82 diag_cntl_channel_open(fwd_info);
83}
84
85static void diagfwd_cntl_close(struct diagfwd_info *fwd_info)
86{
87 if (!fwd_info)
88 return;
89 diag_cntl_channel_close(fwd_info);
90}
91
92static void diagfwd_dci_open(struct diagfwd_info *fwd_info)
93{
94 if (!fwd_info)
95 return;
96
97 diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
98 DIAG_STATUS_OPEN, DCI_LOCAL_PROC);
99}
100
101static void diagfwd_dci_close(struct diagfwd_info *fwd_info)
102{
103 if (!fwd_info)
104 return;
105
106 diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
107 DIAG_STATUS_CLOSED, DCI_LOCAL_PROC);
108}
109
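/*
 * Walk a source buffer that may carry several framed packets (struct
 * data_header + payload + trailing control character), validate each frame,
 * and HDLC-encode the payloads back to back into dest_buf. On return,
 * *dest_len is updated to the number of encoded bytes produced; a non-zero
 * return value means a frame failed validation or would not fit in the
 * remaining destination space.
 */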
110static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
111 unsigned char *buf, int len)
112{
113 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
114 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
115 struct data_header *header;
116 int header_size = sizeof(struct data_header);
117 uint8_t *end_control_char = NULL;
118 uint8_t *payload = NULL;
119 uint8_t *temp_buf = NULL;
120 uint8_t *temp_encode_buf = NULL;
121 int src_pkt_len;
122 int encoded_pkt_length;
123 int max_size;
124 int total_processed = 0;
125 int bytes_remaining;
126 int err = 0;
127 uint8_t loop_count = 0;
128
129 if (!dest_buf || !dest_len || !buf)
130 return -EIO;
131
132 temp_buf = buf;
133 temp_encode_buf = dest_buf;
134 bytes_remaining = *dest_len;
135
136 while (total_processed < len) {
137 loop_count++;
138 header = (struct data_header *)temp_buf;
139 /* Perform initial error checking */
140 if (header->control_char != CONTROL_CHAR ||
141 header->version != 1) {
142 err = -EINVAL;
143 break;
144 }
145
146 if (header->length >= bytes_remaining)
147 break;
148
149 payload = temp_buf + header_size;
150 end_control_char = payload + header->length;
151 if (*end_control_char != CONTROL_CHAR) {
152 err = -EINVAL;
153 break;
154 }
155
156 max_size = 2 * header->length + 3;
157 if (bytes_remaining < max_size) {
158 err = -EINVAL;
159 break;
160 }
161
162 /* Prepare for encoding the data */
163 send.state = DIAG_STATE_START;
164 send.pkt = payload;
165 send.last = (void *)(payload + header->length - 1);
166 send.terminate = 1;
167
168 enc.dest = temp_encode_buf;
169 enc.dest_last = (void *)(temp_encode_buf + max_size);
170 enc.crc = 0;
171 diag_hdlc_encode(&send, &enc);
172
173 /* Prepare for next packet */
174 src_pkt_len = (header_size + header->length + 1);
175 total_processed += src_pkt_len;
176 temp_buf += src_pkt_len;
177
178 encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
179 bytes_remaining -= encoded_pkt_length;
180 temp_encode_buf = enc.dest;
181 }
182
183 *dest_len = (int)(temp_encode_buf - dest_buf);
184
185 return err;
186}
187
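/*
 * HDLC encoding can at worst roughly double the payload and appends the CRC
 * and trailing control character, hence the (2 * len) + 3 sizing below. If
 * the current buffer is smaller than that, grow it with krealloc(), capping
 * the size at MAX_PERIPHERAL_HDLC_BUF_SZ. Returns the usable buffer length
 * or a negative error code.
 */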
188static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
189{
190 uint32_t max_size = 0;
191 unsigned char *temp_buf = NULL;
192
193 if (!buf || len == 0)
194 return -EINVAL;
195
196 max_size = (2 * len) + 3;
197 if (max_size > PERIPHERAL_BUF_SZ) {
198 if (max_size > MAX_PERIPHERAL_HDLC_BUF_SZ) {
199 pr_err("diag: In %s, max_size is going beyond limit %d\n",
200 __func__, max_size);
201 max_size = MAX_PERIPHERAL_HDLC_BUF_SZ;
202 }
203
204 if (buf->len < max_size) {
205 temp_buf = krealloc(buf->data, max_size +
206 APF_DIAG_PADDING,
207 GFP_KERNEL);
208 if (!temp_buf)
209 return -ENOMEM;
210 buf->data = temp_buf;
211 buf->len = max_size;
212 }
213 }
214
215 return buf->len;
216}
217
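/*
 * Map a buffer context word back to the peripheral that memory-device
 * sessions should account the data against. For the core PD this is simply
 * the peripheral encoded in the context; when user PD sessions are active,
 * the PD token in the upper bits is matched against diagid_root and
 * diagid_user[] and may translate to a user-PD pseudo peripheral (for
 * example UPD_WLAN for the modem when PD logging is enabled).
 */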
218int diag_md_get_peripheral(int ctxt)
219{
220 int pd = 0, i = 0;
221 int type = 0, peripheral = -EINVAL;
222 struct diagfwd_info *fwd_info = NULL;
223
224 peripheral = GET_BUF_PERIPHERAL(ctxt);
225 if (peripheral < 0 || peripheral > NUM_PERIPHERALS)
226 return -EINVAL;
227
228 if (peripheral == APPS_DATA)
229 return peripheral;
230
231 type = GET_BUF_TYPE(ctxt);
232 if (type < 0 || type >= NUM_TYPES)
233 return -EINVAL;
234
235 fwd_info = &peripheral_info[type][peripheral];
236 if (!fwd_info)
237 return -EINVAL;
238
239 pd = GET_PD_CTXT(ctxt);
240
241 if (driver->num_pd_session) {
242 if (pd == fwd_info->diagid_root) {
243 if (peripheral > NUM_PERIPHERALS)
244 peripheral = -EINVAL;
245 } else {
246 for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
247 if (pd == fwd_info->diagid_user[i]) {
248 switch (peripheral) {
249 case PERIPHERAL_MODEM:
250 if (driver->pd_logging_mode[0])
251 peripheral = UPD_WLAN;
252 break;
253 default:
254 peripheral = -EINVAL;
255 break;
256 }
257 }
258 }
259 }
260 }
261 return peripheral;
262}
263
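/*
 * Second half of the untagged read path: takes one demultiplexed buffer
 * (core PD or user PD), either forwards it raw when the owning session has
 * HDLC disabled or HDLC-encodes it on the apps side, and hands the result
 * to the mux layer. On failure the buffer is released via
 * diagfwd_write_done() and a fresh read is queued.
 */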
264static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
265 struct diagfwd_buf_t *buf, int len)
266{
267 int err = 0;
268 int write_len = 0, peripheral = 0;
269 unsigned char *write_buf = NULL;
270 struct diag_md_session_t *session_info = NULL;
271 uint8_t hdlc_disabled = 0;
272
273 if (!fwd_info || !buf || len <= 0) {
274 diag_ws_release();
275 return;
276 }
277
278 switch (fwd_info->type) {
279 case TYPE_DATA:
280 case TYPE_CMD:
281 break;
282 default:
283 pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
284 __func__, fwd_info->type,
285 fwd_info->peripheral);
286 diag_ws_release();
287 return;
288 }
289
290 mutex_lock(&driver->hdlc_disable_mutex);
291 mutex_lock(&fwd_info->data_mutex);
292
293 peripheral =
294 diag_md_get_peripheral(buf->ctxt);
295 if (peripheral < 0) {
296 pr_err("diag:%s:%d invalid peripheral = %d\n",
297 __func__, __LINE__, peripheral);
298 mutex_unlock(&fwd_info->data_mutex);
299 mutex_unlock(&driver->hdlc_disable_mutex);
300 diag_ws_release();
301 return;
302 }
303
304 session_info =
305 diag_md_session_get_peripheral(peripheral);
306 if (session_info)
307 hdlc_disabled = session_info->hdlc_disabled;
308 else
309 hdlc_disabled = driver->hdlc_disabled;
310
311 if (hdlc_disabled) {
312 /* The data is raw and on the APPS side HDLC is disabled */
313 if (!buf) {
314 pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
315 __func__, buf, fwd_info->peripheral,
316 fwd_info->type);
317 goto end;
318 }
319 if (len > PERIPHERAL_BUF_SZ) {
320 pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
321 __func__, len, fwd_info->peripheral,
322 fwd_info->type);
323 goto end;
324 }
325 write_len = len;
326 if (write_len <= 0)
327 goto end;
328 write_buf = buf->data_raw;
329 } else {
330 if (!buf) {
331 pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
332 __func__, buf, fwd_info->peripheral,
333 fwd_info->type);
334 goto end;
335 }
336
337 write_len = check_bufsize_for_encoding(buf, len);
338 if (write_len <= 0) {
339 pr_err("diag: error in checking buf for encoding\n");
340 goto end;
341 }
342 write_buf = buf->data;
343 err = diag_add_hdlc_encoding(write_buf, &write_len,
344 buf->data_raw, len);
345 if (err) {
346 pr_err("diag: error in adding hdlc encoding\n");
347 goto end;
348 }
349 }
350
351 if (write_len > 0) {
352 err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
353 buf->ctxt);
354 if (err) {
355 pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
356 __func__, err);
357 goto end;
358 }
359 }
360 mutex_unlock(&fwd_info->data_mutex);
361 mutex_unlock(&driver->hdlc_disable_mutex);
362 diagfwd_queue_read(fwd_info);
363 return;
364
365end:
366 diag_ws_release();
367 mutex_unlock(&fwd_info->data_mutex);
368 mutex_unlock(&driver->hdlc_disable_mutex);
369 if (buf) {
370 diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
371 GET_BUF_NUM(buf->ctxt));
372 }
373 diagfwd_queue_read(fwd_info);
374}
375
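/*
 * Read-done handler for peripherals advertising the untagged-header feature.
 * The incoming buffer is a stream of sub-packets, each prefixed by a 4-byte
 * tag whose first byte is the diag ID of the originating PD and whose last
 * two bytes are the payload length. Payloads are copied into the core-PD
 * buffer or the matching user-PD buffer and forwarded independently;
 * peripherals without the feature fall back to diagfwd_data_read_done().
 */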
376static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
377 unsigned char *buf, int len)
378{
379 int i = 0;
380 int len_cpd = 0;
381 int ctxt_cpd = 0;
382 int len_upd[MAX_PERIPHERAL_UPD] = {0};
383 int ctxt_upd[MAX_PERIPHERAL_UPD] = {0};
384 int packet_len = 0, processed = 0;
385 unsigned char *temp_buf_main = NULL;
386 unsigned char *temp_buf_cpd = NULL;
387 unsigned char *temp_buf_upd[MAX_PERIPHERAL_UPD] = {NULL};
388 struct diagfwd_buf_t *temp_fwdinfo_cpd = NULL;
389 struct diagfwd_buf_t *temp_fwdinfo_upd = NULL;
390 int flag_buf_1 = 0, flag_buf_2 = 0;
391 uint8_t peripheral;
392
393 if (!fwd_info || !buf || len <= 0) {
394 diag_ws_release();
395 return;
396 }
397
398 switch (fwd_info->type) {
399 case TYPE_DATA:
400 case TYPE_CMD:
401 break;
402 default:
403 pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
404 __func__, fwd_info->type,
405 fwd_info->peripheral);
406 diag_ws_release();
407 return;
408 }
409 peripheral = fwd_info->peripheral;
410 if (peripheral >= NUM_PERIPHERALS)
411 return;
412
413 if (driver->feature[peripheral].encode_hdlc &&
414 driver->feature[peripheral].untag_header &&
415 driver->peripheral_untag[peripheral]) {
416 temp_buf_cpd = buf;
417 temp_buf_main = buf;
418 if (fwd_info->buf_1 &&
419 fwd_info->buf_1->data_raw == buf) {
420 flag_buf_1 = 1;
421 temp_fwdinfo_cpd = fwd_info->buf_1;
422 if (fwd_info->type == TYPE_DATA) {
423 for (i = 0; i <= (fwd_info->num_pd - 2); i++)
424 temp_buf_upd[i] =
425 fwd_info->buf_upd[i][0]->data_raw;
426 }
427 } else if (fwd_info->buf_2 &&
428 fwd_info->buf_2->data_raw == buf) {
429 flag_buf_2 = 1;
430 temp_fwdinfo_cpd = fwd_info->buf_2;
431 if (fwd_info->type == TYPE_DATA) {
432 for (i = 0; i <= (fwd_info->num_pd - 2); i++)
433 temp_buf_upd[i] =
434 fwd_info->buf_upd[i][1]->data_raw;
435 }
436 } else {
437 pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
438 __func__, buf, peripheral,
439 fwd_info->type);
440 goto end;
441 }
442
443 while (processed < len) {
444 pr_debug("diag_fr:untagged packet buf contents: %02x %02x %02x %02x\n",
445 *temp_buf_main, *(temp_buf_main+1),
446 *(temp_buf_main+2), *(temp_buf_main+3));
447 packet_len =
448 *(uint16_t *) (temp_buf_main + 2);
449 if (packet_len > PERIPHERAL_BUF_SZ)
450 goto end;
451 if ((*temp_buf_main) == fwd_info->diagid_root) {
452 ctxt_cpd = fwd_info->diagid_root;
453 len_cpd += packet_len;
454 if (temp_buf_cpd) {
455 memcpy(temp_buf_cpd,
456 (temp_buf_main + 4), packet_len);
457 temp_buf_cpd += packet_len;
458 }
459 } else {
460 for (i = 0; i <= (fwd_info->num_pd - 2); i++)
461 if ((*temp_buf_main) ==
462 fwd_info->diagid_user[i])
463 break;
464 ctxt_upd[i] = fwd_info->diagid_user[i];
465 if (temp_buf_upd[i]) {
466 memcpy(temp_buf_upd[i],
467 (temp_buf_main + 4), packet_len);
468 temp_buf_upd[i] += packet_len;
469 }
470 len_upd[i] += packet_len;
471 }
472 len = len - 4;
473 temp_buf_main += (packet_len + 4);
474 processed += packet_len;
475 }
476 for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
477 if (fwd_info->type == TYPE_DATA && len_upd[i]) {
478 if (flag_buf_1) {
479 fwd_info->upd_len[i][0] = len_upd[i];
480 temp_fwdinfo_upd =
481 fwd_info->buf_upd[i][0];
482 } else {
483 fwd_info->upd_len[i][1] = len_upd[i];
484 temp_fwdinfo_upd =
485 fwd_info->buf_upd[i][1];
486 }
487 temp_fwdinfo_upd->ctxt &= 0x00FFFFFF;
488 temp_fwdinfo_upd->ctxt |=
489 (SET_PD_CTXT(ctxt_upd[i]));
490 atomic_set(&temp_fwdinfo_upd->in_busy, 1);
491 diagfwd_data_process_done(fwd_info,
492 temp_fwdinfo_upd, len_upd[i]);
493 } else {
494 if (flag_buf_1)
495 fwd_info->upd_len[i][0] = 0;
496 if (flag_buf_2)
497 fwd_info->upd_len[i][1] = 0;
498 }
499 }
500 if (len_cpd) {
501 if (flag_buf_1)
502 fwd_info->cpd_len_1 = len_cpd;
503 else
504 fwd_info->cpd_len_2 = len_cpd;
505 temp_fwdinfo_cpd->ctxt &= 0x00FFFFFF;
506 temp_fwdinfo_cpd->ctxt |=
507 (SET_PD_CTXT(ctxt_cpd));
508 diagfwd_data_process_done(fwd_info,
509 temp_fwdinfo_cpd, len_cpd);
510 } else {
511 if (flag_buf_1)
512 fwd_info->cpd_len_1 = 0;
513 if (flag_buf_2)
514 fwd_info->cpd_len_2 = 0;
515 }
516 } else {
517 diagfwd_data_read_done(fwd_info, buf, len);
518 }
519 return;
520end:
521 diag_ws_release();
522 if (temp_fwdinfo_cpd) {
523 diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
524 GET_BUF_NUM(temp_fwdinfo_cpd->ctxt));
525 }
526 diagfwd_queue_read(fwd_info);
527}
528
529static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
530 unsigned char *buf, int len)
531{
532 int err = 0;
533 int write_len = 0;
534 unsigned char *write_buf = NULL;
535 struct diagfwd_buf_t *temp_buf = NULL;
536 struct diag_md_session_t *session_info = NULL;
537 uint8_t hdlc_disabled = 0;
538
539 if (!fwd_info || !buf || len <= 0) {
540 diag_ws_release();
541 return;
542 }
543
544 switch (fwd_info->type) {
545 case TYPE_DATA:
546 case TYPE_CMD:
547 break;
548 default:
549 pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
550 __func__, fwd_info->type,
551 fwd_info->peripheral);
552 diag_ws_release();
553 return;
554 }
555
556 mutex_lock(&driver->hdlc_disable_mutex);
557 mutex_lock(&fwd_info->data_mutex);
558 session_info = diag_md_session_get_peripheral(fwd_info->peripheral);
559 if (session_info)
560 hdlc_disabled = session_info->hdlc_disabled;
561 else
562 hdlc_disabled = driver->hdlc_disabled;
563
564 if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
565 if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
566 temp_buf = fwd_info->buf_1;
567 write_buf = fwd_info->buf_1->data;
568 } else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf) {
569 temp_buf = fwd_info->buf_2;
570 write_buf = fwd_info->buf_2->data;
571 } else {
572 pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
573 __func__, buf, fwd_info->peripheral,
574 fwd_info->type);
575 goto end;
576 }
577 write_len = len;
578 } else if (hdlc_disabled) {
579 /* The data is raw and on the APPS side HDLC is disabled */
580 if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
581 temp_buf = fwd_info->buf_1;
582 } else if (fwd_info->buf_2 &&
583 fwd_info->buf_2->data_raw == buf) {
584 temp_buf = fwd_info->buf_2;
585 } else {
586 pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
587 __func__, buf, fwd_info->peripheral,
588 fwd_info->type);
589 goto end;
590 }
591 if (len > PERIPHERAL_BUF_SZ) {
592 pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
593 __func__, len, fwd_info->peripheral,
594 fwd_info->type);
595 goto end;
596 }
597 write_len = len;
598 write_buf = buf;
599 } else {
600 if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
601 temp_buf = fwd_info->buf_1;
602 } else if (fwd_info->buf_2 &&
603 fwd_info->buf_2->data_raw == buf) {
604 temp_buf = fwd_info->buf_2;
605 } else {
606 pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
607 __func__, buf, fwd_info->peripheral,
608 fwd_info->type);
609 goto end;
610 }
611 write_len = check_bufsize_for_encoding(temp_buf, len);
612 if (write_len <= 0) {
613 pr_err("diag: error in checking buf for encoding\n");
614 goto end;
615 }
616 write_buf = temp_buf->data;
617 err = diag_add_hdlc_encoding(write_buf, &write_len, buf, len);
618 if (err) {
619 pr_err("diag: error in adding hdlc encoding\n");
620 goto end;
621 }
622 }
623
624 if (write_len > 0) {
625 err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
626 temp_buf->ctxt);
627 if (err) {
628 pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
629 __func__, err);
630 goto end;
631 }
632 }
633 mutex_unlock(&fwd_info->data_mutex);
634 mutex_unlock(&driver->hdlc_disable_mutex);
635 diagfwd_queue_read(fwd_info);
636 return;
637
638end:
639 diag_ws_release();
640 mutex_unlock(&fwd_info->data_mutex);
641 mutex_unlock(&driver->hdlc_disable_mutex);
642 if (temp_buf) {
643 diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
644 GET_BUF_NUM(temp_buf->ctxt));
645 }
646 diagfwd_queue_read(fwd_info);
647}
648
649static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
650 unsigned char *buf, int len)
651{
652 if (!fwd_info) {
653 diag_ws_release();
654 return;
655 }
656
657 if (fwd_info->type != TYPE_CNTL) {
658 pr_err("diag: In %s, invalid type %d for peripheral %d\n",
659 __func__, fwd_info->type, fwd_info->peripheral);
660 diag_ws_release();
661 return;
662 }
663
664 diag_ws_on_read(DIAG_WS_MUX, len);
665 diag_cntl_process_read_data(fwd_info, buf, len);
666 /*
667 * Control packets are not consumed by the clients. Mimic
668 * consumption by setting and clearing the wakeup source copy_count
669 * explicitly.
670 */
671 diag_ws_on_copy_fail(DIAG_WS_MUX);
672 /* Reset the buffer in_busy value after processing the data */
673 if (fwd_info->buf_1)
674 atomic_set(&fwd_info->buf_1->in_busy, 0);
675
676 diagfwd_queue_read(fwd_info);
677 diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]);
678 diagfwd_queue_read(&peripheral_info[TYPE_CMD][fwd_info->peripheral]);
679}
680
681static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
682 unsigned char *buf, int len)
683{
684 if (!fwd_info)
685 return;
686
687 switch (fwd_info->type) {
688 case TYPE_DCI:
689 case TYPE_DCI_CMD:
690 break;
691 default:
692 pr_err("diag: In %s, invalid type %d for peripheral %d\n",
693 __func__, fwd_info->type, fwd_info->peripheral);
694 return;
695 }
696
697 diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
698 /* Reset the buffer in_busy value after processing the data */
699 if (fwd_info->buf_1)
700 atomic_set(&fwd_info->buf_1->in_busy, 0);
701
702 diagfwd_queue_read(fwd_info);
703}
704
705static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
706 unsigned char *buf)
707{
708 if (!fwd_info || !buf)
709 return;
710
711 if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
712 if (fwd_info->buf_1 && fwd_info->buf_1->data == buf)
713 atomic_set(&fwd_info->buf_1->in_busy, 0);
714 else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf)
715 atomic_set(&fwd_info->buf_2->in_busy, 0);
716 } else {
717 if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf)
718 atomic_set(&fwd_info->buf_1->in_busy, 0);
719 else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
720 atomic_set(&fwd_info->buf_2->in_busy, 0);
721 }
722}
723
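/*
 * Two sets of forwarding state are prepared here: early_init_info[] holds
 * one control-channel slot per transport so that either the socket or the
 * glink channel may come up first, while peripheral_info[][] holds the
 * per-type state used once a transport has won the peripheral (see
 * diagfwd_close_transport()).
 */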
724int diagfwd_peripheral_init(void)
725{
726 uint8_t peripheral;
727 uint8_t transport;
728 uint8_t type;
729 int i = 0;
730 struct diagfwd_info *fwd_info = NULL;
731
732 for (transport = 0; transport < NUM_TRANSPORT; transport++) {
733 early_init_info[transport] = kzalloc(
734 sizeof(struct diagfwd_info) * NUM_PERIPHERALS,
735 GFP_KERNEL);
736 if (!early_init_info[transport])
737 return -ENOMEM;
738 kmemleak_not_leak(early_init_info[transport]);
739 }
740
741 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
742 for (transport = 0; transport < NUM_TRANSPORT; transport++) {
743 fwd_info = &early_init_info[transport][peripheral];
744 fwd_info->peripheral = peripheral;
745 fwd_info->type = TYPE_CNTL;
746 fwd_info->transport = transport;
747 fwd_info->ctxt = NULL;
748 fwd_info->p_ops = NULL;
749 fwd_info->ch_open = 0;
750 fwd_info->inited = 1;
751 fwd_info->read_bytes = 0;
752 fwd_info->write_bytes = 0;
753 fwd_info->cpd_len_1 = 0;
754 fwd_info->cpd_len_2 = 0;
755 fwd_info->num_pd = 0;
756 mutex_init(&fwd_info->buf_mutex);
757 mutex_init(&fwd_info->data_mutex);
758 spin_lock_init(&fwd_info->write_buf_lock);
759
760 for (i = 0; i < MAX_PERIPHERAL_UPD; i++) {
761 fwd_info->diagid_user[i] = 0;
762 fwd_info->upd_len[i][0] = 0;
763 fwd_info->upd_len[i][1] = 0;
764 fwd_info->buf_upd[i][0] = NULL;
765 fwd_info->buf_upd[i][1] = NULL;
766 }
767 }
768 }
769
770 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
771 for (type = 0; type < NUM_TYPES; type++) {
772 fwd_info = &peripheral_info[type][peripheral];
773 fwd_info->peripheral = peripheral;
774 fwd_info->type = type;
775 fwd_info->ctxt = NULL;
776 fwd_info->p_ops = NULL;
777 fwd_info->ch_open = 0;
778 fwd_info->read_bytes = 0;
779 fwd_info->write_bytes = 0;
780 fwd_info->num_pd = 0;
781 fwd_info->cpd_len_1 = 0;
782 fwd_info->cpd_len_2 = 0;
783 spin_lock_init(&fwd_info->write_buf_lock);
784 mutex_init(&fwd_info->buf_mutex);
785 mutex_init(&fwd_info->data_mutex);
786
787 for (i = 0; i < MAX_PERIPHERAL_UPD; i++) {
788 fwd_info->diagid_user[i] = 0;
789 fwd_info->upd_len[i][0] = 0;
790 fwd_info->upd_len[i][1] = 0;
791 fwd_info->buf_upd[i][0] = NULL;
792 fwd_info->buf_upd[i][1] = NULL;
793 }
794 /*
795 * This state shouldn't be set for Control channels
796 * during initialization. This is set when the feature
797 * mask is received for the first time.
798 */
799 if (type != TYPE_CNTL)
800 fwd_info->inited = 1;
801 }
802 driver->diagfwd_data[peripheral] =
803 &peripheral_info[TYPE_DATA][peripheral];
804 driver->diagfwd_cntl[peripheral] =
805 &peripheral_info[TYPE_CNTL][peripheral];
806 driver->diagfwd_dci[peripheral] =
807 &peripheral_info[TYPE_DCI][peripheral];
808 driver->diagfwd_cmd[peripheral] =
809 &peripheral_info[TYPE_CMD][peripheral];
810 driver->diagfwd_dci_cmd[peripheral] =
811 &peripheral_info[TYPE_DCI_CMD][peripheral];
812 }
813
814 if (driver->supports_sockets)
815 diag_socket_init();
816 diag_glink_init();
817
818 return 0;
819}
820
821void diagfwd_peripheral_exit(void)
822{
823 uint8_t peripheral;
824 uint8_t type;
825 struct diagfwd_info *fwd_info = NULL;
826
827 diag_socket_exit();
828
829 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
830 for (type = 0; type < NUM_TYPES; type++) {
831 fwd_info = &peripheral_info[type][peripheral];
832 fwd_info->ctxt = NULL;
833 fwd_info->p_ops = NULL;
834 fwd_info->ch_open = 0;
835 diagfwd_buffers_exit(fwd_info);
836 }
837 }
838
839 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
840 driver->diagfwd_data[peripheral] = NULL;
841 driver->diagfwd_cntl[peripheral] = NULL;
842 driver->diagfwd_dci[peripheral] = NULL;
843 driver->diagfwd_cmd[peripheral] = NULL;
844 driver->diagfwd_dci_cmd[peripheral] = NULL;
845 }
846
847 kfree(early_init_info);
848}
849
850int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
851 struct diag_peripheral_ops *ops,
852 struct diagfwd_info **fwd_ctxt)
853{
854 struct diagfwd_info *fwd_info = NULL;
855
856 if (!ctxt || !ops)
857 return -EIO;
858
859 if (transport >= NUM_TRANSPORT || peripheral >= NUM_PERIPHERALS)
860 return -EINVAL;
861
862 fwd_info = &early_init_info[transport][peripheral];
863 *fwd_ctxt = &early_init_info[transport][peripheral];
864 fwd_info->ctxt = ctxt;
865 fwd_info->p_ops = ops;
866 fwd_info->c_ops = &cntl_ch_ops;
867
868 return 0;
869}
870
871int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
872 void *ctxt, struct diag_peripheral_ops *ops,
873 struct diagfwd_info **fwd_ctxt)
874{
875 struct diagfwd_info *fwd_info = NULL;
876
877 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES ||
878 !ctxt || !ops || transport >= NUM_TRANSPORT) {
879 pr_err("diag: In %s, returning error\n", __func__);
880 return -EIO;
881 }
882
883 fwd_info = &peripheral_info[type][peripheral];
884 *fwd_ctxt = &peripheral_info[type][peripheral];
885 fwd_info->ctxt = ctxt;
886 fwd_info->p_ops = ops;
887 fwd_info->transport = transport;
888 fwd_info->ch_open = 0;
889
890 switch (type) {
891 case TYPE_DATA:
892 case TYPE_CMD:
893 fwd_info->c_ops = &data_ch_ops;
894 break;
895 case TYPE_DCI:
896 case TYPE_DCI_CMD:
897 fwd_info->c_ops = &dci_ch_ops;
898 break;
899 default:
900 pr_err("diag: In %s, invalid type: %d\n", __func__, type);
901 return -EINVAL;
902 }
903
904 if (atomic_read(&fwd_info->opened) &&
905 fwd_info->p_ops && fwd_info->p_ops->open) {
906 /*
907 * The registration can happen late, like in the case of
908 * sockets. fwd_info->opened reflects diag_state. Propagate the
909 * state to the peripherals.
910 */
911 fwd_info->p_ops->open(fwd_info->ctxt);
912 }
913
914 return 0;
915}
916
917void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt)
918{
919 struct diagfwd_info *fwd_info = NULL;
920
921 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !ctxt)
922 return;
923
924 fwd_info = &peripheral_info[type][peripheral];
925 if (fwd_info->ctxt != ctxt) {
926 pr_err("diag: In %s, unable to find a match for p: %d t: %d\n",
927 __func__, peripheral, type);
928 return;
929 }
930 fwd_info->ctxt = NULL;
931 fwd_info->p_ops = NULL;
932 fwd_info->ch_open = 0;
933 diagfwd_buffers_exit(fwd_info);
934
935 switch (type) {
936 case TYPE_DATA:
937 driver->diagfwd_data[peripheral] = NULL;
938 break;
939 case TYPE_CNTL:
940 driver->diagfwd_cntl[peripheral] = NULL;
941 break;
942 case TYPE_DCI:
943 driver->diagfwd_dci[peripheral] = NULL;
944 break;
945 case TYPE_CMD:
946 driver->diagfwd_cmd[peripheral] = NULL;
947 break;
948 case TYPE_DCI_CMD:
949 driver->diagfwd_dci_cmd[peripheral] = NULL;
950 break;
951 }
952}
953
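/*
 * Called when a peripheral has come up on the other transport: the losing
 * transport's early-init control channel is closed, the surviving
 * transport's early-init state is migrated into peripheral_info[TYPE_CNTL],
 * the control channel is opened for mask updates (deferred for glink until
 * its buffers are ready), and pending data/cmd reads are queued.
 */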
954void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
955{
956 struct diagfwd_info *fwd_info = NULL;
957 struct diagfwd_info *dest_info = NULL;
958 int (*init_fn)(uint8_t) = NULL;
959 void (*invalidate_fn)(void *, struct diagfwd_info *) = NULL;
960 int (*check_channel_state)(void *) = NULL;
961 uint8_t transport_open = 0;
962 int i = 0;
963
964 if (peripheral >= NUM_PERIPHERALS)
965 return;
966
967 switch (transport) {
968 case TRANSPORT_GLINK:
969 transport_open = TRANSPORT_SOCKET;
970 init_fn = diag_socket_init_peripheral;
971 invalidate_fn = diag_socket_invalidate;
972 check_channel_state = diag_socket_check_state;
973 break;
974 case TRANSPORT_SOCKET:
975 transport_open = TRANSPORT_GLINK;
976 init_fn = diag_glink_init_peripheral;
977 invalidate_fn = diag_glink_invalidate;
978 check_channel_state = diag_glink_check_state;
979 break;
980 default:
981 return;
982
983 }
984
985 mutex_lock(&driver->diagfwd_channel_mutex[peripheral]);
986 fwd_info = &early_init_info[transport][peripheral];
987 if (fwd_info->p_ops && fwd_info->p_ops->close)
988 fwd_info->p_ops->close(fwd_info->ctxt);
989 fwd_info = &early_init_info[transport_open][peripheral];
990 dest_info = &peripheral_info[TYPE_CNTL][peripheral];
991 dest_info->inited = 1;
992 dest_info->ctxt = fwd_info->ctxt;
993 dest_info->p_ops = fwd_info->p_ops;
994 dest_info->c_ops = fwd_info->c_ops;
995 dest_info->ch_open = fwd_info->ch_open;
996 dest_info->read_bytes = fwd_info->read_bytes;
997 dest_info->write_bytes = fwd_info->write_bytes;
998 dest_info->inited = fwd_info->inited;
999 dest_info->buf_1 = fwd_info->buf_1;
1000 dest_info->buf_2 = fwd_info->buf_2;
1001 dest_info->transport = fwd_info->transport;
1002 invalidate_fn(dest_info->ctxt, dest_info);
1003 for (i = 0; i < NUM_WRITE_BUFFERS; i++)
1004 dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
1005 if (!check_channel_state(dest_info->ctxt))
1006 diagfwd_late_open(dest_info);
1007
1008 /*
1009 * Open control channel to update masks after buffers are
1010 * initialized for peripherals that have transport other than
1011 * GLINK. GLINK supported peripheral mask update will
1012 * happen after glink buffers are initialized.
1013 */
1014
1015 if (dest_info->transport != TRANSPORT_GLINK)
1016 diagfwd_cntl_open(dest_info);
1017 init_fn(peripheral);
1018 mutex_unlock(&driver->diagfwd_channel_mutex[peripheral]);
1019 diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
1020 diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
1021}
1022
1023void *diagfwd_request_write_buf(struct diagfwd_info *fwd_info)
1024{
1025 void *buf = NULL;
1026 int index;
1027 unsigned long flags;
1028
1029 spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
1030 for (index = 0 ; index < NUM_WRITE_BUFFERS; index++) {
1031 if (!atomic_read(&(fwd_info->buf_ptr[index]->in_busy))) {
1032 atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 1);
1033 buf = fwd_info->buf_ptr[index]->data;
1034 if (!buf)
1035 break;
1036 break;
1037 }
1038 }
1039 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
1040 return buf;
1041}
1042
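/*
 * Forward an apps-originated packet to a peripheral. Command traffic is
 * redirected to the data/DCI channel when the peripheral lacks a separate
 * command-response channel, and is silently dropped (returning 0) until the
 * feature mask and diag ID handshakes have completed. Writes are retried up
 * to three times with a ~100 ms delay unless the transport reports -ENODEV;
 * glink transports additionally copy the payload into a dedicated write
 * buffer obtained from diagfwd_request_write_buf().
 */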
1043int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
1044{
1045 struct diagfwd_info *fwd_info = NULL;
1046 int err = 0;
1047 uint8_t retry_count = 0;
1048 uint8_t max_retries = 3;
1049 void *buf_ptr = NULL;
1050
1051 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
1052 return -EINVAL;
1053
1054 if (type == TYPE_CMD || type == TYPE_DCI_CMD) {
1055 if (!driver->feature[peripheral].rcvd_feature_mask ||
1056 !driver->feature[peripheral].sent_feature_mask) {
1057 pr_debug_ratelimited("diag: In %s, feature mask for peripheral: %d not received or sent yet\n",
1058 __func__, peripheral);
1059 return 0;
1060 }
1061 if (!driver->feature[peripheral].separate_cmd_rsp)
1062 type = (type == TYPE_CMD) ? TYPE_DATA : TYPE_DCI;
1063 }
1064
1065 fwd_info = &peripheral_info[type][peripheral];
1066 if (!fwd_info->inited || !atomic_read(&fwd_info->opened))
1067 return -ENODEV;
1068
1069 if (type == TYPE_CMD) {
1070 if (driver->feature[peripheral].untag_header)
1071 if (!fwd_info->diagid_root ||
1072 (!driver->diag_id_sent[peripheral])) {
1073 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
1074 "diag: diag_id is not assigned yet\n");
1075 return 0;
1076 }
1077 }
1078
1079 if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
1080 return -EIO;
1081
1082 if (fwd_info->transport == TRANSPORT_GLINK) {
1083 buf_ptr = diagfwd_request_write_buf(fwd_info);
1084 if (buf_ptr)
1085 memcpy(buf_ptr, buf, len);
1086 else {
1087 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
1088 "diag: buffer not found for writing\n");
1089 return -EIO;
1090 }
1091 } else
1092 buf_ptr = buf;
1093
1094 while (retry_count < max_retries) {
1095 err = 0;
1096 err = fwd_info->p_ops->write(fwd_info->ctxt, buf_ptr, len);
1097 if (err && err != -ENODEV) {
1098 usleep_range(100000, 101000);
1099 retry_count++;
1100 continue;
1101 }
1102 break;
1103 }
1104
1105 if (!err)
1106 fwd_info->write_bytes += len;
1107 else
1108 if (fwd_info->transport == TRANSPORT_GLINK)
1109 diagfwd_write_buffer_done(fwd_info, buf_ptr);
1110 return err;
1111}
1112
1113static void __diag_fwd_open(struct diagfwd_info *fwd_info)
1114{
1115 if (!fwd_info)
1116 return;
1117
1118 atomic_set(&fwd_info->opened, 1);
1119 if (!fwd_info->inited)
1120 return;
1121
1122 if (fwd_info->buf_1)
1123 atomic_set(&fwd_info->buf_1->in_busy, 0);
1124 if (fwd_info->buf_2)
1125 atomic_set(&fwd_info->buf_2->in_busy, 0);
1126
1127 if (fwd_info->p_ops && fwd_info->p_ops->open)
1128 fwd_info->p_ops->open(fwd_info->ctxt);
1129
1130 diagfwd_queue_read(fwd_info);
1131}
1132
1133void diagfwd_early_open(uint8_t peripheral)
1134{
1135 uint8_t transport = 0;
1136 struct diagfwd_info *fwd_info = NULL;
1137
1138 if (peripheral >= NUM_PERIPHERALS)
1139 return;
1140
1141 for (transport = 0; transport < NUM_TRANSPORT; transport++) {
1142 fwd_info = &early_init_info[transport][peripheral];
1143 __diag_fwd_open(fwd_info);
1144 }
1145}
1146
1147void diagfwd_open(uint8_t peripheral, uint8_t type)
1148{
1149 struct diagfwd_info *fwd_info = NULL;
1150
1151 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
1152 return;
1153
1154 fwd_info = &peripheral_info[type][peripheral];
1155 __diag_fwd_open(fwd_info);
1156}
1157
1158void diagfwd_late_open(struct diagfwd_info *fwd_info)
1159{
1160 __diag_fwd_open(fwd_info);
1161}
1162
1163void diagfwd_close(uint8_t peripheral, uint8_t type)
1164{
1165 struct diagfwd_info *fwd_info = NULL;
1166
1167 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
1168 return;
1169
1170 fwd_info = &peripheral_info[type][peripheral];
1171 atomic_set(&fwd_info->opened, 0);
1172 if (!fwd_info->inited)
1173 return;
1174
1175 if (fwd_info->p_ops && fwd_info->p_ops->close)
1176 fwd_info->p_ops->close(fwd_info->ctxt);
1177
1178 if (fwd_info->buf_1)
1179 atomic_set(&fwd_info->buf_1->in_busy, 1);
1180 /*
1181 * Only Data channels have two buffers. Set both the buffers
1182 * to busy on close.
1183 */
1184 if (fwd_info->buf_2)
1185 atomic_set(&fwd_info->buf_2->in_busy, 1);
1186}
1187
1188int diagfwd_channel_open(struct diagfwd_info *fwd_info)
1189{
1190 int i;
1191
1192 if (!fwd_info)
1193 return -EIO;
1194
1195 if (!fwd_info->inited) {
1196 pr_debug("diag: In %s, channel is not inited, p: %d, t: %d\n",
1197 __func__, fwd_info->peripheral, fwd_info->type);
1198 return -EINVAL;
1199 }
1200
1201 if (fwd_info->ch_open) {
1202 pr_debug("diag: In %s, channel is already open, p: %d, t: %d\n",
1203 __func__, fwd_info->peripheral, fwd_info->type);
1204 return 0;
1205 }
1206 mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
1207 fwd_info->ch_open = 1;
1208 diagfwd_buffers_init(fwd_info);
1209
1210 /*
1211 * Initialize buffers for glink supported
1212 * peripherals only. Open control channel to update
1213 * masks after buffers are initialized.
1214 */
1215 if (fwd_info->transport == TRANSPORT_GLINK) {
1216 diagfwd_write_buffers_init(fwd_info);
1217 if (fwd_info->type == TYPE_CNTL)
1218 diagfwd_cntl_open(fwd_info);
1219 }
1220
1221 if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
1222 fwd_info->c_ops->open(fwd_info);
1223 for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
1224 if (fwd_info->buf_ptr[i])
1225 atomic_set(&fwd_info->buf_ptr[i]->in_busy, 0);
1226 }
1227 diagfwd_queue_read(fwd_info);
1228 DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered opened\n",
1229 fwd_info->peripheral, fwd_info->type);
1230
1231 if (atomic_read(&fwd_info->opened)) {
1232 if (fwd_info->p_ops && fwd_info->p_ops->open)
1233 fwd_info->p_ops->open(fwd_info->ctxt);
1234 }
1235 mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
1236 return 0;
1237}
1238
1239int diagfwd_channel_close(struct diagfwd_info *fwd_info)
1240{
1241 int i;
1242
1243 if (!fwd_info)
1244 return -EIO;
1245
1246 if (fwd_info->type == TYPE_CNTL)
1247 flush_workqueue(driver->cntl_wq);
1248
1249 mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
1250 fwd_info->ch_open = 0;
1251 if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
1252 fwd_info->c_ops->close(fwd_info);
1253
1254 if (fwd_info->buf_1 && fwd_info->buf_1->data)
1255 atomic_set(&fwd_info->buf_1->in_busy, 0);
1256 if (fwd_info->buf_2 && fwd_info->buf_2->data)
1257 atomic_set(&fwd_info->buf_2->in_busy, 0);
1258
1259 for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
1260 if (fwd_info->buf_ptr[i])
1261 atomic_set(&fwd_info->buf_ptr[i]->in_busy, 1);
1262 }
1263 DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
1264 fwd_info->peripheral, fwd_info->type);
1265 mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
1266 return 0;
1267}
1268
1269int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
1270 unsigned char *buf, uint32_t len)
1271{
1272 if (!fwd_info) {
1273 diag_ws_release();
1274 return -EIO;
1275 }
1276
1277 /*
1278 * Diag peripheral layers should send len as 0 if there is any error
1279 * in reading data from the transport. Use this information to reset the
1280 * in_busy flags. No need to queue read in this case.
1281 */
1282 if (len == 0) {
1283 diagfwd_reset_buffers(fwd_info, buf);
1284 diag_ws_release();
1285 return 0;
1286 }
1287
1288 if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->read_done)
1289 fwd_info->c_ops->read_done(fwd_info, buf, len);
1290 fwd_info->read_bytes += len;
1291
1292 return 0;
1293}
1294
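/*
 * The ctxt argument is the buffer number extracted from the context word,
 * not the full context. The numbering below mirrors the assignments made in
 * diagfwd_buffers_init() and diagfwd_buffers_allocate(); the encode/decode
 * macros themselves live in the driver headers:
 *
 *   1, 2            core PD ping/pong buffers
 *   (2 * i) + 3     ping buffer of user PD i (odd values >= 3)
 *   (2 * i) + 4     pong buffer of user PD i (even values >= 4)
 *
 * Completing a user-PD buffer also releases the corresponding core-PD
 * buffer once its pending length has drained to zero.
 */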
1295void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
1296{
1297 int i = 0;
1298 struct diagfwd_info *fwd_info = NULL;
1299
1300 if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
1301 return;
1302
1303 fwd_info = &peripheral_info[type][peripheral];
1304 if (!fwd_info)
1305 return;
1306
1307 if (ctxt == 1 && fwd_info->buf_1) {
1308 /* Buffer 1 for core PD is freed */
1309 atomic_set(&fwd_info->buf_1->in_busy, 0);
1310 fwd_info->cpd_len_1 = 0;
1311 } else if (ctxt == 2 && fwd_info->buf_2) {
1312 /* Buffer 2 for core PD is freed */
1313 atomic_set(&fwd_info->buf_2->in_busy, 0);
1314 fwd_info->cpd_len_2 = 0;
1315 } else if (ctxt >= 3 && (ctxt % 2)) {
1316 for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
1317 if (fwd_info->buf_upd[i][0]) {
1318 /* Buffer 1 for ith user PD is freed */
1319 atomic_set(&fwd_info->buf_upd[i][0]->in_busy, 0);
1320 fwd_info->upd_len[i][0] = 0;
1321 }
1322 if (!fwd_info->cpd_len_1)
1323 atomic_set(&fwd_info->buf_1->in_busy, 0);
1324 }
1325 } else if (ctxt >= 4 && !(ctxt % 2)) {
1326 for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
1327 if (fwd_info->buf_upd[i][1]) {
1328 /* Buffer 2 for ith user PD is freed */
1329 atomic_set(&fwd_info->buf_upd[i][1]->in_busy, 0);
1330 fwd_info->upd_len[i][1] = 0;
1331 }
1332 if (!fwd_info->cpd_len_2)
1333 atomic_set(&fwd_info->buf_2->in_busy, 0);
1334 }
1335 } else
1336 pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
1337
1338 diagfwd_queue_read(fwd_info);
1339}
1340
1341int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr)
1342{
1343
1344 int found = 0;
1345 int index = 0;
1346 unsigned long flags;
1347
1348 if (!fwd_info || !ptr)
1349 return found;
1350 spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
1351 for (index = 0; index < NUM_WRITE_BUFFERS; index++) {
1352 if (fwd_info->buf_ptr[index]->data == ptr) {
1353 atomic_set(&fwd_info->buf_ptr[index]->in_busy, 0);
1354 found = 1;
1355 break;
1356 }
1357 }
1358 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
1359 return found;
1360}
1361
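/*
 * Issue a read into whichever of the two ping-pong buffers is free, marking
 * it busy first. Peripherals that leave HDLC encoding to the apps side read
 * into data_raw; everything else reads directly into data. If neither
 * buffer is free, the read is skipped until a write completion releases one
 * and queues it again.
 */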
1362void diagfwd_channel_read(struct diagfwd_info *fwd_info)
1363{
1364 int err = 0;
1365 uint32_t read_len = 0;
1366 unsigned char *read_buf = NULL;
1367 struct diagfwd_buf_t *temp_buf = NULL;
1368
1369 if (!fwd_info) {
1370 diag_ws_release();
1371 return;
1372 }
1373
1374 if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
1375 pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n",
1376 __func__, fwd_info->peripheral, fwd_info->type,
1377 fwd_info->inited, atomic_read(&fwd_info->opened),
1378 fwd_info->ch_open);
1379 diag_ws_release();
1380 return;
1381 }
1382
1383 if (fwd_info->buf_1 && !atomic_read(&fwd_info->buf_1->in_busy)) {
1384 if (driver->feature[fwd_info->peripheral].encode_hdlc &&
1385 (fwd_info->type == TYPE_DATA ||
1386 fwd_info->type == TYPE_CMD)) {
1387 read_buf = fwd_info->buf_1->data_raw;
1388 read_len = fwd_info->buf_1->len_raw;
1389 } else {
1390 read_buf = fwd_info->buf_1->data;
1391 read_len = fwd_info->buf_1->len;
1392 }
1393 if (read_buf) {
1394 temp_buf = fwd_info->buf_1;
1395 atomic_set(&temp_buf->in_busy, 1);
1396 }
1397 } else if (fwd_info->buf_2 && !atomic_read(&fwd_info->buf_2->in_busy)) {
1398 if (driver->feature[fwd_info->peripheral].encode_hdlc &&
1399 (fwd_info->type == TYPE_DATA ||
1400 fwd_info->type == TYPE_CMD)) {
1401 read_buf = fwd_info->buf_2->data_raw;
1402 read_len = fwd_info->buf_2->len_raw;
1403 } else {
1404 read_buf = fwd_info->buf_2->data;
1405 read_len = fwd_info->buf_2->len;
1406 }
1407 if (read_buf) {
1408 temp_buf = fwd_info->buf_2;
1409 atomic_set(&temp_buf->in_busy, 1);
1410 }
1411 } else {
1412 pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
1413 __func__, fwd_info->peripheral, fwd_info->type);
1414 }
1415
1416 if (!read_buf) {
1417 diag_ws_release();
1418 return;
1419 }
1420
1421 if (!(fwd_info->p_ops && fwd_info->p_ops->read && fwd_info->ctxt))
1422 goto fail_return;
1423
1424 DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "issued a read p: %d t: %d buf: %pK\n",
1425 fwd_info->peripheral, fwd_info->type, read_buf);
1426 err = fwd_info->p_ops->read(fwd_info->ctxt, read_buf, read_len);
1427 if (err)
1428 goto fail_return;
1429
1430 return;
1431
1432fail_return:
1433 diag_ws_release();
1434 atomic_set(&temp_buf->in_busy, 0);
1435}
1436
1437static void diagfwd_queue_read(struct diagfwd_info *fwd_info)
1438{
1439 if (!fwd_info)
1440 return;
1441
1442 if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
1443 pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n",
1444 __func__, fwd_info->peripheral, fwd_info->type,
1445 fwd_info->inited, atomic_read(&fwd_info->opened),
1446 fwd_info->ch_open);
1447 return;
1448 }
1449
1450 /*
1451 * Don't queue a read on the data and command channels before receiving
1452 * the feature mask from the peripheral. We won't know which buffer to
1453 * use - HDLC or non HDLC buffer for reading.
1454 */
1455 if ((!driver->feature[fwd_info->peripheral].rcvd_feature_mask) &&
1456 (fwd_info->type != TYPE_CNTL)) {
1457 return;
1458 }
1459
1460 if (fwd_info->p_ops && fwd_info->p_ops->queue_read && fwd_info->ctxt)
1461 fwd_info->p_ops->queue_read(fwd_info->ctxt);
1462}
1463
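/*
 * Allocate the per-user-PD ping-pong buffers for peripherals exposing more
 * than one PD. The buffer numbers (2 * i) + 3 and (2 * i) + 4 encoded into
 * the context here are the same values later decoded by
 * diagfwd_write_done().
 */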
1464static int diagfwd_buffers_allocate(struct diagfwd_info *fwd_info)
1465{
1466 int i, j;
1467
1468 for (i = 0; ((fwd_info->num_pd > 1) &&
1469 (i <= (fwd_info->num_pd - 2))); i++) {
1470 for (j = 0; j < NUM_WRITE_BUFFERS; j++) {
1471 if (!fwd_info->buf_upd[i][j]) {
1472 fwd_info->buf_upd[i][j] =
1473 kzalloc(sizeof(struct diagfwd_buf_t),
1474 GFP_KERNEL);
1475 if (ZERO_OR_NULL_PTR(fwd_info->buf_upd[i][j]))
1476 return -ENOMEM;
1477 kmemleak_not_leak(fwd_info->buf_upd[i][j]);
1478 }
1479
1480 if (fwd_info->buf_upd[i][j] &&
1481 !fwd_info->buf_upd[i][j]->data) {
1482 fwd_info->buf_upd[i][j]->data =
1483 kzalloc(PERIPHERAL_BUF_SZ +
1484 APF_DIAG_PADDING,
1485 GFP_KERNEL);
1486 if (ZERO_OR_NULL_PTR(
1487 fwd_info->buf_upd[i][j]->data))
1488 return -ENOMEM;
1489 fwd_info->buf_upd[i][j]->len =
1490 PERIPHERAL_BUF_SZ;
1491 kmemleak_not_leak(
1492 fwd_info->buf_upd[i][j]->data);
1493 fwd_info->buf_upd[i][j]->ctxt =
1494 SET_BUF_CTXT(fwd_info->peripheral,
1495 fwd_info->type, ((2 * i) + (j + 3)));
1496 }
1497
1498 if (driver->supports_apps_hdlc_encoding) {
1499 if (fwd_info->buf_upd[i][j] &&
1500 !fwd_info->buf_upd[i][j]->data_raw) {
1501 fwd_info->buf_upd[i][j]->data_raw =
1502 kzalloc(PERIPHERAL_BUF_SZ +
1503 APF_DIAG_PADDING,
1504 GFP_KERNEL);
1505 if (ZERO_OR_NULL_PTR(
1506 fwd_info->buf_upd[i][j]->data_raw))
1507 return -ENOMEM;
1508 fwd_info->buf_upd[i][j]->len_raw =
1509 PERIPHERAL_BUF_SZ;
1510 kmemleak_not_leak(
1511 fwd_info->buf_upd[i][j]->data_raw);
1512 }
1513 }
1514 }
1515 }
1516 return 0;
1517}
1518
1519void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
1520{
1521 int ret = 0;
1522 unsigned char *temp_char_buf;
1523
1524 if (!fwd_info)
1525 return;
1526
1527 if (!fwd_info->inited) {
1528 pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
1529 __func__, fwd_info->peripheral, fwd_info->type);
1530 return;
1531 }
1532
1533 mutex_lock(&fwd_info->buf_mutex);
1534
1535 if (!fwd_info->buf_1) {
1536 fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
1537 GFP_KERNEL);
1538 if (ZERO_OR_NULL_PTR(fwd_info->buf_1))
1539 goto err;
1540 kmemleak_not_leak(fwd_info->buf_1);
1541 }
1542
1543 if (!fwd_info->buf_1->data) {
1544 fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
1545 APF_DIAG_PADDING,
1546 GFP_KERNEL);
1547 if (ZERO_OR_NULL_PTR(fwd_info->buf_1->data))
1548 goto err;
1549 fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
1550 kmemleak_not_leak(fwd_info->buf_1->data);
1551 fwd_info->buf_1->ctxt = SET_BUF_CTXT(fwd_info->peripheral,
1552 fwd_info->type, 1);
1553 }
1554
1555 if (fwd_info->type == TYPE_DATA) {
1556 if (!fwd_info->buf_2) {
1557 fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
1558 GFP_KERNEL);
1559 if (ZERO_OR_NULL_PTR(fwd_info->buf_2))
1560 goto err;
1561 kmemleak_not_leak(fwd_info->buf_2);
1562 }
1563
1564 if (!fwd_info->buf_2->data) {
1565 fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
1566 APF_DIAG_PADDING,
1567 GFP_KERNEL);
1568 if (ZERO_OR_NULL_PTR(fwd_info->buf_2->data))
1569 goto err;
1570 fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
1571 kmemleak_not_leak(fwd_info->buf_2->data);
1572 fwd_info->buf_2->ctxt = SET_BUF_CTXT(
1573 fwd_info->peripheral,
1574 fwd_info->type, 2);
1575 }
1576
1577 if (driver->feature[fwd_info->peripheral].untag_header)
1578 ret = diagfwd_buffers_allocate(fwd_info);
1579 if (ret)
1580 goto err;
1581
1582 if (driver->supports_apps_hdlc_encoding) {
1583 /* In support of hdlc encoding */
1584 if (!fwd_info->buf_1->data_raw) {
1585 fwd_info->buf_1->data_raw =
1586 kzalloc(PERIPHERAL_BUF_SZ +
1587 APF_DIAG_PADDING,
1588 GFP_KERNEL);
1589 temp_char_buf =
1590 fwd_info->buf_1->data_raw;
1591 if (ZERO_OR_NULL_PTR(temp_char_buf))
1592 goto err;
1593 fwd_info->buf_1->len_raw =
1594 PERIPHERAL_BUF_SZ;
1595 kmemleak_not_leak(temp_char_buf);
1596 }
1597
1598 if (!fwd_info->buf_2->data_raw) {
1599 fwd_info->buf_2->data_raw =
1600 kzalloc(PERIPHERAL_BUF_SZ +
1601 APF_DIAG_PADDING,
1602 GFP_KERNEL);
1603 temp_char_buf =
1604 fwd_info->buf_2->data_raw;
1605 if (ZERO_OR_NULL_PTR(temp_char_buf))
1606 goto err;
1607 fwd_info->buf_2->len_raw =
1608 PERIPHERAL_BUF_SZ;
1609 kmemleak_not_leak(temp_char_buf);
1610 }
1611 }
1612 }
1613
1614 if (fwd_info->type == TYPE_CMD &&
1615 driver->supports_apps_hdlc_encoding) {
1616 /* In support of hdlc encoding */
1617 if (!fwd_info->buf_1->data_raw) {
1618 fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
1619 APF_DIAG_PADDING,
1620 GFP_KERNEL);
1621 temp_char_buf =
1622 fwd_info->buf_1->data_raw;
1623 if (ZERO_OR_NULL_PTR(temp_char_buf))
1624 goto err;
1625 fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
1626 kmemleak_not_leak(temp_char_buf);
1627 }
1628 }
1629
1630 mutex_unlock(&fwd_info->buf_mutex);
1631 return;
1632
1633err:
1634 mutex_unlock(&fwd_info->buf_mutex);
1635 diagfwd_buffers_exit(fwd_info);
1636 return;
1637}
1638
1639static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
1640{
1641 int i = 0;
1642
1643 if (!fwd_info)
1644 return;
1645
1646 mutex_lock(&fwd_info->buf_mutex);
1647 if (fwd_info->buf_1) {
1648 kfree(fwd_info->buf_1->data);
1649 fwd_info->buf_1->data = NULL;
1650 kfree(fwd_info->buf_1->data_raw);
1651 fwd_info->buf_1->data_raw = NULL;
1652 kfree(fwd_info->buf_1);
1653 fwd_info->buf_1 = NULL;
1654 }
1655 if (fwd_info->buf_2) {
1656 kfree(fwd_info->buf_2->data);
1657 fwd_info->buf_2->data = NULL;
1658 kfree(fwd_info->buf_2->data_raw);
1659 fwd_info->buf_2->data_raw = NULL;
1660 kfree(fwd_info->buf_2);
1661 fwd_info->buf_2 = NULL;
1662 }
1663 for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
1664 if (fwd_info->buf_upd[i][0]) {
1665 kfree(fwd_info->buf_upd[i][0]->data);
1666 fwd_info->buf_upd[i][0]->data = NULL;
1667 kfree(fwd_info->buf_upd[i][0]->data_raw);
1668 fwd_info->buf_upd[i][0]->data_raw = NULL;
1669 kfree(fwd_info->buf_upd[i][0]);
1670 fwd_info->buf_upd[i][0] = NULL;
1671 }
1672 if (fwd_info->buf_upd[i][1]) {
1673 kfree(fwd_info->buf_upd[i][1]->data);
1674 fwd_info->buf_upd[i][1]->data = NULL;
1675 kfree(fwd_info->buf_upd[i][1]->data_raw);
1676 fwd_info->buf_upd[i][1]->data_raw = NULL;
1677 kfree(fwd_info->buf_upd[i][1]);
1678 fwd_info->buf_upd[i][1] = NULL;
1679 }
1680 }
1681 mutex_unlock(&fwd_info->buf_mutex);
1682}
1683
1684void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
1685{
1686 unsigned long flags;
1687 int i;
1688
1689 if (!fwd_info)
1690 return;
1691
1692 if (!fwd_info->inited) {
1693 pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
1694 __func__, fwd_info->peripheral, fwd_info->type);
1695 return;
1696 }
1697
1698 spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
1699 for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
1700 if (!fwd_info->buf_ptr[i])
1701 fwd_info->buf_ptr[i] =
1702 kzalloc(sizeof(struct diagfwd_buf_t),
1703 GFP_ATOMIC);
1704 if (!fwd_info->buf_ptr[i])
1705 goto err;
1706 kmemleak_not_leak(fwd_info->buf_ptr[i]);
1707 if (!fwd_info->buf_ptr[i]->data) {
1708 fwd_info->buf_ptr[i]->data = kzalloc(PERIPHERAL_BUF_SZ,
1709 GFP_ATOMIC);
1710 if (!fwd_info->buf_ptr[i]->data)
1711 goto err;
1712 fwd_info->buf_ptr[i]->len = PERIPHERAL_BUF_SZ;
1713 kmemleak_not_leak(fwd_info->buf_ptr[i]->data);
1714 }
1715 }
1716 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
1717 return;
1718
1719err:
1720 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
1721 pr_err("diag:unable to allocate write buffers\n");
1722 diagfwd_write_buffers_exit(fwd_info);
1723
1724}
1725
1726static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info)
1727{
1728 unsigned long flags;
1729 int i;
1730
1731 if (!fwd_info)
1732 return;
1733
1734 spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
1735 for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
1736 if (fwd_info->buf_ptr[i]) {
1737 kfree(fwd_info->buf_ptr[i]->data);
1738 fwd_info->buf_ptr[i]->data = NULL;
1739 kfree(fwd_info->buf_ptr[i]);
1740 fwd_info->buf_ptr[i] = NULL;
1741 }
1742 }
1743 spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
1744}