/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/diagchar.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/reboot.h>
#include <asm/current.h>
#include <soc/qcom/restart.h>
#ifdef CONFIG_DIAG_OVER_USB
#include <linux/usb/usbdiag.h>
#endif
#include "diagchar_hdlc.h"
#include "diagmem.h"
#include "diagchar.h"
#include "diagfwd.h"
#include "diagfwd_cntl.h"
#include "diag_dci.h"
#include "diag_masks.h"
#include "diagfwd_bridge.h"
#include "diagfwd_peripheral.h"
#include "diag_ipc_logging.h"

static struct timer_list dci_drain_timer;
static int dci_timer_in_progress;
static struct work_struct dci_data_drain_work;

struct diag_dci_partial_pkt_t partial_pkt;

unsigned int dci_max_reg = 100;
unsigned int dci_max_clients = 10;
struct mutex dci_log_mask_mutex;
struct mutex dci_event_mask_mutex;

/*
 * DCI_HANDSHAKE_RETRY_TIME: Time to wait (in microseconds) before checking the
 * connection status again.
 *
 * DCI_HANDSHAKE_WAIT_TIME: Timeout (in milliseconds) to check for dci
 * connection status
 */
#define DCI_HANDSHAKE_RETRY_TIME 500000
#define DCI_HANDSHAKE_WAIT_TIME 200

spinlock_t ws_lock;
unsigned long ws_lock_flags;

struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC] = {
	{
		.ctx = 0,
		.send_log_mask = diag_send_dci_log_mask,
		.send_event_mask = diag_send_dci_event_mask,
		.peripheral_status = 0,
		.mempool = 0,
	},
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	{
		.ctx = DIAGFWD_MDM_DCI,
		.send_log_mask = diag_send_dci_log_mask_remote,
		.send_event_mask = diag_send_dci_event_mask_remote,
		.peripheral_status = 0,
		.mempool = POOL_TYPE_MDM_DCI_WRITE,
	}
#endif
};

struct dci_channel_status_t dci_channel_status[NUM_DCI_PROC] = {
	{
		.id = 0,
		.open = 0,
		.retry_count = 0
	},
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	{
		.id = DIAGFWD_MDM_DCI,
		.open = 0,
		.retry_count = 0
	}
#endif
};

/* Number of milliseconds anticipated to process the DCI data */
#define DCI_WAKEUP_TIMEOUT 1

#define DCI_CAN_ADD_BUF_TO_LIST(buf) \
	(buf && buf->data && !buf->in_busy && buf->data_len > 0) \

#ifdef CONFIG_DEBUG_FS
struct diag_dci_data_info *dci_traffic;
struct mutex dci_stat_mutex;
void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
			     uint8_t peripheral, uint8_t proc)
{
	static int curr_dci_data;
	static unsigned long iteration;
	struct diag_dci_data_info *temp_data = dci_traffic;

	if (!temp_data)
		return;
	mutex_lock(&dci_stat_mutex);
	if (curr_dci_data == DIAG_DCI_DEBUG_CNT)
		curr_dci_data = 0;
	temp_data += curr_dci_data;
	temp_data->iteration = iteration + 1;
	temp_data->data_size = read_bytes;
	temp_data->peripheral = peripheral;
	temp_data->ch_type = ch_type;
	temp_data->proc = proc;
	diag_get_timestamp(temp_data->time_stamp);
	curr_dci_data++;
	iteration++;
	mutex_unlock(&dci_stat_mutex);
}
#else
void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
			     uint8_t peripheral, uint8_t proc) { }
#endif

static int check_peripheral_dci_support(int peripheral_id, int dci_proc_id)
{
	int dci_peripheral_list = 0;

	if (dci_proc_id < 0 || dci_proc_id >= NUM_DCI_PROC) {
		pr_err("diag:In %s,not a supported DCI proc id\n", __func__);
		return 0;
	}
	if (peripheral_id < 0 || peripheral_id >= NUM_PERIPHERALS) {
		pr_err("diag:In %s,not a valid peripheral id\n", __func__);
		return 0;
	}
	dci_peripheral_list = dci_ops_tbl[dci_proc_id].peripheral_status;

	if (dci_peripheral_list <= 0 || dci_peripheral_list > DIAG_CON_ALL) {
		pr_err("diag:In %s,not a valid dci peripheral mask\n",
		       __func__);
		return 0;
	}
	/* Remove APSS bit mask information */
	dci_peripheral_list = dci_peripheral_list >> 1;

	if ((1 << peripheral_id) & (dci_peripheral_list))
		return 1;
	else
		return 0;
}

static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
{
	unsigned char *temp = mask;
	uint8_t i;

	if (!mask)
		return;

	/* create hard coded table for log mask with 16 categories */
	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
		*temp = i;
		temp++;
		*temp = dirty ? 1 : 0;
		temp++;
		memset(temp, 0, DCI_MAX_ITEMS_PER_LOG_CODE);
		temp += DCI_MAX_ITEMS_PER_LOG_CODE;
	}
}
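
/*
 * Mask layout note: each of the DCI_MAX_LOG_CODES entries created above is
 * 2 header bytes (equip ID, dirty flag) followed by
 * DCI_MAX_ITEMS_PER_LOG_CODE mask bytes. The 514-byte stride and the
 * "item_num/8 + 2" byte offset used by diag_dci_query_log_mask() further
 * down are consistent with DCI_MAX_ITEMS_PER_LOG_CODE being 512, i.e. one
 * bit for each of 4096 items per equipment ID.
 */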

static void create_dci_event_mask_tbl(unsigned char *tbl_buf)
{
	if (tbl_buf)
		memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
}

void dci_drain_data(unsigned long data)
{
	queue_work(driver->diag_dci_wq, &dci_data_drain_work);
}

static void dci_check_drain_timer(void)
{
	if (!dci_timer_in_progress) {
		dci_timer_in_progress = 1;
		mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(200));
	}
}

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
static void dci_handshake_work_fn(struct work_struct *work)
{
	int err = 0;
	int max_retries = 5;

	struct dci_channel_status_t *status = container_of(work,
						struct dci_channel_status_t,
						handshake_work);

	if (status->open) {
		pr_debug("diag: In %s, remote dci channel is open, index: %d\n",
			 __func__, status->id);
		return;
	}

	if (status->retry_count == max_retries) {
		status->retry_count = 0;
		pr_info("diag: dci channel connection handshake timed out, id: %d\n",
			status->id);
		err = diagfwd_bridge_close(TOKEN_TO_BRIDGE(status->id));
		if (err) {
			pr_err("diag: In %s, unable to close dci channel id: %d, err: %d\n",
			       __func__, status->id, err);
		}
		return;
	}
	status->retry_count++;
	/*
	 * Sleep for some time before checking the connection status again.
	 * The value should be large enough to cover the round-trip time of
	 * a small packet to the remote processor.
	 */
	usleep_range(DCI_HANDSHAKE_RETRY_TIME, DCI_HANDSHAKE_RETRY_TIME + 100);
	mod_timer(&status->wait_time,
		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
}

static void dci_chk_handshake(unsigned long data)
{
	int index = (int)data;

	if (index < 0 || index >= NUM_DCI_PROC)
		return;

	queue_work(driver->diag_dci_wq,
		   &dci_channel_status[index].handshake_work);
}
#endif
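
/*
 * Handshake flow in brief: diag_dci_send_handshake_pkt() (further down)
 * writes a DCI_CONTROL_PKT_CODE packet carrying DCI_MAGIC to the bridge and
 * arms status->wait_time. Until a DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT reply
 * marks the channel open, each timer expiry queues dci_handshake_work_fn(),
 * which re-arms the timer up to max_retries times before giving up and
 * closing the bridge channel.
 */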

static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
{
	if (!buffer || buffer->data)
		return -EINVAL;

	switch (type) {
	case DCI_BUF_PRIMARY:
		buffer->capacity = IN_BUF_SIZE;
		buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
		if (!buffer->data)
			return -ENOMEM;
		break;
	case DCI_BUF_SECONDARY:
		buffer->data = NULL;
		buffer->capacity = IN_BUF_SIZE;
		break;
	case DCI_BUF_CMD:
		buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
		buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
		if (!buffer->data)
			return -ENOMEM;
		break;
	default:
		pr_err("diag: In %s, unknown type %d", __func__, type);
		return -EINVAL;
	}

	buffer->data_len = 0;
	buffer->in_busy = 0;
	buffer->buf_type = type;
	mutex_init(&buffer->data_mutex);

	return 0;
}

static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
{
	if (!buf)
		return -EINVAL;

	/* Return 1 if the buffer is not busy and can hold new data */
	if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
		return 1;

	return 0;
}

static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
				   struct diag_dci_buffer_t *buf)
{
	if (!buf || !client || !buf->data)
		return;

	if (buf->in_list || buf->data_len == 0)
		return;

	mutex_lock(&client->write_buf_mutex);
	list_add_tail(&buf->buf_track, &client->list_write_buf);
	/*
	 * In the case of DCI, there can be multiple packets in one read. To
	 * calculate the wakeup source reference count, we must account for each
	 * packet in a single read.
	 */
	diag_ws_on_read(DIAG_WS_DCI, buf->data_len);
	mutex_lock(&buf->data_mutex);
	buf->in_busy = 1;
	buf->in_list = 1;
	mutex_unlock(&buf->data_mutex);
	mutex_unlock(&client->write_buf_mutex);
}

static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
			       int data_source, int len)
{
	struct diag_dci_buffer_t *buf_primary = NULL;
	struct diag_dci_buffer_t *buf_temp = NULL;
	struct diag_dci_buffer_t *curr = NULL;

	if (!client)
		return -EINVAL;
	if (len < 0 || len > IN_BUF_SIZE)
		return -EINVAL;

	curr = client->buffers[data_source].buf_curr;
	buf_primary = client->buffers[data_source].buf_primary;

	if (curr && diag_dci_check_buffer(curr, len) == 1)
		return 0;

	dci_add_buffer_to_list(client, curr);
	client->buffers[data_source].buf_curr = NULL;

	if (diag_dci_check_buffer(buf_primary, len) == 1) {
		client->buffers[data_source].buf_curr = buf_primary;
		return 0;
	}

	buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
	if (!buf_temp)
		return -EIO;

	if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
		buf_temp->data = diagmem_alloc(driver, IN_BUF_SIZE,
					       POOL_TYPE_DCI);
		if (!buf_temp->data) {
			kfree(buf_temp);
			buf_temp = NULL;
			return -ENOMEM;
		}
		client->buffers[data_source].buf_curr = buf_temp;
		return 0;
	}

	kfree(buf_temp);
	buf_temp = NULL;
	return -EIO;
}
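
/*
 * Buffer selection order used above: keep filling the client's current
 * buffer while it has room for 'len' more bytes, fall back to the
 * preallocated primary buffer, and only then carve a secondary buffer out
 * of the DCI mempool. A buffer that can no longer take the incoming packet
 * is moved to the client's write list so a reader can drain it.
 */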

void diag_dci_wakeup_clients(void)
{
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	mutex_lock(&driver->dci_mutex);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);

		/*
		 * Don't wake up the client when there is no pending buffer to
		 * write or when it is writing to user space
		 */
		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
			mutex_lock(&entry->write_buf_mutex);
			entry->in_service = 1;
			mutex_unlock(&entry->write_buf_mutex);
			diag_update_sleeping_process(entry->client->tgid,
						     DCI_DATA_TYPE);
		}
	}
	mutex_unlock(&driver->dci_mutex);
}

void dci_data_drain_work_fn(struct work_struct *work)
{
	int i;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
	struct diag_dci_buffer_t *buf_temp = NULL;

	mutex_lock(&driver->dci_mutex);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		for (i = 0; i < entry->num_buffers; i++) {
			proc_buf = &entry->buffers[i];

			mutex_lock(&proc_buf->buf_mutex);
			buf_temp = proc_buf->buf_primary;
			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
				dci_add_buffer_to_list(entry, buf_temp);

			buf_temp = proc_buf->buf_cmd;
			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
				dci_add_buffer_to_list(entry, buf_temp);

			buf_temp = proc_buf->buf_curr;
			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
				dci_add_buffer_to_list(entry, buf_temp);
				proc_buf->buf_curr = NULL;
			}
			mutex_unlock(&proc_buf->buf_mutex);
		}
		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
			mutex_lock(&entry->write_buf_mutex);
			entry->in_service = 1;
			mutex_unlock(&entry->write_buf_mutex);
			diag_update_sleeping_process(entry->client->tgid,
						     DCI_DATA_TYPE);
		}
	}
	mutex_unlock(&driver->dci_mutex);
	dci_timer_in_progress = 0;
}

static int diag_process_single_dci_pkt(unsigned char *buf, int len,
				       int data_source, int token)
{
	uint8_t cmd_code = 0;

	if (!buf || len < 0) {
		pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
		       __func__, buf, len);
		return -EIO;
	}

	cmd_code = *(uint8_t *)buf;

	switch (cmd_code) {
	case LOG_CMD_CODE:
		extract_dci_log(buf, len, data_source, token, NULL);
		break;
	case EVENT_CMD_CODE:
		extract_dci_events(buf, len, data_source, token, NULL);
		break;
	case EXT_HDR_CMD_CODE:
		extract_dci_ext_pkt(buf, len, data_source, token);
		break;
	case DCI_PKT_RSP_CODE:
	case DCI_DELAYED_RSP_CODE:
		extract_dci_pkt_rsp(buf, len, data_source, token);
		break;
	case DCI_CONTROL_PKT_CODE:
		extract_dci_ctrl_pkt(buf, len, token);
		break;
	default:
		pr_err("diag: Unable to process single DCI packet, cmd_code: %d, data_source: %d",
		       cmd_code, data_source);
		return -EINVAL;
	}

	return 0;
}
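
/*
 * Dispatch summary: the first payload byte selects the parser.
 * LOG_CMD_CODE and EVENT_CMD_CODE feed the per-client log/event mask
 * filters, EXT_HDR_CMD_CODE strips an extended header and re-enters those
 * same parsers, DCI_PKT_RSP_CODE/DCI_DELAYED_RSP_CODE route command
 * responses back to the requesting client, and DCI_CONTROL_PKT_CODE
 * handles channel status and handshake control messages.
 */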

/* Process the data read from apps userspace client */
void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
{
	int err = 0;

	if (!buf) {
		pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
		return;
	}

	if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
	    && data_type != DCI_PKT_TYPE) {
		pr_err("diag: In %s, unsupported data_type: 0x%x\n",
		       __func__, (unsigned int)data_type);
		return;
	}

	err = diag_process_single_dci_pkt(buf, recd_bytes, APPS_DATA,
					  DCI_LOCAL_PROC);
	if (err)
		return;

	/* wake up all sleeping DCI clients which have some data */
	diag_dci_wakeup_clients();
	dci_check_drain_timer();
}

void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes)
{
	int read_bytes = 0, err = 0;
	uint16_t dci_pkt_len;
	struct diag_dci_header_t *header = NULL;
	int header_len = sizeof(struct diag_dci_header_t);
	int token = BRIDGE_TO_TOKEN(index);

	if (!buf)
		return;

	diag_dci_record_traffic(recd_bytes, 0, 0, token);

	if (!partial_pkt.processing)
		goto start;

	if (partial_pkt.remaining > recd_bytes) {
		if ((partial_pkt.read_len + recd_bytes) >
		    (MAX_DCI_PACKET_SZ)) {
			pr_err("diag: Invalid length %d, %d received in %s\n",
			       partial_pkt.read_len, recd_bytes, __func__);
			goto end;
		}
		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
		       recd_bytes);
		read_bytes += recd_bytes;
		buf += read_bytes;
		partial_pkt.read_len += recd_bytes;
		partial_pkt.remaining -= recd_bytes;
	} else {
		if ((partial_pkt.read_len + partial_pkt.remaining) >
		    (MAX_DCI_PACKET_SZ)) {
			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
			       partial_pkt.read_len,
			       partial_pkt.remaining, __func__);
			goto end;
		}
		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
		       partial_pkt.remaining);
		read_bytes += partial_pkt.remaining;
		buf += read_bytes;
		partial_pkt.read_len += partial_pkt.remaining;
		partial_pkt.remaining = 0;
	}

	if (partial_pkt.remaining == 0) {
		/*
		 * Retrieve from the DCI control packet after the header = start
		 * (1 byte) + version (1 byte) + length (2 bytes)
		 */
		diag_process_single_dci_pkt(partial_pkt.data + 4,
					    partial_pkt.read_len - header_len,
					    DCI_REMOTE_DATA, token);
		partial_pkt.read_len = 0;
		partial_pkt.total_len = 0;
		partial_pkt.processing = 0;
		goto start;
	}
	goto end;

start:
	while (read_bytes < recd_bytes) {
		header = (struct diag_dci_header_t *)buf;
		dci_pkt_len = header->length;

		if (header->cmd_code != DCI_CONTROL_PKT_CODE &&
		    driver->num_dci_client == 0) {
			read_bytes += header_len + dci_pkt_len;
			buf += header_len + dci_pkt_len;
			continue;
		}

		if (dci_pkt_len + header_len > MAX_DCI_PACKET_SZ) {
			pr_err("diag: Invalid length in the dci packet field %d\n",
			       dci_pkt_len);
			break;
		}

		if ((dci_pkt_len + header_len) > (recd_bytes - read_bytes)) {
			partial_pkt.read_len = recd_bytes - read_bytes;
			partial_pkt.total_len = dci_pkt_len + header_len;
			partial_pkt.remaining = partial_pkt.total_len -
						partial_pkt.read_len;
			partial_pkt.processing = 1;
			memcpy(partial_pkt.data, buf, partial_pkt.read_len);
			break;
		}
		/*
		 * Retrieve from the DCI control packet after the header = start
		 * (1 byte) + version (1 byte) + length (2 bytes)
		 */
		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
						  DCI_REMOTE_DATA,
						  DCI_MDM_PROC);
		if (err)
			break;
		read_bytes += header_len + dci_pkt_len;
		buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
	}
end:
	if (err)
		return;
	/* wake up all sleeping DCI clients which have some data */
	diag_dci_wakeup_clients();
	dci_check_drain_timer();
}
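
/*
 * Remote wire format handled above:
 *
 *   [CONTROL_CHAR][version][length (2 bytes)][cmd code][payload ...]
 *
 * The parser is handed buf + 4, i.e. everything from the cmd code onwards,
 * and a packet that straddles a read boundary is stitched back together in
 * 'partial_pkt' before being parsed.
 */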

/* Process the data read from the peripheral dci channels */
void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
				      int recd_bytes)
{
	int read_bytes = 0, err = 0;
	uint16_t dci_pkt_len;
	struct diag_dci_pkt_header_t *header = NULL;
	uint8_t recv_pkt_cmd_code;

	if (!buf || !p_info)
		return;

	/*
	 * Release wakeup source when there are no more clients to
	 * process DCI data
	 */
	if (driver->num_dci_client == 0) {
		diag_ws_reset(DIAG_WS_DCI);
		return;
	}

	diag_dci_record_traffic(recd_bytes, p_info->type, p_info->peripheral,
				DCI_LOCAL_PROC);
	while (read_bytes < recd_bytes) {
		header = (struct diag_dci_pkt_header_t *)buf;
		recv_pkt_cmd_code = header->pkt_code;
		dci_pkt_len = header->len;

		/*
		 * Check that the current packet fits within the remaining
		 * bytes in the received buffer. This includes space for the
		 * Start byte (1), Version byte (1), length bytes (2) and
		 * End byte (1)
		 */
		if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
			pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
			       __func__, recd_bytes, dci_pkt_len);
			diag_ws_release();
			return;
		}
		/*
		 * Retrieve from the DCI control packet after the header = start
		 * (1 byte) + version (1 byte) + length (2 bytes)
		 */
		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
						  (int)p_info->peripheral,
						  DCI_LOCAL_PROC);
		if (err) {
			diag_ws_release();
			break;
		}
		read_bytes += 5 + dci_pkt_len;
		buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
	}

	if (err)
		return;
	/* wake up all sleeping DCI clients which have some data */
	diag_dci_wakeup_clients();
	dci_check_drain_timer();
}

int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
			    uint16_t log_code)
{
	uint16_t item_num;
	uint8_t equip_id, *log_mask_ptr, byte_mask;
	int byte_index, offset;

	if (!entry) {
		pr_err("diag: In %s, invalid client entry\n", __func__);
		return 0;
	}

	equip_id = LOG_GET_EQUIP_ID(log_code);
	item_num = LOG_GET_ITEM_NUM(log_code);
	byte_index = item_num/8 + 2;
	byte_mask = 0x01 << (item_num % 8);
	offset = equip_id * 514;

	if (offset + byte_index > DCI_LOG_MASK_SIZE) {
		pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
		       __func__, offset, log_code, byte_index);
		return 0;
	}

	log_mask_ptr = entry->dci_log_mask;
	log_mask_ptr = log_mask_ptr + offset + byte_index;
	return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
}
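
/*
 * Worked example for the lookup above, assuming the usual diag encoding
 * where the top nibble of a log code is the equipment ID and the low 12
 * bits are the item number: log_code 0x11A2 gives equip_id 1 and item_num
 * 0x1A2 (418), so offset = 1 * 514, byte_index = 418/8 + 2 = 54 and
 * byte_mask = 1 << (418 % 8) = 0x04; the client's bit therefore lives in
 * dci_log_mask[514 + 54].
 */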

int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
			      uint16_t event_id)
{
	uint8_t *event_mask_ptr, byte_mask;
	int byte_index, bit_index;

	if (!entry) {
		pr_err("diag: In %s, invalid client entry\n", __func__);
		return 0;
	}

	byte_index = event_id/8;
	bit_index = event_id % 8;
	byte_mask = 0x1 << bit_index;

	if (byte_index > DCI_EVENT_MASK_SIZE) {
		pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
		       __func__, event_id, byte_index);
		return 0;
	}

	event_mask_ptr = entry->dci_event_mask;
	event_mask_ptr = event_mask_ptr + byte_index;
	return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
}

static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
{
	if (!header)
		return -ENOMEM;

	switch (header->cmd_code) {
	case 0x7d: /* Msg Mask Configuration */
	case 0x73: /* Log Mask Configuration */
	case 0x81: /* Event Mask Configuration */
	case 0x82: /* Event Mask Change */
	case 0x60: /* Event Mask Toggle */
		return 1;
	}

	if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
		switch (header->subsys_cmd_code) {
		case 0x60: /* Extended Event Mask Config */
		case 0x61: /* Extended Msg Mask Config */
		case 0x62: /* Extended Log Mask Config */
		case 0x20C: /* Set current Preset ID */
		case 0x20D: /* Get current Preset ID */
		case 0x218: /* HDLC Disabled Command */
			return 1;
		}
	}

	return 0;
}

static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
								 int client_id)
{
	struct dci_pkt_req_entry_t *entry = NULL;

	entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
	if (!entry)
		return NULL;

	driver->dci_tag++;
	entry->client_id = client_id;
	entry->uid = uid;
	entry->tag = driver->dci_tag;
	pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
		 entry->client_id, entry->uid, entry->tag);
	list_add_tail(&entry->track, &driver->dci_req_list);

	return entry;
}

static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
{
	struct list_head *start, *temp;
	struct dci_pkt_req_entry_t *entry = NULL;

	list_for_each_safe(start, temp, &driver->dci_req_list) {
		entry = list_entry(start, struct dci_pkt_req_entry_t, track);
		if (entry->tag == tag)
			return entry;
	}
	return NULL;
}

static int diag_dci_remove_req_entry(unsigned char *buf, int len,
				     struct dci_pkt_req_entry_t *entry)
{
	uint16_t rsp_count = 0, delayed_rsp_id = 0;

	if (!buf || len <= 0 || !entry) {
		pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
		       __func__, buf, len, entry);
		return -EIO;
	}

	/* It is an immediate response, delete it from the table */
	if (*buf != 0x80) {
		list_del(&entry->track);
		kfree(entry);
		entry = NULL;
		return 1;
	}

	/* It is a delayed response. Check if the length is valid */
	if (len < MIN_DELAYED_RSP_LEN) {
		pr_err("diag: Invalid delayed rsp packet length %d\n", len);
		return -EINVAL;
	}

	/*
	 * If the delayed response id field (uint16_t at byte 8) is 0 then
	 * there is only one response and we can remove the request entry.
	 */
	delayed_rsp_id = *(uint16_t *)(buf + 8);
	if (delayed_rsp_id == 0) {
		list_del(&entry->track);
		kfree(entry);
		entry = NULL;
		return 1;
	}

	/*
	 * Check the response count field (uint16 at byte 10). The request
	 * entry can be deleted if it is the last response in the sequence.
	 * It is the last response in the sequence if the response count
	 * is 1 or if the sign bit gets dropped.
	 */
	rsp_count = *(uint16_t *)(buf + 10);
	if (rsp_count > 0 && rsp_count < 0x1000) {
		list_del(&entry->track);
		kfree(entry);
		entry = NULL;
		return 1;
	}

	return 0;
}

static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
{
	struct diag_ctrl_dci_status *header = NULL;
	unsigned char *temp = buf;
	uint32_t read_len = 0;
	uint8_t i;
	int peripheral_mask, status;

	if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
		pr_err("diag: In %s, invalid buf %pK or length: %d\n",
		       __func__, buf, len);
		return;
	}

	if (!VALID_DCI_TOKEN(token)) {
		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
		return;
	}

	header = (struct diag_ctrl_dci_status *)temp;
	temp += sizeof(struct diag_ctrl_dci_status);
	read_len += sizeof(struct diag_ctrl_dci_status);

	for (i = 0; i < header->count; i++) {
		if (read_len > (len - 2)) {
			pr_err("diag: In %s, Invalid length len: %d\n",
			       __func__, len);
			return;
		}

		switch (*(uint8_t *)temp) {
		case PERIPHERAL_MODEM:
			peripheral_mask = DIAG_CON_MPSS;
			break;
		case PERIPHERAL_LPASS:
			peripheral_mask = DIAG_CON_LPASS;
			break;
		case PERIPHERAL_WCNSS:
			peripheral_mask = DIAG_CON_WCNSS;
			break;
		case PERIPHERAL_SENSORS:
			peripheral_mask = DIAG_CON_SENSORS;
			break;
		default:
			pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
			       __func__, *(uint8_t *)temp);
			return;
		}
		temp += sizeof(uint8_t);
		read_len += sizeof(uint8_t);

		status = (*(uint8_t *)temp) ? DIAG_STATUS_OPEN :
					      DIAG_STATUS_CLOSED;
		temp += sizeof(uint8_t);
		read_len += sizeof(uint8_t);
		diag_dci_notify_client(peripheral_mask, status, token);
	}
}

static void dci_process_ctrl_handshake_pkt(unsigned char *buf, int len,
					   int token)
{
	struct diag_ctrl_dci_handshake_pkt *header = NULL;
	unsigned char *temp = buf;
	int err = 0;

	if (!buf || (len < sizeof(struct diag_ctrl_dci_handshake_pkt)))
		return;

	if (!VALID_DCI_TOKEN(token))
		return;

	header = (struct diag_ctrl_dci_handshake_pkt *)temp;
	if (header->magic == DCI_MAGIC) {
		dci_channel_status[token].open = 1;
		err = dci_ops_tbl[token].send_log_mask(token);
		if (err) {
			pr_err("diag: In %s, unable to send log mask to token: %d, err: %d\n",
			       __func__, token, err);
		}
		err = dci_ops_tbl[token].send_event_mask(token);
		if (err) {
			pr_err("diag: In %s, unable to send event mask to token: %d, err: %d\n",
			       __func__, token, err);
		}
	}
}

void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token)
{
	unsigned char *temp = buf;
	uint32_t ctrl_pkt_id;

	diag_ws_on_read(DIAG_WS_DCI, len);
	if (!buf) {
		pr_err("diag: Invalid buffer in %s\n", __func__);
		goto err;
	}

	if (len < (sizeof(uint8_t) + sizeof(uint32_t))) {
		pr_err("diag: In %s, invalid length %d\n", __func__, len);
		goto err;
	}

	/* Skip the Control packet command code */
	temp += sizeof(uint8_t);
	len -= sizeof(uint8_t);
	ctrl_pkt_id = *(uint32_t *)temp;
	switch (ctrl_pkt_id) {
	case DIAG_CTRL_MSG_DCI_CONNECTION_STATUS:
		dci_process_ctrl_status(temp, len, token);
		break;
	case DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT:
		dci_process_ctrl_handshake_pkt(temp, len, token);
		break;
	default:
		pr_debug("diag: In %s, unknown control pkt %d\n",
			 __func__, ctrl_pkt_id);
		break;
	}

err:
	/*
	 * DCI control packets are not consumed by the clients. Mimic client
	 * consumption by setting and clearing the wakeup source copy_count
	 * explicitly.
	 */
	diag_ws_on_copy_fail(DIAG_WS_DCI);
}

void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
			 int token)
{
	int tag;
	struct diag_dci_client_tbl *entry = NULL;
	void *temp_buf = NULL;
	uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
	uint32_t rsp_len = 0;
	struct diag_dci_buffer_t *rsp_buf = NULL;
	struct dci_pkt_req_entry_t *req_entry = NULL;
	unsigned char *temp = buf;
	int save_req_uid = 0;
	struct diag_dci_pkt_rsp_header_t pkt_rsp_header;

	if (!buf) {
		pr_err("diag: Invalid pointer in %s\n", __func__);
		return;
	}
	dci_cmd_code = *(uint8_t *)(temp);
	if (dci_cmd_code == DCI_PKT_RSP_CODE) {
		cmd_code_len = sizeof(uint8_t);
	} else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
		cmd_code_len = sizeof(uint32_t);
	} else {
		pr_err("diag: In %s, invalid command code %d\n", __func__,
		       dci_cmd_code);
		return;
	}
	temp += cmd_code_len;
	tag = *(int *)temp;
	temp += sizeof(int);

	/*
	 * The size of the response is (total length) - (length of the command
	 * code + the tag (int))
	 */
	rsp_len = len - (cmd_code_len + sizeof(int));
	if ((rsp_len == 0) || (rsp_len > (len - 5))) {
		pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d",
		       __func__, len, rsp_len);
		return;
	}

	mutex_lock(&driver->dci_mutex);
	req_entry = diag_dci_get_request_entry(tag);
	if (!req_entry) {
		pr_err_ratelimited("diag: No matching client for DCI data\n");
		mutex_unlock(&driver->dci_mutex);
		return;
	}

	entry = diag_dci_get_client_entry(req_entry->client_id);
	if (!entry) {
		pr_err("diag: In %s, couldn't find client entry, id:%d\n",
		       __func__, req_entry->client_id);
		mutex_unlock(&driver->dci_mutex);
		return;
	}

	save_req_uid = req_entry->uid;
	/* Remove the headers and send only the response to this function */
	delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
	if (delete_flag < 0) {
		mutex_unlock(&driver->dci_mutex);
		return;
	}

	mutex_lock(&entry->buffers[data_source].buf_mutex);
	rsp_buf = entry->buffers[data_source].buf_cmd;

	mutex_lock(&rsp_buf->data_mutex);
	/*
	 * Check if we can fit the data in the rsp buffer. The total length of
	 * the rsp is the rsp length (write_len) + DCI_PKT_RSP_TYPE header (int)
	 * + field for length (int) + delete_flag (uint8_t)
	 */
	if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
		pr_alert("diag: create capacity for pkt rsp\n");
		rsp_buf->capacity += 9 + rsp_len;
		temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
				    GFP_KERNEL);
		if (!temp_buf) {
			pr_err("diag: DCI realloc failed\n");
			mutex_unlock(&rsp_buf->data_mutex);
			mutex_unlock(&entry->buffers[data_source].buf_mutex);
			mutex_unlock(&driver->dci_mutex);
			return;
		}
		rsp_buf->data = temp_buf;
	}

	/* Fill in packet response header information */
	pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
	/* Packet Length = Response Length + Length of uid field (int) */
	pkt_rsp_header.length = rsp_len + sizeof(int);
	pkt_rsp_header.delete_flag = delete_flag;
	pkt_rsp_header.uid = save_req_uid;
	memcpy(rsp_buf->data + rsp_buf->data_len, &pkt_rsp_header,
	       sizeof(struct diag_dci_pkt_rsp_header_t));
	rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
	memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
	rsp_buf->data_len += rsp_len;
	rsp_buf->data_source = data_source;

	mutex_unlock(&rsp_buf->data_mutex);

	/*
	 * Add directly to the list for writing responses to the
	 * userspace as these shouldn't be buffered and shouldn't wait
	 * for log and event buffers to be full
	 */
	dci_add_buffer_to_list(entry, rsp_buf);
	mutex_unlock(&entry->buffers[data_source].buf_mutex);
	mutex_unlock(&driver->dci_mutex);
}

static void copy_ext_hdr(struct diag_dci_buffer_t *data_buffer, void *ext_hdr)
{
	if (!data_buffer) {
		pr_err("diag: In %s, data buffer is NULL", __func__);
		return;
	}

	*(int *)(data_buffer->data + data_buffer->data_len) =
		DCI_EXT_HDR_TYPE;
	data_buffer->data_len += sizeof(int);
	memcpy(data_buffer->data + data_buffer->data_len, ext_hdr,
	       EXT_HDR_LEN);
	data_buffer->data_len += EXT_HDR_LEN;
}

static void copy_dci_event(unsigned char *buf, int len,
			   struct diag_dci_client_tbl *client, int data_source,
			   void *ext_hdr)
{
	struct diag_dci_buffer_t *data_buffer = NULL;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
	int err = 0, total_len = 0;

	if (!buf || !client) {
		pr_err("diag: Invalid pointers in %s", __func__);
		return;
	}

	total_len = sizeof(int) + len;
	if (ext_hdr)
		total_len += sizeof(int) + EXT_HDR_LEN;

	proc_buf = &client->buffers[data_source];
	mutex_lock(&proc_buf->buf_mutex);
	mutex_lock(&proc_buf->health_mutex);
	err = diag_dci_get_buffer(client, data_source, total_len);
	if (err) {
		if (err == -ENOMEM)
			proc_buf->health.dropped_events++;
		else
			pr_err("diag: In %s, invalid packet\n", __func__);
		mutex_unlock(&proc_buf->health_mutex);
		mutex_unlock(&proc_buf->buf_mutex);
		return;
	}

	data_buffer = proc_buf->buf_curr;

	proc_buf->health.received_events++;
	mutex_unlock(&proc_buf->health_mutex);
	mutex_unlock(&proc_buf->buf_mutex);

	mutex_lock(&data_buffer->data_mutex);
	if (ext_hdr)
		copy_ext_hdr(data_buffer, ext_hdr);

	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
	data_buffer->data_len += sizeof(int);
	memcpy(data_buffer->data + data_buffer->data_len, buf, len);
	data_buffer->data_len += len;
	data_buffer->data_source = data_source;
	mutex_unlock(&data_buffer->data_mutex);
}

void extract_dci_events(unsigned char *buf, int len, int data_source,
			int token, void *ext_hdr)
{
	uint16_t event_id, event_id_packet, length, temp_len;
	uint8_t payload_len, payload_len_field;
	uint8_t timestamp[8] = {0}, timestamp_len;
	unsigned char event_data[MAX_EVENT_SIZE];
	unsigned int total_event_len;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	length = *(uint16_t *)(buf + 1); /* total length of event series */
	if (length == 0) {
		pr_err("diag: Incoming dci event length is invalid\n");
		return;
	}
	/*
	 * Move directly to the start of the event series. 1 byte for
	 * event code and 2 bytes for the length field.
	 * The length field indicates the total length removing the cmd_code
	 * and the length field. The event parsing in that case should happen
	 * till the end.
	 */
	temp_len = 3;
	while (temp_len < length) {
		event_id_packet = *(uint16_t *)(buf + temp_len);
		event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
		if (event_id_packet & 0x8000) {
			/* The packet has the two least significant bytes of
			 * the timestamp
			 */
			timestamp_len = 2;
		} else {
			/* The packet has the full timestamp. The first event
			 * will always have full timestamp. Save it in the
			 * timestamp buffer and use it for subsequent events if
			 * necessary.
			 */
			timestamp_len = 8;
			memcpy(timestamp, buf + temp_len + 2, timestamp_len);
		}
		/* 13th and 14th bit represent the payload length */
		if (((event_id_packet & 0x6000) >> 13) == 3) {
			payload_len_field = 1;
			payload_len = *(uint8_t *)
					(buf + temp_len + 2 + timestamp_len);
			if (payload_len < (MAX_EVENT_SIZE - 13)) {
				/* copy the payload length and the payload */
				memcpy(event_data + 12, buf + temp_len + 2 +
				       timestamp_len, 1);
				memcpy(event_data + 13, buf + temp_len + 2 +
				       timestamp_len + 1, payload_len);
			} else {
				pr_err("diag: event > %d, payload_len = %d\n",
				       (MAX_EVENT_SIZE - 13), payload_len);
				return;
			}
		} else {
			payload_len_field = 0;
			payload_len = (event_id_packet & 0x6000) >> 13;
			/* copy the payload */
			memcpy(event_data + 12, buf + temp_len + 2 +
			       timestamp_len, payload_len);
		}

		/* Before copying the data to userspace, check if we are still
		 * within the buffer limit. This is an error case, don't count
		 * it towards the health statistics.
		 *
		 * Here, the offset of 2 bytes(uint16_t) is for the
		 * event_id_packet length
		 */
		temp_len += sizeof(uint16_t) + timestamp_len +
			    payload_len_field + payload_len;
		if (temp_len > len) {
			pr_err("diag: Invalid length in %s, len: %d, read: %d",
			       __func__, len, temp_len);
			return;
		}

		/* 2 bytes for the event id & timestamp len is hard coded to 8,
		 * as individual events have full timestamp.
		 */
		*(uint16_t *)(event_data) = 10 +
					    payload_len_field + payload_len;
		*(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
		memcpy(event_data + 4, timestamp, 8);
		/* 2 bytes for the event length field which is added to
		 * the event data.
		 */
		total_event_len = 2 + 10 + payload_len_field + payload_len;
		/* parse through event mask tbl of each client and check mask */
		mutex_lock(&driver->dci_mutex);
		list_for_each_safe(start, temp, &driver->dci_client_list) {
			entry = list_entry(start, struct diag_dci_client_tbl,
					   track);
			if (entry->client_info.token != token)
				continue;
			if (diag_dci_query_event_mask(entry, event_id)) {
				/* copy to client buffer */
				copy_dci_event(event_data, total_event_len,
					       entry, data_source, ext_hdr);
			}
		}
		mutex_unlock(&driver->dci_mutex);
	}
}
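
/*
 * Bit layout of event_id_packet as parsed above:
 *
 *   bit  15     - 1: packet carries only the 2 least significant
 *                 timestamp bytes
 *   bits 14..13 - payload length selector (3 means a separate length byte
 *                 follows the timestamp, otherwise the value itself is the
 *                 payload length in bytes)
 *   bits 11..0  - event ID
 *
 * Each event is then rewritten for clients as:
 *   [total length (2)][event id (2)][full timestamp (8)]
 *   [optional length byte][payload]
 */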

static void copy_dci_log(unsigned char *buf, int len,
			 struct diag_dci_client_tbl *client, int data_source,
			 void *ext_hdr)
{
	uint16_t log_length = 0;
	struct diag_dci_buffer_t *data_buffer = NULL;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
	int err = 0, total_len = 0;

	if (!buf || !client) {
		pr_err("diag: Invalid pointers in %s", __func__);
		return;
	}

	log_length = *(uint16_t *)(buf + 2);
	if (log_length > USHRT_MAX - 4) {
		pr_err("diag: Integer overflow in %s, log_len: %d",
		       __func__, log_length);
		return;
	}
	total_len = sizeof(int) + log_length;
	if (ext_hdr)
		total_len += sizeof(int) + EXT_HDR_LEN;

	/* Check if we are within the len. The check should include the
	 * first 4 bytes for the Log code (2) and the length bytes (2)
	 */
	if ((log_length + sizeof(uint16_t) + 2) > len) {
		pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
		       __func__, log_length, len);
		return;
	}

	proc_buf = &client->buffers[data_source];
	mutex_lock(&proc_buf->buf_mutex);
	mutex_lock(&proc_buf->health_mutex);
	err = diag_dci_get_buffer(client, data_source, total_len);
	if (err) {
		if (err == -ENOMEM)
			proc_buf->health.dropped_logs++;
		else
			pr_err("diag: In %s, invalid packet\n", __func__);
		mutex_unlock(&proc_buf->health_mutex);
		mutex_unlock(&proc_buf->buf_mutex);
		return;
	}

	data_buffer = proc_buf->buf_curr;
	proc_buf->health.received_logs++;
	mutex_unlock(&proc_buf->health_mutex);
	mutex_unlock(&proc_buf->buf_mutex);

	mutex_lock(&data_buffer->data_mutex);
	if (!data_buffer->data) {
		mutex_unlock(&data_buffer->data_mutex);
		return;
	}
	if (ext_hdr)
		copy_ext_hdr(data_buffer, ext_hdr);

	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
	data_buffer->data_len += sizeof(int);
	memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
	       log_length);
	data_buffer->data_len += log_length;
	data_buffer->data_source = data_source;
	mutex_unlock(&data_buffer->data_mutex);
}

void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
		     void *ext_hdr)
{
	uint16_t log_code, read_bytes = 0;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	if (!buf) {
		pr_err("diag: In %s buffer is NULL\n", __func__);
		return;
	}

	/* The first six bytes of the incoming log packet contain the
	 * Command code (2), the length of the packet (2) and the length
	 * of the log (2)
	 */
	log_code = *(uint16_t *)(buf + 6);
	read_bytes += sizeof(uint16_t) + 6;
	if (read_bytes > len) {
		pr_err("diag: Invalid length in %s, len: %d, read: %d",
		       __func__, len, read_bytes);
		return;
	}

	/* parse through log mask table of each client and check mask */
	mutex_lock(&driver->dci_mutex);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.token != token)
			continue;
		if (diag_dci_query_log_mask(entry, log_code)) {
			pr_debug("\t log code %x needed by client %d",
				 log_code, entry->client->tgid);
			/* copy to client buffer */
			copy_dci_log(buf, len, entry, data_source, ext_hdr);
		}
	}
	mutex_unlock(&driver->dci_mutex);
}

void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
			 int token)
{
	uint8_t version, pkt_cmd_code = 0;
	unsigned char *pkt = NULL;

	if (!buf) {
		pr_err("diag: In %s buffer is NULL\n", __func__);
		return;
	}

	version = *(uint8_t *)buf + 1;
	if (version < EXT_HDR_VERSION) {
		pr_err("diag: %s, Extended header with invalid version: %d\n",
		       __func__, version);
		return;
	}

	pkt = buf + EXT_HDR_LEN;
	pkt_cmd_code = *(uint8_t *)pkt;
	len -= EXT_HDR_LEN;
	if (len < 0) {
		pr_err("diag: %s, Invalid length len: %d\n", __func__, len);
		return;
	}

	switch (pkt_cmd_code) {
	case LOG_CMD_CODE:
		extract_dci_log(pkt, len, data_source, token, buf);
		break;
	case EVENT_CMD_CODE:
		extract_dci_events(pkt, len, data_source, token, buf);
		break;
	default:
		pr_err("diag: %s unsupported cmd_code: %d, data_source: %d\n",
		       __func__, pkt_cmd_code, data_source);
		return;
	}
}

void diag_dci_channel_open_work(struct work_struct *work)
{
	int i, j;
	char dirty_bits[16];
	uint8_t *client_log_mask_ptr;
	uint8_t *log_mask_ptr;
	int ret;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	/* Update apps and peripheral(s) with the dci log and event masks */
	memset(dirty_bits, 0, 16 * sizeof(uint8_t));

	/*
	 * From each log entry used by each client, determine which log
	 * entries in the cumulative logs need to be updated on the
	 * peripheral.
	 */
	mutex_lock(&driver->dci_mutex);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.token != DCI_LOCAL_PROC)
			continue;
		client_log_mask_ptr = entry->dci_log_mask;
		for (j = 0; j < 16; j++) {
			if (*(client_log_mask_ptr+1))
				dirty_bits[j] = 1;
			client_log_mask_ptr += 514;
		}
	}
	mutex_unlock(&driver->dci_mutex);

	mutex_lock(&dci_log_mask_mutex);
	/* Update the appropriate dirty bits in the cumulative mask */
	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
	for (i = 0; i < 16; i++) {
		if (dirty_bits[i])
			*(log_mask_ptr+1) = dirty_bits[i];

		log_mask_ptr += 514;
	}
	mutex_unlock(&dci_log_mask_mutex);

	/* Send updated mask to userspace clients */
	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
	/* Send updated log mask to peripherals */
	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_log_mask(DCI_LOCAL_PROC);

	/* Send updated event mask to userspace clients */
	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
	/* Send updated event mask to peripheral */
	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_event_mask(DCI_LOCAL_PROC);
}

void diag_dci_notify_client(int peripheral_mask, int data, int proc)
{
	int stat = 0;
	struct siginfo info;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	struct pid *pid_struct = NULL;
	struct task_struct *dci_task = NULL;

	memset(&info, 0, sizeof(struct siginfo));
	info.si_code = SI_QUEUE;
	info.si_int = (peripheral_mask | data);
	if (data == DIAG_STATUS_OPEN)
		dci_ops_tbl[proc].peripheral_status |= peripheral_mask;
	else
		dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;

	/* Notify the DCI process that the peripheral DCI Channel is up */
	mutex_lock(&driver->dci_mutex);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.token != proc)
			continue;
		if (entry->client_info.notification_list & peripheral_mask) {
			info.si_signo = entry->client_info.signal_type;
			pid_struct = find_get_pid(entry->tgid);
			if (pid_struct) {
				dci_task = get_pid_task(pid_struct,
							PIDTYPE_PID);
				if (!dci_task) {
					DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
						 "diag: dci client with pid = %d Exited..\n",
						 entry->tgid);
					mutex_unlock(&driver->dci_mutex);
					return;
				}
				if (entry->client &&
				    entry->tgid == dci_task->tgid) {
					DIAG_LOG(DIAG_DEBUG_DCI,
						 "entry tgid = %d, dci client tgid = %d\n",
						 entry->tgid, dci_task->tgid);
					stat = send_sig_info(
						entry->client_info.signal_type,
						&info, dci_task);
					if (stat)
						pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
						       info.si_int, stat);
				} else
					pr_err("diag: client data is corrupted, signal data: 0x%x, stat: %d\n",
					       info.si_int, stat);
			}
		}
	}
	mutex_unlock(&driver->dci_mutex);
}

static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
			     unsigned char *buf, int len, int tag)
{
	int i, status = DIAG_DCI_NO_ERROR;
	uint32_t write_len = 0;
	struct diag_dci_pkt_header_t header;

	if (!entry)
		return -EIO;

	if (len < 1 || len > DIAG_MAX_REQ_SIZE) {
		pr_err("diag: dci: In %s, invalid length %d, max_length: %d\n",
		       __func__, len, (int)(DCI_REQ_BUF_SIZE - sizeof(header)));
		return -EIO;
	}

	if ((len + sizeof(header) + sizeof(uint8_t)) > DCI_BUF_SIZE) {
		pr_err("diag: dci: In %s, invalid length %d for apps_dci_buf, max_length: %d\n",
		       __func__, len, DIAG_MAX_REQ_SIZE);
		return -EIO;
	}

	mutex_lock(&driver->dci_mutex);
	/* prepare DCI packet */
	header.start = CONTROL_CHAR;
	header.version = 1;
	header.len = len + sizeof(int) + sizeof(uint8_t);
	header.pkt_code = DCI_PKT_RSP_CODE;
	header.tag = tag;
	memcpy(driver->apps_dci_buf, &header, sizeof(header));
	write_len += sizeof(header);
	memcpy(driver->apps_dci_buf + write_len, buf, len);
	write_len += len;
	*(uint8_t *)(driver->apps_dci_buf + write_len) = CONTROL_CHAR;
	write_len += sizeof(uint8_t);

	/* This command is registered locally on the Apps */
	if (entry->proc == APPS_DATA) {
		diag_update_pkt_buffer(driver->apps_dci_buf, write_len,
				       DCI_PKT_TYPE);
		diag_update_sleeping_process(entry->pid, DCI_PKT_TYPE);
		mutex_unlock(&driver->dci_mutex);
		return DIAG_DCI_NO_ERROR;
	}

	for (i = 0; i < NUM_PERIPHERALS; i++)
		if (entry->proc == i) {
			status = 1;
			break;
		}

	if (status) {
		status = diag_dci_write_proc(entry->proc,
					     DIAG_DATA_TYPE,
					     driver->apps_dci_buf,
					     write_len);
	} else {
		pr_err("diag: Cannot send packet to peripheral %d",
		       entry->proc);
		status = DIAG_DCI_SEND_DATA_FAIL;
	}
	mutex_unlock(&driver->dci_mutex);
	return status;
}
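
/*
 * Framing produced above for a peripheral-bound DCI request (field widths
 * per the header arithmetic in this function):
 *
 *   [CONTROL_CHAR][version=1][len][DCI_PKT_RSP_CODE][tag][request bytes]
 *   [CONTROL_CHAR]
 *
 * where len covers the pkt code, the tag and the request. The tag is what
 * lets extract_dci_pkt_rsp() route the eventual response back to the
 * client that issued the request.
 */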

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
unsigned char *dci_get_buffer_from_bridge(int token)
{
	uint8_t retries = 0, max_retries = 3;
	unsigned char *buf = NULL;

	do {
		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
				    dci_ops_tbl[token].mempool);
		if (!buf) {
			usleep_range(5000, 5100);
			retries++;
		} else
			break;
	} while (retries < max_retries);

	return buf;
}

int diag_dci_write_bridge(int token, unsigned char *buf, int len)
{
	return diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, len);
}

int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
{
	int token = BRIDGE_TO_TOKEN(index);

	if (!VALID_DCI_TOKEN(token)) {
		pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
		return -EINVAL;
	}
	diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
	return 0;
}
#endif

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
				    int token)
{
	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;
	int dci_header_size = sizeof(struct diag_dci_header_t);
	int ret = DIAG_DCI_NO_ERROR;
	uint32_t write_len = 0;

	if (!data)
		return -EIO;

	buf = dci_get_buffer_from_bridge(token);
	if (!buf) {
		pr_err("diag: In %s, unable to get dci buffers to write data\n",
		       __func__);
		return -EAGAIN;
	}

	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	/*
	 * The Length of the DCI packet = length of the command + tag (int) +
	 * the command code size (uint8_t)
	 */
	dci_header.length = len + sizeof(int) + sizeof(uint8_t);
	dci_header.cmd_code = DCI_PKT_RSP_CODE;

	memcpy(buf + write_len, &dci_header, dci_header_size);
	write_len += dci_header_size;
	*(int *)(buf + write_len) = tag;
	write_len += sizeof(int);
	memcpy(buf + write_len, data, len);
	write_len += len;
	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
	write_len += sizeof(uint8_t);

	ret = diag_dci_write_bridge(token, buf, write_len);
	if (ret) {
		pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
		       token, ret);
		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
	} else {
		ret = DIAG_DCI_NO_ERROR;
	}

	return ret;
}
#else
static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
				    int token)
{
	return DIAG_DCI_NO_ERROR;
}
#endif

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
int diag_dci_send_handshake_pkt(int index)
{
	int err = 0;
	int token = BRIDGE_TO_TOKEN(index);
	int write_len = 0;
	struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;

	if (!VALID_DCI_TOKEN(token)) {
		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
		return -EINVAL;
	}

	buf = dci_get_buffer_from_bridge(token);
	if (!buf) {
		pr_err("diag: In %s, unable to get dci buffers to write data\n",
		       __func__);
		return -EAGAIN;
	}

	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	/* Include the cmd code (uint8_t) in the length */
	dci_header.length = sizeof(ctrl_pkt) + sizeof(uint8_t);
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
	memcpy(buf, &dci_header, sizeof(dci_header));
	write_len += sizeof(dci_header);

	ctrl_pkt.ctrl_pkt_id = DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT;
	/*
	 * The control packet data length accounts for the version (uint32_t)
	 * of the packet and the magic number (uint32_t).
	 */
	ctrl_pkt.ctrl_pkt_data_len = 2 * sizeof(uint32_t);
	ctrl_pkt.version = 1;
	ctrl_pkt.magic = DCI_MAGIC;
	memcpy(buf + write_len, &ctrl_pkt, sizeof(ctrl_pkt));
	write_len += sizeof(ctrl_pkt);

	*(uint8_t *)(buf + write_len) = CONTROL_CHAR;
	write_len += sizeof(uint8_t);

	err = diag_dci_write_bridge(token, buf, write_len);
	if (err) {
		pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
		       token, err);
		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
		return err;
	}

	mod_timer(&(dci_channel_status[token].wait_time),
		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));

	return 0;
}
#else
int diag_dci_send_handshake_pkt(int index)
{
	return 0;
}
#endif

static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
				     unsigned char *req_buf, int req_len,
				     int tag)
{
	uint8_t cmd_code, subsys_id, i, goto_download = 0;
	uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
	uint16_t ss_cmd_code;
	uint32_t write_len = 0;
	unsigned char *dest_buf = driver->apps_dci_buf;
	unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
	struct diag_dci_pkt_header_t dci_header;

	if (!pkt_header || !req_buf || req_len <= 0 || tag < 0)
		return -EIO;

	cmd_code = pkt_header->cmd_code;
	subsys_id = pkt_header->subsys_id;
	ss_cmd_code = pkt_header->subsys_cmd_code;

	if (cmd_code == DIAG_CMD_DOWNLOAD) {
		*payload_ptr = DIAG_CMD_DOWNLOAD;
		write_len = sizeof(uint8_t);
		goto_download = 1;
		goto fill_buffer;
	} else if (cmd_code == DIAG_CMD_VERSION) {
		if (chk_polling_response()) {
			for (i = 0; i < 55; i++, write_len++, payload_ptr++)
				*(payload_ptr) = 0;
			goto fill_buffer;
		}
	} else if (cmd_code == DIAG_CMD_EXT_BUILD) {
		if (chk_polling_response()) {
			*payload_ptr = DIAG_CMD_EXT_BUILD;
			write_len = sizeof(uint8_t);
			payload_ptr += sizeof(uint8_t);
			for (i = 0; i < 8; i++, write_len++, payload_ptr++)
				*(payload_ptr) = 0;
			*(int *)(payload_ptr) = chk_config_get_id();
			write_len += sizeof(int);
			goto fill_buffer;
		}
	} else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
		write_len = diag_cmd_log_on_demand(req_buf, req_len,
						   payload_ptr,
						   APPS_BUF_SIZE - header_len);
		goto fill_buffer;
	} else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
		return DIAG_DCI_TABLE_ERR;
	}

	if (subsys_id == DIAG_SS_DIAG) {
		if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
			memcpy(payload_ptr, pkt_header,
			       sizeof(struct diag_pkt_header_t));
			write_len = sizeof(struct diag_pkt_header_t);
			*(uint32_t *)(payload_ptr + write_len) =
				DIAG_MAX_REQ_SIZE;
			write_len += sizeof(uint32_t);
		} else if (ss_cmd_code == DIAG_DIAG_STM) {
			write_len = diag_process_stm_cmd(req_buf, payload_ptr);
		}
	} else if (subsys_id == DIAG_SS_PARAMS) {
		if (ss_cmd_code == DIAG_DIAG_POLL) {
			if (chk_polling_response()) {
				memcpy(payload_ptr, pkt_header,
				       sizeof(struct diag_pkt_header_t));
				write_len = sizeof(struct diag_pkt_header_t);
				payload_ptr += write_len;
				for (i = 0; i < 12; i++, write_len++) {
					*(payload_ptr) = 0;
					payload_ptr++;
				}
			}
		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
			memcpy(payload_ptr, pkt_header,
			       sizeof(struct diag_pkt_header_t));
			write_len = sizeof(struct diag_pkt_header_t);
			*(int *)(payload_ptr + write_len) = wrap_enabled;
			write_len += sizeof(int);
		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
			wrap_enabled = true;
			memcpy(payload_ptr, pkt_header,
			       sizeof(struct diag_pkt_header_t));
			write_len = sizeof(struct diag_pkt_header_t);
			*(uint16_t *)(payload_ptr + write_len) = wrap_count;
			write_len += sizeof(uint16_t);
		} else if (ss_cmd_code == DIAG_EXT_MOBILE_ID) {
			write_len = diag_cmd_get_mobile_id(req_buf, req_len,
						payload_ptr,
						APPS_BUF_SIZE - header_len);
		}
	}

fill_buffer:
	if (write_len > 0) {
		/* Check if we are within the range of the buffer */
		if (write_len + header_len > DIAG_MAX_REQ_SIZE) {
			pr_err("diag: In %s, invalid length %d\n", __func__,
			       write_len + header_len);
			return -ENOMEM;
		}
		dci_header.start = CONTROL_CHAR;
		dci_header.version = 1;
		/*
		 * Length of the rsp pkt = actual data len + pkt rsp code
		 * (uint8_t) + tag (int)
		 */
		dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
		dci_header.pkt_code = DCI_PKT_RSP_CODE;
		dci_header.tag = tag;
		driver->in_busy_dcipktdata = 1;
		memcpy(dest_buf, &dci_header, header_len);
		diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
						dci_header.len);
		driver->in_busy_dcipktdata = 0;

		if (goto_download) {
			/*
			 * Sleep for some time so that the response reaches the
			 * client. The value 5000 was chosen empirically as an
			 * optimum time for the response to reach the client.
			 */
			usleep_range(5000, 5100);
			/* call download API */
			msm_set_restart_mode(RESTART_DLOAD);
			pr_alert("diag: download mode set, Rebooting SoC..\n");
			kernel_restart(NULL);
		}
		return DIAG_DCI_NO_ERROR;
	}

	return DIAG_DCI_TABLE_ERR;
}
1878
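/*
 * Validate a DCI command request from a client, register a transaction
 * tag for it, and route it: to the remote processor for clients
 * registered with a remote token, to the apps handler above for
 * apps-only commands, or to whichever peripheral registered the command
 * in the command registration table.
 */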
static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
{
	int ret = DIAG_DCI_TABLE_ERR;
	int common_cmd = 0;
	struct diag_pkt_header_t *header = NULL;
	unsigned char *temp = buf;
	unsigned char *req_buf = NULL;
	uint8_t retry_count = 0, max_retries = 3;
	uint32_t read_len = 0, req_len = len;
	struct dci_pkt_req_entry_t *req_entry = NULL;
	struct diag_dci_client_tbl *dci_entry = NULL;
	struct dci_pkt_req_t req_hdr;
	struct diag_cmd_reg_t *reg_item;
	struct diag_cmd_reg_entry_t reg_entry;
	struct diag_cmd_reg_entry_t *temp_entry;

	if (!buf)
		return -EIO;

	if (len <= sizeof(struct dci_pkt_req_t) || len > DCI_REQ_BUF_SIZE) {
		pr_err("diag: dci: Invalid length %d in %s", len, __func__);
		return -EIO;
	}

	req_hdr = *(struct dci_pkt_req_t *)temp;
	temp += sizeof(struct dci_pkt_req_t);
	read_len += sizeof(struct dci_pkt_req_t);
	req_len -= sizeof(struct dci_pkt_req_t);
	req_buf = temp; /* Start of the Request */
	header = (struct diag_pkt_header_t *)temp;
	temp += sizeof(struct diag_pkt_header_t);
	read_len += sizeof(struct diag_pkt_header_t);
	if (read_len >= DCI_REQ_BUF_SIZE) {
		pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
		       read_len);
		return -EIO;
	}

	mutex_lock(&driver->dci_mutex);
	dci_entry = diag_dci_get_client_entry(req_hdr.client_id);
	if (!dci_entry) {
		pr_err("diag: Invalid client %d in %s\n",
		       req_hdr.client_id, __func__);
		mutex_unlock(&driver->dci_mutex);
		return DIAG_DCI_NO_REG;
	}

	/* Check if the command is allowed on DCI */
	if (diag_dci_filter_commands(header)) {
		pr_debug("diag: command not supported %d %d %d",
			 header->cmd_code, header->subsys_id,
			 header->subsys_cmd_code);
		mutex_unlock(&driver->dci_mutex);
		return DIAG_DCI_SEND_DATA_FAIL;
	}

	common_cmd = diag_check_common_cmd(header);
	if (common_cmd < 0) {
		pr_debug("diag: error in checking common command, %d\n",
			 common_cmd);
		mutex_unlock(&driver->dci_mutex);
		return DIAG_DCI_SEND_DATA_FAIL;
	}

	/*
	 * Previous packet is yet to be consumed by the client. Wait
	 * till the buffer is free.
	 */
	while (retry_count < max_retries) {
		retry_count++;
		if (driver->in_busy_dcipktdata)
			usleep_range(10000, 10100);
		else
			break;
	}
	/* The buffer is still busy */
	if (driver->in_busy_dcipktdata) {
		pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
		       __func__);
		mutex_unlock(&driver->dci_mutex);
		return -EAGAIN;
	}

	/* Register this new DCI packet */
	req_entry = diag_register_dci_transaction(req_hdr.uid,
						  req_hdr.client_id);
	if (!req_entry) {
		pr_alert("diag: registering new DCI transaction failed\n");
		mutex_unlock(&driver->dci_mutex);
		return DIAG_DCI_NO_REG;
	}
	mutex_unlock(&driver->dci_mutex);

	/*
	 * If the client has registered for remote data, route the packet
	 * to the remote processor
	 */
	if (dci_entry->client_info.token > 0) {
		ret = diag_send_dci_pkt_remote(req_buf, req_len, req_entry->tag,
					       dci_entry->client_info.token);
		return ret;
	}

	/* Check if it is a dedicated Apps command */
	ret = diag_dci_process_apps_pkt(header, req_buf, req_len,
					req_entry->tag);
	if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
		return ret;

	reg_entry.cmd_code = header->cmd_code;
	reg_entry.subsys_id = header->subsys_id;
	reg_entry.cmd_code_hi = header->subsys_cmd_code;
	reg_entry.cmd_code_lo = header->subsys_cmd_code;

	mutex_lock(&driver->cmd_reg_mutex);
	temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
	if (temp_entry) {
		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
					entry);
		ret = diag_send_dci_pkt(reg_item, req_buf, req_len,
					req_entry->tag);
	} else {
		DIAG_LOG(DIAG_DEBUG_DCI, "Command not found: %02x %02x %02x\n",
			 reg_entry.cmd_code, reg_entry.subsys_id,
			 reg_entry.cmd_code_hi);
	}
	mutex_unlock(&driver->cmd_reg_mutex);

	return ret;
}

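/*
 * Entry point for DCI transactions coming from userspace. The first int
 * of the payload selects the transaction: a positive value is a packet
 * request/response, DCI_LOG_TYPE updates log masks and DCI_EVENT_TYPE
 * updates event masks for the calling client, after which the
 * cumulative masks are recomputed and pushed to the peripherals.
 */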
int diag_process_dci_transaction(unsigned char *buf, int len)
{
	unsigned char *temp = buf;
	uint16_t log_code, item_num;
	int ret = -1, found = 0, client_id = 0, client_token = 0;
	int count, set_mask, num_codes, bit_index, event_id, offset = 0;
	unsigned int byte_index, read_len = 0;
	uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
	uint8_t *event_mask_ptr;
	struct diag_dci_client_tbl *dci_entry = NULL;

	if (!temp) {
		pr_err("diag: Invalid buffer in %s\n", __func__);
		return -ENOMEM;
	}

	/* This is a packet request/response transaction */
	if (*(int *)temp > 0) {
		return diag_process_dci_pkt_rsp(buf, len);
	} else if (*(int *)temp == DCI_LOG_TYPE) {
		/* Minimum length of a log mask config is 12 + 2 bytes for
		 * at least one log code to be set or reset.
		 */
		if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
			pr_err("diag: dci: Invalid length in %s\n", __func__);
			return -EIO;
		}

		/* Extract each log code and put in client table */
		temp += sizeof(int);
		read_len += sizeof(int);
		client_id = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		set_mask = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		num_codes = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);

		/* Find client table entry */
		mutex_lock(&driver->dci_mutex);
		dci_entry = diag_dci_get_client_entry(client_id);
		if (!dci_entry) {
			pr_err("diag: In %s, invalid client\n", __func__);
			mutex_unlock(&driver->dci_mutex);
			return ret;
		}
		client_token = dci_entry->client_info.token;

		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
			pr_err("diag: dci: Invalid number of log codes %d\n",
			       num_codes);
			mutex_unlock(&driver->dci_mutex);
			return -EIO;
		}

		head_log_mask_ptr = dci_entry->dci_log_mask;
		if (!head_log_mask_ptr) {
			pr_err("diag: dci: Invalid Log mask pointer in %s\n",
			       __func__);
			mutex_unlock(&driver->dci_mutex);
			return -ENOMEM;
		}
		pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
		count = 0; /* iterator for extracting log codes */

		while (count < num_codes) {
			if (read_len >= USER_SPACE_DATA) {
				pr_err("diag: dci: Invalid length for log type in %s",
				       __func__);
				mutex_unlock(&driver->dci_mutex);
				return -EIO;
			}
			log_code = *(uint16_t *)temp;
			equip_id = LOG_GET_EQUIP_ID(log_code);
			item_num = LOG_GET_ITEM_NUM(log_code);
			byte_index = item_num/8 + 2;
			if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
				pr_err("diag: dci: Log type, invalid byte index\n");
				mutex_unlock(&driver->dci_mutex);
				return ret;
			}
			byte_mask = 0x01 << (item_num % 8);
			/*
			 * Parse through log mask table and find
			 * relevant range
			 */
			log_mask_ptr = head_log_mask_ptr;
			found = 0;
			offset = 0;
			while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
				if (*log_mask_ptr == equip_id) {
					found = 1;
					pr_debug("diag: find equip id = %x at %pK\n",
						 equip_id, log_mask_ptr);
					break;
				}
				pr_debug("diag: did not find equip id = %x at %d\n",
					 equip_id, *log_mask_ptr);
				log_mask_ptr += 514;
				offset += 514;
			}
			if (!found) {
				pr_err("diag: dci equip id not found\n");
				mutex_unlock(&driver->dci_mutex);
				return ret;
			}
			*(log_mask_ptr+1) = 1; /* set the dirty byte */
			log_mask_ptr = log_mask_ptr + byte_index;
			if (set_mask)
				*log_mask_ptr |= byte_mask;
			else
				*log_mask_ptr &= ~byte_mask;
			/* add to cumulative mask */
			update_dci_cumulative_log_mask(
				offset, byte_index,
				byte_mask, client_token);
			temp += 2;
			read_len += 2;
			count++;
			ret = DIAG_DCI_NO_ERROR;
		}
		/* send updated mask to userspace clients */
		if (client_token == DCI_LOCAL_PROC)
			diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
		/* send updated mask to peripherals */
		ret = dci_ops_tbl[client_token].send_log_mask(client_token);
		mutex_unlock(&driver->dci_mutex);
	} else if (*(int *)temp == DCI_EVENT_TYPE) {
		/* Minimum length of an event mask config is 12 + 4 bytes for
		 * at least one event id to be set or reset.
		 */
		if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
			pr_err("diag: dci: Invalid length in %s\n", __func__);
			return -EIO;
		}

		/* Extract each event id and put in client table */
		temp += sizeof(int);
		read_len += sizeof(int);
		client_id = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		set_mask = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		num_codes = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);

		/* find client table entry */
		mutex_lock(&driver->dci_mutex);
		dci_entry = diag_dci_get_client_entry(client_id);
		if (!dci_entry) {
			pr_err("diag: In %s, invalid client\n", __func__);
			mutex_unlock(&driver->dci_mutex);
			return ret;
		}
		client_token = dci_entry->client_info.token;

		/* Check for a positive number of event ids. Also, the number
		 * of event ids should fit in the buffer along with set_mask
		 * and num_codes which are 4 bytes each.
		 */
		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
			pr_err("diag: dci: Invalid number of event ids %d\n",
			       num_codes);
			mutex_unlock(&driver->dci_mutex);
			return -EIO;
		}

		event_mask_ptr = dci_entry->dci_event_mask;
		if (!event_mask_ptr) {
			pr_err("diag: dci: Invalid event mask pointer in %s\n",
			       __func__);
			mutex_unlock(&driver->dci_mutex);
			return -ENOMEM;
		}
		pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
		count = 0; /* iterator for extracting event ids */
		while (count < num_codes) {
			if (read_len >= USER_SPACE_DATA) {
				pr_err("diag: dci: Invalid length for event type in %s",
				       __func__);
				mutex_unlock(&driver->dci_mutex);
				return -EIO;
			}
			event_id = *(int *)temp;
			byte_index = event_id/8;
			if (byte_index >= DCI_EVENT_MASK_SIZE) {
				pr_err("diag: dci: Event type, invalid byte index\n");
				mutex_unlock(&driver->dci_mutex);
				return ret;
			}
			bit_index = event_id % 8;
			byte_mask = 0x1 << bit_index;
			/*
			 * Parse through event mask table and set
			 * relevant byte & bit combination
			 */
			if (set_mask)
				*(event_mask_ptr + byte_index) |= byte_mask;
			else
				*(event_mask_ptr + byte_index) &= ~byte_mask;
			/* add to cumulative mask */
			update_dci_cumulative_event_mask(byte_index, byte_mask,
							 client_token);
			temp += sizeof(int);
			read_len += sizeof(int);
			count++;
			ret = DIAG_DCI_NO_ERROR;
		}
		/* send updated mask to userspace clients */
		if (dci_entry->client_info.token == DCI_LOCAL_PROC)
			diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
		/* send updated mask to peripherals */
		ret = dci_ops_tbl[client_token].send_event_mask(client_token);
		mutex_unlock(&driver->dci_mutex);
	} else {
		pr_alert("diag: Incorrect DCI transaction\n");
	}
	return ret;
}

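/* Look up a DCI client table entry by its client ID. */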
struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id)
{
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.client_id == client_id)
			return entry;
	}
	return NULL;
}

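/*
 * Look up a DCI client table entry by thread group ID, skipping entries
 * whose registered task is no longer alive.
 */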
struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
{
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	struct pid *pid_struct = NULL;
	struct task_struct *task_s = NULL;

	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		pid_struct = find_get_pid(entry->tgid);
		if (!pid_struct) {
			DIAG_LOG(DIAG_DEBUG_DCI,
				 "diag: valid pid doesn't exist for pid = %d\n",
				 entry->tgid);
			continue;
		}
		task_s = get_pid_task(pid_struct, PIDTYPE_PID);
		if (!task_s) {
			DIAG_LOG(DIAG_DEBUG_DCI,
				 "diag: valid task doesn't exist for pid = %d\n",
				 entry->tgid);
			continue;
		}
		if (task_s == entry->client)
			if (entry->client->tgid == tgid)
				return entry;
	}
	return NULL;
}

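/*
 * Update one byte of the cumulative event mask for a processor token.
 * The bit stays set if any client registered with that token still has
 * it set in its own mask, and is cleared otherwise.
 */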
void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token)
{
	uint8_t *event_mask_ptr, *update_ptr = NULL;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	bool is_set = false;

	mutex_lock(&dci_event_mask_mutex);
	update_ptr = dci_ops_tbl[token].event_mask_composite;
	if (!update_ptr) {
		mutex_unlock(&dci_event_mask_mutex);
		return;
	}
	update_ptr += offset;
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.token != token)
			continue;
		event_mask_ptr = entry->dci_event_mask;
		event_mask_ptr += offset;
		if ((*event_mask_ptr & byte_mask) == byte_mask) {
			is_set = true;
			/* break even if one client has the event mask set */
			break;
		}
	}
	if (is_set == false)
		*update_ptr &= ~byte_mask;
	else
		*update_ptr |= byte_mask;
	mutex_unlock(&dci_event_mask_mutex);
}

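/*
 * Rebuild the cumulative event mask for a token from scratch by OR-ing
 * together the event masks of all clients registered with that token.
 */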
void diag_dci_invalidate_cumulative_event_mask(int token)
{
	int i = 0;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	uint8_t *event_mask_ptr, *update_ptr = NULL;

	mutex_lock(&dci_event_mask_mutex);
	update_ptr = dci_ops_tbl[token].event_mask_composite;
	if (!update_ptr) {
		mutex_unlock(&dci_event_mask_mutex);
		return;
	}

	create_dci_event_mask_tbl(update_ptr);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.token != token)
			continue;
		event_mask_ptr = entry->dci_event_mask;
		for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
			*(update_ptr+i) |= *(event_mask_ptr+i);
	}
	mutex_unlock(&dci_event_mask_mutex);
}

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
int diag_send_dci_event_mask_remote(int token)
{
	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;
	struct diag_ctrl_event_mask event_mask;
	int dci_header_size = sizeof(struct diag_dci_header_t);
	int event_header_size = sizeof(struct diag_ctrl_event_mask);
	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
	unsigned char *event_mask_ptr = NULL;
	uint32_t write_len = 0;

	mutex_lock(&dci_event_mask_mutex);
	event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
	if (!event_mask_ptr) {
		mutex_unlock(&dci_event_mask_mutex);
		return -EINVAL;
	}
	buf = dci_get_buffer_from_bridge(token);
	if (!buf) {
		pr_err("diag: In %s, unable to get dci buffers to write data\n",
		       __func__);
		mutex_unlock(&dci_event_mask_mutex);
		return -EAGAIN;
	}

	/* Frame the DCI header */
	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	dci_header.length = event_header_size + DCI_EVENT_MASK_SIZE + 1;
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;

	event_mask.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
	event_mask.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
	event_mask.stream_id = DCI_MASK_STREAM;
	event_mask.status = DIAG_CTRL_MASK_VALID;
	event_mask.event_config = 0; /* event config */
	event_mask.event_mask_size = DCI_EVENT_MASK_SIZE;
	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
		if (event_mask_ptr[i] != 0) {
			event_mask.event_config = 1;
			break;
		}
	}
	memcpy(buf + write_len, &dci_header, dci_header_size);
	write_len += dci_header_size;
	memcpy(buf + write_len, &event_mask, event_header_size);
	write_len += event_header_size;
	memcpy(buf + write_len, event_mask_ptr, DCI_EVENT_MASK_SIZE);
	write_len += DCI_EVENT_MASK_SIZE;
	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
	write_len += sizeof(uint8_t);
	err = diag_dci_write_bridge(token, buf, write_len);
	if (err) {
		pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
		       token, err);
		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
		ret = err;
	} else {
		ret = DIAG_DCI_NO_ERROR;
	}
	mutex_unlock(&dci_event_mask_mutex);
	return ret;
}
#endif

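/*
 * Send the cumulative event mask for the local processor to every
 * peripheral that supports DCI, over its control channel.
 */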
int diag_send_dci_event_mask(int token)
{
	void *buf = event_mask.update_buf;
	struct diag_ctrl_event_mask header;
	int header_size = sizeof(struct diag_ctrl_event_mask);
	int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
	unsigned char *event_mask_ptr = NULL;

	mutex_lock(&dci_event_mask_mutex);
	event_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].event_mask_composite;
	if (!event_mask_ptr) {
		mutex_unlock(&dci_event_mask_mutex);
		return -EINVAL;
	}

	mutex_lock(&event_mask.lock);
	/* send event mask update */
	header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
	header.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
	header.stream_id = DCI_MASK_STREAM;
	header.status = DIAG_CTRL_MASK_VALID;
	header.event_config = 0; /* event config */
	header.event_mask_size = DCI_EVENT_MASK_SIZE;
	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
		if (event_mask_ptr[i] != 0) {
			header.event_config = 1;
			break;
		}
	}
	memcpy(buf, &header, header_size);
	memcpy(buf+header_size, event_mask_ptr, DCI_EVENT_MASK_SIZE);
	for (i = 0; i < NUM_PERIPHERALS; i++) {
		/*
		 * Don't send to peripheral if its regular channel
		 * is down. It may also mean that the peripheral doesn't
		 * support DCI.
		 */
		if (check_peripheral_dci_support(i, DCI_LOCAL_PROC)) {
			err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
						  header_size + DCI_EVENT_MASK_SIZE);
			if (err != DIAG_DCI_NO_ERROR)
				ret = DIAG_DCI_SEND_DATA_FAIL;
		}
	}

	mutex_unlock(&event_mask.lock);
	mutex_unlock(&dci_event_mask_mutex);

	return ret;
}

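/*
 * Update one byte of the cumulative log mask for a token and mark the
 * equipment ID block dirty. As with events, the bit stays set as long
 * as at least one client with that token has it set.
 */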
void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
				    uint8_t byte_mask, int token)
{
	uint8_t *log_mask_ptr, *update_ptr = NULL;
	bool is_set = false;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	mutex_lock(&dci_log_mask_mutex);
	update_ptr = dci_ops_tbl[token].log_mask_composite;
	if (!update_ptr) {
		mutex_unlock(&dci_log_mask_mutex);
		return;
	}

	update_ptr += offset;
	/* update the dirty bit */
	*(update_ptr+1) = 1;
	update_ptr = update_ptr + byte_index;
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.token != token)
			continue;
		log_mask_ptr = entry->dci_log_mask;
		log_mask_ptr = log_mask_ptr + offset + byte_index;
		if ((*log_mask_ptr & byte_mask) == byte_mask) {
			is_set = true;
			/* break even if one client has the log mask set */
			break;
		}
	}

	if (is_set == false)
		*update_ptr &= ~byte_mask;
	else
		*update_ptr |= byte_mask;
	mutex_unlock(&dci_log_mask_mutex);
}

void diag_dci_invalidate_cumulative_log_mask(int token)
{
	int i = 0;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	uint8_t *log_mask_ptr, *update_ptr = NULL;

	/* Clear the composite mask and redo all the masks */
	mutex_lock(&dci_log_mask_mutex);
	update_ptr = dci_ops_tbl[token].log_mask_composite;
	if (!update_ptr) {
		mutex_unlock(&dci_log_mask_mutex);
		return;
	}

	create_dci_log_mask_tbl(update_ptr, DCI_LOG_MASK_DIRTY);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client_info.token != token)
			continue;
		log_mask_ptr = entry->dci_log_mask;
		for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
			*(update_ptr+i) |= *(log_mask_ptr+i);
	}
	mutex_unlock(&dci_log_mask_mutex);
}

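/*
 * Build one log mask control packet from a single equipment ID block of
 * the composite mask. Each block is laid out as 2 header bytes
 * (equipment ID, dirty byte) followed by DCI_MAX_ITEMS_PER_LOG_CODE
 * mask bytes, which is where the 514-byte stride used by the callers
 * comes from.
 */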
static int dci_fill_log_mask(unsigned char *dest_ptr, unsigned char *src_ptr)
{
	struct diag_ctrl_log_mask header;
	int header_len = sizeof(struct diag_ctrl_log_mask);

	header.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
	header.num_items = DCI_MAX_ITEMS_PER_LOG_CODE;
	header.data_len = 11 + DCI_MAX_ITEMS_PER_LOG_CODE;
	header.stream_id = DCI_MASK_STREAM;
	header.status = 3;
	header.equip_id = *src_ptr;
	header.log_mask_size = DCI_MAX_ITEMS_PER_LOG_CODE;
	memcpy(dest_ptr, &header, header_len);
	memcpy(dest_ptr + header_len, src_ptr + 2, DCI_MAX_ITEMS_PER_LOG_CODE);

	return header_len + DCI_MAX_ITEMS_PER_LOG_CODE;
}

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
int diag_send_dci_log_mask_remote(int token)
{
	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;
	int dci_header_size = sizeof(struct diag_dci_header_t);
	int log_header_size = sizeof(struct diag_ctrl_log_mask);
	uint8_t *log_mask_ptr = NULL;
	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
	int updated;
	uint32_t write_len = 0;

	mutex_lock(&dci_log_mask_mutex);
	log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
	if (!log_mask_ptr) {
		mutex_unlock(&dci_log_mask_mutex);
		return -EINVAL;
	}

	/* DCI header is common to all equipment IDs */
	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	dci_header.length = log_header_size + DCI_MAX_ITEMS_PER_LOG_CODE + 1;
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;

	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
		updated = 1;
		write_len = 0;
		if (!*(log_mask_ptr + 1)) {
			log_mask_ptr += 514;
			continue;
		}

		buf = dci_get_buffer_from_bridge(token);
		if (!buf) {
			pr_err("diag: In %s, unable to get dci buffers to write data\n",
			       __func__);
			mutex_unlock(&dci_log_mask_mutex);
			return -EAGAIN;
		}

		memcpy(buf + write_len, &dci_header, dci_header_size);
		write_len += dci_header_size;
		write_len += dci_fill_log_mask(buf + write_len, log_mask_ptr);
		*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
		write_len += sizeof(uint8_t);
		err = diag_dci_write_bridge(token, buf, write_len);
		if (err) {
			pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
			       i, token, err);
			diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
			updated = 0;
		}
		if (updated)
			*(log_mask_ptr + 1) = 0; /* clear dirty byte */
		log_mask_ptr += 514;
	}
	mutex_unlock(&dci_log_mask_mutex);
	return ret;
}
#endif

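/*
 * Send the cumulative log mask for the local processor to the
 * peripherals, one control packet per equipment ID whose dirty byte is
 * set.
 */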
int diag_send_dci_log_mask(int token)
{
	void *buf = log_mask.update_buf;
	int write_len = 0;
	uint8_t *log_mask_ptr = NULL;
	int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
	int updated;

	mutex_lock(&dci_log_mask_mutex);
	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
	if (!log_mask_ptr) {
		mutex_unlock(&dci_log_mask_mutex);
		return -EINVAL;
	}

	mutex_lock(&log_mask.lock);
	for (i = 0; i < 16; i++) {
		updated = 1;
		/* If the dirty byte is not set, skip the mask update for
		 * this equip id.
		 */
		if (!(*(log_mask_ptr + 1))) {
			log_mask_ptr += 514;
			continue;
		}
		write_len = dci_fill_log_mask(buf, log_mask_ptr);
		for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
			if (check_peripheral_dci_support(j, DCI_LOCAL_PROC)) {
				err = diag_dci_write_proc(j, DIAG_CNTL_TYPE,
							  buf, write_len);
				if (err != DIAG_DCI_NO_ERROR) {
					updated = 0;
					ret = DIAG_DCI_SEND_DATA_FAIL;
				}
			}
		}
		if (updated)
			*(log_mask_ptr+1) = 0; /* clear dirty byte */
		log_mask_ptr += 514;
	}
	mutex_unlock(&log_mask.lock);
	mutex_unlock(&dci_log_mask_mutex);
	return ret;
}

static int diag_dci_init_local(void)
{
	struct dci_ops_tbl_t *temp = &dci_ops_tbl[DCI_LOCAL_PROC];

	create_dci_log_mask_tbl(temp->log_mask_composite, DCI_LOG_MASK_CLEAN);
	create_dci_event_mask_tbl(temp->event_mask_composite);
	temp->peripheral_status |= DIAG_CON_APSS;

	return 0;
}

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
static void diag_dci_init_handshake_remote(void)
{
	int i;
	struct dci_channel_status_t *temp = NULL;

	for (i = DCI_REMOTE_BASE; i < NUM_DCI_PROC; i++) {
		temp = &dci_channel_status[i];
		temp->id = i;
		setup_timer(&temp->wait_time, dci_chk_handshake, i);
		INIT_WORK(&temp->handshake_work, dci_handshake_work_fn);
	}
}

static int diag_dci_init_remote(void)
{
	int i;
	struct dci_ops_tbl_t *temp = NULL;

	diagmem_init(driver, POOL_TYPE_MDM_DCI_WRITE);

	for (i = DCI_REMOTE_BASE; i < DCI_REMOTE_LAST; i++) {
		temp = &dci_ops_tbl[i];
		create_dci_log_mask_tbl(temp->log_mask_composite,
					DCI_LOG_MASK_CLEAN);
		create_dci_event_mask_tbl(temp->event_mask_composite);
	}

	partial_pkt.data = kzalloc(MAX_DCI_PACKET_SZ, GFP_KERNEL);
	if (!partial_pkt.data)
		return -ENOMEM;

	partial_pkt.total_len = 0;
	partial_pkt.read_len = 0;
	partial_pkt.remaining = 0;
	partial_pkt.processing = 0;

	diag_dci_init_handshake_remote();

	return 0;
}
#else
static int diag_dci_init_remote(void)
{
	return 0;
}
#endif

static int diag_dci_init_ops_tbl(void)
{
	int err = 0;

	err = diag_dci_init_local();
	if (err)
		goto err;
	err = diag_dci_init_remote();
	if (err)
		goto err;

	return 0;

err:
	return -ENOMEM;
}

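/*
 * One-time initialization of the DCI subsystem: locks, ops tables, the
 * apps response buffer, client and request lists, the DCI workqueue,
 * and the data drain timer.
 */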
int diag_dci_init(void)
{
	int ret = 0;

	driver->dci_tag = 0;
	driver->dci_client_id = 0;
	driver->num_dci_client = 0;
	mutex_init(&driver->dci_mutex);
	mutex_init(&dci_log_mask_mutex);
	mutex_init(&dci_event_mask_mutex);
	spin_lock_init(&ws_lock);

	ret = diag_dci_init_ops_tbl();
	if (ret)
		goto err;

	if (driver->apps_dci_buf == NULL) {
		driver->apps_dci_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
		if (driver->apps_dci_buf == NULL)
			goto err;
	}
	INIT_LIST_HEAD(&driver->dci_client_list);
	INIT_LIST_HEAD(&driver->dci_req_list);

	driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
	if (!driver->diag_dci_wq)
		goto err;

	INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);

	setup_timer(&dci_drain_timer, dci_drain_data, 0);
	return DIAG_DCI_NO_ERROR;
err:
	pr_err("diag: Could not initialize diag DCI buffers");
	kfree(driver->apps_dci_buf);
	driver->apps_dci_buf = NULL;

	if (driver->diag_dci_wq)
		destroy_workqueue(driver->diag_dci_wq);
	kfree(partial_pkt.data);
	partial_pkt.data = NULL;
	mutex_destroy(&driver->dci_mutex);
	mutex_destroy(&dci_log_mask_mutex);
	mutex_destroy(&dci_event_mask_mutex);
	return DIAG_DCI_NO_REG;
}

void diag_dci_channel_init(void)
{
	uint8_t peripheral;

	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
		diagfwd_open(peripheral, TYPE_DCI);
		diagfwd_open(peripheral, TYPE_DCI_CMD);
	}
}

void diag_dci_exit(void)
{
	kfree(partial_pkt.data);
	partial_pkt.data = NULL;
	kfree(driver->apps_dci_buf);
	driver->apps_dci_buf = NULL;
	mutex_destroy(&driver->dci_mutex);
	mutex_destroy(&dci_log_mask_mutex);
	mutex_destroy(&dci_event_mask_mutex);
	destroy_workqueue(driver->diag_dci_wq);
}

int diag_dci_clear_log_mask(int client_id)
{
	int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
	uint8_t *update_ptr;
	struct diag_dci_client_tbl *entry = NULL;

	entry = diag_dci_get_client_entry(client_id);
	if (!entry) {
		pr_err("diag: In %s, invalid client entry\n", __func__);
		return DIAG_DCI_TABLE_ERR;
	}
	token = entry->client_info.token;
	update_ptr = dci_ops_tbl[token].log_mask_composite;

	create_dci_log_mask_tbl(entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
	diag_dci_invalidate_cumulative_log_mask(token);

	/*
	 * Send updated mask to userspace clients only if the client
	 * is registered on the local processor
	 */
	if (token == DCI_LOCAL_PROC)
		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
	/* Send updated mask to peripherals */
	err = dci_ops_tbl[token].send_log_mask(token);
	return err;
}

int diag_dci_clear_event_mask(int client_id)
{
	int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
	uint8_t *update_ptr;
	struct diag_dci_client_tbl *entry = NULL;

	entry = diag_dci_get_client_entry(client_id);
	if (!entry) {
		pr_err("diag: In %s, invalid client entry\n", __func__);
		return DIAG_DCI_TABLE_ERR;
	}
	token = entry->client_info.token;
	update_ptr = dci_ops_tbl[token].event_mask_composite;

	create_dci_event_mask_tbl(entry->dci_event_mask);
	diag_dci_invalidate_cumulative_event_mask(token);

	/*
	 * Send updated mask to userspace clients only if the client is
	 * registered on the local processor
	 */
	if (token == DCI_LOCAL_PROC)
		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
	/* Send updated mask to peripherals */
	err = dci_ops_tbl[token].send_event_mask(token);
	return err;
}

uint8_t diag_dci_get_cumulative_real_time(int token)
{
	uint8_t real_time = MODE_NONREALTIME;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->real_time == MODE_REALTIME &&
		    entry->client_info.token == token) {
			real_time = 1;
			break;
		}
	}
	return real_time;
}

int diag_dci_set_real_time(struct diag_dci_client_tbl *entry, uint8_t real_time)
{
	if (!entry) {
		pr_err("diag: In %s, invalid client entry\n", __func__);
		return 0;
	}
	entry->real_time = real_time;
	return 1;
}

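/*
 * Register a new DCI client: allocate its log and event masks and its
 * per-peripheral buffers, add it to the client list, and assign it a
 * client ID. Returns the new client ID, or DIAG_DCI_NO_REG on failure.
 */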
int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
{
	int i, err = 0;
	struct diag_dci_client_tbl *new_entry = NULL;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;

	if (!reg_entry)
		return DIAG_DCI_NO_REG;
	if (!VALID_DCI_TOKEN(reg_entry->token)) {
		pr_alert("diag: Invalid DCI client token, %d\n",
			 reg_entry->token);
		return DIAG_DCI_NO_REG;
	}

	if (driver->dci_state == DIAG_DCI_NO_REG)
		return DIAG_DCI_NO_REG;

	if (driver->num_dci_client >= MAX_DCI_CLIENTS)
		return DIAG_DCI_NO_REG;

	new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
	if (!new_entry)
		return DIAG_DCI_NO_REG;

	mutex_lock(&driver->dci_mutex);

	new_entry->client = current;
	new_entry->tgid = current->tgid;
	new_entry->client_info.notification_list =
				reg_entry->notification_list;
	new_entry->client_info.signal_type =
				reg_entry->signal_type;
	new_entry->client_info.token = reg_entry->token;
	switch (reg_entry->token) {
	case DCI_LOCAL_PROC:
		new_entry->num_buffers = NUM_DCI_PERIPHERALS;
		break;
	case DCI_MDM_PROC:
		new_entry->num_buffers = 1;
		break;
	}

	new_entry->buffers = NULL;
	new_entry->real_time = MODE_REALTIME;
	new_entry->in_service = 0;
	INIT_LIST_HEAD(&new_entry->list_write_buf);
	mutex_init(&new_entry->write_buf_mutex);
	new_entry->dci_log_mask = kzalloc(DCI_LOG_MASK_SIZE, GFP_KERNEL);
	if (!new_entry->dci_log_mask) {
		pr_err("diag: Unable to create log mask for client, %d",
		       driver->dci_client_id);
		goto fail_alloc;
	}
	create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);

	new_entry->dci_event_mask = kzalloc(DCI_EVENT_MASK_SIZE, GFP_KERNEL);
	if (!new_entry->dci_event_mask)
		goto fail_alloc;
	create_dci_event_mask_tbl(new_entry->dci_event_mask);

	new_entry->buffers = kzalloc(new_entry->num_buffers *
				     sizeof(struct diag_dci_buf_peripheral_t),
				     GFP_KERNEL);
	if (!new_entry->buffers) {
		pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
		       __func__);
		goto fail_alloc;
	}

	for (i = 0; i < new_entry->num_buffers; i++) {
		proc_buf = &new_entry->buffers[i];
		if (!proc_buf)
			goto fail_alloc;

		mutex_init(&proc_buf->health_mutex);
		mutex_init(&proc_buf->buf_mutex);
		proc_buf->health.dropped_events = 0;
		proc_buf->health.dropped_logs = 0;
		proc_buf->health.received_events = 0;
		proc_buf->health.received_logs = 0;
		proc_buf->buf_primary = kzalloc(
					sizeof(struct diag_dci_buffer_t),
					GFP_KERNEL);
		if (!proc_buf->buf_primary)
			goto fail_alloc;
		proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
					    GFP_KERNEL);
		if (!proc_buf->buf_cmd)
			goto fail_alloc;
		err = diag_dci_init_buffer(proc_buf->buf_primary,
					   DCI_BUF_PRIMARY);
		if (err)
			goto fail_alloc;
		err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
		if (err)
			goto fail_alloc;
		proc_buf->buf_curr = proc_buf->buf_primary;
	}

	list_add_tail(&new_entry->track, &driver->dci_client_list);
	driver->dci_client_id++;
	new_entry->client_info.client_id = driver->dci_client_id;
	reg_entry->client_id = driver->dci_client_id;
	driver->num_dci_client++;
	if (driver->num_dci_client == 1)
		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP, reg_entry->token);
	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
	mutex_unlock(&driver->dci_mutex);

	return driver->dci_client_id;

fail_alloc:
	if (new_entry) {
		for (i = 0; ((i < new_entry->num_buffers) &&
			     new_entry->buffers); i++) {
			proc_buf = &new_entry->buffers[i];
			if (proc_buf) {
				mutex_destroy(&proc_buf->health_mutex);
				if (proc_buf->buf_primary) {
					kfree(proc_buf->buf_primary->data);
					proc_buf->buf_primary->data = NULL;
					mutex_destroy(
					   &proc_buf->buf_primary->data_mutex);
				}
				kfree(proc_buf->buf_primary);
				proc_buf->buf_primary = NULL;
				if (proc_buf->buf_cmd) {
					kfree(proc_buf->buf_cmd->data);
					proc_buf->buf_cmd->data = NULL;
					mutex_destroy(
					   &proc_buf->buf_cmd->data_mutex);
				}
				kfree(proc_buf->buf_cmd);
				proc_buf->buf_cmd = NULL;
			}
		}
		kfree(new_entry->dci_event_mask);
		new_entry->dci_event_mask = NULL;
		kfree(new_entry->dci_log_mask);
		new_entry->dci_log_mask = NULL;
		kfree(new_entry->buffers);
		new_entry->buffers = NULL;
		kfree(new_entry);
		new_entry = NULL;
	}
	mutex_unlock(&driver->dci_mutex);
	return DIAG_DCI_NO_REG;
}

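/*
 * Tear down a DCI client: remove it from the client list, clear and
 * re-send the cumulative masks, drop its pending requests and write
 * buffers, free its per-peripheral buffers, and update the real-time
 * vote.
 */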
int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
{
	int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
	struct diag_dci_buffer_t *buf_entry, *temp;
	struct list_head *start, *req_temp;
	struct dci_pkt_req_entry_t *req_entry = NULL;
	int token = DCI_LOCAL_PROC;

	if (!entry)
		return DIAG_DCI_NOT_SUPPORTED;

	token = entry->client_info.token;
	/*
	 * Remove the entry from the list before freeing the buffers
	 * to ensure that we don't have any invalid access.
	 */
	if (!list_empty(&entry->track))
		list_del(&entry->track);
	driver->num_dci_client--;
	/*
	 * Clear the client's log and event masks, update the cumulative
	 * masks and send the masks to peripherals
	 */
	kfree(entry->dci_log_mask);
	entry->dci_log_mask = NULL;
	diag_dci_invalidate_cumulative_log_mask(token);
	if (token == DCI_LOCAL_PROC)
		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
	ret = dci_ops_tbl[token].send_log_mask(token);
	if (ret != DIAG_DCI_NO_ERROR)
		return ret;
	kfree(entry->dci_event_mask);
	entry->dci_event_mask = NULL;
	diag_dci_invalidate_cumulative_event_mask(token);
	if (token == DCI_LOCAL_PROC)
		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
	ret = dci_ops_tbl[token].send_event_mask(token);
	if (ret != DIAG_DCI_NO_ERROR)
		return ret;

	list_for_each_safe(start, req_temp, &driver->dci_req_list) {
		req_entry = list_entry(start, struct dci_pkt_req_entry_t,
				       track);
		if (req_entry->client_id == entry->client_info.client_id) {
			if (!list_empty(&req_entry->track))
				list_del(&req_entry->track);
			kfree(req_entry);
			req_entry = NULL;
		}
	}

	/* Clean up any buffer that is pending write */
	mutex_lock(&entry->write_buf_mutex);
	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
				 buf_track) {
		if (!list_empty(&buf_entry->buf_track))
			list_del(&buf_entry->buf_track);
		if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
			mutex_lock(&buf_entry->data_mutex);
			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
			buf_entry->data = NULL;
			mutex_unlock(&buf_entry->data_mutex);
			kfree(buf_entry);
			buf_entry = NULL;
		} else if (buf_entry->buf_type == DCI_BUF_CMD) {
			peripheral = buf_entry->data_source;
			if (peripheral == APPS_DATA)
				continue;
		}
		/*
		 * These are buffers that can't be written to the client which
		 * means that the copy cannot be completed. Make sure that we
		 * remove those references in DCI wakeup source.
		 */
		diag_ws_on_copy_fail(DIAG_WS_DCI);
	}
	mutex_unlock(&entry->write_buf_mutex);

	for (i = 0; i < entry->num_buffers; i++) {
		proc_buf = &entry->buffers[i];
		buf_entry = proc_buf->buf_curr;
		mutex_lock(&proc_buf->buf_mutex);
		/* Clean up secondary buffer from mempool that is active */
		if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
			mutex_lock(&buf_entry->data_mutex);
			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
			buf_entry->data = NULL;
			mutex_unlock(&buf_entry->data_mutex);
			mutex_destroy(&buf_entry->data_mutex);
			kfree(buf_entry);
			buf_entry = NULL;
		}

		mutex_lock(&proc_buf->buf_primary->data_mutex);
		kfree(proc_buf->buf_primary->data);
		proc_buf->buf_primary->data = NULL;
		mutex_unlock(&proc_buf->buf_primary->data_mutex);

		mutex_lock(&proc_buf->buf_cmd->data_mutex);
		kfree(proc_buf->buf_cmd->data);
		proc_buf->buf_cmd->data = NULL;
		mutex_unlock(&proc_buf->buf_cmd->data_mutex);

		mutex_destroy(&proc_buf->health_mutex);
		mutex_destroy(&proc_buf->buf_primary->data_mutex);
		mutex_destroy(&proc_buf->buf_cmd->data_mutex);

		kfree(proc_buf->buf_primary);
		proc_buf->buf_primary = NULL;
		kfree(proc_buf->buf_cmd);
		proc_buf->buf_cmd = NULL;
		mutex_unlock(&proc_buf->buf_mutex);
	}
	mutex_destroy(&entry->write_buf_mutex);

	kfree(entry->buffers);
	entry->buffers = NULL;
	kfree(entry);
	entry = NULL;

	if (driver->num_dci_client == 0) {
		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
	} else {
		real_time = diag_dci_get_cumulative_real_time(token);
		diag_update_real_time_vote(DIAG_PROC_DCI, real_time, token);
	}
	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);

	return DIAG_DCI_NO_ERROR;
}

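/*
 * Write a DCI packet to a peripheral: data packets go over the DCI
 * command channel, control packets over the control channel.
 */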
int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
{
	uint8_t dest_channel = TYPE_DATA;
	int err = 0;

	if (!buf || peripheral >= NUM_PERIPHERALS || len < 0 ||
	    !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
		DIAG_LOG(DIAG_DEBUG_DCI,
			 "buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
			 buf, peripheral, len,
			 driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
		return -EINVAL;
	}

	if (pkt_type == DIAG_DATA_TYPE) {
		dest_channel = TYPE_DCI_CMD;
	} else if (pkt_type == DIAG_CNTL_TYPE) {
		dest_channel = TYPE_CNTL;
	} else {
		pr_err("diag: Invalid DCI pkt type in %s", __func__);
		return -EINVAL;
	}

	err = diagfwd_write(peripheral, dest_channel, buf, len);
	if (err && err != -ENODEV) {
		pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
		       __func__, peripheral, dest_channel, len, err);
	} else {
		err = DIAG_DCI_NO_ERROR;
	}

	return err;
}

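/*
 * Copy the health statistics (dropped/received logs and events) for one
 * peripheral buffer, or the sum over all of them for ALL_PROC, into the
 * caller's structure, optionally resetting the counters.
 */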
int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc)
{
	struct diag_dci_client_tbl *entry = NULL;
	struct diag_dci_health_t *health = NULL;
	struct diag_dci_health_stats *stats = NULL;
	int i, proc;

	if (!stats_proc)
		return -EINVAL;

	stats = &stats_proc->health;
	proc = stats_proc->proc;
	if (proc < ALL_PROC || proc > APPS_DATA)
		return -EINVAL;

	entry = diag_dci_get_client_entry(stats_proc->client_id);
	if (!entry)
		return DIAG_DCI_NOT_SUPPORTED;

	/*
	 * If the client has registered for a remote processor, the
	 * proc field doesn't have any effect as they have only one buffer.
	 */
	if (entry->client_info.token)
		proc = 0;

	stats->stats.dropped_logs = 0;
	stats->stats.dropped_events = 0;
	stats->stats.received_logs = 0;
	stats->stats.received_events = 0;

	if (proc != ALL_PROC) {
		health = &entry->buffers[proc].health;
		stats->stats.dropped_logs = health->dropped_logs;
		stats->stats.dropped_events = health->dropped_events;
		stats->stats.received_logs = health->received_logs;
		stats->stats.received_events = health->received_events;
		if (stats->reset_status) {
			mutex_lock(&entry->buffers[proc].health_mutex);
			health->dropped_logs = 0;
			health->dropped_events = 0;
			health->received_logs = 0;
			health->received_events = 0;
			mutex_unlock(&entry->buffers[proc].health_mutex);
		}
		return DIAG_DCI_NO_ERROR;
	}

	for (i = 0; i < entry->num_buffers; i++) {
		health = &entry->buffers[i].health;
		stats->stats.dropped_logs += health->dropped_logs;
		stats->stats.dropped_events += health->dropped_events;
		stats->stats.received_logs += health->received_logs;
		stats->stats.received_events += health->received_events;
		if (stats->reset_status) {
			mutex_lock(&entry->buffers[i].health_mutex);
			health->dropped_logs = 0;
			health->dropped_events = 0;
			health->received_logs = 0;
			health->received_events = 0;
			mutex_unlock(&entry->buffers[i].health_mutex);
		}
	}
	return DIAG_DCI_NO_ERROR;
}

int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list)
{
	if (!support_list)
		return -ENOMEM;

	if (!VALID_DCI_TOKEN(support_list->proc))
		return -EIO;

	support_list->list = dci_ops_tbl[support_list->proc].peripheral_status;
	return DIAG_DCI_NO_ERROR;
}