blob: 0937b2f14d77cca52b67172fb5e97bd9c16e5986 [file] [log] [blame]
Sreelakshmi Gownipalli48a1d182018-01-29 13:17:13 -08001/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/uaccess.h>
16#include <linux/diagchar.h>
17#include <linux/sched.h>
18#include <linux/err.h>
19#include <linux/delay.h>
20#include <linux/workqueue.h>
21#include <linux/pm_runtime.h>
22#include <linux/platform_device.h>
23#include <linux/pm_wakeup.h>
24#include <linux/spinlock.h>
25#include <linux/ratelimit.h>
26#include <linux/reboot.h>
27#include <asm/current.h>
28#include <soc/qcom/restart.h>
Hardik Arya2a665fc2018-03-08 11:50:49 +053029#include <linux/vmalloc.h>
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -070030#ifdef CONFIG_DIAG_OVER_USB
31#include <linux/usb/usbdiag.h>
32#endif
33#include "diagchar_hdlc.h"
34#include "diagmem.h"
35#include "diagchar.h"
36#include "diagfwd.h"
37#include "diagfwd_cntl.h"
38#include "diag_dci.h"
39#include "diag_masks.h"
40#include "diagfwd_bridge.h"
41#include "diagfwd_peripheral.h"
42#include "diag_ipc_logging.h"
43
/* Timer/work pair used to periodically drain buffered DCI data */
static struct timer_list dci_drain_timer;
static int dci_timer_in_progress;
static struct work_struct dci_data_drain_work;

/* Reassembly state for a remote DCI packet split across reads */
struct diag_dci_partial_pkt_t partial_pkt;

unsigned int dci_max_reg = 100;
unsigned int dci_max_clients = 10;
struct mutex dci_log_mask_mutex;
struct mutex dci_event_mask_mutex;

/*
 * DCI_HANDSHAKE_RETRY_TIME: Time to wait (in microseconds) before checking the
 * connection status again.
 *
 * DCI_HANDSHAKE_WAIT_TIME: Timeout (in milliseconds) to check for dci
 * connection status
 */
#define DCI_HANDSHAKE_RETRY_TIME	500000
#define DCI_HANDSHAKE_WAIT_TIME		200

/* Protects the DIAG_WS_DCI wakeup-source accounting */
spinlock_t ws_lock;
unsigned long ws_lock_flags;

/* Per-DCI-processor ops: local APSS entry plus optional MDM bridge entry */
struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC] = {
	{
		.ctx = 0,
		.send_log_mask = diag_send_dci_log_mask,
		.send_event_mask = diag_send_dci_event_mask,
		.peripheral_status = 0,
		.mempool = 0,
	},
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	{
		.ctx = DIAGFWD_MDM_DCI,
		.send_log_mask = diag_send_dci_log_mask_remote,
		.send_event_mask = diag_send_dci_event_mask_remote,
		.peripheral_status = 0,
		.mempool = POOL_TYPE_MDM_DCI_WRITE,
	}
#endif
};

/* Connection/handshake state, one entry per DCI processor */
struct dci_channel_status_t dci_channel_status[NUM_DCI_PROC] = {
	{
		.id = 0,
		.open = 0,
		.retry_count = 0
	},
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	{
		.id = DIAGFWD_MDM_DCI,
		.open = 0,
		.retry_count = 0
	}
#endif
};

/* Number of milliseconds anticipated to process the DCI data */
#define DCI_WAKEUP_TIMEOUT 1

/* True when buf holds data and may be queued on a client's write list */
#define DCI_CAN_ADD_BUF_TO_LIST(buf) \
	(buf && buf->data && !buf->in_busy && buf->data_len > 0) \
107
#ifdef CONFIG_DEBUG_FS
struct diag_dci_data_info *dci_traffic;
struct mutex dci_stat_mutex;
/*
 * Record one DCI read into the circular debugfs statistics buffer
 * (DIAG_DCI_DEBUG_CNT entries). No-op until dci_traffic is allocated
 * (allocation happens elsewhere, not in this chunk).
 */
void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
			     uint8_t peripheral, uint8_t proc)
{
	static int curr_dci_data;
	static unsigned long iteration;
	struct diag_dci_data_info *temp_data = dci_traffic;

	if (!temp_data)
		return;
	mutex_lock(&dci_stat_mutex);
	/* Wrap around once the stats buffer is full */
	if (curr_dci_data == DIAG_DCI_DEBUG_CNT)
		curr_dci_data = 0;
	temp_data += curr_dci_data;
	temp_data->iteration = iteration + 1;
	temp_data->data_size = read_bytes;
	temp_data->peripheral = peripheral;
	temp_data->ch_type = ch_type;
	temp_data->proc = proc;
	diag_get_timestamp(temp_data->time_stamp);
	curr_dci_data++;
	iteration++;
	mutex_unlock(&dci_stat_mutex);
}
#else
void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
			     uint8_t peripheral, uint8_t proc) { }
#endif
Gopikrishna Mogasatic20aa252017-02-09 15:04:32 +0530138
139static int check_peripheral_dci_support(int peripheral_id, int dci_proc_id)
140{
141 int dci_peripheral_list = 0;
142
143 if (dci_proc_id < 0 || dci_proc_id >= NUM_DCI_PROC) {
144 pr_err("diag:In %s,not a supported DCI proc id\n", __func__);
145 return 0;
146 }
147 if (peripheral_id < 0 || peripheral_id >= NUM_PERIPHERALS) {
148 pr_err("diag:In %s,not a valid peripheral id\n", __func__);
149 return 0;
150 }
151 dci_peripheral_list = dci_ops_tbl[dci_proc_id].peripheral_status;
152
153 if (dci_peripheral_list <= 0 || dci_peripheral_list > DIAG_CON_ALL) {
154 pr_err("diag:In %s,not a valid dci peripheral mask\n",
155 __func__);
156 return 0;
157 }
158 /* Remove APSS bit mask information */
159 dci_peripheral_list = dci_peripheral_list >> 1;
160
161 if ((1 << peripheral_id) & (dci_peripheral_list))
162 return 1;
163 else
164 return 0;
165}
166
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700167static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
168{
169 unsigned char *temp = mask;
170 uint8_t i;
171
172 if (!mask)
173 return;
174
175 /* create hard coded table for log mask with 16 categories */
176 for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
177 *temp = i;
178 temp++;
179 *temp = dirty ? 1 : 0;
180 temp++;
181 memset(temp, 0, DCI_MAX_ITEMS_PER_LOG_CODE);
182 temp += DCI_MAX_ITEMS_PER_LOG_CODE;
183 }
184}
185
186static void create_dci_event_mask_tbl(unsigned char *tbl_buf)
187{
188 if (tbl_buf)
189 memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
190}
191
/*
 * Drain-timer callback (timer context): defer the actual drain — which
 * takes mutexes — to the DCI workqueue. The queued work is presumably
 * bound to dci_data_drain_work_fn (INIT_WORK not in view — confirm).
 */
void dci_drain_data(unsigned long data)
{
	queue_work(driver->diag_dci_wq, &dci_data_drain_work);
}
196
197static void dci_check_drain_timer(void)
198{
199 if (!dci_timer_in_progress) {
200 dci_timer_in_progress = 1;
201 mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(200));
202 }
203}
204
205#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Worker that checks whether a remote DCI channel has completed its
 * connection handshake. While the channel stays closed it retries up
 * to max_retries times; on timeout it closes the bridge channel for
 * this id.
 */
static void dci_handshake_work_fn(struct work_struct *work)
{
	int err = 0;
	int max_retries = 5;

	struct dci_channel_status_t *status = container_of(work,
						struct dci_channel_status_t,
						handshake_work);

	if (status->open) {
		pr_debug("diag: In %s, remote dci channel is open, index: %d\n",
			 __func__, status->id);
		return;
	}

	if (status->retry_count == max_retries) {
		status->retry_count = 0;
		pr_info("diag: dci channel connection handshake timed out, id: %d\n",
			status->id);
		err = diagfwd_bridge_close(TOKEN_TO_BRIDGE(status->id));
		if (err) {
			pr_err("diag: In %s, unable to close dci channel id: %d, err: %d\n",
			       __func__, status->id, err);
		}
		return;
	}
	status->retry_count++;
	/*
	 * Sleep for sometime to check for the connection status again. The
	 * value should be optimum to include a roundabout time for a small
	 * packet to the remote processor.
	 */
	usleep_range(DCI_HANDSHAKE_RETRY_TIME, DCI_HANDSHAKE_RETRY_TIME + 100);
	/*
	 * Re-arm the wait timer; its callback (presumably dci_chk_handshake —
	 * timer setup not in view, confirm) queues this work again.
	 */
	mod_timer(&status->wait_time,
		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
}
242
243static void dci_chk_handshake(unsigned long data)
244{
245 int index = (int)data;
246
247 if (index < 0 || index >= NUM_DCI_PROC)
248 return;
249
250 queue_work(driver->diag_dci_wq,
251 &dci_channel_status[index].handshake_work);
252}
253#endif
254
255static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
256{
257 if (!buffer || buffer->data)
258 return -EINVAL;
259
260 switch (type) {
261 case DCI_BUF_PRIMARY:
262 buffer->capacity = IN_BUF_SIZE;
Hardik Arya2a665fc2018-03-08 11:50:49 +0530263 buffer->data = vzalloc(buffer->capacity);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700264 if (!buffer->data)
265 return -ENOMEM;
266 break;
267 case DCI_BUF_SECONDARY:
268 buffer->data = NULL;
269 buffer->capacity = IN_BUF_SIZE;
270 break;
271 case DCI_BUF_CMD:
272 buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
Hardik Arya2a665fc2018-03-08 11:50:49 +0530273 buffer->data = vzalloc(buffer->capacity);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700274 if (!buffer->data)
275 return -ENOMEM;
276 break;
277 default:
278 pr_err("diag: In %s, unknown type %d", __func__, type);
279 return -EINVAL;
280 }
281
282 buffer->data_len = 0;
283 buffer->in_busy = 0;
284 buffer->buf_type = type;
285 mutex_init(&buffer->data_mutex);
286
287 return 0;
288}
289
290static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
291{
292 if (!buf)
293 return -EINVAL;
294
295 /* Return 1 if the buffer is not busy and can hold new data */
296 if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
297 return 1;
298
299 return 0;
300}
301
/*
 * Queue a filled DCI buffer on the client's write list so it can be
 * copied to userspace. No-op for empty buffers or buffers already on
 * the list. Takes a DIAG_WS_DCI wakeup-source reference sized by the
 * buffered length and marks the buffer busy so no more data is
 * appended until the client consumes it.
 */
static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
				   struct diag_dci_buffer_t *buf)
{
	if (!buf || !client || !buf->data)
		return;

	if (buf->in_list || buf->data_len == 0)
		return;

	mutex_lock(&client->write_buf_mutex);
	list_add_tail(&buf->buf_track, &client->list_write_buf);
	/*
	 * In the case of DCI, there can be multiple packets in one read. To
	 * calculate the wakeup source reference count, we must account for each
	 * packet in a single read.
	 */
	diag_ws_on_read(DIAG_WS_DCI, buf->data_len);
	/* data_mutex nests inside write_buf_mutex here — keep this order */
	mutex_lock(&buf->data_mutex);
	buf->in_busy = 1;
	buf->in_list = 1;
	mutex_unlock(&buf->data_mutex);
	mutex_unlock(&client->write_buf_mutex);
}
325
/*
 * Ensure client->buffers[data_source].buf_curr can accept @len more
 * bytes. Preference order: keep the current buffer if it fits, else
 * queue it for delivery and reuse the primary buffer, else allocate a
 * fresh secondary buffer backed by the DCI mempool.
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM when the
 * mempool is exhausted, -EIO on other failures.
 * NOTE(review): callers (e.g. copy_dci_event) hold the peripheral
 * buffer's buf_mutex around this call — confirm before adding callers.
 */
static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
			       int data_source, int len)
{
	struct diag_dci_buffer_t *buf_primary = NULL;
	struct diag_dci_buffer_t *buf_temp = NULL;
	struct diag_dci_buffer_t *curr = NULL;

	if (!client)
		return -EINVAL;
	if (len < 0 || len > IN_BUF_SIZE)
		return -EINVAL;

	curr = client->buffers[data_source].buf_curr;
	buf_primary = client->buffers[data_source].buf_primary;

	if (curr && diag_dci_check_buffer(curr, len) == 1)
		return 0;

	/* Current buffer can't take the data; hand it off for delivery */
	dci_add_buffer_to_list(client, curr);
	client->buffers[data_source].buf_curr = NULL;

	if (diag_dci_check_buffer(buf_primary, len) == 1) {
		client->buffers[data_source].buf_curr = buf_primary;
		return 0;
	}

	buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
	if (!buf_temp)
		return -EIO;

	if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
		/* Secondary buffers borrow their storage from the mempool */
		buf_temp->data = diagmem_alloc(driver, IN_BUF_SIZE,
					       POOL_TYPE_DCI);
		if (!buf_temp->data) {
			kfree(buf_temp);
			buf_temp = NULL;
			return -ENOMEM;
		}
		client->buffers[data_source].buf_curr = buf_temp;
		return 0;
	}

	kfree(buf_temp);
	buf_temp = NULL;
	return -EIO;
}
372
/*
 * Wake every DCI client that has queued write buffers and is not
 * already being serviced, marking it in_service and signalling its
 * sleeping process.
 */
void diag_dci_wakeup_clients(void)
{
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	mutex_lock(&driver->dci_mutex);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);

		/*
		 * Don't wake up the client when there is no pending buffer to
		 * write or when it is writing to user space
		 */
		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
			mutex_lock(&entry->write_buf_mutex);
			entry->in_service = 1;
			mutex_unlock(&entry->write_buf_mutex);
			diag_update_sleeping_process(entry->client->tgid,
						     DCI_DATA_TYPE);
		}
	}
	mutex_unlock(&driver->dci_mutex);
}
396
/*
 * Workqueue drain handler: for every client, queue its primary,
 * command and current buffers that hold pending data onto the write
 * list, wake clients that now have data, and finally allow the drain
 * timer to be re-armed by clearing dci_timer_in_progress.
 */
void dci_data_drain_work_fn(struct work_struct *work)
{
	int i;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
	struct diag_dci_buffer_t *buf_temp = NULL;

	mutex_lock(&driver->dci_mutex);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		for (i = 0; i < entry->num_buffers; i++) {
			proc_buf = &entry->buffers[i];

			mutex_lock(&proc_buf->buf_mutex);
			buf_temp = proc_buf->buf_primary;
			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
				dci_add_buffer_to_list(entry, buf_temp);

			buf_temp = proc_buf->buf_cmd;
			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
				dci_add_buffer_to_list(entry, buf_temp);

			buf_temp = proc_buf->buf_curr;
			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
				dci_add_buffer_to_list(entry, buf_temp);
				/* current buffer is relinquished once queued */
				proc_buf->buf_curr = NULL;
			}
			mutex_unlock(&proc_buf->buf_mutex);
		}
		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
			mutex_lock(&entry->write_buf_mutex);
			entry->in_service = 1;
			mutex_unlock(&entry->write_buf_mutex);
			diag_update_sleeping_process(entry->client->tgid,
						     DCI_DATA_TYPE);
		}
	}
	mutex_unlock(&driver->dci_mutex);
	dci_timer_in_progress = 0;
}
438
439static int diag_process_single_dci_pkt(unsigned char *buf, int len,
440 int data_source, int token)
441{
442 uint8_t cmd_code = 0;
443
444 if (!buf || len < 0) {
445 pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
446 __func__, buf, len);
447 return -EIO;
448 }
449
450 cmd_code = *(uint8_t *)buf;
451
452 switch (cmd_code) {
453 case LOG_CMD_CODE:
454 extract_dci_log(buf, len, data_source, token, NULL);
455 break;
456 case EVENT_CMD_CODE:
457 extract_dci_events(buf, len, data_source, token, NULL);
458 break;
459 case EXT_HDR_CMD_CODE:
460 extract_dci_ext_pkt(buf, len, data_source, token);
461 break;
462 case DCI_PKT_RSP_CODE:
463 case DCI_DELAYED_RSP_CODE:
464 extract_dci_pkt_rsp(buf, len, data_source, token);
465 break;
466 case DCI_CONTROL_PKT_CODE:
467 extract_dci_ctrl_pkt(buf, len, token);
468 break;
469 default:
470 pr_err("diag: Unable to process single DCI packet, cmd_code: %d, data_source: %d",
471 cmd_code, data_source);
472 return -EINVAL;
473 }
474
475 return 0;
476}
477
478/* Process the data read from apps userspace client */
479void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
480{
481 int err = 0;
482
483 if (!buf) {
484 pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
485 return;
486 }
487
488 if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
489 && data_type != DCI_PKT_TYPE) {
490 pr_err("diag: In %s, unsupported data_type: 0x%x\n",
491 __func__, (unsigned int)data_type);
492 return;
493 }
494
495 err = diag_process_single_dci_pkt(buf, recd_bytes, APPS_DATA,
496 DCI_LOCAL_PROC);
497 if (err)
498 return;
499
500 /* wake up all sleeping DCI clients which have some data */
501 diag_dci_wakeup_clients();
502 dci_check_drain_timer();
503}
504
/*
 * Process DCI data read from a remote (bridge) processor. The stream
 * is a sequence of [diag_dci_header_t][payload] packets; a packet may
 * span two reads, in which case the partial tail is stashed in the
 * global partial_pkt and completed on the next call.
 */
void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes)
{
	int read_bytes = 0, err = 0;
	uint16_t dci_pkt_len;
	struct diag_dci_header_t *header = NULL;
	int header_len = sizeof(struct diag_dci_header_t);
	int token = BRIDGE_TO_TOKEN(index);

	if (!buf)
		return;

	diag_dci_record_traffic(recd_bytes, 0, 0, token);

	if (!partial_pkt.processing)
		goto start;

	/* Continue filling the packet left over from the previous read */
	if (partial_pkt.remaining > recd_bytes) {
		/* This read still doesn't complete the pending packet */
		if ((partial_pkt.read_len + recd_bytes) >
							(MAX_DCI_PACKET_SZ)) {
			pr_err("diag: Invalid length %d, %d received in %s\n",
			       partial_pkt.read_len, recd_bytes, __func__);
			goto end;
		}
		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
		       recd_bytes);
		read_bytes += recd_bytes;
		buf += read_bytes;
		partial_pkt.read_len += recd_bytes;
		partial_pkt.remaining -= recd_bytes;
	} else {
		/* This read completes the pending packet */
		if ((partial_pkt.read_len + partial_pkt.remaining) >
						(MAX_DCI_PACKET_SZ)) {
			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
			       partial_pkt.read_len,
			       partial_pkt.remaining, __func__);
			goto end;
		}
		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
		       partial_pkt.remaining);
		read_bytes += partial_pkt.remaining;
		buf += read_bytes;
		partial_pkt.read_len += partial_pkt.remaining;
		partial_pkt.remaining = 0;
	}

	if (partial_pkt.remaining == 0) {
		/*
		 * Retrieve from the DCI control packet after the header = start
		 * (1 byte) + version (1 byte) + length (2 bytes)
		 */
		diag_process_single_dci_pkt(partial_pkt.data + 4,
					    partial_pkt.read_len - header_len,
					    DCI_REMOTE_DATA, token);
		partial_pkt.read_len = 0;
		partial_pkt.total_len = 0;
		partial_pkt.processing = 0;
		goto start;
	}
	goto end;

start:
	while (read_bytes < recd_bytes) {
		header = (struct diag_dci_header_t *)buf;
		dci_pkt_len = header->length;

		/* Skip non-control packets when no DCI client is connected */
		if (header->cmd_code != DCI_CONTROL_PKT_CODE &&
			driver->num_dci_client == 0) {
			read_bytes += header_len + dci_pkt_len;
			buf += header_len + dci_pkt_len;
			continue;
		}

		if (dci_pkt_len + header_len > MAX_DCI_PACKET_SZ) {
			pr_err("diag: Invalid length in the dci packet field %d\n",
			       dci_pkt_len);
			break;
		}

		/* Packet extends past this read; stash it for the next one */
		if ((dci_pkt_len + header_len) > (recd_bytes - read_bytes)) {
			partial_pkt.read_len = recd_bytes - read_bytes;
			partial_pkt.total_len = dci_pkt_len + header_len;
			partial_pkt.remaining = partial_pkt.total_len -
						partial_pkt.read_len;
			partial_pkt.processing = 1;
			memcpy(partial_pkt.data, buf, partial_pkt.read_len);
			break;
		}
		/*
		 * Retrieve from the DCI control packet after the header = start
		 * (1 byte) + version (1 byte) + length (2 bytes)
		 *
		 * NOTE(review): this dispatch passes DCI_MDM_PROC while the
		 * partial-packet path above passes the computed token —
		 * confirm whether that is intentional for multi-remote setups.
		 */
		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
						  DCI_REMOTE_DATA,
						  DCI_MDM_PROC);
		if (err)
			break;
		read_bytes += header_len + dci_pkt_len;
		buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
	}
end:
	if (err)
		return;
	/* wake up all sleeping DCI clients which have some data */
	diag_dci_wakeup_clients();
	dci_check_drain_timer();
}
610
/* Process the data read from the peripheral dci channels */
/*
 * The stream is a sequence of framed packets: start (1 byte) +
 * version (1 byte) + length (2 bytes) + payload + end byte (1). Each
 * payload is dispatched to diag_process_single_dci_pkt().
 */
void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
				      int recd_bytes)
{
	int read_bytes = 0, err = 0;
	uint16_t dci_pkt_len;
	struct diag_dci_pkt_header_t *header = NULL;
	uint8_t recv_pkt_cmd_code;

	if (!buf || !p_info)
		return;

	/*
	 * Release wakeup source when there are no more clients to
	 * process DCI data
	 */
	if (driver->num_dci_client == 0) {
		diag_ws_reset(DIAG_WS_DCI);
		return;
	}

	diag_dci_record_traffic(recd_bytes, p_info->type, p_info->peripheral,
				DCI_LOCAL_PROC);
	while (read_bytes < recd_bytes) {
		header = (struct diag_dci_pkt_header_t *)buf;
		recv_pkt_cmd_code = header->pkt_code;
		dci_pkt_len = header->len;

		/*
		 * Check if the length of the current packet is lesser than the
		 * remaining bytes in the received buffer. This includes space
		 * for the Start byte (1), Version byte (1), length bytes (2)
		 * and End byte (1)
		 */
		if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
			pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
			       __func__, recd_bytes, dci_pkt_len);
			diag_ws_release();
			return;
		}
		/*
		 * Retrieve from the DCI control packet after the header = start
		 * (1 byte) + version (1 byte) + length (2 bytes)
		 */
		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
						  (int)p_info->peripheral,
						  DCI_LOCAL_PROC);
		if (err) {
			diag_ws_release();
			break;
		}
		read_bytes += 5 + dci_pkt_len;
		buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
	}

	if (err)
		return;
	/* wake up all sleeping DCI clients which have some data */
	diag_dci_wakeup_clients();
	dci_check_drain_timer();
}
672
/*
 * Query whether @log_code is enabled in the client's DCI log mask.
 * The mask is laid out as one 514-byte block per equipment id: 2
 * header bytes (equip id, dirty flag) followed by the per-item bits —
 * presumably DCI_MAX_ITEMS_PER_LOG_CODE == 512, matching the layout
 * written by create_dci_log_mask_tbl (confirm the constant).
 * Returns 1 when the log is enabled, 0 otherwise.
 */
int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
			    uint16_t log_code)
{
	uint16_t item_num;
	uint8_t equip_id, *log_mask_ptr, byte_mask;
	int byte_index, offset;

	if (!entry) {
		pr_err("diag: In %s, invalid client entry\n", __func__);
		return 0;
	}

	equip_id = LOG_GET_EQUIP_ID(log_code);
	item_num = LOG_GET_ITEM_NUM(log_code);
	byte_index = item_num/8 + 2; /* +2 skips the per-equip header bytes */
	byte_mask = 0x01 << (item_num % 8);
	offset = equip_id * 514;

	if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
		pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
		       __func__, offset, log_code, byte_index);
		return 0;
	}

	log_mask_ptr = entry->dci_log_mask;
	log_mask_ptr = log_mask_ptr + offset + byte_index;
	return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;

}
702
703int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
704 uint16_t event_id)
705{
706 uint8_t *event_mask_ptr, byte_mask;
707 int byte_index, bit_index;
708
709 if (!entry) {
710 pr_err("diag: In %s, invalid client entry\n", __func__);
711 return 0;
712 }
713
714 byte_index = event_id/8;
715 bit_index = event_id % 8;
716 byte_mask = 0x1 << bit_index;
717
Hardik Arya4f2f2cb2018-04-06 15:10:36 +0530718 if (byte_index >= DCI_EVENT_MASK_SIZE) {
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700719 pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
720 __func__, event_id, byte_index);
721 return 0;
722 }
723
724 event_mask_ptr = entry->dci_event_mask;
725 event_mask_ptr = event_mask_ptr + byte_index;
726 return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
727}
728
729static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
730{
731 if (!header)
732 return -ENOMEM;
733
734 switch (header->cmd_code) {
735 case 0x7d: /* Msg Mask Configuration */
736 case 0x73: /* Log Mask Configuration */
737 case 0x81: /* Event Mask Configuration */
738 case 0x82: /* Event Mask Change */
739 case 0x60: /* Event Mask Toggle */
740 return 1;
741 }
742
743 if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
744 switch (header->subsys_cmd_code) {
745 case 0x60: /* Extended Event Mask Config */
746 case 0x61: /* Extended Msg Mask Config */
747 case 0x62: /* Extended Log Mask Config */
748 case 0x20C: /* Set current Preset ID */
749 case 0x20D: /* Get current Preset ID */
750 case 0x218: /* HDLC Disabled Command */
751 return 1;
752 }
753 }
754
755 return 0;
756}
757
/*
 * Allocate and enqueue a tracking entry for an outgoing DCI command so
 * its (possibly delayed) responses can be routed back to the client.
 * Assigns a new driver-wide tag. Returns the entry, or NULL on
 * allocation failure.
 * NOTE(review): the tag counter and request list appear to rely on the
 * caller holding driver->dci_mutex — confirm at call sites.
 */
static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
								 int client_id)
{
	struct dci_pkt_req_entry_t *entry = NULL;

	entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
	if (!entry)
		return NULL;

	driver->dci_tag++;
	entry->client_id = client_id;
	entry->uid = uid;
	entry->tag = driver->dci_tag;
	pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
		 entry->client_id, entry->uid, entry->tag);
	list_add_tail(&entry->track, &driver->dci_req_list);

	return entry;
}
777
778static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
779{
780 struct list_head *start, *temp;
781 struct dci_pkt_req_entry_t *entry = NULL;
782
783 list_for_each_safe(start, temp, &driver->dci_req_list) {
784 entry = list_entry(start, struct dci_pkt_req_entry_t, track);
785 if (entry->tag == tag)
786 return entry;
787 }
788 return NULL;
789}
790
/*
 * Decide whether a response completes the tracked DCI request and, if
 * so, unlink and free the request entry.
 * Returns 1 if the entry was deleted (caller must not touch it again),
 * 0 when more delayed responses are still expected, negative errno on
 * bad input.
 */
static int diag_dci_remove_req_entry(unsigned char *buf, int len,
				     struct dci_pkt_req_entry_t *entry)
{
	uint16_t rsp_count = 0, delayed_rsp_id = 0;

	if (!buf || len <= 0 || !entry) {
		pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
		       __func__, buf, len, entry);
		return -EIO;
	}

	/* It is an immediate response, delete it from the table */
	if (*buf != 0x80) {
		list_del(&entry->track);
		kfree(entry);
		entry = NULL;
		return 1;
	}

	/* It is a delayed response. Check if the length is valid */
	if (len < MIN_DELAYED_RSP_LEN) {
		pr_err("diag: Invalid delayed rsp packet length %d\n", len);
		return -EINVAL;
	}

	/*
	 * If the delayed response id field (uint16_t at byte 8) is 0 then
	 * there is only one response and we can remove the request entry.
	 */
	delayed_rsp_id = *(uint16_t *)(buf + 8);
	if (delayed_rsp_id == 0) {
		list_del(&entry->track);
		kfree(entry);
		entry = NULL;
		return 1;
	}

	/*
	 * Check the response count field (uint16 at byte 10). The request
	 * entry can be deleted if it is the last response in the sequence.
	 * It is the last response in the sequence if the response count
	 * is 1 or if the signed bit gets dropped.
	 */
	rsp_count = *(uint16_t *)(buf + 10);
	if (rsp_count > 0 && rsp_count < 0x1000) {
		list_del(&entry->track);
		kfree(entry);
		entry = NULL;
		return 1;
	}

	return 0;
}
844
/*
 * Parse a DCI connection-status control packet: a header with a count,
 * followed by (peripheral id, status) byte pairs. Notifies DCI clients
 * of each peripheral's open/closed state on the given token.
 */
static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
{
	struct diag_ctrl_dci_status *header = NULL;
	unsigned char *temp = buf;
	uint32_t read_len = 0;
	uint8_t i;
	int peripheral_mask, status;

	if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
		pr_err("diag: In %s, invalid buf %pK or length: %d\n",
		       __func__, buf, len);
		return;
	}

	if (!VALID_DCI_TOKEN(token)) {
		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
		return;
	}

	header = (struct diag_ctrl_dci_status *)temp;
	temp += sizeof(struct diag_ctrl_dci_status);
	read_len += sizeof(struct diag_ctrl_dci_status);

	for (i = 0; i < header->count; i++) {
		/* Each iteration consumes 2 bytes; bail if they aren't there */
		if (read_len > (len - 2)) {
			pr_err("diag: In %s, Invalid length len: %d\n",
			       __func__, len);
			return;
		}

		/* Map the wire peripheral id to the connection bitmask */
		switch (*(uint8_t *)temp) {
		case PERIPHERAL_MODEM:
			peripheral_mask = DIAG_CON_MPSS;
			break;
		case PERIPHERAL_LPASS:
			peripheral_mask = DIAG_CON_LPASS;
			break;
		case PERIPHERAL_WCNSS:
			peripheral_mask = DIAG_CON_WCNSS;
			break;
		case PERIPHERAL_SENSORS:
			peripheral_mask = DIAG_CON_SENSORS;
			break;
		default:
			pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
			       __func__, *(uint8_t *)temp);
			return;
		}
		temp += sizeof(uint8_t);
		read_len += sizeof(uint8_t);

		status = (*(uint8_t *)temp) ? DIAG_STATUS_OPEN :
							DIAG_STATUS_CLOSED;
		temp += sizeof(uint8_t);
		read_len += sizeof(uint8_t);
		diag_dci_notify_client(peripheral_mask, status, token);
	}
}
903
904static void dci_process_ctrl_handshake_pkt(unsigned char *buf, int len,
905 int token)
906{
907 struct diag_ctrl_dci_handshake_pkt *header = NULL;
908 unsigned char *temp = buf;
909 int err = 0;
910
911 if (!buf || (len < sizeof(struct diag_ctrl_dci_handshake_pkt)))
912 return;
913
914 if (!VALID_DCI_TOKEN(token))
915 return;
916
917 header = (struct diag_ctrl_dci_handshake_pkt *)temp;
918 if (header->magic == DCI_MAGIC) {
919 dci_channel_status[token].open = 1;
920 err = dci_ops_tbl[token].send_log_mask(token);
921 if (err) {
922 pr_err("diag: In %s, unable to send log mask to token: %d, err: %d\n",
923 __func__, token, err);
924 }
925 err = dci_ops_tbl[token].send_event_mask(token);
926 if (err) {
927 pr_err("diag: In %s, unable to send event mask to token: %d, err: %d\n",
928 __func__, token, err);
929 }
930 }
931}
932
/*
 * Dispatch a DCI control packet (connection status or handshake) by
 * its 4-byte control packet id. Control packets are never delivered to
 * clients, so the wakeup-source reference taken here is dropped again
 * via diag_ws_on_copy_fail() before returning.
 */
void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token)
{
	unsigned char *temp = buf;
	uint32_t ctrl_pkt_id;

	diag_ws_on_read(DIAG_WS_DCI, len);
	if (!buf) {
		pr_err("diag: Invalid buffer in %s\n", __func__);
		goto err;
	}

	/* Need at least the command code byte plus the control pkt id */
	if (len < (sizeof(uint8_t) + sizeof(uint32_t))) {
		pr_err("diag: In %s, invalid length %d\n", __func__, len);
		goto err;
	}

	/* Skip the Control packet command code */
	temp += sizeof(uint8_t);
	len -= sizeof(uint8_t);
	ctrl_pkt_id = *(uint32_t *)temp;
	switch (ctrl_pkt_id) {
	case DIAG_CTRL_MSG_DCI_CONNECTION_STATUS:
		dci_process_ctrl_status(temp, len, token);
		break;
	case DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT:
		dci_process_ctrl_handshake_pkt(temp, len, token);
		break;
	default:
		pr_debug("diag: In %s, unknown control pkt %d\n",
			 __func__, ctrl_pkt_id);
		break;
	}

err:
	/*
	 * DCI control packets are not consumed by the clients. Mimic client
	 * consumption by setting and clearing the wakeup source copy_count
	 * explicitly.
	 */
	diag_ws_on_copy_fail(DIAG_WS_DCI);
}
974
975void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
976 int token)
977{
978 int tag;
979 struct diag_dci_client_tbl *entry = NULL;
980 void *temp_buf = NULL;
981 uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
982 uint32_t rsp_len = 0;
983 struct diag_dci_buffer_t *rsp_buf = NULL;
984 struct dci_pkt_req_entry_t *req_entry = NULL;
985 unsigned char *temp = buf;
986 int save_req_uid = 0;
987 struct diag_dci_pkt_rsp_header_t pkt_rsp_header;
988
989 if (!buf) {
990 pr_err("diag: Invalid pointer in %s\n", __func__);
991 return;
992 }
993 dci_cmd_code = *(uint8_t *)(temp);
994 if (dci_cmd_code == DCI_PKT_RSP_CODE) {
995 cmd_code_len = sizeof(uint8_t);
996 } else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
997 cmd_code_len = sizeof(uint32_t);
998 } else {
999 pr_err("diag: In %s, invalid command code %d\n", __func__,
1000 dci_cmd_code);
1001 return;
1002 }
1003 temp += cmd_code_len;
1004 tag = *(int *)temp;
1005 temp += sizeof(int);
1006
1007 /*
1008 * The size of the response is (total length) - (length of the command
1009 * code, the tag (int)
1010 */
1011 rsp_len = len - (cmd_code_len + sizeof(int));
1012 if ((rsp_len == 0) || (rsp_len > (len - 5))) {
1013 pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d",
1014 __func__, len, rsp_len);
1015 return;
1016 }
1017
1018 mutex_lock(&driver->dci_mutex);
1019 req_entry = diag_dci_get_request_entry(tag);
1020 if (!req_entry) {
1021 pr_err_ratelimited("diag: No matching client for DCI data\n");
1022 mutex_unlock(&driver->dci_mutex);
1023 return;
1024 }
1025
1026 entry = diag_dci_get_client_entry(req_entry->client_id);
1027 if (!entry) {
1028 pr_err("diag: In %s, couldn't find client entry, id:%d\n",
1029 __func__, req_entry->client_id);
1030 mutex_unlock(&driver->dci_mutex);
1031 return;
1032 }
1033
1034 save_req_uid = req_entry->uid;
1035 /* Remove the headers and send only the response to this function */
1036 delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
1037 if (delete_flag < 0) {
1038 mutex_unlock(&driver->dci_mutex);
1039 return;
1040 }
1041
1042 mutex_lock(&entry->buffers[data_source].buf_mutex);
1043 rsp_buf = entry->buffers[data_source].buf_cmd;
1044
1045 mutex_lock(&rsp_buf->data_mutex);
1046 /*
1047 * Check if we can fit the data in the rsp buffer. The total length of
1048 * the rsp is the rsp length (write_len) + DCI_PKT_RSP_TYPE header (int)
1049 * + field for length (int) + delete_flag (uint8_t)
1050 */
1051 if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
1052 pr_alert("diag: create capacity for pkt rsp\n");
1053 rsp_buf->capacity += 9 + rsp_len;
1054 temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
1055 GFP_KERNEL);
1056 if (!temp_buf) {
1057 pr_err("diag: DCI realloc failed\n");
1058 mutex_unlock(&rsp_buf->data_mutex);
1059 mutex_unlock(&entry->buffers[data_source].buf_mutex);
1060 mutex_unlock(&driver->dci_mutex);
1061 return;
1062 }
1063 rsp_buf->data = temp_buf;
1064 }
1065
1066 /* Fill in packet response header information */
1067 pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
1068 /* Packet Length = Response Length + Length of uid field (int) */
1069 pkt_rsp_header.length = rsp_len + sizeof(int);
1070 pkt_rsp_header.delete_flag = delete_flag;
1071 pkt_rsp_header.uid = save_req_uid;
1072 memcpy(rsp_buf->data + rsp_buf->data_len, &pkt_rsp_header,
1073 sizeof(struct diag_dci_pkt_rsp_header_t));
1074 rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
1075 memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
1076 rsp_buf->data_len += rsp_len;
1077 rsp_buf->data_source = data_source;
1078
1079 mutex_unlock(&rsp_buf->data_mutex);
1080
1081 /*
1082 * Add directly to the list for writing responses to the
1083 * userspace as these shouldn't be buffered and shouldn't wait
1084 * for log and event buffers to be full
1085 */
1086 dci_add_buffer_to_list(entry, rsp_buf);
1087 mutex_unlock(&entry->buffers[data_source].buf_mutex);
1088 mutex_unlock(&driver->dci_mutex);
1089}
1090
1091static void copy_ext_hdr(struct diag_dci_buffer_t *data_buffer, void *ext_hdr)
1092{
1093 if (!data_buffer) {
1094 pr_err("diag: In %s, data buffer is NULL", __func__);
1095 return;
1096 }
1097
1098 *(int *)(data_buffer->data + data_buffer->data_len) =
1099 DCI_EXT_HDR_TYPE;
1100 data_buffer->data_len += sizeof(int);
1101 memcpy(data_buffer->data + data_buffer->data_len, ext_hdr,
1102 EXT_HDR_LEN);
1103 data_buffer->data_len += EXT_HDR_LEN;
1104}
1105
1106static void copy_dci_event(unsigned char *buf, int len,
1107 struct diag_dci_client_tbl *client, int data_source,
1108 void *ext_hdr)
1109{
1110 struct diag_dci_buffer_t *data_buffer = NULL;
1111 struct diag_dci_buf_peripheral_t *proc_buf = NULL;
1112 int err = 0, total_len = 0;
1113
1114 if (!buf || !client) {
1115 pr_err("diag: Invalid pointers in %s", __func__);
1116 return;
1117 }
1118
1119 total_len = sizeof(int) + len;
1120 if (ext_hdr)
1121 total_len += sizeof(int) + EXT_HDR_LEN;
1122
1123 proc_buf = &client->buffers[data_source];
1124 mutex_lock(&proc_buf->buf_mutex);
1125 mutex_lock(&proc_buf->health_mutex);
1126 err = diag_dci_get_buffer(client, data_source, total_len);
1127 if (err) {
1128 if (err == -ENOMEM)
1129 proc_buf->health.dropped_events++;
1130 else
1131 pr_err("diag: In %s, invalid packet\n", __func__);
1132 mutex_unlock(&proc_buf->health_mutex);
1133 mutex_unlock(&proc_buf->buf_mutex);
1134 return;
1135 }
1136
1137 data_buffer = proc_buf->buf_curr;
1138
1139 proc_buf->health.received_events++;
1140 mutex_unlock(&proc_buf->health_mutex);
1141 mutex_unlock(&proc_buf->buf_mutex);
1142
1143 mutex_lock(&data_buffer->data_mutex);
1144 if (ext_hdr)
1145 copy_ext_hdr(data_buffer, ext_hdr);
1146
1147 *(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
1148 data_buffer->data_len += sizeof(int);
1149 memcpy(data_buffer->data + data_buffer->data_len, buf, len);
1150 data_buffer->data_len += len;
1151 data_buffer->data_source = data_source;
1152 mutex_unlock(&data_buffer->data_mutex);
1153
1154}
1155
/*
 * Parse a DCI event report packet and fan each contained event out to
 * every DCI client (matching @token) whose event mask includes its
 * event id.
 *
 * @buf layout: event cmd code (1 byte), total series length (2 bytes),
 * then a series of event records. Each record begins with a 16-bit
 * event id word:
 *   - bits 0-11:  event id
 *   - bit 15 set: record carries only the 2 low timestamp bytes; the
 *                 full 8-byte timestamp saved from an earlier record in
 *                 this series is reused
 *   - bits 13-14: payload length 0-2, or the escape value 3 meaning an
 *                 explicit 1-byte payload-length field follows the
 *                 timestamp
 * Each event is rebuilt in event_data as:
 *   [len (2)][event id (2)][timestamp (8)][opt payload-len (1)][payload]
 * and delivered via copy_dci_event(). @ext_hdr, when non-NULL, is
 * prepended to the client's copy.
 */
void extract_dci_events(unsigned char *buf, int len, int data_source,
			int token, void *ext_hdr)
{
	uint16_t event_id, event_id_packet, length, temp_len;
	uint8_t payload_len, payload_len_field;
	uint8_t timestamp[8] = {0}, timestamp_len;
	unsigned char event_data[MAX_EVENT_SIZE];
	unsigned int total_event_len;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	if (!buf) {
		pr_err("diag: In %s buffer is NULL\n", __func__);
		return;
	}
	/*
	 * 1 byte for event code and 2 bytes for the length field.
	 * The length field indicates the total length removing the cmd_code
	 * and the length field. The event parsing in that case should happen
	 * till the end.
	 */
	if (len < 3) {
		pr_err("diag: In %s invalid len: %d\n", __func__, len);
		return;
	}
	length = *(uint16_t *)(buf + 1); /* total length of event series */
	if ((length == 0) || (len != (length + 3))) {
		pr_err("diag: Incoming dci event length: %d is invalid\n",
			length);
		return;
	}
	/*
	 * Move directly to the start of the event series.
	 * The event parsing should happen from start of event
	 * series till the end.
	 */
	temp_len = 3;
	while (temp_len < length) {
		event_id_packet = *(uint16_t *)(buf + temp_len);
		event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
		if (event_id_packet & 0x8000) {
			/* The packet has the two smallest byte of the
			 * timestamp
			 */
			timestamp_len = 2;
		} else {
			/* The packet has the full timestamp. The first event
			 * will always have full timestamp. Save it in the
			 * timestamp buffer and use it for subsequent events if
			 * necessary.
			 */
			timestamp_len = 8;
			if ((temp_len + timestamp_len + 2) <= len)
				memcpy(timestamp, buf + temp_len + 2,
					timestamp_len);
			else {
				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
					__func__, len, temp_len);
				return;
			}
		}
		/* 13th and 14th bit represent the payload length */
		if (((event_id_packet & 0x6000) >> 13) == 3) {
			/* Escape value: explicit 1-byte payload length */
			payload_len_field = 1;
			if ((temp_len + timestamp_len + 3) <= len) {
				payload_len = *(uint8_t *)
					(buf + temp_len + 2 + timestamp_len);
			} else {
				pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
					__func__, len, temp_len);
				return;
			}
			/* Offset 13 = 2 (len) + 2 (id) + 8 (timestamp) +
			 * 1 (payload length byte) in event_data.
			 */
			if ((payload_len < (MAX_EVENT_SIZE - 13)) &&
			    ((temp_len + timestamp_len + payload_len + 3) <= len)) {
				/*
				 * Copy the payload length and the payload
				 * after skipping temp_len bytes for already
				 * parsed packet, timestamp_len for timestamp
				 * buffer, 2 bytes for event_id_packet.
				 */
				memcpy(event_data + 12, buf + temp_len + 2 +
					timestamp_len, 1);
				memcpy(event_data + 13, buf + temp_len + 2 +
					timestamp_len + 1, payload_len);
			} else {
				pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
					(MAX_EVENT_SIZE - 13), payload_len, temp_len);
				return;
			}
		} else {
			payload_len_field = 0;
			payload_len = (event_id_packet & 0x6000) >> 13;
			/*
			 * Copy the payload after skipping temp_len bytes
			 * for already parsed packet, timestamp_len for
			 * timestamp buffer, 2 bytes for event_id_packet.
			 */
			if ((payload_len < (MAX_EVENT_SIZE - 12)) &&
			    ((temp_len + timestamp_len + payload_len + 2) <= len))
				memcpy(event_data + 12, buf + temp_len + 2 +
					timestamp_len, payload_len);
			else {
				pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
					(MAX_EVENT_SIZE - 12), payload_len, temp_len);
				return;
			}
		}

		/* Before copying the data to userspace, check if we are still
		 * within the buffer limit. This is an error case, don't count
		 * it towards the health statistics.
		 *
		 * Here, the offset of 2 bytes(uint16_t) is for the
		 * event_id_packet length
		 */
		temp_len += sizeof(uint16_t) + timestamp_len +
						payload_len_field + payload_len;
		if (temp_len > len) {
			pr_err("diag: Invalid length in %s, len: %d, read: %d",
				__func__, len, temp_len);
			return;
		}

		/* 2 bytes for the event id & timestamp len is hard coded to 8,
		 * as individual events have full timestamp.
		 */
		*(uint16_t *)(event_data) = 10 +
					payload_len_field + payload_len;
		*(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
		memcpy(event_data + 4, timestamp, 8);
		/* 2 bytes for the event length field which is added to
		 * the event data.
		 */
		total_event_len = 2 + 10 + payload_len_field + payload_len;
		/* parse through event mask tbl of each client and check mask */
		mutex_lock(&driver->dci_mutex);
		list_for_each_safe(start, temp, &driver->dci_client_list) {
			entry = list_entry(start, struct diag_dci_client_tbl,
								track);
			if (entry->client_info.token != token)
				continue;
			if (diag_dci_query_event_mask(entry, event_id)) {
				/* copy to client buffer */
				copy_dci_event(event_data, total_event_len,
					       entry, data_source, ext_hdr);
			}
		}
		mutex_unlock(&driver->dci_mutex);
	}
}
1306
/*
 * Copy one DCI log record (plus optional extended header) into
 * @client's buffer for @data_source.
 *
 * @buf layout (from the offsets used below): 2 bytes cmd code, then the
 * log length at offset 2, followed by the log itself; the first 4 bytes
 * are skipped on copy. Written layout: [ext hdr, if any]
 * [DCI_LOG_TYPE (int)][log_length bytes].
 *
 * Lock order: buf_mutex -> health_mutex while reserving space and
 * updating health counters; data_mutex alone while writing the bytes.
 * -ENOMEM from diag_dci_get_buffer() is counted as a dropped log.
 */
static void copy_dci_log(unsigned char *buf, int len,
			 struct diag_dci_client_tbl *client, int data_source,
			 void *ext_hdr)
{
	uint16_t log_length = 0;
	struct diag_dci_buffer_t *data_buffer = NULL;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
	int err = 0, total_len = 0;

	if (!buf || !client) {
		pr_err("diag: Invalid pointers in %s", __func__);
		return;
	}

	log_length = *(uint16_t *)(buf + 2);
	/* Reject lengths that would overflow the arithmetic below */
	if (log_length > USHRT_MAX - 4) {
		pr_err("diag: Integer overflow in %s, log_len: %d",
				__func__, log_length);
		return;
	}
	total_len = sizeof(int) + log_length;
	if (ext_hdr)
		total_len += sizeof(int) + EXT_HDR_LEN;

	/* Check if we are within the len. The check should include the
	 * first 4 bytes for the Log code(2) and the length bytes (2)
	 */
	if ((log_length + sizeof(uint16_t) + 2) > len) {
		pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
				__func__, log_length, len);
		return;
	}

	proc_buf = &client->buffers[data_source];
	mutex_lock(&proc_buf->buf_mutex);
	mutex_lock(&proc_buf->health_mutex);
	err = diag_dci_get_buffer(client, data_source, total_len);
	if (err) {
		if (err == -ENOMEM)
			proc_buf->health.dropped_logs++;
		else
			pr_err("diag: In %s, invalid packet\n", __func__);
		mutex_unlock(&proc_buf->health_mutex);
		mutex_unlock(&proc_buf->buf_mutex);
		return;
	}

	data_buffer = proc_buf->buf_curr;
	proc_buf->health.received_logs++;
	mutex_unlock(&proc_buf->health_mutex);
	mutex_unlock(&proc_buf->buf_mutex);

	mutex_lock(&data_buffer->data_mutex);
	/* Backing buffer may be absent; bail out rather than dereference */
	if (!data_buffer->data) {
		mutex_unlock(&data_buffer->data_mutex);
		return;
	}
	if (ext_hdr)
		copy_ext_hdr(data_buffer, ext_hdr);

	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
	data_buffer->data_len += sizeof(int);
	/* Skip the 4-byte cmd code + length header of the source packet */
	memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
	       log_length);
	data_buffer->data_len += log_length;
	data_buffer->data_source = data_source;
	mutex_unlock(&data_buffer->data_mutex);
}
1375
1376void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
1377 void *ext_hdr)
1378{
1379 uint16_t log_code, read_bytes = 0;
1380 struct list_head *start, *temp;
1381 struct diag_dci_client_tbl *entry = NULL;
1382
1383 if (!buf) {
1384 pr_err("diag: In %s buffer is NULL\n", __func__);
1385 return;
1386 }
Hardik Aryaba334f42018-01-17 21:03:52 +05301387 /*
1388 * The first eight bytes for the incoming log packet contains
1389 * Command code (2), the length of the packet (2), the length
1390 * of the log (2) and log code (2)
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001391 */
Hardik Aryaba334f42018-01-17 21:03:52 +05301392 if (len < 8) {
1393 pr_err("diag: In %s invalid len: %d\n", __func__, len);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001394 return;
1395 }
1396
Hardik Aryaba334f42018-01-17 21:03:52 +05301397 log_code = *(uint16_t *)(buf + 6);
1398 read_bytes += sizeof(uint16_t) + 6;
1399
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001400 /* parse through log mask table of each client and check mask */
1401 mutex_lock(&driver->dci_mutex);
1402 list_for_each_safe(start, temp, &driver->dci_client_list) {
1403 entry = list_entry(start, struct diag_dci_client_tbl, track);
1404 if (entry->client_info.token != token)
1405 continue;
1406 if (diag_dci_query_log_mask(entry, log_code)) {
1407 pr_debug("\t log code %x needed by client %d",
1408 log_code, entry->client->tgid);
1409 /* copy to client buffer */
1410 copy_dci_log(buf, len, entry, data_source, ext_hdr);
1411 }
1412 }
1413 mutex_unlock(&driver->dci_mutex);
1414}
1415
1416void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
1417 int token)
1418{
1419 uint8_t version, pkt_cmd_code = 0;
1420 unsigned char *pkt = NULL;
1421
1422 if (!buf) {
1423 pr_err("diag: In %s buffer is NULL\n", __func__);
1424 return;
1425 }
Hardik Aryaba334f42018-01-17 21:03:52 +05301426 if (len < (EXT_HDR_LEN + sizeof(uint8_t))) {
1427 pr_err("diag: In %s invalid len: %d\n", __func__, len);
1428 return;
1429 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001430
1431 version = *(uint8_t *)buf + 1;
1432 if (version < EXT_HDR_VERSION) {
1433 pr_err("diag: %s, Extended header with invalid version: %d\n",
1434 __func__, version);
1435 return;
1436 }
1437
1438 pkt = buf + EXT_HDR_LEN;
1439 pkt_cmd_code = *(uint8_t *)pkt;
1440 len -= EXT_HDR_LEN;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001441
1442 switch (pkt_cmd_code) {
1443 case LOG_CMD_CODE:
1444 extract_dci_log(pkt, len, data_source, token, buf);
1445 break;
1446 case EVENT_CMD_CODE:
1447 extract_dci_events(pkt, len, data_source, token, buf);
1448 break;
1449 default:
1450 pr_err("diag: %s unsupported cmd_code: %d, data_source: %d\n",
1451 __func__, pkt_cmd_code, data_source);
1452 return;
1453 }
1454}
1455
/*
 * Worker run when a DCI channel opens: rebuild the cumulative DCI log
 * mask "dirty" flags from every local client's per-client mask, then
 * push the updated log and event masks to userspace clients and to the
 * local peripherals.
 *
 * Mask layout implied by the pointer math below: 16 entries of 514
 * bytes each, with byte 1 of each entry used as the dirty/enabled flag.
 * NOTE(review): presumably 514 = 2 header bytes + 512 mask bytes per
 * equipment id — confirm against the mask-building code.
 */
void diag_dci_channel_open_work(struct work_struct *work)
{
	int i, j;
	char dirty_bits[16];
	uint8_t *client_log_mask_ptr;
	uint8_t *log_mask_ptr;
	int ret;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	/* Update apps and peripheral(s) with the dci log and event masks */
	memset(dirty_bits, 0, 16 * sizeof(uint8_t));

	/*
	 * From each log entry used by each client, determine
	 * which log entries in the cumulative logs that need
	 * to be updated on the peripheral.
	 */
	mutex_lock(&driver->dci_mutex);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		/* Only local-processor clients contribute here */
		if (entry->client_info.token != DCI_LOCAL_PROC)
			continue;
		client_log_mask_ptr = entry->dci_log_mask;
		for (j = 0; j < 16; j++) {
			if (*(client_log_mask_ptr+1))
				dirty_bits[j] = 1;
			client_log_mask_ptr += 514;
		}
	}
	mutex_unlock(&driver->dci_mutex);

	mutex_lock(&dci_log_mask_mutex);
	/* Update the appropriate dirty bits in the cumulative mask */
	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
	for (i = 0; i < 16; i++) {
		if (dirty_bits[i])
			*(log_mask_ptr+1) = dirty_bits[i];

		log_mask_ptr += 514;
	}
	mutex_unlock(&dci_log_mask_mutex);

	/* Send updated mask to userspace clients */
	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
	/* Send updated log mask to peripherals */
	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_log_mask(DCI_LOCAL_PROC);

	/* Send updated event mask to userspace clients */
	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
	/* Send updated event mask to peripheral */
	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_event_mask(DCI_LOCAL_PROC);
}
1509
1510void diag_dci_notify_client(int peripheral_mask, int data, int proc)
1511{
Channagoud Kadabi075db3b2017-03-16 14:26:17 -07001512 int stat = 0;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001513 struct siginfo info;
1514 struct list_head *start, *temp;
1515 struct diag_dci_client_tbl *entry = NULL;
Gopikrishna Mogasati41224ba2017-05-22 12:05:11 +05301516 struct pid *pid_struct = NULL;
1517 struct task_struct *dci_task = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001518
1519 memset(&info, 0, sizeof(struct siginfo));
1520 info.si_code = SI_QUEUE;
1521 info.si_int = (peripheral_mask | data);
1522 if (data == DIAG_STATUS_OPEN)
1523 dci_ops_tbl[proc].peripheral_status |= peripheral_mask;
1524 else
1525 dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
1526
1527 /* Notify the DCI process that the peripheral DCI Channel is up */
Mohit Aggarwalf68e6b72016-01-04 17:10:30 +05301528 mutex_lock(&driver->dci_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001529 list_for_each_safe(start, temp, &driver->dci_client_list) {
1530 entry = list_entry(start, struct diag_dci_client_tbl, track);
1531 if (entry->client_info.token != proc)
1532 continue;
1533 if (entry->client_info.notification_list & peripheral_mask) {
1534 info.si_signo = entry->client_info.signal_type;
Gopikrishna Mogasati41224ba2017-05-22 12:05:11 +05301535 pid_struct = find_get_pid(entry->tgid);
1536 if (pid_struct) {
1537 dci_task = get_pid_task(pid_struct,
1538 PIDTYPE_PID);
1539 if (!dci_task) {
1540 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
1541 "diag: dci client with pid = %d Exited..\n",
1542 entry->tgid);
1543 mutex_unlock(&driver->dci_mutex);
1544 return;
1545 }
1546 if (entry->client &&
1547 entry->tgid == dci_task->tgid) {
1548 DIAG_LOG(DIAG_DEBUG_DCI,
1549 "entry tgid = %d, dci client tgid = %d\n",
1550 entry->tgid, dci_task->tgid);
1551 stat = send_sig_info(
1552 entry->client_info.signal_type,
1553 &info, dci_task);
1554 if (stat)
1555 pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001556 info.si_int, stat);
Gopikrishna Mogasati41224ba2017-05-22 12:05:11 +05301557 } else
1558 pr_err("diag: client data is corrupted, signal data: 0x%x, stat: %d\n",
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001559 info.si_int, stat);
Gopikrishna Mogasati41224ba2017-05-22 12:05:11 +05301560 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001561 }
1562 }
Mohit Aggarwalf68e6b72016-01-04 17:10:30 +05301563 mutex_unlock(&driver->dci_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001564}
1565
1566static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
1567 unsigned char *buf, int len, int tag)
1568{
1569 int i, status = DIAG_DCI_NO_ERROR;
1570 uint32_t write_len = 0;
1571 struct diag_dci_pkt_header_t header;
1572
1573 if (!entry)
1574 return -EIO;
1575
1576 if (len < 1 || len > DIAG_MAX_REQ_SIZE) {
1577 pr_err("diag: dci: In %s, invalid length %d, max_length: %d\n",
1578 __func__, len, (int)(DCI_REQ_BUF_SIZE - sizeof(header)));
1579 return -EIO;
1580 }
1581
1582 if ((len + sizeof(header) + sizeof(uint8_t)) > DCI_BUF_SIZE) {
1583 pr_err("diag: dci: In %s, invalid length %d for apps_dci_buf, max_length: %d\n",
1584 __func__, len, DIAG_MAX_REQ_SIZE);
1585 return -EIO;
1586 }
1587
1588 mutex_lock(&driver->dci_mutex);
1589 /* prepare DCI packet */
1590 header.start = CONTROL_CHAR;
1591 header.version = 1;
1592 header.len = len + sizeof(int) + sizeof(uint8_t);
1593 header.pkt_code = DCI_PKT_RSP_CODE;
1594 header.tag = tag;
1595 memcpy(driver->apps_dci_buf, &header, sizeof(header));
1596 write_len += sizeof(header);
1597 memcpy(driver->apps_dci_buf + write_len, buf, len);
1598 write_len += len;
1599 *(uint8_t *)(driver->apps_dci_buf + write_len) = CONTROL_CHAR;
1600 write_len += sizeof(uint8_t);
1601
1602 /* This command is registered locally on the Apps */
1603 if (entry->proc == APPS_DATA) {
1604 diag_update_pkt_buffer(driver->apps_dci_buf, write_len,
1605 DCI_PKT_TYPE);
1606 diag_update_sleeping_process(entry->pid, DCI_PKT_TYPE);
1607 mutex_unlock(&driver->dci_mutex);
1608 return DIAG_DCI_NO_ERROR;
1609 }
1610
1611 for (i = 0; i < NUM_PERIPHERALS; i++)
1612 if (entry->proc == i) {
1613 status = 1;
1614 break;
1615 }
1616
1617 if (status) {
1618 status = diag_dci_write_proc(entry->proc,
1619 DIAG_DATA_TYPE,
1620 driver->apps_dci_buf,
1621 write_len);
1622 } else {
1623 pr_err("diag: Cannot send packet to peripheral %d",
1624 entry->proc);
1625 status = DIAG_DCI_SEND_DATA_FAIL;
1626 }
1627 mutex_unlock(&driver->dci_mutex);
1628 return status;
1629}
1630
1631#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
1632unsigned char *dci_get_buffer_from_bridge(int token)
1633{
1634 uint8_t retries = 0, max_retries = 3;
1635 unsigned char *buf = NULL;
1636
1637 do {
1638 buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
1639 dci_ops_tbl[token].mempool);
1640 if (!buf) {
1641 usleep_range(5000, 5100);
1642 retries++;
1643 } else
1644 break;
1645 } while (retries < max_retries);
1646
1647 return buf;
1648}
1649
/*
 * Forward @len bytes at @buf to the bridge channel backing DCI token
 * @token. Returns the bridge write status.
 */
int diag_dci_write_bridge(int token, unsigned char *buf, int len)
{
	int bridge_index = TOKEN_TO_BRIDGE(token);

	return diagfwd_bridge_write(bridge_index, buf, len);
}
1654
1655int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
1656{
1657 int token = BRIDGE_TO_TOKEN(index);
1658
1659 if (!VALID_DCI_TOKEN(token)) {
1660 pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
1661 return -EINVAL;
1662 }
1663 diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
1664 return 0;
1665}
1666#endif
1667
1668#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Frame @data (a DCI request of @len bytes, tracked by @tag) and write
 * it to the remote processor identified by @token over the bridge.
 *
 * Wire format: [dci header, cmd DCI_PKT_RSP_CODE][tag (int)][request]
 * [CONTROL_CHAR]. On write failure the bridge buffer is returned to
 * the DCI mempool here; on success it is freed by the bridge
 * write-done callback (diag_dci_write_done_bridge()).
 *
 * NOTE(review): no explicit check that dci_header + tag + len + 1 fits
 * in DIAG_MDM_BUF_SIZE — presumably guaranteed by the caller's
 * DCI_REQ_BUF_SIZE bound; confirm.
 *
 * Returns DIAG_DCI_NO_ERROR on success, -EIO for a NULL request,
 * -EAGAIN when no bridge buffer is available, or the bridge error code.
 */
static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
				    int token)
{
	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;
	int dci_header_size = sizeof(struct diag_dci_header_t);
	int ret = DIAG_DCI_NO_ERROR;
	uint32_t write_len = 0;

	if (!data)
		return -EIO;

	buf = dci_get_buffer_from_bridge(token);
	if (!buf) {
		pr_err("diag: In %s, unable to get dci buffers to write data\n",
			__func__);
		return -EAGAIN;
	}

	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	/*
	 * The Length of the DCI packet = length of the command + tag (int) +
	 * the command code size (uint8_t)
	 */
	dci_header.length = len + sizeof(int) + sizeof(uint8_t);
	dci_header.cmd_code = DCI_PKT_RSP_CODE;

	memcpy(buf + write_len, &dci_header, dci_header_size);
	write_len += dci_header_size;
	*(int *)(buf + write_len) = tag;
	write_len += sizeof(int);
	memcpy(buf + write_len, data, len);
	write_len += len;
	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
	write_len += sizeof(uint8_t);

	ret = diag_dci_write_bridge(token, buf, write_len);
	if (ret) {
		pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
			token, ret);
		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
	} else {
		ret = DIAG_DCI_NO_ERROR;
	}

	return ret;
}
1717#else
/*
 * Bridge support compiled out: remote DCI packets are treated as
 * successfully sent without any transfer taking place.
 */
static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
				    int token)
{
	return DIAG_DCI_NO_ERROR;
}
1723#endif
1724
1725#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Send a DCI handshake control packet to the remote processor behind
 * bridge @index and arm the per-token handshake wait timer.
 *
 * Wire format: [dci header, cmd DCI_CONTROL_PKT_CODE][handshake ctrl
 * packet: id, data len, version, DCI_MAGIC][CONTROL_CHAR]. The bridge
 * buffer is returned to the mempool only on write failure; on success
 * it is freed by the bridge write-done path.
 *
 * Returns 0 on success, -EINVAL for an invalid token, -EAGAIN when no
 * bridge buffer is available, or the bridge write error.
 */
int diag_dci_send_handshake_pkt(int index)
{
	int err = 0;
	int token = BRIDGE_TO_TOKEN(index);
	int write_len = 0;
	struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;

	if (!VALID_DCI_TOKEN(token)) {
		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
		return -EINVAL;
	}

	buf = dci_get_buffer_from_bridge(token);
	if (!buf) {
		pr_err("diag: In %s, unable to get dci buffers to write data\n",
			__func__);
		return -EAGAIN;
	}

	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	/* Include the cmd code (uint8_t) in the length */
	dci_header.length = sizeof(ctrl_pkt) + sizeof(uint8_t);
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
	memcpy(buf, &dci_header, sizeof(dci_header));
	write_len += sizeof(dci_header);

	ctrl_pkt.ctrl_pkt_id = DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT;
	/*
	 * The control packet data length accounts for the version (uint32_t)
	 * of the packet and the magic number (uint32_t).
	 */
	ctrl_pkt.ctrl_pkt_data_len = 2 * sizeof(uint32_t);
	ctrl_pkt.version = 1;
	ctrl_pkt.magic = DCI_MAGIC;
	memcpy(buf + write_len, &ctrl_pkt, sizeof(ctrl_pkt));
	write_len += sizeof(ctrl_pkt);

	*(uint8_t *)(buf + write_len) = CONTROL_CHAR;
	write_len += sizeof(uint8_t);

	err = diag_dci_write_bridge(token, buf, write_len);
	if (err) {
		pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
			token, err);
		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
		return err;
	}

	/* Expect the handshake response before this timer fires */
	mod_timer(&(dci_channel_status[token].wait_time),
		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));

	return 0;
}
1782#else
/* Bridge support compiled out: the DCI handshake is a successful no-op. */
int diag_dci_send_handshake_pkt(int index)
{
	return 0;
}
1787#endif
1788
/*
 * Handle a DCI request that the Apps processor answers locally:
 * download-mode entry, version/extended-build queries, log-on-demand,
 * and selected DIAG_CMD_DIAG_SUBSYS commands (max packet size, STM,
 * poll, response-wrap controls, mobile id). The response payload is
 * built in driver->apps_dci_buf after a DCI packet header carrying
 * @tag, then fed back through diag_process_apps_dci_read_data().
 *
 * Returns DIAG_DCI_NO_ERROR when a response was generated,
 * DIAG_DCI_TABLE_ERR when the command is not handled here (caller
 * should consult the command registration table), or a negative errno.
 *
 * WARNING: DIAG_CMD_DOWNLOAD reboots the SoC into download mode after
 * the response has been sent.
 */
static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
				     unsigned char *req_buf, int req_len,
				     int tag)
{
	uint8_t cmd_code, subsys_id, i, goto_download = 0;
	uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
	uint16_t ss_cmd_code;
	uint32_t write_len = 0;
	unsigned char *dest_buf = driver->apps_dci_buf;
	unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
	struct diag_dci_pkt_header_t dci_header;

	if (!pkt_header || !req_buf || req_len <= 0 || tag < 0)
		return -EIO;

	cmd_code = pkt_header->cmd_code;
	subsys_id = pkt_header->subsys_id;
	ss_cmd_code = pkt_header->subsys_cmd_code;

	if (cmd_code == DIAG_CMD_DOWNLOAD) {
		*payload_ptr = DIAG_CMD_DOWNLOAD;
		write_len = sizeof(uint8_t);
		/* Defer the reboot until the response has been queued */
		goto_download = 1;
		goto fill_buffer;
	} else if (cmd_code == DIAG_CMD_VERSION) {
		if (chk_polling_response()) {
			/* 55 zero bytes as the version response payload */
			for (i = 0; i < 55; i++, write_len++, payload_ptr++)
				*(payload_ptr) = 0;
			goto fill_buffer;
		}
	} else if (cmd_code == DIAG_CMD_EXT_BUILD) {
		if (chk_polling_response()) {
			*payload_ptr = DIAG_CMD_EXT_BUILD;
			write_len = sizeof(uint8_t);
			payload_ptr += sizeof(uint8_t);
			/* 8 zero bytes, then the build/config id */
			for (i = 0; i < 8; i++, write_len++, payload_ptr++)
				*(payload_ptr) = 0;
			*(int *)(payload_ptr) = chk_config_get_id();
			write_len += sizeof(int);
			goto fill_buffer;
		}
	} else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
		write_len = diag_cmd_log_on_demand(req_buf, req_len,
						   payload_ptr,
						   APPS_BUF_SIZE - header_len);
		goto fill_buffer;
	} else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
		/* Not an Apps command; let the caller route it */
		return DIAG_DCI_TABLE_ERR;
	}

	if (subsys_id == DIAG_SS_DIAG) {
		if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
			/* Echo the request header, then the max size */
			memcpy(payload_ptr, pkt_header,
					sizeof(struct diag_pkt_header_t));
			write_len = sizeof(struct diag_pkt_header_t);
			*(uint32_t *)(payload_ptr + write_len) =
					DIAG_MAX_REQ_SIZE;
			write_len += sizeof(uint32_t);
		} else if (ss_cmd_code == DIAG_DIAG_STM) {
			write_len = diag_process_stm_cmd(req_buf, payload_ptr);
		}
	} else if (subsys_id == DIAG_SS_PARAMS) {
		if (ss_cmd_code == DIAG_DIAG_POLL) {
			if (chk_polling_response()) {
				/* Echo the header followed by 12 zero bytes */
				memcpy(payload_ptr, pkt_header,
					sizeof(struct diag_pkt_header_t));
				write_len = sizeof(struct diag_pkt_header_t);
				payload_ptr += write_len;
				for (i = 0; i < 12; i++, write_len++) {
					*(payload_ptr) = 0;
					payload_ptr++;
				}
			}
		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
			memcpy(payload_ptr, pkt_header,
				sizeof(struct diag_pkt_header_t));
			write_len = sizeof(struct diag_pkt_header_t);
			*(int *)(payload_ptr + write_len) = wrap_enabled;
			write_len += sizeof(int);
		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
			/* Side effect: enables response wrapping globally */
			wrap_enabled = true;
			memcpy(payload_ptr, pkt_header,
				sizeof(struct diag_pkt_header_t));
			write_len = sizeof(struct diag_pkt_header_t);
			*(uint16_t *)(payload_ptr + write_len) = wrap_count;
			write_len += sizeof(uint16_t);
		} else if (ss_cmd_code == DIAG_EXT_MOBILE_ID) {
			write_len = diag_cmd_get_mobile_id(req_buf, req_len,
							   payload_ptr,
							   APPS_BUF_SIZE - header_len);
		}
	}

fill_buffer:
	if (write_len > 0) {
		/* Check if we are within the range of the buffer*/
		if (write_len + header_len > DIAG_MAX_REQ_SIZE) {
			pr_err("diag: In %s, invalid length %d\n", __func__,
						write_len + header_len);
			return -ENOMEM;
		}
		dci_header.start = CONTROL_CHAR;
		dci_header.version = 1;
		/*
		 * Length of the rsp pkt = actual data len + pkt rsp code
		 * (uint8_t) + tag (int)
		 */
		dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
		dci_header.pkt_code = DCI_PKT_RSP_CODE;
		dci_header.tag = tag;
		driver->in_busy_dcipktdata = 1;
		memcpy(dest_buf, &dci_header, header_len);
		/* NOTE(review): +4 presumably skips the leading framing
		 * bytes of the DCI header — confirm against
		 * diag_process_apps_dci_read_data()'s expectations.
		 */
		diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
						dci_header.len);
		driver->in_busy_dcipktdata = 0;

		if (goto_download) {
			/*
			 * Sleep for sometime so that the response reaches the
			 * client. The value 5000 empirically as an optimum
			 * time for the response to reach the client.
			 */
			usleep_range(5000, 5100);
			/* call download API */
			msm_set_restart_mode(RESTART_DLOAD);
			pr_alert("diag: download mode set, Rebooting SoC..\n");
			kernel_restart(NULL);
		}
		return DIAG_DCI_NO_ERROR;
	}

	return DIAG_DCI_TABLE_ERR;
}
1922
/*
 * Process a DCI packet request from a client: validate the request,
 * register a transaction (tag) for matching the response, then route
 * the request to (in order) a remote processor over the bridge, the
 * local Apps handler, or a peripheral found in the command
 * registration table.
 *
 * @buf layout: [struct dci_pkt_req_t: uid, client_id]
 *              [struct diag_pkt_header_t + request payload].
 *
 * Returns DIAG_DCI_NO_ERROR on success, DIAG_DCI_NO_REG /
 * DIAG_DCI_SEND_DATA_FAIL / DIAG_DCI_TABLE_ERR on routing failures, or
 * a negative errno. Holds driver->dci_mutex for validation and
 * transaction registration, and driver->cmd_reg_mutex around the
 * registration-table lookup.
 */
static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
{
	int ret = DIAG_DCI_TABLE_ERR;
	int common_cmd = 0;
	struct diag_pkt_header_t *header = NULL;
	unsigned char *temp = buf;
	unsigned char *req_buf = NULL;
	uint8_t retry_count = 0, max_retries = 3;
	uint32_t read_len = 0, req_len = len;
	struct dci_pkt_req_entry_t *req_entry = NULL;
	struct diag_dci_client_tbl *dci_entry = NULL;
	struct dci_pkt_req_t req_hdr;
	struct diag_cmd_reg_t *reg_item;
	struct diag_cmd_reg_entry_t reg_entry;
	struct diag_cmd_reg_entry_t *temp_entry;

	if (!buf)
		return -EIO;

	if (len <= sizeof(struct dci_pkt_req_t) || len > DCI_REQ_BUF_SIZE) {
		pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
		return -EIO;
	}

	/* Split the buffer into the DCI request header and the request */
	req_hdr = *(struct dci_pkt_req_t *)temp;
	temp += sizeof(struct dci_pkt_req_t);
	read_len += sizeof(struct dci_pkt_req_t);
	req_len -= sizeof(struct dci_pkt_req_t);
	req_buf = temp; /* Start of the Request */
	header = (struct diag_pkt_header_t *)temp;
	temp += sizeof(struct diag_pkt_header_t);
	read_len += sizeof(struct diag_pkt_header_t);
	if (read_len >= DCI_REQ_BUF_SIZE) {
		pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
			read_len);
		return -EIO;
	}

	mutex_lock(&driver->dci_mutex);
	dci_entry = diag_dci_get_client_entry(req_hdr.client_id);
	if (!dci_entry) {
		pr_err("diag: Invalid client %d in %s\n",
			req_hdr.client_id, __func__);
		mutex_unlock(&driver->dci_mutex);
		return DIAG_DCI_NO_REG;
	}

	/* Check if the command is allowed on DCI */
	if (diag_dci_filter_commands(header)) {
		pr_debug("diag: command not supported %d %d %d",
			 header->cmd_code, header->subsys_id,
			 header->subsys_cmd_code);
		mutex_unlock(&driver->dci_mutex);
		return DIAG_DCI_SEND_DATA_FAIL;
	}

	common_cmd = diag_check_common_cmd(header);
	if (common_cmd < 0) {
		pr_debug("diag: error in checking common command, %d\n",
			 common_cmd);
		mutex_unlock(&driver->dci_mutex);
		return DIAG_DCI_SEND_DATA_FAIL;
	}

	/*
	 * Previous packet is yet to be consumed by the client. Wait
	 * till the buffer is free.
	 */
	while (retry_count < max_retries) {
		retry_count++;
		if (driver->in_busy_dcipktdata)
			usleep_range(10000, 10100);
		else
			break;
	}
	/* The buffer is still busy */
	if (driver->in_busy_dcipktdata) {
		pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
			__func__);
		mutex_unlock(&driver->dci_mutex);
		return -EAGAIN;
	}

	/* Register this new DCI packet */
	req_entry = diag_register_dci_transaction(req_hdr.uid,
						  req_hdr.client_id);
	if (!req_entry) {
		pr_alert("diag: registering new DCI transaction failed\n");
		mutex_unlock(&driver->dci_mutex);
		return DIAG_DCI_NO_REG;
	}
	mutex_unlock(&driver->dci_mutex);

	/*
	 * If the client has registered for remote data, route the packet to the
	 * remote processor
	 */
	if (dci_entry->client_info.token > 0) {
		ret = diag_send_dci_pkt_remote(req_buf, req_len, req_entry->tag,
					       dci_entry->client_info.token);
		return ret;
	}

	/* Check if it is a dedicated Apps command */
	ret = diag_dci_process_apps_pkt(header, req_buf, req_len,
					req_entry->tag);
	/* Common commands are additionally forwarded to peripherals below */
	if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
		return ret;

	reg_entry.cmd_code = header->cmd_code;
	reg_entry.subsys_id = header->subsys_id;
	reg_entry.cmd_code_hi = header->subsys_cmd_code;
	reg_entry.cmd_code_lo = header->subsys_cmd_code;

	mutex_lock(&driver->cmd_reg_mutex);
	temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
	if (temp_entry) {
		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
					entry);
		ret = diag_send_dci_pkt(reg_item, req_buf, req_len,
					req_entry->tag);
	} else {
		DIAG_LOG(DIAG_DEBUG_DCI, "Command not found: %02x %02x %02x\n",
			 reg_entry.cmd_code, reg_entry.subsys_id,
			 reg_entry.cmd_code_hi);
	}
	mutex_unlock(&driver->cmd_reg_mutex);

	return ret;
}
2053
/*
 * diag_process_dci_transaction - parse and apply a DCI transaction from
 * userspace.
 *
 * @buf: raw transaction buffer; first int is a discriminator
 *       (> 0: packet request/response, DCI_LOG_TYPE: log mask config,
 *       DCI_EVENT_TYPE: event mask config)
 * @len: total length of @buf in bytes
 *
 * For mask transactions the layout is:
 *   int type | int client_id | int set_mask | int num_codes | codes...
 * where each log code is 2 bytes and each event id is 4 bytes.
 * Updates the per-client mask, folds it into the cumulative mask, then
 * pushes the result to userspace clients and peripherals.
 *
 * Returns DIAG_DCI_NO_ERROR (or the send_*_mask result) on success,
 * a negative errno or -1 on failure.
 */
int diag_process_dci_transaction(unsigned char *buf, int len)
{
	unsigned char *temp = buf;
	uint16_t log_code, item_num;
	int ret = -1, found = 0, client_id = 0, client_token = 0;
	int count, set_mask, num_codes, bit_index, event_id, offset = 0;
	unsigned int byte_index, read_len = 0;
	uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
	uint8_t *event_mask_ptr;
	struct diag_dci_client_tbl *dci_entry = NULL;

	if (!temp) {
		pr_err("diag: Invalid buffer in %s\n", __func__);
		return -ENOMEM;
	}

	/* This is Pkt request/response transaction */
	if (*(int *)temp > 0) {
		return diag_process_dci_pkt_rsp(buf, len);
	} else if (*(int *)temp == DCI_LOG_TYPE) {
		/* Minimum length of a log mask config is 12 + 2 bytes for
		 * atleast one log code to be set or reset.
		 */
		if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
			pr_err("diag: dci: Invalid length in %s\n", __func__);
			return -EIO;
		}

		/* Extract each log code and put in client table */
		temp += sizeof(int);
		read_len += sizeof(int);
		client_id = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		set_mask = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		num_codes = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);

		/* Find client table entry */
		mutex_lock(&driver->dci_mutex);
		dci_entry = diag_dci_get_client_entry(client_id);
		if (!dci_entry) {
			pr_err("diag: In %s, invalid client\n", __func__);
			mutex_unlock(&driver->dci_mutex);
			return ret;
		}
		client_token = dci_entry->client_info.token;

		/* 8 = set_mask + num_codes (4 bytes each); each code is
		 * 2 bytes, so this bounds num_codes to the buffer size.
		 */
		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
			pr_err("diag: dci: Invalid number of log codes %d\n",
								num_codes);
			mutex_unlock(&driver->dci_mutex);
			return -EIO;
		}

		head_log_mask_ptr = dci_entry->dci_log_mask;
		if (!head_log_mask_ptr) {
			pr_err("diag: dci: Invalid Log mask pointer in %s\n",
								__func__);
			mutex_unlock(&driver->dci_mutex);
			return -ENOMEM;
		}
		pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
		count = 0; /* iterator for extracting log codes */

		while (count < num_codes) {
			if (read_len >= USER_SPACE_DATA) {
				pr_err("diag: dci: Invalid length for log type in %s",
								__func__);
				mutex_unlock(&driver->dci_mutex);
				return -EIO;
			}
			log_code = *(uint16_t *)temp;
			equip_id = LOG_GET_EQUIP_ID(log_code);
			item_num = LOG_GET_ITEM_NUM(log_code);
			/* +2 skips the equip id byte and the dirty byte at
			 * the head of each per-equip-id mask range
			 */
			byte_index = item_num/8 + 2;
			if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
				pr_err("diag: dci: Log type, invalid byte index\n");
				mutex_unlock(&driver->dci_mutex);
				return ret;
			}
			byte_mask = 0x01 << (item_num % 8);
			/*
			 * Parse through log mask table and find
			 * relevant range
			 */
			log_mask_ptr = head_log_mask_ptr;
			found = 0;
			offset = 0;
			/* each equip-id range appears to be 514 bytes:
			 * equip id + dirty byte + mask items — TODO confirm
			 * against create_dci_log_mask_tbl()
			 */
			while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
				if (*log_mask_ptr == equip_id) {
					found = 1;
					pr_debug("diag: find equip id = %x at %pK\n",
						 equip_id, log_mask_ptr);
					break;
				}
				pr_debug("diag: did not find equip id = %x at %d\n",
					 equip_id, *log_mask_ptr);
				log_mask_ptr += 514;
				offset += 514;
			}
			if (!found) {
				pr_err("diag: dci equip id not found\n");
				mutex_unlock(&driver->dci_mutex);
				return ret;
			}
			*(log_mask_ptr+1) = 1; /* set the dirty byte */
			log_mask_ptr = log_mask_ptr + byte_index;
			if (set_mask)
				*log_mask_ptr |= byte_mask;
			else
				*log_mask_ptr &= ~byte_mask;
			/* add to cumulative mask */
			update_dci_cumulative_log_mask(
				offset, byte_index,
				byte_mask, client_token);
			temp += 2;
			read_len += 2;
			count++;
			ret = DIAG_DCI_NO_ERROR;
		}
		/* send updated mask to userspace clients */
		if (client_token == DCI_LOCAL_PROC)
			diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
		/* send updated mask to peripherals */
		ret = dci_ops_tbl[client_token].send_log_mask(client_token);
		mutex_unlock(&driver->dci_mutex);
	} else if (*(int *)temp == DCI_EVENT_TYPE) {
		/* Minimum length of a event mask config is 12 + 4 bytes for
		 * atleast one event id to be set or reset.
		 */
		if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
			pr_err("diag: dci: Invalid length in %s\n", __func__);
			return -EIO;
		}

		/* Extract each event id and put in client table */
		temp += sizeof(int);
		read_len += sizeof(int);
		client_id = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		set_mask = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		num_codes = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);

		/* find client table entry */
		mutex_lock(&driver->dci_mutex);
		dci_entry = diag_dci_get_client_entry(client_id);
		if (!dci_entry) {
			pr_err("diag: In %s, invalid client\n", __func__);
			mutex_unlock(&driver->dci_mutex);
			return ret;
		}
		client_token = dci_entry->client_info.token;

		/* Check for positive number of event ids. Also, the number of
		 * event ids should fit in the buffer along with set_mask and
		 * num_codes which are 4 bytes each.
		 */
		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
			pr_err("diag: dci: Invalid number of event ids %d\n",
								num_codes);
			mutex_unlock(&driver->dci_mutex);
			return -EIO;
		}

		event_mask_ptr = dci_entry->dci_event_mask;
		if (!event_mask_ptr) {
			pr_err("diag: dci: Invalid event mask pointer in %s\n",
								__func__);
			mutex_unlock(&driver->dci_mutex);
			return -ENOMEM;
		}
		pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
		count = 0; /* iterator for extracting log codes */
		while (count < num_codes) {
			if (read_len >= USER_SPACE_DATA) {
				pr_err("diag: dci: Invalid length for event type in %s",
								__func__);
				mutex_unlock(&driver->dci_mutex);
				return -EIO;
			}
			event_id = *(int *)temp;
			byte_index = event_id/8;
			if (byte_index >= DCI_EVENT_MASK_SIZE) {
				pr_err("diag: dci: Event type, invalid byte index\n");
				mutex_unlock(&driver->dci_mutex);
				return ret;
			}
			bit_index = event_id % 8;
			byte_mask = 0x1 << bit_index;
			/*
			 * Parse through event mask table and set
			 * relevant byte & bit combination
			 */
			if (set_mask)
				*(event_mask_ptr + byte_index) |= byte_mask;
			else
				*(event_mask_ptr + byte_index) &= ~byte_mask;
			/* add to cumulative mask */
			update_dci_cumulative_event_mask(byte_index, byte_mask,
							 client_token);
			temp += sizeof(int);
			read_len += sizeof(int);
			count++;
			ret = DIAG_DCI_NO_ERROR;
		}
		/* send updated mask to userspace clients */
		if (dci_entry->client_info.token == DCI_LOCAL_PROC)
			diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
		/* send updated mask to peripherals */
		ret = dci_ops_tbl[client_token].send_event_mask(client_token);
		mutex_unlock(&driver->dci_mutex);
	} else {
		pr_alert("diag: Incorrect DCI transaction\n");
	}
	return ret;
}
2279
2280
2281struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id)
2282{
2283 struct list_head *start, *temp;
2284 struct diag_dci_client_tbl *entry = NULL;
2285
2286 list_for_each_safe(start, temp, &driver->dci_client_list) {
2287 entry = list_entry(start, struct diag_dci_client_tbl, track);
2288 if (entry->client_info.client_id == client_id)
2289 return entry;
2290 }
2291 return NULL;
2292}
2293
2294struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
2295{
2296 struct list_head *start, *temp;
2297 struct diag_dci_client_tbl *entry = NULL;
Manoj Prabhu B2c841ee2017-05-05 10:15:53 +05302298 struct pid *pid_struct = NULL;
2299 struct task_struct *task_s = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002300
2301 list_for_each_safe(start, temp, &driver->dci_client_list) {
2302 entry = list_entry(start, struct diag_dci_client_tbl, track);
Manoj Prabhu B2c841ee2017-05-05 10:15:53 +05302303 pid_struct = find_get_pid(entry->tgid);
2304 if (!pid_struct) {
2305 DIAG_LOG(DIAG_DEBUG_DCI,
Manoj Prabhu B71363482018-12-13 14:44:16 +05302306 "diag: Exited pid (%d) doesn't match dci client of pid (%d)\n",
2307 tgid, entry->tgid);
Manoj Prabhu B2c841ee2017-05-05 10:15:53 +05302308 continue;
2309 }
2310 task_s = get_pid_task(pid_struct, PIDTYPE_PID);
2311 if (!task_s) {
2312 DIAG_LOG(DIAG_DEBUG_DCI,
2313 "diag: valid task doesn't exist for pid = %d\n",
2314 entry->tgid);
2315 continue;
2316 }
2317 if (task_s == entry->client)
2318 if (entry->client->tgid == tgid)
2319 return entry;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002320 }
2321 return NULL;
2322}
2323
2324void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token)
2325{
2326 uint8_t *event_mask_ptr, *update_ptr = NULL;
2327 struct list_head *start, *temp;
2328 struct diag_dci_client_tbl *entry = NULL;
2329 bool is_set = false;
2330
2331 mutex_lock(&dci_event_mask_mutex);
2332 update_ptr = dci_ops_tbl[token].event_mask_composite;
2333 if (!update_ptr) {
2334 mutex_unlock(&dci_event_mask_mutex);
2335 return;
2336 }
2337 update_ptr += offset;
2338 list_for_each_safe(start, temp, &driver->dci_client_list) {
2339 entry = list_entry(start, struct diag_dci_client_tbl, track);
2340 if (entry->client_info.token != token)
2341 continue;
2342 event_mask_ptr = entry->dci_event_mask;
2343 event_mask_ptr += offset;
2344 if ((*event_mask_ptr & byte_mask) == byte_mask) {
2345 is_set = true;
2346 /* break even if one client has the event mask set */
2347 break;
2348 }
2349 }
2350 if (is_set == false)
2351 *update_ptr &= ~byte_mask;
2352 else
2353 *update_ptr |= byte_mask;
2354 mutex_unlock(&dci_event_mask_mutex);
2355}
2356
2357void diag_dci_invalidate_cumulative_event_mask(int token)
2358{
2359 int i = 0;
2360 struct list_head *start, *temp;
2361 struct diag_dci_client_tbl *entry = NULL;
2362 uint8_t *event_mask_ptr, *update_ptr = NULL;
2363
2364 mutex_lock(&dci_event_mask_mutex);
2365 update_ptr = dci_ops_tbl[token].event_mask_composite;
2366 if (!update_ptr) {
2367 mutex_unlock(&dci_event_mask_mutex);
2368 return;
2369 }
2370
2371 create_dci_event_mask_tbl(update_ptr);
2372 list_for_each_safe(start, temp, &driver->dci_client_list) {
2373 entry = list_entry(start, struct diag_dci_client_tbl, track);
2374 if (entry->client_info.token != token)
2375 continue;
2376 event_mask_ptr = entry->dci_event_mask;
2377 for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
2378 *(update_ptr+i) |= *(event_mask_ptr+i);
2379 }
2380 mutex_unlock(&dci_event_mask_mutex);
2381}
2382
2383#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * diag_send_dci_event_mask_remote - push the composite event mask for a
 * remote processor over the bridge.
 *
 * @token: remote processor token; selects composite mask, bridge and mempool
 *
 * Frames [dci_header | ctrl event mask header | mask bytes | CONTROL_CHAR]
 * into a bridge buffer and writes it out. The buffer is returned to the
 * mempool only on write failure; on success ownership passes to the bridge.
 *
 * Returns DIAG_DCI_NO_ERROR on success, -EINVAL/-EAGAIN or the bridge
 * write error on failure.
 */
int diag_send_dci_event_mask_remote(int token)
{
	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;
	/* NOTE: local shadows the file-scope 'event_mask' used elsewhere */
	struct diag_ctrl_event_mask event_mask;
	int dci_header_size = sizeof(struct diag_dci_header_t);
	int event_header_size = sizeof(struct diag_ctrl_event_mask);
	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
	unsigned char *event_mask_ptr = NULL;
	uint32_t write_len = 0;

	mutex_lock(&dci_event_mask_mutex);
	event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
	if (!event_mask_ptr) {
		mutex_unlock(&dci_event_mask_mutex);
		return -EINVAL;
	}
	buf = dci_get_buffer_from_bridge(token);
	if (!buf) {
		pr_err("diag: In %s, unable to get dci buffers to write data\n",
			__func__);
		mutex_unlock(&dci_event_mask_mutex);
		return -EAGAIN;
	}

	/* Frame the DCI header */
	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	dci_header.length = event_header_size + DCI_EVENT_MASK_SIZE + 1;
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;

	event_mask.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
	event_mask.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
	event_mask.stream_id = DCI_MASK_STREAM;
	event_mask.status = DIAG_CTRL_MASK_VALID;
	event_mask.event_config = 0; /* event config */
	event_mask.event_mask_size = DCI_EVENT_MASK_SIZE;
	/* event_config = 1 if any bit anywhere in the mask is set */
	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
		if (event_mask_ptr[i] != 0) {
			event_mask.event_config = 1;
			break;
		}
	}
	memcpy(buf + write_len, &dci_header, dci_header_size);
	write_len += dci_header_size;
	memcpy(buf + write_len, &event_mask, event_header_size);
	write_len += event_header_size;
	memcpy(buf + write_len, event_mask_ptr, DCI_EVENT_MASK_SIZE);
	write_len += DCI_EVENT_MASK_SIZE;
	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
	write_len += sizeof(uint8_t);
	err = diag_dci_write_bridge(token, buf, write_len);
	if (err) {
		pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
		       token, err);
		/* write failed: we still own the buffer, return it */
		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
		ret = err;
	} else {
		ret = DIAG_DCI_NO_ERROR;
	}
	mutex_unlock(&dci_event_mask_mutex);
	return ret;
}
2447#endif
2448
2449int diag_send_dci_event_mask(int token)
2450{
2451 void *buf = event_mask.update_buf;
2452 struct diag_ctrl_event_mask header;
2453 int header_size = sizeof(struct diag_ctrl_event_mask);
2454 int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
2455 unsigned char *event_mask_ptr = NULL;
2456
2457 mutex_lock(&dci_event_mask_mutex);
2458 event_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].event_mask_composite;
2459 if (!event_mask_ptr) {
2460 mutex_unlock(&dci_event_mask_mutex);
2461 return -EINVAL;
2462 }
2463
2464 mutex_lock(&event_mask.lock);
2465 /* send event mask update */
2466 header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
2467 header.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
2468 header.stream_id = DCI_MASK_STREAM;
2469 header.status = DIAG_CTRL_MASK_VALID;
2470 header.event_config = 0; /* event config */
2471 header.event_mask_size = DCI_EVENT_MASK_SIZE;
2472 for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
2473 if (event_mask_ptr[i] != 0) {
2474 header.event_config = 1;
2475 break;
2476 }
2477 }
2478 memcpy(buf, &header, header_size);
2479 memcpy(buf+header_size, event_mask_ptr, DCI_EVENT_MASK_SIZE);
2480 for (i = 0; i < NUM_PERIPHERALS; i++) {
2481 /*
2482 * Don't send to peripheral if its regular channel
2483 * is down. It may also mean that the peripheral doesn't
2484 * support DCI.
2485 */
Gopikrishna Mogasatic20aa252017-02-09 15:04:32 +05302486 if (check_peripheral_dci_support(i, DCI_LOCAL_PROC)) {
2487 err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
2488 header_size + DCI_EVENT_MASK_SIZE);
2489 if (err != DIAG_DCI_NO_ERROR)
2490 ret = DIAG_DCI_SEND_DATA_FAIL;
2491 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002492 }
2493
2494 mutex_unlock(&event_mask.lock);
2495 mutex_unlock(&dci_event_mask_mutex);
2496
2497 return ret;
2498}
2499
2500void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
2501 uint8_t byte_mask, int token)
2502{
2503 uint8_t *log_mask_ptr, *update_ptr = NULL;
2504 bool is_set = false;
2505 struct list_head *start, *temp;
2506 struct diag_dci_client_tbl *entry = NULL;
2507
2508 mutex_lock(&dci_log_mask_mutex);
2509 update_ptr = dci_ops_tbl[token].log_mask_composite;
2510 if (!update_ptr) {
2511 mutex_unlock(&dci_log_mask_mutex);
2512 return;
2513 }
2514
2515 update_ptr += offset;
2516 /* update the dirty bit */
2517 *(update_ptr+1) = 1;
2518 update_ptr = update_ptr + byte_index;
2519 list_for_each_safe(start, temp, &driver->dci_client_list) {
2520 entry = list_entry(start, struct diag_dci_client_tbl, track);
2521 if (entry->client_info.token != token)
2522 continue;
2523 log_mask_ptr = entry->dci_log_mask;
2524 log_mask_ptr = log_mask_ptr + offset + byte_index;
2525 if ((*log_mask_ptr & byte_mask) == byte_mask) {
2526 is_set = true;
2527 /* break even if one client has the log mask set */
2528 break;
2529 }
2530 }
2531
2532 if (is_set == false)
2533 *update_ptr &= ~byte_mask;
2534 else
2535 *update_ptr |= byte_mask;
2536 mutex_unlock(&dci_log_mask_mutex);
2537}
2538
2539void diag_dci_invalidate_cumulative_log_mask(int token)
2540{
2541 int i = 0;
2542 struct list_head *start, *temp;
2543 struct diag_dci_client_tbl *entry = NULL;
2544 uint8_t *log_mask_ptr, *update_ptr = NULL;
2545
2546 /* Clear the composite mask and redo all the masks */
2547 mutex_lock(&dci_log_mask_mutex);
2548 update_ptr = dci_ops_tbl[token].log_mask_composite;
2549 if (!update_ptr) {
2550 mutex_unlock(&dci_log_mask_mutex);
2551 return;
2552 }
2553
2554 create_dci_log_mask_tbl(update_ptr, DCI_LOG_MASK_DIRTY);
2555 list_for_each_safe(start, temp, &driver->dci_client_list) {
2556 entry = list_entry(start, struct diag_dci_client_tbl, track);
2557 if (entry->client_info.token != token)
2558 continue;
2559 log_mask_ptr = entry->dci_log_mask;
2560 for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
2561 *(update_ptr+i) |= *(log_mask_ptr+i);
2562 }
2563 mutex_unlock(&dci_log_mask_mutex);
2564}
2565
2566static int dci_fill_log_mask(unsigned char *dest_ptr, unsigned char *src_ptr)
2567{
2568 struct diag_ctrl_log_mask header;
2569 int header_len = sizeof(struct diag_ctrl_log_mask);
2570
2571 header.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
2572 header.num_items = DCI_MAX_ITEMS_PER_LOG_CODE;
2573 header.data_len = 11 + DCI_MAX_ITEMS_PER_LOG_CODE;
2574 header.stream_id = DCI_MASK_STREAM;
2575 header.status = 3;
2576 header.equip_id = *src_ptr;
2577 header.log_mask_size = DCI_MAX_ITEMS_PER_LOG_CODE;
2578 memcpy(dest_ptr, &header, header_len);
2579 memcpy(dest_ptr + header_len, src_ptr + 2, DCI_MAX_ITEMS_PER_LOG_CODE);
2580
2581 return header_len + DCI_MAX_ITEMS_PER_LOG_CODE;
2582}
2583
2584#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * diag_send_dci_log_mask_remote - push every dirty equip-id log mask range
 * for a remote processor over the bridge.
 *
 * @token: remote processor token; selects composite mask, bridge and mempool
 *
 * Iterates the composite log mask in 514-byte strides (equip id byte +
 * dirty byte + mask items per range) and sends one framed message per
 * range whose dirty byte is set, clearing the dirty byte on success.
 *
 * Returns DIAG_DCI_NO_ERROR, or -EINVAL/-EAGAIN on setup failure.
 */
int diag_send_dci_log_mask_remote(int token)
{

	unsigned char *buf = NULL;
	struct diag_dci_header_t dci_header;
	int dci_header_size = sizeof(struct diag_dci_header_t);
	int log_header_size = sizeof(struct diag_ctrl_log_mask);
	uint8_t *log_mask_ptr = NULL;
	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
	int updated;
	uint32_t write_len = 0;

	mutex_lock(&dci_log_mask_mutex);
	log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
	if (!log_mask_ptr) {
		mutex_unlock(&dci_log_mask_mutex);
		return -EINVAL;
	}

	/* DCI header is common to all equipment IDs */
	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	dci_header.length = log_header_size + DCI_MAX_ITEMS_PER_LOG_CODE + 1;
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;

	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
		updated = 1;
		write_len = 0;
		/* dirty byte clear: nothing changed for this equip id */
		if (!*(log_mask_ptr + 1)) {
			log_mask_ptr += 514;
			continue;
		}

		buf = dci_get_buffer_from_bridge(token);
		if (!buf) {
			pr_err("diag: In %s, unable to get dci buffers to write data\n",
				__func__);
			mutex_unlock(&dci_log_mask_mutex);
			return -EAGAIN;
		}

		/* [dci header | ctrl log mask + items | CONTROL_CHAR] */
		memcpy(buf + write_len, &dci_header, dci_header_size);
		write_len += dci_header_size;
		write_len += dci_fill_log_mask(buf + write_len, log_mask_ptr);
		*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
		write_len += sizeof(uint8_t);
		err = diag_dci_write_bridge(token, buf, write_len);
		if (err) {
			pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
			       i, token, err);
			/* write failed: we still own the buffer, return it */
			diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
			updated = 0;
		}
		if (updated)
			*(log_mask_ptr + 1) = 0; /* clear dirty byte */
		log_mask_ptr += 514;
	}
	mutex_unlock(&dci_log_mask_mutex);
	return ret;
}
2645#endif
2646
2647int diag_send_dci_log_mask(int token)
2648{
2649 void *buf = log_mask.update_buf;
2650 int write_len = 0;
2651 uint8_t *log_mask_ptr = NULL;
2652 int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
2653 int updated;
2654
2655
2656 mutex_lock(&dci_log_mask_mutex);
2657 log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
2658 if (!log_mask_ptr) {
2659 mutex_unlock(&dci_log_mask_mutex);
2660 return -EINVAL;
2661 }
2662
2663 mutex_lock(&log_mask.lock);
2664 for (i = 0; i < 16; i++) {
2665 updated = 1;
2666 /* Dirty bit is set don't update the mask for this equip id */
2667 if (!(*(log_mask_ptr + 1))) {
2668 log_mask_ptr += 514;
2669 continue;
2670 }
2671 write_len = dci_fill_log_mask(buf, log_mask_ptr);
2672 for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
Gopikrishna Mogasatic20aa252017-02-09 15:04:32 +05302673 if (check_peripheral_dci_support(j, DCI_LOCAL_PROC)) {
2674 err = diag_dci_write_proc(j, DIAG_CNTL_TYPE,
2675 buf, write_len);
2676 if (err != DIAG_DCI_NO_ERROR) {
2677 updated = 0;
2678 ret = DIAG_DCI_SEND_DATA_FAIL;
2679 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002680 }
2681 }
2682 if (updated)
2683 *(log_mask_ptr+1) = 0; /* clear dirty byte */
2684 log_mask_ptr += 514;
2685 }
2686 mutex_unlock(&log_mask.lock);
2687 mutex_unlock(&dci_log_mask_mutex);
2688 return ret;
2689}
2690
2691static int diag_dci_init_local(void)
2692{
2693 struct dci_ops_tbl_t *temp = &dci_ops_tbl[DCI_LOCAL_PROC];
2694
2695 create_dci_log_mask_tbl(temp->log_mask_composite, DCI_LOG_MASK_CLEAN);
2696 create_dci_event_mask_tbl(temp->event_mask_composite);
2697 temp->peripheral_status |= DIAG_CON_APSS;
2698
2699 return 0;
2700}
2701
2702#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
2703static void diag_dci_init_handshake_remote(void)
2704{
2705 int i;
2706 struct dci_channel_status_t *temp = NULL;
2707
2708 for (i = DCI_REMOTE_BASE; i < NUM_DCI_PROC; i++) {
2709 temp = &dci_channel_status[i];
2710 temp->id = i;
2711 setup_timer(&temp->wait_time, dci_chk_handshake, i);
2712 INIT_WORK(&temp->handshake_work, dci_handshake_work_fn);
2713 }
2714}
2715
Sreelakshmi Gownipalli416353d2018-05-21 12:47:16 +05302716int diag_dci_init_remote(void)
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002717{
2718 int i;
2719 struct dci_ops_tbl_t *temp = NULL;
2720
2721 diagmem_init(driver, POOL_TYPE_MDM_DCI_WRITE);
2722
2723 for (i = DCI_REMOTE_BASE; i < DCI_REMOTE_LAST; i++) {
2724 temp = &dci_ops_tbl[i];
2725 create_dci_log_mask_tbl(temp->log_mask_composite,
2726 DCI_LOG_MASK_CLEAN);
2727 create_dci_event_mask_tbl(temp->event_mask_composite);
2728 }
2729
Hardik Arya2a665fc2018-03-08 11:50:49 +05302730 partial_pkt.data = vzalloc(MAX_DCI_PACKET_SZ);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002731 if (!partial_pkt.data)
2732 return -ENOMEM;
2733
2734 partial_pkt.total_len = 0;
2735 partial_pkt.read_len = 0;
2736 partial_pkt.remaining = 0;
2737 partial_pkt.processing = 0;
2738
2739 diag_dci_init_handshake_remote();
2740
2741 return 0;
2742}
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002743#endif
2744
2745static int diag_dci_init_ops_tbl(void)
2746{
2747 int err = 0;
2748
2749 err = diag_dci_init_local();
2750 if (err)
2751 goto err;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002752 return 0;
2753
2754err:
2755 return -ENOMEM;
2756}
2757
/*
 * diag_dci_init - one-time initialization of the DCI subsystem: counters,
 * locks, ops table, the apps-side DCI buffer, client/request lists, the
 * DCI workqueue, and the drain work/timer.
 *
 * Returns DIAG_DCI_NO_ERROR on success. On failure, tears down everything
 * allocated so far (buffers, workqueue, mutexes) and returns
 * DIAG_DCI_NO_REG.
 */
int diag_dci_init(void)
{
	int ret = 0;

	driver->dci_tag = 0;
	driver->dci_client_id = 0;
	driver->num_dci_client = 0;
	mutex_init(&driver->dci_mutex);
	mutex_init(&dci_log_mask_mutex);
	mutex_init(&dci_event_mask_mutex);
	spin_lock_init(&ws_lock);

	ret = diag_dci_init_ops_tbl();
	if (ret)
		goto err;

	if (driver->apps_dci_buf == NULL) {
		driver->apps_dci_buf = vzalloc(DCI_BUF_SIZE);
		if (driver->apps_dci_buf == NULL)
			goto err;
	}
	INIT_LIST_HEAD(&driver->dci_client_list);
	INIT_LIST_HEAD(&driver->dci_req_list);

	driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
	if (!driver->diag_dci_wq)
		goto err;

	INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);

	setup_timer(&dci_drain_timer, dci_drain_data, 0);
	return DIAG_DCI_NO_ERROR;
err:
	pr_err("diag: Could not initialize diag DCI buffers");
	/* vfree(NULL) is a no-op, so partial allocations are safe to free */
	vfree(driver->apps_dci_buf);
	driver->apps_dci_buf = NULL;

	if (driver->diag_dci_wq)
		destroy_workqueue(driver->diag_dci_wq);
	vfree(partial_pkt.data);
	partial_pkt.data = NULL;
	mutex_destroy(&driver->dci_mutex);
	mutex_destroy(&dci_log_mask_mutex);
	mutex_destroy(&dci_event_mask_mutex);
	return DIAG_DCI_NO_REG;
}
2804
2805void diag_dci_channel_init(void)
2806{
2807 uint8_t peripheral;
2808
2809 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
2810 diagfwd_open(peripheral, TYPE_DCI);
2811 diagfwd_open(peripheral, TYPE_DCI_CMD);
2812 }
2813}
2814
/*
 * diag_dci_exit - tear down the DCI subsystem: free the reassembly and
 * apps-side buffers, destroy the DCI locks, and flush/destroy the DCI
 * workqueue. Counterpart to diag_dci_init().
 */
void diag_dci_exit(void)
{
	vfree(partial_pkt.data);
	partial_pkt.data = NULL;
	vfree(driver->apps_dci_buf);
	driver->apps_dci_buf = NULL;
	mutex_destroy(&driver->dci_mutex);
	mutex_destroy(&dci_log_mask_mutex);
	mutex_destroy(&dci_event_mask_mutex);
	destroy_workqueue(driver->diag_dci_wq);
}
2826
2827int diag_dci_clear_log_mask(int client_id)
2828{
2829 int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
2830 uint8_t *update_ptr;
2831 struct diag_dci_client_tbl *entry = NULL;
2832
2833 entry = diag_dci_get_client_entry(client_id);
2834 if (!entry) {
2835 pr_err("diag: In %s, invalid client entry\n", __func__);
2836 return DIAG_DCI_TABLE_ERR;
2837 }
2838 token = entry->client_info.token;
2839 update_ptr = dci_ops_tbl[token].log_mask_composite;
2840
2841 create_dci_log_mask_tbl(entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
2842 diag_dci_invalidate_cumulative_log_mask(token);
2843
2844 /*
2845 * Send updated mask to userspace clients only if the client
2846 * is registered on the local processor
2847 */
2848 if (token == DCI_LOCAL_PROC)
2849 diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
2850 /* Send updated mask to peripherals */
2851 err = dci_ops_tbl[token].send_log_mask(token);
2852 return err;
2853}
2854
2855int diag_dci_clear_event_mask(int client_id)
2856{
2857 int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
2858 uint8_t *update_ptr;
2859 struct diag_dci_client_tbl *entry = NULL;
2860
2861 entry = diag_dci_get_client_entry(client_id);
2862 if (!entry) {
2863 pr_err("diag: In %s, invalid client entry\n", __func__);
2864 return DIAG_DCI_TABLE_ERR;
2865 }
2866 token = entry->client_info.token;
2867 update_ptr = dci_ops_tbl[token].event_mask_composite;
2868
2869 create_dci_event_mask_tbl(entry->dci_event_mask);
2870 diag_dci_invalidate_cumulative_event_mask(token);
2871
2872 /*
2873 * Send updated mask to userspace clients only if the client is
2874 * registerted on the local processor
2875 */
2876 if (token == DCI_LOCAL_PROC)
2877 diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
2878 /* Send updated mask to peripherals */
2879 err = dci_ops_tbl[token].send_event_mask(token);
2880 return err;
2881}
2882
2883uint8_t diag_dci_get_cumulative_real_time(int token)
2884{
2885 uint8_t real_time = MODE_NONREALTIME;
2886 struct list_head *start, *temp;
2887 struct diag_dci_client_tbl *entry = NULL;
2888
2889 list_for_each_safe(start, temp, &driver->dci_client_list) {
2890 entry = list_entry(start, struct diag_dci_client_tbl, track);
2891 if (entry->real_time == MODE_REALTIME &&
2892 entry->client_info.token == token) {
2893 real_time = 1;
2894 break;
2895 }
2896 }
2897 return real_time;
2898}
2899
2900int diag_dci_set_real_time(struct diag_dci_client_tbl *entry, uint8_t real_time)
2901{
2902 if (!entry) {
2903 pr_err("diag: In %s, invalid client entry\n", __func__);
2904 return 0;
2905 }
2906 entry->real_time = real_time;
2907 return 1;
2908}
2909
2910int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
2911{
2912 int i, err = 0;
2913 struct diag_dci_client_tbl *new_entry = NULL;
2914 struct diag_dci_buf_peripheral_t *proc_buf = NULL;
2915
2916 if (!reg_entry)
2917 return DIAG_DCI_NO_REG;
2918 if (!VALID_DCI_TOKEN(reg_entry->token)) {
2919 pr_alert("diag: Invalid DCI client token, %d\n",
2920 reg_entry->token);
2921 return DIAG_DCI_NO_REG;
2922 }
2923
2924 if (driver->dci_state == DIAG_DCI_NO_REG)
2925 return DIAG_DCI_NO_REG;
2926
2927 if (driver->num_dci_client >= MAX_DCI_CLIENTS)
2928 return DIAG_DCI_NO_REG;
2929
2930 new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
2931 if (!new_entry)
2932 return DIAG_DCI_NO_REG;
2933
2934 mutex_lock(&driver->dci_mutex);
2935
2936 new_entry->client = current;
2937 new_entry->tgid = current->tgid;
2938 new_entry->client_info.notification_list =
2939 reg_entry->notification_list;
2940 new_entry->client_info.signal_type =
2941 reg_entry->signal_type;
2942 new_entry->client_info.token = reg_entry->token;
2943 switch (reg_entry->token) {
2944 case DCI_LOCAL_PROC:
2945 new_entry->num_buffers = NUM_DCI_PERIPHERALS;
2946 break;
2947 case DCI_MDM_PROC:
2948 new_entry->num_buffers = 1;
2949 break;
2950 }
Manoj Prabhu Bdab4c6c2017-06-14 16:55:59 +05302951
2952 new_entry->buffers = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002953 new_entry->real_time = MODE_REALTIME;
2954 new_entry->in_service = 0;
2955 INIT_LIST_HEAD(&new_entry->list_write_buf);
2956 mutex_init(&new_entry->write_buf_mutex);
Hardik Arya2a665fc2018-03-08 11:50:49 +05302957 new_entry->dci_log_mask = vzalloc(DCI_LOG_MASK_SIZE);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002958 if (!new_entry->dci_log_mask) {
2959 pr_err("diag: Unable to create log mask for client, %d",
2960 driver->dci_client_id);
2961 goto fail_alloc;
2962 }
2963 create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
2964
Hardik Arya2a665fc2018-03-08 11:50:49 +05302965 new_entry->dci_event_mask = vzalloc(DCI_EVENT_MASK_SIZE);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002966 if (!new_entry->dci_event_mask)
2967 goto fail_alloc;
2968 create_dci_event_mask_tbl(new_entry->dci_event_mask);
2969
2970 new_entry->buffers = kzalloc(new_entry->num_buffers *
2971 sizeof(struct diag_dci_buf_peripheral_t),
Hardik Arya2a665fc2018-03-08 11:50:49 +05302972 GFP_KERNEL);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002973 if (!new_entry->buffers) {
2974 pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
2975 __func__);
2976 goto fail_alloc;
2977 }
2978
2979 for (i = 0; i < new_entry->num_buffers; i++) {
2980 proc_buf = &new_entry->buffers[i];
2981 if (!proc_buf)
2982 goto fail_alloc;
2983
2984 mutex_init(&proc_buf->health_mutex);
2985 mutex_init(&proc_buf->buf_mutex);
2986 proc_buf->health.dropped_events = 0;
2987 proc_buf->health.dropped_logs = 0;
2988 proc_buf->health.received_events = 0;
2989 proc_buf->health.received_logs = 0;
2990 proc_buf->buf_primary = kzalloc(
2991 sizeof(struct diag_dci_buffer_t),
2992 GFP_KERNEL);
2993 if (!proc_buf->buf_primary)
2994 goto fail_alloc;
2995 proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
Hardik Arya2a665fc2018-03-08 11:50:49 +05302996 GFP_KERNEL);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002997 if (!proc_buf->buf_cmd)
2998 goto fail_alloc;
2999 err = diag_dci_init_buffer(proc_buf->buf_primary,
3000 DCI_BUF_PRIMARY);
3001 if (err)
3002 goto fail_alloc;
3003 err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
3004 if (err)
3005 goto fail_alloc;
3006 proc_buf->buf_curr = proc_buf->buf_primary;
3007 }
3008
3009 list_add_tail(&new_entry->track, &driver->dci_client_list);
3010 driver->dci_client_id++;
3011 new_entry->client_info.client_id = driver->dci_client_id;
3012 reg_entry->client_id = driver->dci_client_id;
3013 driver->num_dci_client++;
3014 if (driver->num_dci_client == 1)
3015 diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP, reg_entry->token);
3016 queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
3017 mutex_unlock(&driver->dci_mutex);
3018
3019 return driver->dci_client_id;
3020
3021fail_alloc:
3022 if (new_entry) {
Manoj Prabhu Bdab4c6c2017-06-14 16:55:59 +05303023 for (i = 0; ((i < new_entry->num_buffers) &&
3024 new_entry->buffers); i++) {
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003025 proc_buf = &new_entry->buffers[i];
3026 if (proc_buf) {
3027 mutex_destroy(&proc_buf->health_mutex);
3028 if (proc_buf->buf_primary) {
Hardik Arya2a665fc2018-03-08 11:50:49 +05303029 vfree(proc_buf->buf_primary->data);
Manoj Prabhu B34e54702016-12-21 13:02:38 +05303030 proc_buf->buf_primary->data = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003031 mutex_destroy(
3032 &proc_buf->buf_primary->data_mutex);
3033 }
3034 kfree(proc_buf->buf_primary);
Manoj Prabhu B34e54702016-12-21 13:02:38 +05303035 proc_buf->buf_primary = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003036 if (proc_buf->buf_cmd) {
Hardik Arya2a665fc2018-03-08 11:50:49 +05303037 vfree(proc_buf->buf_cmd->data);
Manoj Prabhu B34e54702016-12-21 13:02:38 +05303038 proc_buf->buf_cmd->data = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003039 mutex_destroy(
3040 &proc_buf->buf_cmd->data_mutex);
3041 }
3042 kfree(proc_buf->buf_cmd);
Manoj Prabhu B34e54702016-12-21 13:02:38 +05303043 proc_buf->buf_cmd = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003044 }
3045 }
Hardik Arya2a665fc2018-03-08 11:50:49 +05303046 vfree(new_entry->dci_event_mask);
Manoj Prabhu B34e54702016-12-21 13:02:38 +05303047 new_entry->dci_event_mask = NULL;
Hardik Arya2a665fc2018-03-08 11:50:49 +05303048 vfree(new_entry->dci_log_mask);
Manoj Prabhu B34e54702016-12-21 13:02:38 +05303049 new_entry->dci_log_mask = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003050 kfree(new_entry->buffers);
Manoj Prabhu B34e54702016-12-21 13:02:38 +05303051 new_entry->buffers = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003052 kfree(new_entry);
Manoj Prabhu B34e54702016-12-21 13:02:38 +05303053 new_entry = NULL;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003054 }
3055 mutex_unlock(&driver->dci_mutex);
3056 return DIAG_DCI_NO_REG;
3057}
3058
/*
 * diag_dci_deinit_client - tear down a registered DCI client
 * @entry: client table entry to destroy (freed on success)
 *
 * Unlinks the client from the driver's list, clears and re-sends the
 * cumulative log/event masks, drops any outstanding packet requests and
 * pending write buffers, frees all per-peripheral buffers, and finally
 * re-evaluates the DCI voting state.
 *
 * Returns DIAG_DCI_NO_ERROR on success, DIAG_DCI_NOT_SUPPORTED for a
 * NULL entry, or the error from sending the updated masks.
 *
 * Caller context: callers are expected to hold the appropriate DCI
 * locking around this teardown — TODO confirm against call sites.
 */
int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
{
	int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
	struct diag_dci_buffer_t *buf_entry, *temp;
	struct list_head *start, *req_temp;
	struct dci_pkt_req_entry_t *req_entry = NULL;
	int token = DCI_LOCAL_PROC;

	if (!entry)
		return DIAG_DCI_NOT_SUPPORTED;

	token = entry->client_info.token;
	/*
	 * Remove the entry from the list before freeing the buffers
	 * to ensure that we don't have any invalid access.
	 */
	if (!list_empty(&entry->track))
		list_del(&entry->track);
	driver->num_dci_client--;
	/*
	 * Clear the client's log and event masks, update the cumulative
	 * masks and send the masks to peripherals
	 */
	vfree(entry->dci_log_mask);
	entry->dci_log_mask = NULL;
	diag_dci_invalidate_cumulative_log_mask(token);
	if (token == DCI_LOCAL_PROC)
		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
	/*
	 * NOTE(review): if either send_log_mask() or send_event_mask()
	 * below fails, we return with the entry already unlinked and its
	 * masks freed but the entry, buffers and pending requests still
	 * allocated — they are leaked. Confirm whether callers retry.
	 */
	ret = dci_ops_tbl[token].send_log_mask(token);
	if (ret != DIAG_DCI_NO_ERROR)
		return ret;
	vfree(entry->dci_event_mask);
	entry->dci_event_mask = NULL;
	diag_dci_invalidate_cumulative_event_mask(token);
	if (token == DCI_LOCAL_PROC)
		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
	ret = dci_ops_tbl[token].send_event_mask(token);
	if (ret != DIAG_DCI_NO_ERROR)
		return ret;

	/* Drop any outstanding packet-request bookkeeping for this client */
	list_for_each_safe(start, req_temp, &driver->dci_req_list) {
		req_entry = list_entry(start, struct dci_pkt_req_entry_t,
				track);
		if (req_entry->client_id == entry->client_info.client_id) {
			if (!list_empty(&req_entry->track))
				list_del(&req_entry->track);
			kfree(req_entry);
			req_entry = NULL;
		}
	}

	/* Clean up any buffer that is pending write */
	mutex_lock(&entry->write_buf_mutex);
	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
							buf_track) {
		if (!list_empty(&buf_entry->buf_track))
			list_del(&buf_entry->buf_track);
		if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
			/* Secondary buffers come from the DCI mempool */
			mutex_lock(&buf_entry->data_mutex);
			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
			buf_entry->data = NULL;
			mutex_unlock(&buf_entry->data_mutex);
			kfree(buf_entry);
			buf_entry = NULL;
		} else if (buf_entry->buf_type == DCI_BUF_CMD) {
			peripheral = buf_entry->data_source;
			/* APPS_DATA cmd buffers skip the wakeup accounting */
			if (peripheral == APPS_DATA)
				continue;
		}
		/*
		 * These are buffers that can't be written to the client which
		 * means that the copy cannot be completed. Make sure that we
		 * remove those references in DCI wakeup source.
		 */
		diag_ws_on_copy_fail(DIAG_WS_DCI);
	}
	mutex_unlock(&entry->write_buf_mutex);

	/* Free the per-peripheral buffer sets allocated at registration */
	for (i = 0; i < entry->num_buffers; i++) {
		proc_buf = &entry->buffers[i];
		buf_entry = proc_buf->buf_curr;
		mutex_lock(&proc_buf->buf_mutex);
		/* Clean up secondary buffer from mempool that is active */
		if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
			mutex_lock(&buf_entry->data_mutex);
			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
			buf_entry->data = NULL;
			mutex_unlock(&buf_entry->data_mutex);
			mutex_destroy(&buf_entry->data_mutex);
			kfree(buf_entry);
			buf_entry = NULL;
		}

		mutex_lock(&proc_buf->buf_primary->data_mutex);
		vfree(proc_buf->buf_primary->data);
		proc_buf->buf_primary->data = NULL;
		mutex_unlock(&proc_buf->buf_primary->data_mutex);

		mutex_lock(&proc_buf->buf_cmd->data_mutex);
		vfree(proc_buf->buf_cmd->data);
		proc_buf->buf_cmd->data = NULL;
		mutex_unlock(&proc_buf->buf_cmd->data_mutex);

		mutex_destroy(&proc_buf->health_mutex);
		mutex_destroy(&proc_buf->buf_primary->data_mutex);
		mutex_destroy(&proc_buf->buf_cmd->data_mutex);

		kfree(proc_buf->buf_primary);
		proc_buf->buf_primary = NULL;
		kfree(proc_buf->buf_cmd);
		proc_buf->buf_cmd = NULL;
		mutex_unlock(&proc_buf->buf_mutex);
	}
	mutex_destroy(&entry->write_buf_mutex);

	kfree(entry->buffers);
	entry->buffers = NULL;
	kfree(entry);
	entry = NULL;

	/* Re-evaluate the DCI vote now that this client is gone */
	if (driver->num_dci_client == 0) {
		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
	} else {
		real_time = diag_dci_get_cumulative_real_time(token);
		diag_update_real_time_vote(DIAG_PROC_DCI, real_time, token);
	}
	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);

	return DIAG_DCI_NO_ERROR;
}
3190
3191int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
3192{
3193 uint8_t dest_channel = TYPE_DATA;
3194 int err = 0;
3195
3196 if (!buf || peripheral >= NUM_PERIPHERALS || len < 0 ||
3197 !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
3198 DIAG_LOG(DIAG_DEBUG_DCI,
3199 "buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
3200 buf, peripheral, len,
3201 driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
3202 return -EINVAL;
3203 }
3204
3205 if (pkt_type == DIAG_DATA_TYPE) {
3206 dest_channel = TYPE_DCI_CMD;
3207 } else if (pkt_type == DIAG_CNTL_TYPE) {
3208 dest_channel = TYPE_CNTL;
3209 } else {
3210 pr_err("diag: Invalid DCI pkt type in %s", __func__);
3211 return -EINVAL;
3212 }
3213
3214 err = diagfwd_write(peripheral, dest_channel, buf, len);
3215 if (err && err != -ENODEV) {
3216 pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
3217 __func__, peripheral, dest_channel, len, err);
3218 } else {
3219 err = DIAG_DCI_NO_ERROR;
3220 }
3221
3222 return err;
3223}
3224
3225int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc)
3226{
3227 struct diag_dci_client_tbl *entry = NULL;
3228 struct diag_dci_health_t *health = NULL;
3229 struct diag_dci_health_stats *stats = NULL;
3230 int i, proc;
3231
3232 if (!stats_proc)
3233 return -EINVAL;
3234
3235 stats = &stats_proc->health;
3236 proc = stats_proc->proc;
3237 if (proc < ALL_PROC || proc > APPS_DATA)
3238 return -EINVAL;
3239
3240 entry = diag_dci_get_client_entry(stats_proc->client_id);
3241 if (!entry)
3242 return DIAG_DCI_NOT_SUPPORTED;
3243
3244 /*
3245 * If the client has registered for remote processor, the
3246 * proc field doesn't have any effect as they have only one buffer.
3247 */
3248 if (entry->client_info.token)
3249 proc = 0;
3250
3251 stats->stats.dropped_logs = 0;
3252 stats->stats.dropped_events = 0;
3253 stats->stats.received_logs = 0;
3254 stats->stats.received_events = 0;
3255
3256 if (proc != ALL_PROC) {
3257 health = &entry->buffers[proc].health;
3258 stats->stats.dropped_logs = health->dropped_logs;
3259 stats->stats.dropped_events = health->dropped_events;
3260 stats->stats.received_logs = health->received_logs;
3261 stats->stats.received_events = health->received_events;
3262 if (stats->reset_status) {
3263 mutex_lock(&entry->buffers[proc].health_mutex);
3264 health->dropped_logs = 0;
3265 health->dropped_events = 0;
3266 health->received_logs = 0;
3267 health->received_events = 0;
3268 mutex_unlock(&entry->buffers[proc].health_mutex);
3269 }
3270 return DIAG_DCI_NO_ERROR;
3271 }
3272
3273 for (i = 0; i < entry->num_buffers; i++) {
3274 health = &entry->buffers[i].health;
3275 stats->stats.dropped_logs += health->dropped_logs;
3276 stats->stats.dropped_events += health->dropped_events;
3277 stats->stats.received_logs += health->received_logs;
3278 stats->stats.received_events += health->received_events;
3279 if (stats->reset_status) {
3280 mutex_lock(&entry->buffers[i].health_mutex);
3281 health->dropped_logs = 0;
3282 health->dropped_events = 0;
3283 health->received_logs = 0;
3284 health->received_events = 0;
3285 mutex_unlock(&entry->buffers[i].health_mutex);
3286 }
3287 }
3288 return DIAG_DCI_NO_ERROR;
3289}
3290
3291int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list)
3292{
3293 if (!support_list)
3294 return -ENOMEM;
3295
3296 if (!VALID_DCI_TOKEN(support_list->proc))
3297 return -EIO;
3298
3299 support_list->list = dci_ops_tbl[support_list->proc].peripheral_status;
3300 return DIAG_DCI_NO_ERROR;
3301}