blob: ac777b03bac76567a7567b1fc653268071649d2f [file] [log] [blame]
Sreelakshmi Gownipalli8d477d32017-02-08 19:49:06 -08001/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/cdev.h>
17#include <linux/fs.h>
18#include <linux/device.h>
19#include <linux/delay.h>
20#include <linux/uaccess.h>
21#include <linux/diagchar.h>
22#include <linux/sched.h>
23#include <linux/ratelimit.h>
24#include <linux/timer.h>
25#ifdef CONFIG_DIAG_OVER_USB
26#include <linux/usb/usbdiag.h>
27#endif
28#include <asm/current.h>
29#include "diagchar_hdlc.h"
30#include "diagmem.h"
31#include "diagchar.h"
32#include "diagfwd.h"
33#include "diagfwd_cntl.h"
34#include "diag_dci.h"
35#include "diag_debugfs.h"
36#include "diag_masks.h"
37#include "diagfwd_bridge.h"
38#include "diag_usb.h"
39#include "diag_memorydevice.h"
40#include "diag_mux.h"
41#include "diag_ipc_logging.h"
42#include "diagfwd_peripheral.h"
43
44#include <linux/coresight-stm.h>
45#include <linux/kernel.h>
46#ifdef CONFIG_COMPAT
47#include <linux/compat.h>
48#endif
49
50MODULE_DESCRIPTION("Diag Char Driver");
51MODULE_LICENSE("GPL v2");
52
53#define MIN_SIZ_ALLOW 4
54#define INIT 1
55#define EXIT -1
56struct diagchar_dev *driver;
57struct diagchar_priv {
58 int pid;
59};
60
61#define USER_SPACE_RAW_DATA 0
62#define USER_SPACE_HDLC_DATA 1
63
64/* Memory pool variables */
65/* Used for copying any incoming packet from user space clients. */
66static unsigned int poolsize = 12;
67module_param(poolsize, uint, 0000);
68
69/*
70 * Used for HDLC encoding packets coming from the user
71 * space.
72 */
73static unsigned int poolsize_hdlc = 10;
74module_param(poolsize_hdlc, uint, 0000);
75
76/*
77 * This is used for incoming DCI requests from the user space clients.
78 * Don't expose itemsize as it is internal.
79 */
80static unsigned int poolsize_user = 8;
81module_param(poolsize_user, uint, 0000);
82
83/*
84 * USB structures allocated for writing Diag data generated on the Apps to USB.
85 * Don't expose itemsize as it is constant.
86 */
87static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
88static unsigned int poolsize_usb_apps = 10;
89module_param(poolsize_usb_apps, uint, 0000);
90
91/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
92static unsigned int poolsize_dci = 10;
93module_param(poolsize_dci, uint, 0000);
94
95#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
96/* Used for reading data from the remote device. */
97static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
98static unsigned int poolsize_mdm = 18;
99module_param(itemsize_mdm, uint, 0000);
100module_param(poolsize_mdm, uint, 0000);
101
102/*
103 * Used for reading DCI data from the remote device.
104 * Don't expose poolsize for DCI data. There is only one read buffer
105 */
106static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
107static unsigned int poolsize_mdm_dci = 1;
108module_param(itemsize_mdm_dci, uint, 0000);
109
110/*
111 * Used for USB structues associated with a remote device.
112 * Don't expose the itemsize since it is constant.
113 */
114static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
115static unsigned int poolsize_mdm_usb = 18;
116module_param(poolsize_mdm_usb, uint, 0000);
117
118/*
119 * Used for writing read DCI data to remote peripherals. Don't
120 * expose poolsize for DCI data. There is only one read
121 * buffer. Add 6 bytes for DCI header information: Start (1),
122 * Version (1), Length (2), Tag (2)
123 */
124static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
125static unsigned int poolsize_mdm_dci_write = 1;
126module_param(itemsize_mdm_dci_write, uint, 0000);
127
128/*
129 * Used for USB structures associated with a remote SMUX
130 * device Don't expose the itemsize since it is constant
131 */
132static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
133static unsigned int poolsize_qsc_usb = 8;
134module_param(poolsize_qsc_usb, uint, 0000);
135#endif
136
137/* This is the max number of user-space clients supported at initialization*/
138static unsigned int max_clients = 15;
139static unsigned int threshold_client_limit = 50;
140module_param(max_clients, uint, 0000);
141
142/* Timer variables */
143static struct timer_list drain_timer;
144static int timer_in_progress;
145
146struct diag_apps_data_t {
147 void *buf;
148 uint32_t len;
149 int ctxt;
150};
151
152static struct diag_apps_data_t hdlc_data;
153static struct diag_apps_data_t non_hdlc_data;
154static struct mutex apps_data_mutex;
155
156#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
157
158#ifdef DIAG_DEBUG
159uint16_t diag_debug_mask;
160void *diag_ipc_log;
161#endif
162
163static void diag_md_session_close(struct diag_md_session_t *session_info);
164
165/*
166 * Returns the next delayed rsp id. If wrapping is enabled,
167 * wraps the delayed rsp id to DIAGPKT_MAX_DELAYED_RSP.
168 */
169static uint16_t diag_get_next_delayed_rsp_id(void)
170{
171 uint16_t rsp_id = 0;
172
173 mutex_lock(&driver->delayed_rsp_mutex);
174 rsp_id = driver->delayed_rsp_id;
175 if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
176 rsp_id++;
177 else {
178 if (wrap_enabled) {
179 rsp_id = 1;
180 wrap_count++;
181 } else
182 rsp_id = DIAGPKT_MAX_DELAYED_RSP;
183 }
184 driver->delayed_rsp_id = rsp_id;
185 mutex_unlock(&driver->delayed_rsp_mutex);
186
187 return rsp_id;
188}
189
190static int diag_switch_logging(struct diag_logging_mode_param_t *param);
191
/*
 * Copy @length bytes of @data into the user buffer @buf at offset @ret
 * and advance @ret.  Relies on `count` and `ret` being in scope at the
 * call site.
 *
 * Fix: on a short user buffer or copy_to_user() failure, set @ret to
 * -EFAULT and STOP.  Without the break, the trailing "ret += length"
 * overwrote the error value, so callers' "if (ret == -EFAULT)" checks
 * could never fire and a partial/failed copy was silently treated as
 * success.
 */
#define COPY_USER_SPACE_OR_ERR(buf, data, length) \
do { \
	if ((count < ret+length) || (copy_to_user(buf, \
			(void *)&data, length))) { \
		ret = -EFAULT; \
		break; \
	} \
	ret += length; \
} while (0)
200
201static void drain_timer_func(unsigned long data)
202{
203 queue_work(driver->diag_wq, &(driver->diag_drain_work));
204}
205
206static void diag_drain_apps_data(struct diag_apps_data_t *data)
207{
208 int err = 0;
209
210 if (!data || !data->buf)
211 return;
212
213 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
214 data->ctxt);
215 if (err)
216 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
217
218 data->buf = NULL;
219 data->len = 0;
220}
221
/* Worker: notify user-space clients of an HDLC support-type update. */
void diag_update_user_client_work_fn(struct work_struct *work)
{
	diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
}
226
/* Worker: notify memory-device clients of an HDLC support-type update. */
static void diag_update_md_client_work_fn(struct work_struct *work)
{
	diag_update_md_clients(HDLC_SUPPORT_TYPE);
}
231
232void diag_drain_work_fn(struct work_struct *work)
233{
234 struct diag_md_session_t *session_info = NULL;
235 uint8_t hdlc_disabled = 0;
236
237 timer_in_progress = 0;
238 mutex_lock(&apps_data_mutex);
239 session_info = diag_md_session_get_peripheral(APPS_DATA);
240 if (session_info)
241 hdlc_disabled = session_info->hdlc_disabled;
242 else
243 hdlc_disabled = driver->hdlc_disabled;
244
245 if (!hdlc_disabled)
246 diag_drain_apps_data(&hdlc_data);
247 else
248 diag_drain_apps_data(&non_hdlc_data);
249 mutex_unlock(&apps_data_mutex);
250}
251
252void check_drain_timer(void)
253{
254 int ret = 0;
255
256 if (!timer_in_progress) {
257 timer_in_progress = 1;
258 ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
259 }
260}
261
262void diag_add_client(int i, struct file *file)
263{
264 struct diagchar_priv *diagpriv_data;
265
266 driver->client_map[i].pid = current->tgid;
267 diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
268 GFP_KERNEL);
269 if (diagpriv_data)
270 diagpriv_data->pid = current->tgid;
271 file->private_data = diagpriv_data;
272 strlcpy(driver->client_map[i].name, current->comm, 20);
273 driver->client_map[i].name[19] = '\0';
274}
275
276static void diag_mempool_init(void)
277{
278 uint32_t itemsize = DIAG_MAX_REQ_SIZE;
279 uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
280 uint32_t itemsize_dci = IN_BUF_SIZE;
281 uint32_t itemsize_user = DCI_REQ_BUF_SIZE;
282
283 itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
284 CALLBACK_HDR_SIZE);
285 diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
286 diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
287 diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
288 diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);
289
290 diagmem_init(driver, POOL_TYPE_COPY);
291 diagmem_init(driver, POOL_TYPE_HDLC);
292 diagmem_init(driver, POOL_TYPE_USER);
293 diagmem_init(driver, POOL_TYPE_DCI);
294}
295
/* Tear down the local memory pools created by diag_mempool_init(). */
static void diag_mempool_exit(void)
{
	diagmem_exit(driver, POOL_TYPE_COPY);
	diagmem_exit(driver, POOL_TYPE_HDLC);
	diagmem_exit(driver, POOL_TYPE_USER);
	diagmem_exit(driver, POOL_TYPE_DCI);
}
303
304static int diagchar_open(struct inode *inode, struct file *file)
305{
306 int i = 0;
307 void *temp;
308
309 if (driver) {
310 mutex_lock(&driver->diagchar_mutex);
311
312 for (i = 0; i < driver->num_clients; i++)
313 if (driver->client_map[i].pid == 0)
314 break;
315
316 if (i < driver->num_clients) {
317 diag_add_client(i, file);
318 } else {
319 if (i < threshold_client_limit) {
320 driver->num_clients++;
321 temp = krealloc(driver->client_map
322 , (driver->num_clients) * sizeof(struct
323 diag_client_map), GFP_KERNEL);
324 if (!temp)
325 goto fail;
326 else
327 driver->client_map = temp;
328 temp = krealloc(driver->data_ready
329 , (driver->num_clients) * sizeof(int),
330 GFP_KERNEL);
331 if (!temp)
332 goto fail;
333 else
334 driver->data_ready = temp;
335 diag_add_client(i, file);
336 } else {
337 mutex_unlock(&driver->diagchar_mutex);
338 pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
339 pr_err_ratelimited("diag: Cannot open handle %s %d",
340 current->comm, current->tgid);
341 for (i = 0; i < driver->num_clients; i++)
342 pr_debug("%d) %s PID=%d", i, driver->
343 client_map[i].name,
344 driver->client_map[i].pid);
345 return -ENOMEM;
346 }
347 }
348 driver->data_ready[i] = 0x0;
349 driver->data_ready[i] |= MSG_MASKS_TYPE;
350 driver->data_ready[i] |= EVENT_MASKS_TYPE;
351 driver->data_ready[i] |= LOG_MASKS_TYPE;
352 driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
353 driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
354
355 if (driver->ref_count == 0)
356 diag_mempool_init();
357 driver->ref_count++;
358 mutex_unlock(&driver->diagchar_mutex);
359 return 0;
360 }
361 return -ENOMEM;
362
363fail:
364 mutex_unlock(&driver->diagchar_mutex);
365 driver->num_clients--;
366 pr_err_ratelimited("diag: Insufficient memory for new client");
367 return -ENOMEM;
368}
369
370static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
371{
372 uint32_t ret = 0;
373
374 if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
375 ret |= DIAG_CON_APSS;
376 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
377 ret |= DIAG_CON_MPSS;
378 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
379 ret |= DIAG_CON_LPASS;
380 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
381 ret |= DIAG_CON_WCNSS;
382 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
383 ret |= DIAG_CON_SENSORS;
384 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
385 ret |= DIAG_CON_WDSP;
Sreelakshmi Gownipalli588a31d2016-11-02 13:33:43 -0700386 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
387 ret |= DIAG_CON_CDSP;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700388
389 return ret;
390}
391
392void diag_clear_masks(struct diag_md_session_t *info)
393{
394 int ret;
395 char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
396 char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
397 char cmd_disable_event_mask[] = { 0x60, 0};
398
399 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
400 "diag: %s: masks clear request upon %s\n", __func__,
401 ((info) ? "ODL exit" : "USB Disconnection"));
402
403 ret = diag_process_apps_masks(cmd_disable_log_mask,
404 sizeof(cmd_disable_log_mask), info);
405 ret = diag_process_apps_masks(cmd_disable_msg_mask,
406 sizeof(cmd_disable_msg_mask), info);
407 ret = diag_process_apps_masks(cmd_disable_event_mask,
408 sizeof(cmd_disable_event_mask), info);
409 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
410 "diag:%s: masks cleared successfully\n", __func__);
411}
412
/*
 * Tear down the memory-device logging session owned by @pid: clear all
 * diag masks for that session, close the session and the mux channels
 * of every peripheral it covered, then switch those peripherals back
 * to USB_MODE.  No-op when @pid owns no session.
 *
 * Lock ordering as written: diag_maskclear_mutex is taken briefly to
 * raise mask_clear, and diagchar_mutex is held only around the final
 * mode switch.
 */
static void diag_close_logging_process(const int pid)
{
	int i;
	int session_peripheral_mask;
	struct diag_md_session_t *session_info = NULL;
	struct diag_logging_mode_param_t params;

	session_info = diag_md_session_get_pid(pid);
	if (!session_info)
		return;

	diag_clear_masks(session_info);

	/* Flag the in-progress mask clear for other driver paths. */
	mutex_lock(&driver->diag_maskclear_mutex);
	driver->mask_clear = 1;
	mutex_unlock(&driver->diag_maskclear_mutex);

	/* Snapshot the mask before the session struct goes away. */
	session_peripheral_mask = session_info->peripheral_mask;
	diag_md_session_close(session_info);
	for (i = 0; i < NUM_MD_SESSIONS; i++)
		if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);

	/* Return the freed peripherals to default USB logging. */
	params.req_mode = USB_MODE;
	params.mode_param = 0;
	params.peripheral_mask =
		diag_translate_kernel_to_user_mask(session_peripheral_mask);
	mutex_lock(&driver->diagchar_mutex);
	diag_switch_logging(&params);
	mutex_unlock(&driver->diagchar_mutex);
}
444
/*
 * Detach the calling process from the diag driver: tear down any DCI
 * registrations, close its memory-device logging session, delete its
 * command registrations, release the memory pools on the last close,
 * and free the client_map slot plus the per-file private data.
 *
 * Returns 0 on success, -ENOMEM when the driver is not initialized,
 * -ENOENT for a NULL file, -EINVAL for missing private data.
 *
 * NOTE(review): DCI/session/command cleanup keys off current->tgid
 * while the slot release matches diagpriv_data->pid — assumes the
 * closing task belongs to the process that opened the file; confirm
 * against callers.
 */
static int diag_remove_client_entry(struct file *file)
{
	int i = -1;
	struct diagchar_priv *diagpriv_data = NULL;
	struct diag_dci_client_tbl *dci_entry = NULL;

	if (!driver)
		return -ENOMEM;

	mutex_lock(&driver->diag_file_mutex);
	if (!file) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -ENOENT;
	}
	if (!(file->private_data)) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -EINVAL;
	}

	diagpriv_data = file->private_data;

	/*
	 * clean up any DCI registrations, if this is a DCI client
	 * This will specially help in case of ungraceful exit of any DCI client
	 * This call will remove any pending registrations of such client
	 */
	mutex_lock(&driver->dci_mutex);
	dci_entry = dci_lookup_client_entry_pid(current->tgid);
	if (dci_entry)
		diag_dci_deinit_client(dci_entry);
	mutex_unlock(&driver->dci_mutex);

	diag_close_logging_process(current->tgid);

	/* Delete the pkt response table entry for the exiting process */
	diag_cmd_remove_reg_by_pid(current->tgid);

	mutex_lock(&driver->diagchar_mutex);
	driver->ref_count--;
	/* Last client gone: release the shared memory pools. */
	if (driver->ref_count == 0)
		diag_mempool_exit();

	/* Free the slot whose pid matches this file's private data. */
	for (i = 0; i < driver->num_clients; i++) {
		if (diagpriv_data && diagpriv_data->pid ==
			driver->client_map[i].pid) {
			driver->client_map[i].pid = 0;
			kfree(diagpriv_data);
			diagpriv_data = NULL;
			file->private_data = 0;
			break;
		}
	}
	mutex_unlock(&driver->diagchar_mutex);
	mutex_unlock(&driver->diag_file_mutex);
	return 0;
}
503static int diagchar_close(struct inode *inode, struct file *file)
504{
505 int ret;
506
507 DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
508 current->comm);
509 ret = diag_remove_client_entry(file);
510 mutex_lock(&driver->diag_maskclear_mutex);
511 driver->mask_clear = 0;
512 mutex_unlock(&driver->diag_maskclear_mutex);
513 return ret;
514}
515
516void diag_record_stats(int type, int flag)
517{
518 struct diag_pkt_stats_t *pkt_stats = NULL;
519
520 switch (type) {
521 case DATA_TYPE_EVENT:
522 pkt_stats = &driver->event_stats;
523 break;
524 case DATA_TYPE_F3:
525 pkt_stats = &driver->msg_stats;
526 break;
527 case DATA_TYPE_LOG:
528 pkt_stats = &driver->log_stats;
529 break;
530 case DATA_TYPE_RESPONSE:
531 if (flag != PKT_DROP)
532 return;
533 pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
534 __func__);
535 return;
536 case DATA_TYPE_DELAYED_RESPONSE:
537 /* No counters to increase for Delayed responses */
538 return;
539 default:
540 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
541 __func__, type);
542 return;
543 }
544
545 switch (flag) {
546 case PKT_ALLOC:
547 atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
548 break;
549 case PKT_DROP:
550 atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
551 break;
552 case PKT_RESET:
553 atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
554 atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
555 break;
556 default:
557 pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
558 __func__, flag);
559 return;
560 }
561}
562
563void diag_get_timestamp(char *time_str)
564{
565 struct timeval t;
566 struct tm broken_tm;
567
568 do_gettimeofday(&t);
569 if (!time_str)
570 return;
571 time_to_tm(t.tv_sec, 0, &broken_tm);
572 scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
573 broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
574}
575
576int diag_get_remote(int remote_info)
577{
578 int val = (remote_info < 0) ? -remote_info : remote_info;
579 int remote_val;
580
581 switch (val) {
582 case MDM:
583 case MDM2:
584 case QSC:
585 remote_val = -remote_info;
586 break;
587 default:
588 remote_val = 0;
589 break;
590 }
591
592 return remote_val;
593}
594
595int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
596{
597 int polling = DIAG_CMD_NOT_POLLING;
598
599 if (!entry)
600 return -EIO;
601
602 if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
603 if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
604 entry->cmd_code_hi >= DIAG_CMD_STATUS &&
605 entry->cmd_code_lo <= DIAG_CMD_STATUS)
606 polling = DIAG_CMD_POLLING;
607 else if (entry->subsys_id == DIAG_SS_WCDMA &&
608 entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
609 entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
610 polling = DIAG_CMD_POLLING;
611 else if (entry->subsys_id == DIAG_SS_GSM &&
612 entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
613 entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
614 polling = DIAG_CMD_POLLING;
615 else if (entry->subsys_id == DIAG_SS_PARAMS &&
616 entry->cmd_code_hi >= DIAG_DIAG_POLL &&
617 entry->cmd_code_lo <= DIAG_DIAG_POLL)
618 polling = DIAG_CMD_POLLING;
619 else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
620 entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
621 entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
622 polling = DIAG_CMD_POLLING;
623 }
624
625 return polling;
626}
627
628static void diag_cmd_invalidate_polling(int change_flag)
629{
630 int polling = DIAG_CMD_NOT_POLLING;
631 struct list_head *start;
632 struct list_head *temp;
633 struct diag_cmd_reg_t *item = NULL;
634
635 if (change_flag == DIAG_CMD_ADD) {
636 if (driver->polling_reg_flag)
637 return;
638 }
639
640 driver->polling_reg_flag = 0;
641 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
642 item = list_entry(start, struct diag_cmd_reg_t, link);
643 polling = diag_cmd_chk_polling(&item->entry);
644 if (polling == DIAG_CMD_POLLING) {
645 driver->polling_reg_flag = 1;
646 break;
647 }
648 }
649}
650
651int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
652 int pid)
653{
654 struct diag_cmd_reg_t *new_item = NULL;
655
656 if (!new_entry) {
657 pr_err("diag: In %s, invalid new entry\n", __func__);
658 return -EINVAL;
659 }
660
661 if (proc > APPS_DATA) {
662 pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
663 return -EINVAL;
664 }
665
666 if (proc != APPS_DATA)
667 pid = INVALID_PID;
668
669 new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
670 if (!new_item)
671 return -ENOMEM;
672 kmemleak_not_leak(new_item);
673
674 new_item->pid = pid;
675 new_item->proc = proc;
676 memcpy(&new_item->entry, new_entry,
677 sizeof(struct diag_cmd_reg_entry_t));
678 INIT_LIST_HEAD(&new_item->link);
679
680 mutex_lock(&driver->cmd_reg_mutex);
681 list_add_tail(&new_item->link, &driver->cmd_reg_list);
682 driver->cmd_reg_count++;
683 diag_cmd_invalidate_polling(DIAG_CMD_ADD);
684 mutex_unlock(&driver->cmd_reg_mutex);
685
686 return 0;
687}
688
/*
 * Look up a command registration that covers @entry for processor
 * @proc (ALL_PROC matches any registration's proc).
 *
 * Match classes, evaluated per list node in this order:
 *  1. exact cmd_code + subsys_id, with the registered
 *     [cmd_code_lo, cmd_code_hi] window containing the request range;
 *  2. wildcard-cmd_code registrations (DIAG_CMD_NO_SUBSYS) matched
 *     against subsystem dispatch requests (DIAG_CMD_DIAG_SUBSYS) on
 *     subsys_id + range;
 *  3. fully wildcarded registrations (cmd_code and subsys_id both
 *     DIAG_CMD_NO_SUBSYS) matched on the bare cmd_code; MODE_CMD is
 *     special-cased so RESET_ID is routed only to peripherals and all
 *     other mode commands only to APPS_DATA.
 *
 * Returns the matching registration entry, or NULL.
 * NOTE(review): callers in this file hold cmd_reg_mutex around this
 * walk; external callers should do the same.
 */
struct diag_cmd_reg_entry_t *diag_cmd_search(
			struct diag_cmd_reg_entry_t *entry, int proc)
{
	struct list_head *start;
	struct list_head *temp;
	struct diag_cmd_reg_t *item = NULL;
	struct diag_cmd_reg_entry_t *temp_entry = NULL;

	if (!entry) {
		pr_err("diag: In %s, invalid entry\n", __func__);
		return NULL;
	}

	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
		item = list_entry(start, struct diag_cmd_reg_t, link);
		temp_entry = &item->entry;
		if (temp_entry->cmd_code == entry->cmd_code &&
		    temp_entry->subsys_id == entry->subsys_id &&
		    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
		    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
		    (proc == item->proc || proc == ALL_PROC)) {
			return &item->entry;
		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
			   entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
			if (temp_entry->subsys_id == entry->subsys_id &&
			    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
			    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
			    (proc == item->proc || proc == ALL_PROC)) {
				return &item->entry;
			}
		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
			   temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
			if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
			    (temp_entry->cmd_code_lo <= entry->cmd_code) &&
			    (proc == item->proc || proc == ALL_PROC)) {
				if (entry->cmd_code == MODE_CMD) {
					if (entry->subsys_id == RESET_ID &&
					    item->proc != APPS_DATA) {
						continue;
					}
					if (entry->subsys_id != RESET_ID &&
					    item->proc == APPS_DATA) {
						continue;
					}
				}
				return &item->entry;
			}
		}
	}

	return NULL;
}
741
742void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
743{
744 struct diag_cmd_reg_t *item = NULL;
745 struct diag_cmd_reg_entry_t *temp_entry;
746
747 if (!entry) {
748 pr_err("diag: In %s, invalid entry\n", __func__);
749 return;
750 }
751
752 mutex_lock(&driver->cmd_reg_mutex);
753 temp_entry = diag_cmd_search(entry, proc);
754 if (temp_entry) {
755 item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
756 if (!item) {
757 mutex_unlock(&driver->cmd_reg_mutex);
758 return;
759 }
760 list_del(&item->link);
761 kfree(item);
762 driver->cmd_reg_count--;
763 }
764 diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
765 mutex_unlock(&driver->cmd_reg_mutex);
766}
767
768void diag_cmd_remove_reg_by_pid(int pid)
769{
770 struct list_head *start;
771 struct list_head *temp;
772 struct diag_cmd_reg_t *item = NULL;
773
774 mutex_lock(&driver->cmd_reg_mutex);
775 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
776 item = list_entry(start, struct diag_cmd_reg_t, link);
777 if (item->pid == pid) {
778 list_del(&item->link);
779 kfree(item);
780 driver->cmd_reg_count--;
781 }
782 }
783 mutex_unlock(&driver->cmd_reg_mutex);
784}
785
786void diag_cmd_remove_reg_by_proc(int proc)
787{
788 struct list_head *start;
789 struct list_head *temp;
790 struct diag_cmd_reg_t *item = NULL;
791
792 mutex_lock(&driver->cmd_reg_mutex);
793 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
794 item = list_entry(start, struct diag_cmd_reg_t, link);
795 if (item->proc == proc) {
796 list_del(&item->link);
797 kfree(item);
798 driver->cmd_reg_count--;
799 }
800 }
801 diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
802 mutex_unlock(&driver->cmd_reg_mutex);
803}
804
/*
 * Drain one DCI client's queued write buffers into the user buffer
 * @buf (total size @count), starting at offset *@pret.
 *
 * Each queued buffer that still fits is copied out and released:
 * command buffers are reset for reuse, secondary buffers are returned
 * to the DCI mempool and freed.  When a buffer no longer fits, the
 * walk stops and another drain pass is scheduled via dci_drain_data().
 * On success the accumulated payload length is written at buf+8 and
 * *@pret is advanced past the copied data.
 *
 * Returns 0 once data has been handed out, 1 on invalid arguments,
 * -EINVAL when the starting offset already exceeds @count.
 */
static int diag_copy_dci(char __user *buf, size_t count,
			struct diag_dci_client_tbl *entry, int *pret)
{
	int total_data_len = 0;
	int ret = 0;
	int exit_stat = 1;
	uint8_t drain_again = 0;
	struct diag_dci_buffer_t *buf_entry, *temp;

	if (!buf || !entry || !pret)
		return exit_stat;

	ret = *pret;

	/* Reserve room for the total-length word copied at the end. */
	ret += sizeof(int);
	if (ret >= count) {
		pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
			__func__, ret, count);
		return -EINVAL;
	}

	mutex_lock(&entry->write_buf_mutex);
	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
				 buf_track) {

		if ((ret + buf_entry->data_len) > count) {
			/* User buffer full; finish on a later pass. */
			drain_again = 1;
			break;
		}

		list_del(&buf_entry->buf_track);
		mutex_lock(&buf_entry->data_mutex);
		if ((buf_entry->data_len > 0) &&
		    (buf_entry->in_busy) &&
		    (buf_entry->data)) {
			if (copy_to_user(buf+ret, (void *)buf_entry->data,
					 buf_entry->data_len))
				goto drop;
			ret += buf_entry->data_len;
			total_data_len += buf_entry->data_len;
			diag_ws_on_copy(DIAG_WS_DCI);
drop:
			/* Buffer consumed (or dropped on copy failure). */
			buf_entry->in_busy = 0;
			buf_entry->data_len = 0;
			buf_entry->in_list = 0;
			if (buf_entry->buf_type == DCI_BUF_CMD) {
				mutex_unlock(&buf_entry->data_mutex);
				continue;
			} else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
				diagmem_free(driver, buf_entry->data,
					     POOL_TYPE_DCI);
				buf_entry->data = NULL;
				mutex_unlock(&buf_entry->data_mutex);
				kfree(buf_entry);
				continue;
			}

		}
		mutex_unlock(&buf_entry->data_mutex);
	}

	if (total_data_len > 0) {
		/* Copy the total data length */
		COPY_USER_SPACE_OR_ERR(buf+8, total_data_len, 4);
		if (ret == -EFAULT)
			goto exit;
		ret -= 4;
	} else {
		pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
			__func__, total_data_len);
	}

	exit_stat = 0;
exit:
	entry->in_service = 0;
	mutex_unlock(&entry->write_buf_mutex);
	*pret = ret;
	if (drain_again)
		dci_drain_data(0);

	return exit_stat;
}
887
888#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Size the memory pools used for remote (MDM/MDM2/QSC) traffic and
 * allocate the shared HDLC encode buffer used when forwarding apps
 * data over a bridge.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
static int diag_remote_init(void)
{
	diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
			poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
			poolsize_qsc_usb);
	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
	if (!driver->hdlc_encode_buf)
		return -ENOMEM;
	driver->hdlc_encode_buf_len = 0;
	return 0;
}
910
/* Free the bridge HDLC encode buffer allocated by diag_remote_init(). */
static void diag_remote_exit(void)
{
	kfree(driver->hdlc_encode_buf);
}
915
916static int diag_send_raw_data_remote(int proc, void *buf, int len,
917 uint8_t hdlc_flag)
918{
919 int err = 0;
920 int max_len = 0;
921 uint8_t retry_count = 0;
922 uint8_t max_retries = 3;
923 uint16_t payload = 0;
924 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
925 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
926 int bridge_index = proc - 1;
927 struct diag_md_session_t *session_info = NULL;
928 uint8_t hdlc_disabled = 0;
929
930 if (!buf)
931 return -EINVAL;
932
933 if (len <= 0) {
934 pr_err("diag: In %s, invalid len: %d", __func__, len);
935 return -EBADMSG;
936 }
937
938 if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
939 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
940 bridge_index);
941 return -EINVAL;
942 }
943
944 do {
945 if (driver->hdlc_encode_buf_len == 0)
946 break;
947 usleep_range(10000, 10100);
948 retry_count++;
949 } while (retry_count < max_retries);
950
951 if (driver->hdlc_encode_buf_len != 0)
952 return -EAGAIN;
953 session_info = diag_md_session_get_peripheral(APPS_DATA);
954 if (session_info)
955 hdlc_disabled = session_info->hdlc_disabled;
956 else
957 hdlc_disabled = driver->hdlc_disabled;
958 if (hdlc_disabled) {
959 payload = *(uint16_t *)(buf + 2);
960 driver->hdlc_encode_buf_len = payload;
961 /*
962 * Adding 4 bytes for start (1 byte), version (1 byte) and
963 * payload (2 bytes)
964 */
965 memcpy(driver->hdlc_encode_buf, buf + 4, payload);
966 goto send_data;
967 }
968
969 if (hdlc_flag) {
970 if (len > DIAG_MAX_HDLC_BUF_SIZE) {
971 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
972 len);
973 return -EBADMSG;
974 }
975 driver->hdlc_encode_buf_len = len;
976 memcpy(driver->hdlc_encode_buf, buf, len);
977 goto send_data;
978 }
979
980 /*
981 * The worst case length will be twice as the incoming packet length.
982 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
983 */
984 max_len = (2 * len) + 3;
985 if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
986 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
987 max_len);
988 return -EBADMSG;
989 }
990
991 /* Perform HDLC encoding on incoming data */
992 send.state = DIAG_STATE_START;
993 send.pkt = (void *)(buf);
994 send.last = (void *)(buf + len - 1);
995 send.terminate = 1;
996
997 enc.dest = driver->hdlc_encode_buf;
998 enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
999 diag_hdlc_encode(&send, &enc);
1000 driver->hdlc_encode_buf_len = (int)(enc.dest -
1001 (void *)driver->hdlc_encode_buf);
1002
1003send_data:
1004 err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
1005 driver->hdlc_encode_buf_len);
1006 if (err) {
1007 pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
1008 proc, err);
1009 driver->hdlc_encode_buf_len = 0;
1010 }
1011
1012 return err;
1013}
1014
1015static int diag_process_userspace_remote(int proc, void *buf, int len)
1016{
1017 int bridge_index = proc - 1;
1018
1019 if (!buf || len < 0) {
1020 pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
1021 __func__, buf, len);
1022 return -EINVAL;
1023 }
1024
1025 if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
1026 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
1027 bridge_index);
1028 return -EINVAL;
1029 }
1030
1031 driver->user_space_data_busy = 1;
1032 return diagfwd_bridge_write(bridge_index, buf, len);
1033}
1034#else
/* Bridge support compiled out: remote pool setup is a no-op. */
static int diag_remote_init(void)
{
	return 0;
}
1039
/* Bridge support compiled out: nothing to free. */
static void diag_remote_exit(void)
{
}
1043
/* Bridge support compiled out: report success without a bridge. */
int diagfwd_bridge_init(void)
{
	return 0;
}
1048
/* Bridge support compiled out: nothing to tear down. */
void diagfwd_bridge_exit(void)
{
}
1052
/* Bridge support compiled out: no remote devices present. */
uint16_t diag_get_remote_device_mask(void)
{
	return 0;
}
1057
/* Bridge support compiled out: remote writes are rejected. */
static int diag_send_raw_data_remote(int proc, void *buf, int len,
				     uint8_t hdlc_flag)
{
	return -EINVAL;
}
1063
/* Bridge support compiled out: user-space remote data is discarded. */
static int diag_process_userspace_remote(int proc, void *buf, int len)
{
	return 0;
}
1068#endif
1069
/*
 * Whitelist filter for mask/command requests arriving from user space.
 * Returns 1 when the packet — identified by its command code and, for
 * subsystem dispatch packets, subsystem id plus 16-bit subsystem
 * command — is allowed to reach the mask handling path, 0 otherwise.
 *
 * The numeric cases mirror the diag protocol command tables; the
 * exact values are the security boundary, so they must not be
 * altered casually.
 */
static int mask_request_validate(unsigned char mask_buf[])
{
	uint8_t packet_id;
	uint8_t subsys_id;
	uint16_t ss_cmd;

	packet_id = mask_buf[0];

	if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
		subsys_id = mask_buf[1];
		ss_cmd = *(uint16_t *)(mask_buf + 2);
		switch (subsys_id) {
		case DIAG_SS_DIAG:
			/* Only the peripheral file-read commands pass. */
			if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
				(ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
				(ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
				(ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
				(ss_cmd == DIAG_SS_FILE_READ_APPS))
				return 1;
			break;
		default:
			return 0;
		}
	} else if (packet_id == 0x4B) {
		subsys_id = mask_buf[1];
		ss_cmd = *(uint16_t *)(mask_buf + 2);
		/* Packets with SSID which are allowed */
		switch (subsys_id) {
		case 0x04: /* DIAG_SUBSYS_WCDMA */
			if ((ss_cmd == 0) || (ss_cmd == 0xF))
				return 1;
			break;
		case 0x08: /* DIAG_SUBSYS_GSM */
			if ((ss_cmd == 0) || (ss_cmd == 0x1))
				return 1;
			break;
		case 0x09: /* DIAG_SUBSYS_UMTS */
		case 0x0F: /* DIAG_SUBSYS_CM */
			if (ss_cmd == 0)
				return 1;
			break;
		case 0x0C: /* DIAG_SUBSYS_OS */
			if ((ss_cmd == 2) || (ss_cmd == 0x100))
				return 1; /* MPU and APU */
			break;
		case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
			if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
				return 1;
			else if (ss_cmd == 0x218) /* HDLC Disabled Command*/
				return 0;
			else if (ss_cmd == DIAG_GET_TIME_API)
				return 1;
			else if (ss_cmd == DIAG_SET_TIME_API)
				return 1;
			else if (ss_cmd == DIAG_SWITCH_COMMAND)
				return 1;
			else if (ss_cmd == DIAG_BUFFERING_MODE)
				return 1;
			break;
		case 0x13: /* DIAG_SUBSYS_FS */
			if ((ss_cmd == 0) || (ss_cmd == 0x1))
				return 1;
			break;
		default:
			return 0;
		}
	} else {
		/* Non-subsystem packets: allow only this fixed set. */
		switch (packet_id) {
		case 0x00: /* Version Number */
		case 0x0C: /* CDMA status packet */
		case 0x1C: /* Diag Version */
		case 0x1D: /* Time Stamp */
		case 0x60: /* Event Report Control */
		case 0x63: /* Status snapshot */
		case 0x73: /* Logging Configuration */
		case 0x7C: /* Extended build ID */
		case 0x7D: /* Extended Message configuration */
		case 0x81: /* Event get mask */
		case 0x82: /* Set the event mask */
			return 1;
		default:
			return 0;
		}
	}
	return 0;
}
1156
1157static void diag_md_session_init(void)
1158{
1159 int i;
1160
1161 mutex_init(&driver->md_session_lock);
1162 driver->md_session_mask = 0;
1163 driver->md_session_mode = DIAG_MD_NONE;
1164 for (i = 0; i < NUM_MD_SESSIONS; i++)
1165 driver->md_session_map[i] = NULL;
1166}
1167
1168static void diag_md_session_exit(void)
1169{
1170 int i;
1171 struct diag_md_session_t *session_info = NULL;
1172
1173 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1174 if (driver->md_session_map[i]) {
1175 session_info = driver->md_session_map[i];
1176 diag_log_mask_free(session_info->log_mask);
1177 kfree(session_info->log_mask);
1178 session_info->log_mask = NULL;
1179 diag_msg_mask_free(session_info->msg_mask);
1180 kfree(session_info->msg_mask);
1181 session_info->msg_mask = NULL;
1182 diag_event_mask_free(session_info->event_mask);
1183 kfree(session_info->event_mask);
1184 session_info->event_mask = NULL;
1185 kfree(session_info);
1186 session_info = NULL;
1187 driver->md_session_map[i] = NULL;
1188 }
1189 }
1190 mutex_destroy(&driver->md_session_lock);
1191 driver->md_session_mask = 0;
1192 driver->md_session_mode = DIAG_MD_NONE;
1193}
1194
/*
 * Create a memory-device logging session owned by the calling thread
 * group and attach it to every peripheral selected in @peripheral_mask.
 * The session receives private copies of the global log, event and msg
 * masks; on success driver->md_session_mode becomes DIAG_MD_PERIPHERAL
 * and 0 is returned.  On any failure every partial allocation is
 * released and a negative errno (-ENOMEM/-EEXIST/-EINVAL) is returned.
 *
 * NOTE(review): @mode and @proc are accepted but never read in this
 * body — confirm whether callers rely on them.
 * NOTE(review): md_session_mode/md_session_mask are tested before
 * md_session_lock is taken — confirm callers serialize creation.
 */
int diag_md_session_create(int mode, int peripheral_mask, int proc)
{
	int i;
	int err = 0;
	struct diag_md_session_t *new_session = NULL;

	/*
	 * If a session is running with a peripheral mask and a new session
	 * request comes in with same peripheral mask value then return
	 * invalid param
	 */
	if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
	    (driver->md_session_mask & peripheral_mask) != 0)
		return -EINVAL;

	mutex_lock(&driver->md_session_lock);
	new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
	if (!new_session) {
		mutex_unlock(&driver->md_session_lock);
		return -ENOMEM;
	}

	/* Session ownership: the calling thread group. */
	new_session->peripheral_mask = 0;
	new_session->pid = current->tgid;
	new_session->task = current;

	new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
					GFP_KERNEL);
	if (!new_session->log_mask) {
		err = -ENOMEM;
		goto fail_peripheral;
	}
	new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
					  GFP_KERNEL);
	if (!new_session->event_mask) {
		err = -ENOMEM;
		goto fail_peripheral;
	}
	new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
					GFP_KERNEL);
	if (!new_session->msg_mask) {
		err = -ENOMEM;
		goto fail_peripheral;
	}

	/* Seed the session with copies of the current global masks. */
	err = diag_log_mask_copy(new_session->log_mask, &log_mask);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "return value of log copy. err %d\n", err);
		goto fail_peripheral;
	}
	err = diag_event_mask_copy(new_session->event_mask, &event_mask);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "return value of event copy. err %d\n", err);
		goto fail_peripheral;
	}
	err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "return value of msg copy. err %d\n", err);
		goto fail_peripheral;
	}
	/*
	 * Install the session under every requested peripheral slot;
	 * a slot already owned by another session aborts the whole create.
	 */
	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
			continue;
		if (driver->md_session_map[i] != NULL) {
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
				 "another instance present for %d\n", i);
			err = -EEXIST;
			goto fail_peripheral;
		}
		new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
		driver->md_session_map[i] = new_session;
		driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
	}
	/* Timer callback is handed the owner pid as its data argument. */
	setup_timer(&new_session->hdlc_reset_timer,
		    diag_md_hdlc_reset_timer_func,
		    new_session->pid);

	driver->md_session_mode = DIAG_MD_PERIPHERAL;
	mutex_unlock(&driver->md_session_lock);
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "created session in peripheral mode\n");
	return 0;

fail_peripheral:
	/* The *_free helpers tolerate the NULL members of a partial setup. */
	diag_log_mask_free(new_session->log_mask);
	kfree(new_session->log_mask);
	new_session->log_mask = NULL;
	diag_event_mask_free(new_session->event_mask);
	kfree(new_session->event_mask);
	new_session->event_mask = NULL;
	diag_msg_mask_free(new_session->msg_mask);
	kfree(new_session->msg_mask);
	new_session->msg_mask = NULL;
	kfree(new_session);
	new_session = NULL;
	mutex_unlock(&driver->md_session_lock);
	return err;
}
1296
1297static void diag_md_session_close(struct diag_md_session_t *session_info)
1298{
1299 int i;
1300 uint8_t found = 0;
1301
1302 if (!session_info)
1303 return;
1304
1305 mutex_lock(&driver->md_session_lock);
1306 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1307 if (driver->md_session_map[i] != session_info)
1308 continue;
1309 driver->md_session_map[i] = NULL;
1310 driver->md_session_mask &= ~session_info->peripheral_mask;
1311 }
1312 diag_log_mask_free(session_info->log_mask);
1313 kfree(session_info->log_mask);
1314 session_info->log_mask = NULL;
1315 diag_msg_mask_free(session_info->msg_mask);
1316 kfree(session_info->msg_mask);
1317 session_info->msg_mask = NULL;
1318 diag_event_mask_free(session_info->event_mask);
1319 kfree(session_info->event_mask);
1320 session_info->event_mask = NULL;
1321 del_timer(&session_info->hdlc_reset_timer);
1322
1323 for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
1324 if (driver->md_session_map[i] != NULL)
1325 found = 1;
1326 }
1327
1328 driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
1329 kfree(session_info);
1330 session_info = NULL;
1331 mutex_unlock(&driver->md_session_lock);
1332 DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
1333}
1334
1335struct diag_md_session_t *diag_md_session_get_pid(int pid)
1336{
1337 int i;
1338
1339 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1340 if (driver->md_session_map[i] &&
1341 driver->md_session_map[i]->pid == pid)
1342 return driver->md_session_map[i];
1343 }
1344 return NULL;
1345}
1346
1347struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
1348{
1349 if (peripheral >= NUM_MD_SESSIONS)
1350 return NULL;
1351 return driver->md_session_map[peripheral];
1352}
1353
1354static int diag_md_peripheral_switch(struct diag_md_session_t *session_info,
1355 int peripheral_mask, int req_mode) {
1356 int i, bit = 0;
1357
1358 if (!session_info)
1359 return -EINVAL;
1360 if (req_mode != DIAG_USB_MODE || req_mode != DIAG_MEMORY_DEVICE_MODE)
1361 return -EINVAL;
1362
1363 /*
1364 * check that md_session_map for i == session_info,
1365 * if not then race condition occurred and bail
1366 */
1367 mutex_lock(&driver->md_session_lock);
1368 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1369 bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
1370 if (!bit)
1371 continue;
1372 if (req_mode == DIAG_USB_MODE) {
1373 if (driver->md_session_map[i] != session_info) {
1374 mutex_unlock(&driver->md_session_lock);
1375 return -EINVAL;
1376 }
1377 driver->md_session_map[i] = NULL;
1378 driver->md_session_mask &= ~bit;
1379 session_info->peripheral_mask &= ~bit;
1380
1381 } else {
1382 if (driver->md_session_map[i] != NULL) {
1383 mutex_unlock(&driver->md_session_lock);
1384 return -EINVAL;
1385 }
1386 driver->md_session_map[i] = session_info;
1387 driver->md_session_mask |= bit;
1388 session_info->peripheral_mask |= bit;
1389
1390 }
1391 }
1392
1393 driver->md_session_mode = DIAG_MD_PERIPHERAL;
1394 mutex_unlock(&driver->md_session_lock);
1395 DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
1396 peripheral_mask, req_mode);
1397}
1398
/*
 * Validate a logging-mode transition from @curr_mode to @req_mode for
 * the peripherals in @param and perform the md-session bookkeeping the
 * transition requires (peripheral switch, session close or session
 * create).  On return *change_mode is 1 when the caller must go on and
 * reconfigure the mux, 0 when nothing needs to change.
 * Returns 0 on success, -EIO on NULL arguments, -EINVAL when the
 * transition is not allowed (e.g. another session owns a requested
 * peripheral), or an error from the session helpers.
 */
static int diag_md_session_check(int curr_mode, int req_mode,
				 const struct diag_logging_mode_param_t *param,
				 uint8_t *change_mode)
{
	int i, bit = 0, err = 0;
	int change_mask = 0;
	struct diag_md_session_t *session_info = NULL;

	if (!param || !change_mode)
		return -EIO;

	*change_mode = 0;

	switch (curr_mode) {
	case DIAG_USB_MODE:
	case DIAG_MEMORY_DEVICE_MODE:
	case DIAG_MULTI_MODE:
		break;
	default:
		return -EINVAL;
	}

	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
		return -EINVAL;

	if (req_mode == DIAG_USB_MODE) {
		if (curr_mode == DIAG_USB_MODE)
			return 0;
		/* No md sessions active: a plain global switch suffices. */
		if (driver->md_session_mode == DIAG_MD_NONE
		    && driver->md_session_mask == 0 && driver->logging_mask) {
			*change_mode = 1;
			return 0;
		}

		/*
		 * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
		 * Check if requested peripherals are already in usb mode
		 */
		for (i = 0; i < NUM_MD_SESSIONS; i++) {
			bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
			if (!bit)
				continue;
			if (bit & driver->logging_mask)
				change_mask |= bit;
		}
		if (!change_mask)
			return 0;

		/*
		 * Change is needed. Check if this md_session has set all the
		 * requested peripherals. If another md session set a requested
		 * peripheral then we cannot switch that peripheral to USB.
		 * If this session owns all the requested peripherals, then
		 * call function to switch the modes/masks for the md_session
		 */
		session_info = diag_md_session_get_pid(current->tgid);
		if (!session_info) {
			*change_mode = 1;
			return 0;
		}
		if ((change_mask & session_info->peripheral_mask)
		    != change_mask) {
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
				 "Another MD Session owns a requested peripheral\n");
			return -EINVAL;
		}
		*change_mode = 1;

		/* If all peripherals are being set to USB Mode, call close */
		if (~change_mask & session_info->peripheral_mask) {
			/* Session keeps other peripherals: partial switch. */
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else
			diag_md_session_close(session_info);

		return err;

	} else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
		/*
		 * Get bit mask that represents what peripherals already have
		 * been set. Check that requested peripherals already set are
		 * owned by this md session
		 */
		change_mask = driver->md_session_mask & param->peripheral_mask;
		session_info = diag_md_session_get_pid(current->tgid);

		if (session_info) {
			if ((session_info->peripheral_mask & change_mask)
			    != change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
					 "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			/*
			 * NOTE(review): passing DIAG_USB_MODE here while
			 * requesting memory-device mode looks suspicious -
			 * confirm it should not be DIAG_MEMORY_DEVICE_MODE.
			 */
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else {
			if (change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
					 "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			err = diag_md_session_create(DIAG_MD_PERIPHERAL,
					param->peripheral_mask, DIAG_LOCAL_PROC);
		}
		*change_mode = 1;
		return err;
	}
	return -EINVAL;
}
1508
1509static uint32_t diag_translate_mask(uint32_t peripheral_mask)
1510{
1511 uint32_t ret = 0;
1512
1513 if (peripheral_mask & DIAG_CON_APSS)
1514 ret |= (1 << APPS_DATA);
1515 if (peripheral_mask & DIAG_CON_MPSS)
1516 ret |= (1 << PERIPHERAL_MODEM);
1517 if (peripheral_mask & DIAG_CON_LPASS)
1518 ret |= (1 << PERIPHERAL_LPASS);
1519 if (peripheral_mask & DIAG_CON_WCNSS)
1520 ret |= (1 << PERIPHERAL_WCNSS);
1521 if (peripheral_mask & DIAG_CON_SENSORS)
1522 ret |= (1 << PERIPHERAL_SENSORS);
1523 if (peripheral_mask & DIAG_CON_WDSP)
1524 ret |= (1 << PERIPHERAL_WDSP);
Sreelakshmi Gownipalli588a31d2016-11-02 13:33:43 -07001525 if (peripheral_mask & DIAG_CON_CDSP)
1526 ret |= (1 << PERIPHERAL_CDSP);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001527
1528 return ret;
1529}
1530
/*
 * Handle a DIAG_IOCTL_SWITCH_LOGGING request: move the peripherals
 * selected in @param between USB logging and memory-device (client
 * read) logging.  Validates the request, lets diag_md_session_check()
 * arrange session bookkeeping, then reconfigures the mux and updates
 * the real-time voting.  Returns 0 on success or a negative errno.
 * Note: @param->peripheral_mask is rewritten in place to the internal
 * bit layout.
 */
static int diag_switch_logging(struct diag_logging_mode_param_t *param)
{
	int new_mode;
	int curr_mode;
	int err = 0;
	uint8_t do_switch = 1;
	uint32_t peripheral_mask = 0;

	if (!param)
		return -EINVAL;

	if (!param->peripheral_mask) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			"asking for mode switch with no peripheral mask set\n");
		return -EINVAL;
	}

	/* Convert DIAG_CON_* bits into internal peripheral bit positions. */
	peripheral_mask = diag_translate_mask(param->peripheral_mask);
	param->peripheral_mask = peripheral_mask;

	switch (param->req_mode) {
	case CALLBACK_MODE:
	case UART_MODE:
	case SOCKET_MODE:
	case MEMORY_DEVICE_MODE:
		/* All client-drained transports collapse to md mode. */
		new_mode = DIAG_MEMORY_DEVICE_MODE;
		break;
	case USB_MODE:
		new_mode = DIAG_USB_MODE;
		break;
	default:
		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
		       __func__, param->req_mode);
		return -EINVAL;
	}

	curr_mode = driver->logging_mode;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		"request to switch logging from %d mask:%0x to %d mask:%0x\n",
		curr_mode, driver->md_session_mask, new_mode, peripheral_mask);

	/* Validate the transition and update md-session bookkeeping. */
	err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "err from diag_md_session_check, err: %d\n", err);
		return err;
	}

	if (do_switch == 0) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "not switching modes c: %d n: %d\n",
			 curr_mode, new_mode);
		return 0;
	}

	diag_ws_reset(DIAG_WS_MUX);
	err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
	if (err) {
		pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
		       __func__, curr_mode, new_mode, err);
		driver->logging_mode = curr_mode;
		goto fail;
	}
	driver->logging_mode = new_mode;
	driver->logging_mask = peripheral_mask;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		"Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);

	/* Update to take peripheral_mask */
	if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
		diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
					   MODE_REALTIME, ALL_PROC);
	} else {
		diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
				      ALL_PROC);
	}

	if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
	      curr_mode == DIAG_USB_MODE)) {
		queue_work(driver->diag_real_time_wq,
			   &driver->diag_real_time_work);
	}

	return 0;
fail:
	return err;
}
1618
1619static int diag_ioctl_dci_reg(unsigned long ioarg)
1620{
1621 int result = -EINVAL;
1622 struct diag_dci_reg_tbl_t dci_reg_params;
1623
1624 if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
1625 sizeof(struct diag_dci_reg_tbl_t)))
1626 return -EFAULT;
1627
1628 result = diag_dci_register_client(&dci_reg_params);
1629
1630 return result;
1631}
1632
1633static int diag_ioctl_dci_health_stats(unsigned long ioarg)
1634{
1635 int result = -EINVAL;
1636 struct diag_dci_health_stats_proc stats;
1637
1638 if (copy_from_user(&stats, (void __user *)ioarg,
1639 sizeof(struct diag_dci_health_stats_proc)))
1640 return -EFAULT;
1641
1642 result = diag_dci_copy_health_stats(&stats);
1643 if (result == DIAG_DCI_NO_ERROR) {
1644 if (copy_to_user((void __user *)ioarg, &stats,
1645 sizeof(struct diag_dci_health_stats_proc)))
1646 return -EFAULT;
1647 }
1648
1649 return result;
1650}
1651
1652static int diag_ioctl_dci_log_status(unsigned long ioarg)
1653{
1654 struct diag_log_event_stats le_stats;
1655 struct diag_dci_client_tbl *dci_client = NULL;
1656
1657 if (copy_from_user(&le_stats, (void __user *)ioarg,
1658 sizeof(struct diag_log_event_stats)))
1659 return -EFAULT;
1660
1661 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1662 if (!dci_client)
1663 return DIAG_DCI_NOT_SUPPORTED;
1664 le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
1665 if (copy_to_user((void __user *)ioarg, &le_stats,
1666 sizeof(struct diag_log_event_stats)))
1667 return -EFAULT;
1668
1669 return DIAG_DCI_NO_ERROR;
1670}
1671
1672static int diag_ioctl_dci_event_status(unsigned long ioarg)
1673{
1674 struct diag_log_event_stats le_stats;
1675 struct diag_dci_client_tbl *dci_client = NULL;
1676
1677 if (copy_from_user(&le_stats, (void __user *)ioarg,
1678 sizeof(struct diag_log_event_stats)))
1679 return -EFAULT;
1680
1681 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1682 if (!dci_client)
1683 return DIAG_DCI_NOT_SUPPORTED;
1684
1685 le_stats.is_set = diag_dci_query_event_mask(dci_client, le_stats.code);
1686 if (copy_to_user((void __user *)ioarg, &le_stats,
1687 sizeof(struct diag_log_event_stats)))
1688 return -EFAULT;
1689
1690 return DIAG_DCI_NO_ERROR;
1691}
1692
1693static int diag_ioctl_lsm_deinit(void)
1694{
1695 int i;
1696
1697 for (i = 0; i < driver->num_clients; i++)
1698 if (driver->client_map[i].pid == current->tgid)
1699 break;
1700
1701 if (i == driver->num_clients)
1702 return -EINVAL;
1703
1704 driver->data_ready[i] |= DEINIT_TYPE;
1705 wake_up_interruptible(&driver->wait_q);
1706
1707 return 1;
1708}
1709
1710static int diag_ioctl_vote_real_time(unsigned long ioarg)
1711{
1712 int real_time = 0;
1713 int temp_proc = ALL_PROC;
1714 struct real_time_vote_t vote;
1715 struct diag_dci_client_tbl *dci_client = NULL;
1716
1717 if (copy_from_user(&vote, (void __user *)ioarg,
1718 sizeof(struct real_time_vote_t)))
1719 return -EFAULT;
1720
1721 if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
1722 vote.real_time_vote > MODE_UNKNOWN ||
1723 vote.client_id < 0) {
1724 pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
1725 __func__, vote.proc, vote.real_time_vote,
1726 vote.client_id);
1727 return -EINVAL;
1728 }
1729
1730 driver->real_time_update_busy++;
1731 if (vote.proc == DIAG_PROC_DCI) {
1732 dci_client = diag_dci_get_client_entry(vote.client_id);
1733 if (!dci_client) {
1734 driver->real_time_update_busy--;
1735 return DIAG_DCI_NOT_SUPPORTED;
1736 }
1737 diag_dci_set_real_time(dci_client, vote.real_time_vote);
1738 real_time = diag_dci_get_cumulative_real_time(
1739 dci_client->client_info.token);
1740 diag_update_real_time_vote(vote.proc, real_time,
1741 dci_client->client_info.token);
1742 } else {
1743 real_time = vote.real_time_vote;
1744 temp_proc = vote.client_id;
1745 diag_update_real_time_vote(vote.proc, real_time,
1746 temp_proc);
1747 }
1748 queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
1749 return 0;
1750}
1751
1752static int diag_ioctl_get_real_time(unsigned long ioarg)
1753{
1754 int i;
1755 int retry_count = 0;
1756 int timer = 0;
1757 struct real_time_query_t rt_query;
1758
1759 if (copy_from_user(&rt_query, (void __user *)ioarg,
1760 sizeof(struct real_time_query_t)))
1761 return -EFAULT;
1762 while (retry_count < 3) {
1763 if (driver->real_time_update_busy > 0) {
1764 retry_count++;
1765 /*
1766 * The value 10000 was chosen empirically as an
1767 * optimum value in order to give the work in
1768 * diag_real_time_wq to complete processing.
1769 */
1770 for (timer = 0; timer < 5; timer++)
1771 usleep_range(10000, 10100);
1772 } else {
1773 break;
1774 }
1775 }
1776
1777 if (driver->real_time_update_busy > 0)
1778 return -EAGAIN;
1779
1780 if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
1781 pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
1782 __func__);
1783 return -EINVAL;
1784 }
1785 rt_query.real_time = driver->real_time_mode[rt_query.proc];
1786 /*
1787 * For the local processor, if any of the peripherals is in buffering
1788 * mode, overwrite the value of real time with UNKNOWN_MODE
1789 */
1790 if (rt_query.proc == DIAG_LOCAL_PROC) {
1791 for (i = 0; i < NUM_PERIPHERALS; i++) {
1792 if (!driver->feature[i].peripheral_buffering)
1793 continue;
1794 switch (driver->buffering_mode[i].mode) {
1795 case DIAG_BUFFERING_MODE_CIRCULAR:
1796 case DIAG_BUFFERING_MODE_THRESHOLD:
1797 rt_query.real_time = MODE_UNKNOWN;
1798 break;
1799 }
1800 }
1801 }
1802
1803 if (copy_to_user((void __user *)ioarg, &rt_query,
1804 sizeof(struct real_time_query_t)))
1805 return -EFAULT;
1806
1807 return 0;
1808}
1809
/*
 * DIAG_IOCTL_PERIPHERAL_BUF_CONFIG handler: copy the buffering-mode
 * request from userspace, flag the peripheral as buffering-configured
 * under mode_lock, and forward the parameters to the peripheral.
 * Returns 0/-errno or the result of the peripheral send.
 */
static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
{
	struct diag_buffering_mode_t params;

	if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
		return -EFAULT;

	/* Bounds-check before indexing driver->buffering_flag[]. */
	if (params.peripheral >= NUM_PERIPHERALS)
		return -EINVAL;

	mutex_lock(&driver->mode_lock);
	driver->buffering_flag[params.peripheral] = 1;
	mutex_unlock(&driver->mode_lock);

	return diag_send_peripheral_buffering_mode(&params);
}
1826
1827static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
1828{
1829 uint8_t peripheral;
1830
1831 if (copy_from_user(&peripheral, (void __user *)ioarg, sizeof(uint8_t)))
1832 return -EFAULT;
1833
1834 if (peripheral >= NUM_PERIPHERALS) {
1835 pr_err("diag: In %s, invalid peripheral %d\n", __func__,
1836 peripheral);
1837 return -EINVAL;
1838 }
1839
1840 if (!driver->feature[peripheral].peripheral_buffering) {
1841 pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
1842 __func__, peripheral);
1843 return -EIO;
1844 }
1845
1846 return diag_send_peripheral_drain_immediate(peripheral);
1847}
1848
1849static int diag_ioctl_dci_support(unsigned long ioarg)
1850{
1851 struct diag_dci_peripherals_t dci_support;
1852 int result = -EINVAL;
1853
1854 if (copy_from_user(&dci_support, (void __user *)ioarg,
1855 sizeof(struct diag_dci_peripherals_t)))
1856 return -EFAULT;
1857
1858 result = diag_dci_get_support_list(&dci_support);
1859 if (result == DIAG_DCI_NO_ERROR)
1860 if (copy_to_user((void __user *)ioarg, &dci_support,
1861 sizeof(struct diag_dci_peripherals_t)))
1862 return -EFAULT;
1863
1864 return result;
1865}
1866
1867static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
1868{
1869 uint8_t hdlc_support;
1870 struct diag_md_session_t *session_info = NULL;
1871
1872 session_info = diag_md_session_get_pid(current->tgid);
1873 if (copy_from_user(&hdlc_support, (void __user *)ioarg,
1874 sizeof(uint8_t)))
1875 return -EFAULT;
1876 mutex_lock(&driver->hdlc_disable_mutex);
1877 if (session_info) {
1878 mutex_lock(&driver->md_session_lock);
1879 session_info->hdlc_disabled = hdlc_support;
1880 mutex_unlock(&driver->md_session_lock);
1881 } else
1882 driver->hdlc_disabled = hdlc_support;
1883 mutex_unlock(&driver->hdlc_disable_mutex);
1884 diag_update_md_clients(HDLC_SUPPORT_TYPE);
1885
1886 return 0;
1887}
1888
1889static int diag_ioctl_register_callback(unsigned long ioarg)
1890{
1891 int err = 0;
1892 struct diag_callback_reg_t reg;
1893
1894 if (copy_from_user(&reg, (void __user *)ioarg,
1895 sizeof(struct diag_callback_reg_t))) {
1896 return -EFAULT;
1897 }
1898
1899 if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
1900 pr_err("diag: In %s, invalid proc %d for callback registration\n",
1901 __func__, reg.proc);
1902 return -EINVAL;
1903 }
1904
1905 if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
1906 return -EIO;
1907
1908 return err;
1909}
1910
1911static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
1912{
1913 int i;
1914 int err = 0;
1915 uint32_t count = 0;
1916 struct diag_cmd_reg_entry_t *entries = NULL;
1917 const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
1918
1919
1920 if (!reg_tbl) {
1921 pr_err("diag: In %s, invalid registration table\n", __func__);
1922 return -EINVAL;
1923 }
1924
1925 count = reg_tbl->count;
1926 if ((UINT_MAX / entry_len) < count) {
1927 pr_warn("diag: In %s, possbile integer overflow.\n", __func__);
1928 return -EFAULT;
1929 }
1930
1931 entries = kzalloc(count * entry_len, GFP_KERNEL);
1932 if (!entries)
1933 return -ENOMEM;
1934
1935
1936 err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
1937 if (err) {
1938 pr_err("diag: In %s, error copying data from userspace, err: %d\n",
1939 __func__, err);
1940 kfree(entries);
1941 return -EFAULT;
1942 }
1943
1944 for (i = 0; i < count; i++) {
1945 err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
1946 if (err) {
1947 pr_err("diag: In %s, unable to register command, err: %d\n",
1948 __func__, err);
1949 break;
1950 }
1951 }
1952
1953 kfree(entries);
1954 return err;
1955}
1956
1957static int diag_ioctl_cmd_reg(unsigned long ioarg)
1958{
1959 struct diag_cmd_reg_tbl_t reg_tbl;
1960
1961 if (copy_from_user(&reg_tbl, (void __user *)ioarg,
1962 sizeof(struct diag_cmd_reg_tbl_t))) {
1963 return -EFAULT;
1964 }
1965
1966 return diag_cmd_register_tbl(&reg_tbl);
1967}
1968
/* Drop every command registration owned by the calling thread group. */
static int diag_ioctl_cmd_dereg(void)
{
	diag_cmd_remove_reg_by_pid(current->tgid);
	return 0;
}
1974
1975#ifdef CONFIG_COMPAT
/*
 * 32-bit (compat) layout of struct diag_cmd_reg_tbl_t.
 * @sync_obj_name: name of the synchronization object associated with this proc
 * @count: number of entries in the bind
 * @entries: 32-bit user pointer to the actual packet registrations
 */
struct diag_cmd_reg_tbl_compat_t {
	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
	uint32_t count;
	compat_uptr_t entries;
};
1986
1987static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
1988{
1989 struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
1990 struct diag_cmd_reg_tbl_t reg_tbl;
1991
1992 if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
1993 sizeof(struct diag_cmd_reg_tbl_compat_t))) {
1994 return -EFAULT;
1995 }
1996
1997 strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
1998 MAX_SYNC_OBJ_NAME_SIZE);
1999 reg_tbl.count = reg_tbl_compat.count;
2000 reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
2001 (uintptr_t)reg_tbl_compat.entries;
2002
2003 return diag_cmd_register_tbl(&reg_tbl);
2004}
2005
/*
 * 32-bit compat ioctl entry point for the diag char device.  Dispatches
 * to the same helpers as the native path; only DIAG_IOCTL_COMMAND_REG
 * needs a compat-specific handler to translate the 32-bit user layout.
 * DCI operations are serialized under dci_mutex and logging switches
 * under diagchar_mutex.  Unrecognized commands return -EINVAL.
 */
long diagchar_compat_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int result = -EINVAL;
	int client_id = 0;
	uint16_t delayed_rsp_id = 0;
	uint16_t remote_dev;
	struct diag_dci_client_tbl *dci_client = NULL;
	struct diag_logging_mode_param_t mode_param;

	switch (iocmd) {
	case DIAG_IOCTL_COMMAND_REG:
		result = diag_ioctl_cmd_reg_compat(ioarg);
		break;
	case DIAG_IOCTL_COMMAND_DEREG:
		result = diag_ioctl_cmd_dereg();
		break;
	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
		delayed_rsp_id = diag_get_next_delayed_rsp_id();
		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
				 sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 0;
		break;
	case DIAG_IOCTL_DCI_REG:
		result = diag_ioctl_dci_reg(ioarg);
		break;
	case DIAG_IOCTL_DCI_DEINIT:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		dci_client = diag_dci_get_client_entry(client_id);
		if (!dci_client) {
			mutex_unlock(&driver->dci_mutex);
			return DIAG_DCI_NOT_SUPPORTED;
		}
		result = diag_dci_deinit_client(dci_client);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_SUPPORT:
		result = diag_ioctl_dci_support(ioarg);
		break;
	case DIAG_IOCTL_DCI_HEALTH_STATS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_health_stats(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_LOG_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_log_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_EVENT_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_event_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_LOGS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_log_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user(&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_event_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_LSM_DEINIT:
		result = diag_ioctl_lsm_deinit();
		break;
	case DIAG_IOCTL_SWITCH_LOGGING:
		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
				   sizeof(mode_param)))
			return -EFAULT;
		mutex_lock(&driver->diagchar_mutex);
		result = diag_switch_logging(&mode_param);
		mutex_unlock(&driver->diagchar_mutex);
		break;
	case DIAG_IOCTL_REMOTE_DEV:
		/* Success is reported as 1 here, not 0. */
		remote_dev = diag_get_remote_device_mask();
		if (copy_to_user((void __user *)ioarg, &remote_dev,
			sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 1;
		break;
	case DIAG_IOCTL_VOTE_REAL_TIME:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_vote_real_time(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_GET_REAL_TIME:
		result = diag_ioctl_get_real_time(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
		result = diag_ioctl_set_buffering_mode(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
		result = diag_ioctl_peripheral_drain_immediate(ioarg);
		break;
	case DIAG_IOCTL_REGISTER_CALLBACK:
		result = diag_ioctl_register_callback(ioarg);
		break;
	case DIAG_IOCTL_HDLC_TOGGLE:
		result = diag_ioctl_hdlc_toggle(ioarg);
		break;
	}
	return result;
}
2129#endif
2130
/*
 * diagchar_ioctl - unlocked_ioctl handler for /dev/diag (64-bit path).
 *
 * Dispatches each DIAG_IOCTL_* command to its helper. Commands that touch
 * DCI client state serialize on driver->dci_mutex around the helper call;
 * the switch-logging command serializes on driver->diagchar_mutex instead.
 *
 * Returns 0 on success for most commands (DIAG_IOCTL_REMOTE_DEV returns 1
 * on success - legacy contract), a negative errno on failure, and -EINVAL
 * for unrecognized commands.
 */
long diagchar_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int result = -EINVAL;
	int client_id = 0;
	uint16_t delayed_rsp_id;
	uint16_t remote_dev;
	struct diag_dci_client_tbl *dci_client = NULL;
	struct diag_logging_mode_param_t mode_param;

	switch (iocmd) {
	case DIAG_IOCTL_COMMAND_REG:
		result = diag_ioctl_cmd_reg(ioarg);
		break;
	case DIAG_IOCTL_COMMAND_DEREG:
		result = diag_ioctl_cmd_dereg();
		break;
	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
		/* Hand the next delayed-response id back to userspace. */
		delayed_rsp_id = diag_get_next_delayed_rsp_id();
		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
				 sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 0;
		break;
	case DIAG_IOCTL_DCI_REG:
		result = diag_ioctl_dci_reg(ioarg);
		break;
	case DIAG_IOCTL_DCI_DEINIT:
		/* dci_mutex guards the client table lookup + teardown. */
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		dci_client = diag_dci_get_client_entry(client_id);
		if (!dci_client) {
			mutex_unlock(&driver->dci_mutex);
			return DIAG_DCI_NOT_SUPPORTED;
		}
		result = diag_dci_deinit_client(dci_client);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_SUPPORT:
		result = diag_ioctl_dci_support(ioarg);
		break;
	case DIAG_IOCTL_DCI_HEALTH_STATS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_health_stats(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_LOG_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_log_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_EVENT_STATUS:
		result = diag_ioctl_dci_event_status(ioarg);
		break;
	case DIAG_IOCTL_DCI_CLEAR_LOGS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_log_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user(&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_event_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_LSM_DEINIT:
		result = diag_ioctl_lsm_deinit();
		break;
	case DIAG_IOCTL_SWITCH_LOGGING:
		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
				   sizeof(mode_param)))
			return -EFAULT;
		mutex_lock(&driver->diagchar_mutex);
		result = diag_switch_logging(&mode_param);
		mutex_unlock(&driver->diagchar_mutex);
		break;
	case DIAG_IOCTL_REMOTE_DEV:
		/* Report the bitmask of attached remote (bridged) devices. */
		remote_dev = diag_get_remote_device_mask();
		if (copy_to_user((void __user *)ioarg, &remote_dev,
			sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 1;
		break;
	case DIAG_IOCTL_VOTE_REAL_TIME:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_vote_real_time(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_GET_REAL_TIME:
		result = diag_ioctl_get_real_time(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
		result = diag_ioctl_set_buffering_mode(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
		result = diag_ioctl_peripheral_drain_immediate(ioarg);
		break;
	case DIAG_IOCTL_REGISTER_CALLBACK:
		result = diag_ioctl_register_callback(ioarg);
		break;
	case DIAG_IOCTL_HDLC_TOGGLE:
		result = diag_ioctl_hdlc_toggle(ioarg);
		break;
	}
	return result;
}
2252
/*
 * diag_process_apps_data_hdlc - HDLC-encode an apps-generated packet and
 * aggregate it into the shared hdlc_data buffer. The buffer is flushed to
 * the mux when it cannot hold the worst-case encoded size, when encoding
 * ran up to the buffer limit, or immediately for response packets.
 *
 * Called with apps_data_mutex and driver->hdlc_disable_mutex held (see
 * diag_user_process_apps_data).
 *
 * Returns PKT_ALLOC on success, PKT_DROP when no aggregation buffer could
 * be allocated, or a negative errno on validation/write failure.
 */
static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
				       int pkt_type)
{
	int err = 0;
	int ret = PKT_DROP;
	struct diag_apps_data_t *data = &hdlc_data;
	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
	/*
	 * The maximum encoded size of the buffer can be atmost twice the length
	 * of the packet. Add three bytes for footer - 16 bit CRC (2 bytes) +
	 * delimiter (1 byte).
	 */
	const uint32_t max_encoded_size = ((2 * len) + 3);

	if (!buf || len <= 0) {
		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
		       __func__, buf, len);
		return -EIO;
	}

	/* An oversized packet can never fit, even in an empty buffer. */
	if (max_encoded_size > DIAG_MAX_HDLC_BUF_SIZE) {
		pr_err_ratelimited("diag: In %s, encoded data is larger %d than the buffer size %d\n",
		       __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
		return -EBADMSG;
	}

	send.state = DIAG_STATE_START;
	send.pkt = buf;
	send.last = (void *)(buf + len - 1);
	send.terminate = 1;

	if (!data->buf)
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
	if (!data->buf) {
		ret = PKT_DROP;
		goto fail_ret;
	}

	/* Worst case won't fit: flush what we have, start a new buffer. */
	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}
	}

	enc.dest = data->buf + data->len;
	enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
	diag_hdlc_encode(&send, &enc);

	/*
	 * This is to check if after HDLC encoding, we are still within
	 * the limits of aggregation buffer. If not, we write out the
	 * current buffer and start aggregation in a newly allocated
	 * buffer.
	 */
	if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
					       DIAG_MAX_HDLC_BUF_SIZE)) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}

		/* Re-encode the packet at the start of the fresh buffer. */
		enc.dest = data->buf + data->len;
		enc.dest_last = (void *)(data->buf + data->len +
					 max_encoded_size);
		diag_hdlc_encode(&send, &enc);
	}

	/* Clamp the running length to the aggregation buffer size. */
	data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
		     DIAG_MAX_HDLC_BUF_SIZE) ?
		    ((uintptr_t)enc.dest - (uintptr_t)data->buf) :
		    DIAG_MAX_HDLC_BUF_SIZE;

	/* Responses must reach the host immediately; flush right away. */
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
	}

	return PKT_ALLOC;

fail_free_buf:
	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
	data->buf = NULL;
	data->len = 0;

fail_ret:
	return ret;
}
2372
2373static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
2374 int pkt_type)
2375{
2376 int err = 0;
2377 int ret = PKT_DROP;
2378 struct diag_pkt_frame_t header;
2379 struct diag_apps_data_t *data = &non_hdlc_data;
2380 /*
2381 * The maximum packet size, when the data is non hdlc encoded is equal
2382 * to the size of the packet frame header and the length. Add 1 for the
2383 * delimiter 0x7E at the end.
2384 */
2385 const uint32_t max_pkt_size = sizeof(header) + len + 1;
2386
2387 if (!buf || len <= 0) {
2388 pr_err("diag: In %s, invalid buf: %pK len: %d\n",
2389 __func__, buf, len);
2390 return -EIO;
2391 }
2392
2393 if (!data->buf) {
2394 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2395 APF_DIAG_PADDING,
2396 POOL_TYPE_HDLC);
2397 if (!data->buf) {
2398 ret = PKT_DROP;
2399 goto fail_ret;
2400 }
2401 }
2402
2403 if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
2404 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2405 data->ctxt);
2406 if (err) {
2407 ret = -EIO;
2408 goto fail_free_buf;
2409 }
2410 data->buf = NULL;
2411 data->len = 0;
2412 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2413 APF_DIAG_PADDING,
2414 POOL_TYPE_HDLC);
2415 if (!data->buf) {
2416 ret = PKT_DROP;
2417 goto fail_ret;
2418 }
2419 }
2420
2421 header.start = CONTROL_CHAR;
2422 header.version = 1;
2423 header.length = len;
2424 memcpy(data->buf + data->len, &header, sizeof(header));
2425 data->len += sizeof(header);
2426 memcpy(data->buf + data->len, buf, len);
2427 data->len += len;
2428 *(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
2429 data->len += sizeof(uint8_t);
2430 if (pkt_type == DATA_TYPE_RESPONSE) {
2431 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2432 data->ctxt);
2433 if (err) {
2434 ret = -EIO;
2435 goto fail_free_buf;
2436 }
2437 data->buf = NULL;
2438 data->len = 0;
2439 }
2440
2441 return PKT_ALLOC;
2442
2443fail_free_buf:
2444 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
2445 data->buf = NULL;
2446 data->len = 0;
2447
2448fail_ret:
2449 return ret;
2450}
2451
2452static int diag_user_process_dci_data(const char __user *buf, int len)
2453{
2454 int err = 0;
2455 const int mempool = POOL_TYPE_USER;
2456 unsigned char *user_space_data = NULL;
2457
2458 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2459 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2460 __func__, buf, len);
2461 return -EBADMSG;
2462 }
2463
2464 user_space_data = diagmem_alloc(driver, len, mempool);
2465 if (!user_space_data)
2466 return -ENOMEM;
2467
2468 err = copy_from_user(user_space_data, buf, len);
2469 if (err) {
2470 pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
2471 __func__, err);
2472 err = DIAG_DCI_SEND_DATA_FAIL;
2473 goto fail;
2474 }
2475
2476 err = diag_process_dci_transaction(user_space_data, len);
2477fail:
2478 diagmem_free(driver, user_space_data, mempool);
2479 user_space_data = NULL;
2480 return err;
2481}
2482
2483static int diag_user_process_dci_apps_data(const char __user *buf, int len,
2484 int pkt_type)
2485{
2486 int err = 0;
2487 const int mempool = POOL_TYPE_COPY;
2488 unsigned char *user_space_data = NULL;
2489
2490 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2491 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2492 __func__, buf, len);
2493 return -EBADMSG;
2494 }
2495
2496 pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
2497 if (!pkt_type) {
2498 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
2499 __func__, pkt_type);
2500 return -EBADMSG;
2501 }
2502
2503 user_space_data = diagmem_alloc(driver, len, mempool);
2504 if (!user_space_data)
2505 return -ENOMEM;
2506
2507 err = copy_from_user(user_space_data, buf, len);
2508 if (err) {
2509 pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
2510 __func__, err);
2511 goto fail;
2512 }
2513
2514 diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
2515fail:
2516 diagmem_free(driver, user_space_data, mempool);
2517 user_space_data = NULL;
2518 return err;
2519}
2520
2521static int diag_user_process_raw_data(const char __user *buf, int len)
2522{
2523 int err = 0;
2524 int ret = 0;
2525 int token_offset = 0;
2526 int remote_proc = 0;
2527 const int mempool = POOL_TYPE_COPY;
2528 unsigned char *user_space_data = NULL;
2529 struct diag_md_session_t *info = NULL;
2530
2531 if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
2532 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2533 __func__, buf, len);
2534 return -EBADMSG;
2535 }
2536
2537 user_space_data = diagmem_alloc(driver, len, mempool);
2538 if (!user_space_data)
2539 return -ENOMEM;
2540
2541 err = copy_from_user(user_space_data, buf, len);
2542 if (err) {
2543 pr_err("diag: copy failed for user space data\n");
2544 goto fail;
2545 }
2546
2547 /* Check for proc_type */
2548 remote_proc = diag_get_remote(*(int *)user_space_data);
2549 if (remote_proc) {
2550 token_offset = sizeof(int);
2551 if (len <= MIN_SIZ_ALLOW) {
2552 pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
2553 __func__, len);
2554 diagmem_free(driver, user_space_data, mempool);
2555 user_space_data = NULL;
2556 return -EBADMSG;
2557 }
2558 len -= sizeof(int);
2559 }
2560 if (driver->mask_check) {
2561 if (!mask_request_validate(user_space_data +
2562 token_offset)) {
2563 pr_alert("diag: mask request Invalid\n");
2564 diagmem_free(driver, user_space_data, mempool);
2565 user_space_data = NULL;
2566 return -EFAULT;
2567 }
2568 }
2569 if (remote_proc) {
2570 ret = diag_send_raw_data_remote(remote_proc,
2571 (void *)(user_space_data + token_offset),
2572 len, USER_SPACE_RAW_DATA);
2573 if (ret) {
2574 pr_err("diag: Error sending data to remote proc %d, err: %d\n",
2575 remote_proc, ret);
2576 }
2577 } else {
2578 wait_event_interruptible(driver->wait_q,
2579 (driver->in_busy_pktdata == 0));
2580 info = diag_md_session_get_pid(current->tgid);
2581 ret = diag_process_apps_pkt(user_space_data, len, info);
2582 if (ret == 1)
2583 diag_send_error_rsp((void *)(user_space_data), len);
2584 }
2585fail:
2586 diagmem_free(driver, user_space_data, mempool);
2587 user_space_data = NULL;
2588 return ret;
2589}
2590
2591static int diag_user_process_userspace_data(const char __user *buf, int len)
2592{
2593 int err = 0;
2594 int max_retries = 3;
2595 int retry_count = 0;
2596 int remote_proc = 0;
2597 int token_offset = 0;
2598 struct diag_md_session_t *session_info = NULL;
2599 uint8_t hdlc_disabled;
2600
2601 if (!buf || len <= 0 || len > USER_SPACE_DATA) {
2602 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2603 __func__, buf, len);
2604 return -EBADMSG;
2605 }
2606
2607 do {
2608 if (!driver->user_space_data_busy)
2609 break;
2610 retry_count++;
2611 usleep_range(10000, 10100);
2612 } while (retry_count < max_retries);
2613
2614 if (driver->user_space_data_busy)
2615 return -EAGAIN;
2616
2617 err = copy_from_user(driver->user_space_data_buf, buf, len);
2618 if (err) {
2619 pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
2620 __func__, err);
2621 return -EIO;
2622 }
2623
2624 /* Check for proc_type */
2625 remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
2626 if (remote_proc) {
2627 if (len <= MIN_SIZ_ALLOW) {
2628 pr_err("diag: Integer underflow in %s, payload size: %d",
2629 __func__, len);
2630 return -EBADMSG;
2631 }
2632 token_offset = sizeof(int);
2633 len -= sizeof(int);
2634 }
2635
2636 /* Check masks for On-Device logging */
2637 if (driver->mask_check) {
2638 if (!mask_request_validate(driver->user_space_data_buf +
2639 token_offset)) {
2640 pr_alert("diag: mask request Invalid\n");
2641 return -EFAULT;
2642 }
2643 }
2644
2645 /* send masks to local processor now */
2646 if (!remote_proc) {
2647 session_info = diag_md_session_get_pid(current->tgid);
2648 if (!session_info) {
2649 pr_err("diag:In %s request came from invalid md session pid:%d",
2650 __func__, current->tgid);
2651 return -EINVAL;
2652 }
2653 if (session_info)
2654 hdlc_disabled = session_info->hdlc_disabled;
2655 else
2656 hdlc_disabled = driver->hdlc_disabled;
2657 if (!hdlc_disabled)
2658 diag_process_hdlc_pkt((void *)
2659 (driver->user_space_data_buf),
2660 len, session_info);
2661 else
2662 diag_process_non_hdlc_pkt((char *)
2663 (driver->user_space_data_buf),
2664 len, session_info);
2665 return 0;
2666 }
2667
2668 err = diag_process_userspace_remote(remote_proc,
2669 driver->user_space_data_buf +
2670 token_offset, len);
2671 if (err) {
2672 driver->user_space_data_busy = 0;
2673 pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
2674 remote_proc, err);
2675 }
2676
2677 return err;
2678}
2679
/*
 * diag_user_process_apps_data - handle an apps-originated data write
 * (event/F3/log/response/delayed-response): copy the payload from
 * userspace, divert it to STM when STM logging is enabled for APPS_DATA,
 * otherwise frame it (HDLC or non-HDLC, per the session setting) into the
 * apps aggregation buffer and update the per-type packet statistics.
 *
 * Returns 0 on success or a negative errno.
 */
static int diag_user_process_apps_data(const char __user *buf, int len,
				       int pkt_type)
{
	int ret = 0;
	int stm_size = 0;
	const int mempool = POOL_TYPE_COPY;
	unsigned char *user_space_data = NULL;
	struct diag_md_session_t *session_info = NULL;
	uint8_t hdlc_disabled;

	if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
				   __func__, buf, len);
		return -EBADMSG;
	}

	/* Only the known apps data types are accepted. */
	switch (pkt_type) {
	case DATA_TYPE_EVENT:
	case DATA_TYPE_F3:
	case DATA_TYPE_LOG:
	case DATA_TYPE_RESPONSE:
	case DATA_TYPE_DELAYED_RESPONSE:
		break;
	default:
		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
				   __func__, pkt_type);
		return -EBADMSG;
	}

	user_space_data = diagmem_alloc(driver, len, mempool);
	if (!user_space_data) {
		diag_record_stats(pkt_type, PKT_DROP);
		return -ENOMEM;
	}

	ret = copy_from_user(user_space_data, buf, len);
	if (ret) {
		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
			 __func__, ret);
		diagmem_free(driver, user_space_data, mempool);
		user_space_data = NULL;
		diag_record_stats(pkt_type, PKT_DROP);
		return -EBADMSG;
	}

	/* When STM is active for APPS, divert event/F3/log traffic to STM. */
	if (driver->stm_state[APPS_DATA] &&
	    (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
		stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
					  len);
		if (stm_size == 0) {
			pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
				 __func__);
		}
		diagmem_free(driver, user_space_data, mempool);
		user_space_data = NULL;

		return 0;
	}

	/*
	 * Lock order: apps_data_mutex protects the aggregation buffers,
	 * hdlc_disable_mutex protects the framing-mode flag.
	 */
	mutex_lock(&apps_data_mutex);
	mutex_lock(&driver->hdlc_disable_mutex);
	session_info = diag_md_session_get_peripheral(APPS_DATA);
	if (session_info)
		hdlc_disabled = session_info->hdlc_disabled;
	else
		hdlc_disabled = driver->hdlc_disabled;
	if (hdlc_disabled)
		ret = diag_process_apps_data_non_hdlc(user_space_data, len,
						      pkt_type);
	else
		ret = diag_process_apps_data_hdlc(user_space_data, len,
						  pkt_type);
	mutex_unlock(&driver->hdlc_disable_mutex);
	mutex_unlock(&apps_data_mutex);

	diagmem_free(driver, user_space_data, mempool);
	user_space_data = NULL;

	/* Arm the periodic drain so aggregated data does not get stuck. */
	check_drain_timer();

	if (ret == PKT_DROP)
		diag_record_stats(pkt_type, PKT_DROP);
	else if (ret == PKT_ALLOC)
		diag_record_stats(pkt_type, PKT_ALLOC);
	else
		return ret;

	return 0;
}
2769
/*
 * diagchar_read - read() handler for /dev/diag. Blocks until some data
 * category is flagged for this client in driver->data_ready[], then copies
 * exactly one category (memory-device stream, HDLC-support notification,
 * deinit, msg/event/log masks, apps packets, DCI packets/masks/data) to
 * userspace, prefixed with its 4-byte type tag, and clears that flag.
 *
 * Returns the number of bytes written to 'buf' or a negative errno.
 */
static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct diag_dci_client_tbl *entry;
	struct list_head *start, *temp;
	int index = -1, i = 0, ret = 0;
	int data_type;
	int copy_dci_data = 0;
	int exit_stat = 0;
	int write_len = 0;
	struct diag_md_session_t *session_info = NULL;

	/* Locate this process's slot in the client map. */
	for (i = 0; i < driver->num_clients; i++)
		if (driver->client_map[i].pid == current->tgid)
			index = i;

	if (index == -1) {
		pr_err("diag: Client PID not found in table");
		return -EINVAL;
	}
	if (!buf) {
		pr_err("diag: bad address from user side\n");
		return -EFAULT;
	}
	wait_event_interruptible(driver->wait_q, driver->data_ready[index]);

	mutex_lock(&driver->diagchar_mutex);

	if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
	    (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
	     driver->logging_mode == DIAG_MULTI_MODE)) {
		pr_debug("diag: process woken up\n");
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;
		/* place holder for number of data field */
		ret += sizeof(int);
		session_info = diag_md_session_get_pid(current->tgid);
		exit_stat = diag_md_copy_to_user(buf, &ret, count,
						 session_info);
		goto exit;
	} else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
		/* In case, the thread wakes up and the logging mode is not
		 * memory device any more, the condition needs to be cleared.
		 */
		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
	}

	if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
		/* Notify the client of its session's HDLC framing setting. */
		data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
		driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;

		session_info = diag_md_session_get_pid(current->tgid);
		if (session_info) {
			COPY_USER_SPACE_OR_ERR(buf+4,
					session_info->hdlc_disabled,
					sizeof(uint8_t));
			if (ret == -EFAULT)
				goto exit;
		}
		goto exit;
	}

	if (driver->data_ready[index] & DEINIT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DEINIT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= DEINIT_TYPE;
		mutex_unlock(&driver->diagchar_mutex);
		/* Client is going away: tear down its entry outside the lock. */
		diag_remove_client_entry(file);
		return ret;
	}

	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;
		write_len = diag_copy_to_user_msg_mask(buf + ret, count,
						       session_info);
		if (write_len > 0)
			ret += write_len;
		driver->data_ready[index] ^= MSG_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		/* Prefer the session's private event mask when one exists. */
		if (session_info && session_info->event_mask &&
		    session_info->event_mask->ptr) {
			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
					*(session_info->event_mask->ptr),
					session_info->event_mask->mask_len);
			if (ret == -EFAULT)
				goto exit;
		} else {
			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
						*(event_mask.ptr),
						event_mask.mask_len);
			if (ret == -EFAULT)
				goto exit;
		}
		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;

		write_len = diag_copy_to_user_log_mask(buf + ret, count,
						       session_info);
		if (write_len > 0)
			ret += write_len;
		driver->data_ready[index] ^= LOG_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & PKT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & PKT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(data_type));
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf + sizeof(data_type),
					*(driver->apps_req_buf),
					driver->apps_req_buf_len);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= PKT_TYPE;
		/* Request buffer delivered; allow the next apps packet in. */
		driver->in_busy_pktdata = 0;
		goto exit;
	}

	if (driver->data_ready[index] & DCI_PKT_TYPE) {
		/* Copy the type of data being passed */
		data_type = driver->data_ready[index] & DCI_PKT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, *(driver->dci_pkt_buf),
					driver->dci_pkt_length);
		if (ret == -EFAULT)
			goto exit;

		driver->data_ready[index] ^= DCI_PKT_TYPE;
		driver->in_busy_dcipktdata = 0;
		goto exit;
	}

	if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
				event_mask_composite), DCI_EVENT_MASK_SIZE);
		if (ret == -EFAULT)
			goto exit;

		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
				log_mask_composite), DCI_LOG_MASK_SIZE);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
		goto exit;
	}

exit:
	mutex_unlock(&driver->diagchar_mutex);
	/* DCI stream data is copied under dci_mutex, not diagchar_mutex. */
	if (driver->data_ready[index] & DCI_DATA_TYPE) {
		mutex_lock(&driver->dci_mutex);
		/* Copy the type of data being passed */
		data_type = driver->data_ready[index] & DCI_DATA_TYPE;
		list_for_each_safe(start, temp, &driver->dci_client_list) {
			entry = list_entry(start, struct diag_dci_client_tbl,
						track);
			if (entry->client->tgid != current->tgid)
				continue;
			if (!entry->in_service)
				continue;
			if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
			ret += sizeof(int);
			if (copy_to_user(buf + ret, &entry->client_info.token,
					 sizeof(int))) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
			ret += sizeof(int);
			copy_dci_data = 1;
			exit_stat = diag_copy_dci(buf, count, entry, &ret);
			mutex_lock(&driver->diagchar_mutex);
			driver->data_ready[index] ^= DCI_DATA_TYPE;
			mutex_unlock(&driver->diagchar_mutex);
			if (exit_stat == 1) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
		}
		mutex_unlock(&driver->dci_mutex);
		goto end;
	}
end:
	/*
	 * Flush any read that is currently pending on DCI data and
	 * command channnels. This will ensure that the next read is not
	 * missed.
	 */
	if (copy_dci_data) {
		diag_ws_on_copy_complete(DIAG_WS_DCI);
		flush_workqueue(driver->diag_dci_wq);
	}
	return ret;
}
3030
/*
 * diagchar_write - write() handler for /dev/diag. The first 4 bytes of the
 * user buffer carry the packet type; the remainder is the payload, which
 * is dispatched to the DCI, raw-command, userspace-mask or apps-data
 * helpers based on that type.
 *
 * Returns 0 (or a helper-specific value) on success or a negative errno.
 */
static ssize_t diagchar_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int err = 0;
	int pkt_type = 0;
	int payload_len = 0;
	const char __user *payload_buf = NULL;

	/*
	 * The data coming from the user space should at least have the
	 * packet type header.
	 */
	if (count < sizeof(int)) {
		pr_err("diag: In %s, client is sending short data, len: %d\n",
		       __func__, (int)count);
		return -EBADMSG;
	}

	err = copy_from_user((&pkt_type), buf, sizeof(int));
	if (err) {
		pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
				   __func__, err);
		return -EIO;
	}

	/* With USB logging but no cable, only DCI traffic is meaningful. */
	if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
		if (!((pkt_type == DCI_DATA_TYPE) ||
		    (pkt_type == DCI_PKT_TYPE) ||
		    (pkt_type & DATA_TYPE_DCI_LOG) ||
		    (pkt_type & DATA_TYPE_DCI_EVENT))) {
			pr_debug("diag: In %s, Dropping non DCI packet type\n",
				 __func__);
			return -EIO;
		}
	}

	payload_buf = buf + sizeof(int);
	payload_len = count - sizeof(int);

	if (pkt_type == DCI_PKT_TYPE)
		return diag_user_process_dci_apps_data(payload_buf,
						       payload_len,
						       pkt_type);
	else if (pkt_type == DCI_DATA_TYPE)
		return diag_user_process_dci_data(payload_buf, payload_len);
	else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
		return diag_user_process_raw_data(payload_buf,
							payload_len);
	else if (pkt_type == USER_SPACE_DATA_TYPE)
		return diag_user_process_userspace_data(payload_buf,
							payload_len);
	if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
		err = diag_user_process_dci_apps_data(payload_buf, payload_len,
						      pkt_type);
		/* Strip the DCI bits; what remains may also be a
		 * regular-stream log/event type handled below.
		 */
		if (pkt_type & DATA_TYPE_DCI_LOG)
			pkt_type ^= DATA_TYPE_DCI_LOG;
		if (pkt_type & DATA_TYPE_DCI_EVENT)
			pkt_type ^= DATA_TYPE_DCI_EVENT;
		/*
		 * Check if the log or event is selected even on the regular
		 * stream. If USB is not connected and we are not in memory
		 * device mode, we should not process these logs/events.
		 */
		if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
		    !driver->usb_connected)
			return err;
	}

	switch (pkt_type) {
	case DATA_TYPE_EVENT:
	case DATA_TYPE_F3:
	case DATA_TYPE_LOG:
	case DATA_TYPE_DELAYED_RESPONSE:
	case DATA_TYPE_RESPONSE:
		return diag_user_process_apps_data(payload_buf, payload_len,
						   pkt_type);
	default:
		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
				   __func__, pkt_type);
		return -EINVAL;
	}

	return err;
}
3115
3116void diag_ws_init(void)
3117{
3118 driver->dci_ws.ref_count = 0;
3119 driver->dci_ws.copy_count = 0;
3120 spin_lock_init(&driver->dci_ws.lock);
3121
3122 driver->md_ws.ref_count = 0;
3123 driver->md_ws.copy_count = 0;
3124 spin_lock_init(&driver->md_ws.lock);
3125}
3126
3127static void diag_stats_init(void)
3128{
3129 if (!driver)
3130 return;
3131
3132 driver->msg_stats.alloc_count = 0;
3133 driver->msg_stats.drop_count = 0;
3134
3135 driver->log_stats.alloc_count = 0;
3136 driver->log_stats.drop_count = 0;
3137
3138 driver->event_stats.alloc_count = 0;
3139 driver->event_stats.drop_count = 0;
3140}
3141
/*
 * diag_ws_on_notify - keep the APPS awake when incoming diag traffic is
 * signalled. Pairs with diag_ws_release(), which calls pm_relax() once
 * no wakeup-source references remain.
 */
void diag_ws_on_notify(void)
{
	/*
	 * Do not deal with reference count here as there can be spurious
	 * interrupts.
	 */
	pm_stay_awake(driver->diag_dev);
}
3150
3151void diag_ws_on_read(int type, int pkt_len)
3152{
3153 unsigned long flags;
3154 struct diag_ws_ref_t *ws_ref = NULL;
3155
3156 switch (type) {
3157 case DIAG_WS_DCI:
3158 ws_ref = &driver->dci_ws;
3159 break;
3160 case DIAG_WS_MUX:
3161 ws_ref = &driver->md_ws;
3162 break;
3163 default:
3164 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3165 __func__, type);
3166 return;
3167 }
3168
3169 spin_lock_irqsave(&ws_ref->lock, flags);
3170 if (pkt_len > 0) {
3171 ws_ref->ref_count++;
3172 } else {
3173 if (ws_ref->ref_count < 1) {
3174 ws_ref->ref_count = 0;
3175 ws_ref->copy_count = 0;
3176 }
3177 diag_ws_release();
3178 }
3179 spin_unlock_irqrestore(&ws_ref->lock, flags);
3180}
3181
3182
3183void diag_ws_on_copy(int type)
3184{
3185 unsigned long flags;
3186 struct diag_ws_ref_t *ws_ref = NULL;
3187
3188 switch (type) {
3189 case DIAG_WS_DCI:
3190 ws_ref = &driver->dci_ws;
3191 break;
3192 case DIAG_WS_MUX:
3193 ws_ref = &driver->md_ws;
3194 break;
3195 default:
3196 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3197 __func__, type);
3198 return;
3199 }
3200
3201 spin_lock_irqsave(&ws_ref->lock, flags);
3202 ws_ref->copy_count++;
3203 spin_unlock_irqrestore(&ws_ref->lock, flags);
3204}
3205
3206void diag_ws_on_copy_fail(int type)
3207{
3208 unsigned long flags;
3209 struct diag_ws_ref_t *ws_ref = NULL;
3210
3211 switch (type) {
3212 case DIAG_WS_DCI:
3213 ws_ref = &driver->dci_ws;
3214 break;
3215 case DIAG_WS_MUX:
3216 ws_ref = &driver->md_ws;
3217 break;
3218 default:
3219 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3220 __func__, type);
3221 return;
3222 }
3223
3224 spin_lock_irqsave(&ws_ref->lock, flags);
3225 ws_ref->ref_count--;
3226 spin_unlock_irqrestore(&ws_ref->lock, flags);
3227
3228 diag_ws_release();
3229}
3230
3231void diag_ws_on_copy_complete(int type)
3232{
3233 unsigned long flags;
3234 struct diag_ws_ref_t *ws_ref = NULL;
3235
3236 switch (type) {
3237 case DIAG_WS_DCI:
3238 ws_ref = &driver->dci_ws;
3239 break;
3240 case DIAG_WS_MUX:
3241 ws_ref = &driver->md_ws;
3242 break;
3243 default:
3244 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3245 __func__, type);
3246 return;
3247 }
3248
3249 spin_lock_irqsave(&ws_ref->lock, flags);
3250 ws_ref->ref_count -= ws_ref->copy_count;
3251 if (ws_ref->ref_count < 1)
3252 ws_ref->ref_count = 0;
3253 ws_ref->copy_count = 0;
3254 spin_unlock_irqrestore(&ws_ref->lock, flags);
3255
3256 diag_ws_release();
3257}
3258
3259void diag_ws_reset(int type)
3260{
3261 unsigned long flags;
3262 struct diag_ws_ref_t *ws_ref = NULL;
3263
3264 switch (type) {
3265 case DIAG_WS_DCI:
3266 ws_ref = &driver->dci_ws;
3267 break;
3268 case DIAG_WS_MUX:
3269 ws_ref = &driver->md_ws;
3270 break;
3271 default:
3272 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3273 __func__, type);
3274 return;
3275 }
3276
3277 spin_lock_irqsave(&ws_ref->lock, flags);
3278 ws_ref->ref_count = 0;
3279 ws_ref->copy_count = 0;
3280 spin_unlock_irqrestore(&ws_ref->lock, flags);
3281
3282 diag_ws_release();
3283}
3284
3285void diag_ws_release(void)
3286{
3287 if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
3288 pm_relax(driver->diag_dev);
3289}
3290
#ifdef DIAG_DEBUG
/*
 * diag_debug_init - create the IPC logging context used for diag debug
 * messages and enable the default debug categories.
 *
 * A failed ipc_log_context_create() is only logged; the driver continues
 * without IPC logging.
 */
static void diag_debug_init(void)
{
	diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
	if (!diag_ipc_log)
		pr_err("diag: Failed to create IPC logging context\n");
	/*
	 * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
	 * to be logged to IPC
	 */
	diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
				DIAG_DEBUG_BRIDGE;
}
#else
/* No-op stub when DIAG_DEBUG is not configured */
static void diag_debug_init(void)
{

}
#endif
3310
3311static int diag_real_time_info_init(void)
3312{
3313 int i;
3314
3315 if (!driver)
3316 return -EIO;
3317 for (i = 0; i < DIAG_NUM_PROC; i++) {
3318 driver->real_time_mode[i] = 1;
3319 driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
3320 driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
3321 }
3322 driver->real_time_update_busy = 0;
3323 driver->proc_active_mask = 0;
3324 driver->diag_real_time_wq = create_singlethread_workqueue(
3325 "diag_real_time_wq");
3326 if (!driver->diag_real_time_wq)
3327 return -ENOMEM;
3328 INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
3329 mutex_init(&driver->real_time_mutex);
3330 return 0;
3331}
3332
/* File operations backing the /dev/diag character device node */
static const struct file_operations diagcharfops = {
	.owner = THIS_MODULE,
	.read = diagchar_read,
	.write = diagchar_write,
#ifdef CONFIG_COMPAT
	.compat_ioctl = diagchar_compat_ioctl,
#endif
	.unlocked_ioctl = diagchar_ioctl,
	.open = diagchar_open,
	.release = diagchar_close
};
3344
3345static int diagchar_setup_cdev(dev_t devno)
3346{
3347
3348 int err;
3349
3350 cdev_init(driver->cdev, &diagcharfops);
3351
3352 driver->cdev->owner = THIS_MODULE;
3353 driver->cdev->ops = &diagcharfops;
3354
3355 err = cdev_add(driver->cdev, devno, 1);
3356
3357 if (err) {
3358 pr_info("diagchar cdev registration failed !\n");
3359 return err;
3360 }
3361
3362 driver->diagchar_class = class_create(THIS_MODULE, "diag");
3363
3364 if (IS_ERR(driver->diagchar_class)) {
3365 pr_err("Error creating diagchar class.\n");
3366 return PTR_ERR(driver->diagchar_class);
3367 }
3368
3369 driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
3370 (void *)driver, "diag");
3371
3372 if (!driver->diag_dev)
3373 return -EIO;
3374
3375 driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
3376 return 0;
3377
3378}
3379
3380static int diagchar_cleanup(void)
3381{
3382 if (driver) {
3383 if (driver->cdev) {
3384 /* TODO - Check if device exists before deleting */
3385 device_destroy(driver->diagchar_class,
3386 MKDEV(driver->major,
3387 driver->minor_start));
3388 cdev_del(driver->cdev);
3389 }
3390 if (!IS_ERR(driver->diagchar_class))
3391 class_destroy(driver->diagchar_class);
3392 kfree(driver);
3393 }
3394 return 0;
3395}
3396
3397static int __init diagchar_init(void)
3398{
3399 dev_t dev;
3400 int ret;
3401
3402 pr_debug("diagfwd initializing ..\n");
3403 ret = 0;
3404 driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
3405 if (!driver)
3406 return -ENOMEM;
3407 kmemleak_not_leak(driver);
3408
3409 timer_in_progress = 0;
3410 driver->delayed_rsp_id = 0;
3411 driver->hdlc_disabled = 0;
3412 driver->dci_state = DIAG_DCI_NO_ERROR;
3413 setup_timer(&drain_timer, drain_timer_func, 1234);
3414 driver->supports_sockets = 1;
3415 driver->time_sync_enabled = 0;
3416 driver->uses_time_api = 0;
3417 driver->poolsize = poolsize;
3418 driver->poolsize_hdlc = poolsize_hdlc;
3419 driver->poolsize_dci = poolsize_dci;
3420 driver->poolsize_user = poolsize_user;
3421 /*
3422 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
3423 * The number of buffers encompasses Diag data generated on
3424 * the Apss processor + 1 for the responses generated exclusively on
3425 * the Apps processor + data from data channels (4 channels per
3426 * peripheral) + data from command channels (2)
3427 */
3428 diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
3429 poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
3430 driver->num_clients = max_clients;
3431 driver->logging_mode = DIAG_USB_MODE;
3432 driver->mask_check = 0;
3433 driver->in_busy_pktdata = 0;
3434 driver->in_busy_dcipktdata = 0;
3435 driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
3436 hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3437 hdlc_data.len = 0;
3438 non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3439 non_hdlc_data.len = 0;
3440 mutex_init(&driver->hdlc_disable_mutex);
3441 mutex_init(&driver->diagchar_mutex);
3442 mutex_init(&driver->diag_maskclear_mutex);
3443 mutex_init(&driver->diag_file_mutex);
3444 mutex_init(&driver->delayed_rsp_mutex);
3445 mutex_init(&apps_data_mutex);
3446 mutex_init(&driver->diagfwd_channel_mutex);
3447 init_waitqueue_head(&driver->wait_q);
3448 INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
3449 INIT_WORK(&(driver->update_user_clients),
3450 diag_update_user_client_work_fn);
3451 INIT_WORK(&(driver->update_md_clients),
3452 diag_update_md_client_work_fn);
3453 diag_ws_init();
3454 diag_stats_init();
3455 diag_debug_init();
3456 diag_md_session_init();
3457
3458 driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
3459 driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
3460 if (!driver->incoming_pkt.data) {
3461 ret = -ENOMEM;
3462 goto fail;
3463 }
3464 kmemleak_not_leak(driver->incoming_pkt.data);
3465 driver->incoming_pkt.processing = 0;
3466 driver->incoming_pkt.read_len = 0;
3467 driver->incoming_pkt.remaining = 0;
3468 driver->incoming_pkt.total_len = 0;
3469
3470 ret = diag_real_time_info_init();
3471 if (ret)
3472 goto fail;
3473 ret = diag_debugfs_init();
3474 if (ret)
3475 goto fail;
3476 ret = diag_masks_init();
3477 if (ret)
3478 goto fail;
3479 ret = diag_remote_init();
3480 if (ret)
3481 goto fail;
3482 ret = diag_mux_init();
3483 if (ret)
3484 goto fail;
3485 ret = diagfwd_init();
3486 if (ret)
3487 goto fail;
3488 ret = diagfwd_cntl_init();
3489 if (ret)
3490 goto fail;
3491 driver->dci_state = diag_dci_init();
3492 ret = diagfwd_peripheral_init();
3493 if (ret)
3494 goto fail;
3495 diagfwd_cntl_channel_init();
3496 if (driver->dci_state == DIAG_DCI_NO_ERROR)
3497 diag_dci_channel_init();
3498 pr_debug("diagchar initializing ..\n");
3499 driver->num = 1;
3500 driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
3501 strlcpy(driver->name, "diag", 4);
3502 /* Get major number from kernel and initialize */
3503 ret = alloc_chrdev_region(&dev, driver->minor_start,
3504 driver->num, driver->name);
3505 if (!ret) {
3506 driver->major = MAJOR(dev);
3507 driver->minor_start = MINOR(dev);
3508 } else {
3509 pr_err("diag: Major number not allocated\n");
3510 goto fail;
3511 }
3512 driver->cdev = cdev_alloc();
3513 ret = diagchar_setup_cdev(dev);
3514 if (ret)
3515 goto fail;
Sreelakshmi Gownipalli8d477d32017-02-08 19:49:06 -08003516 mutex_init(&driver->diag_id_mutex);
3517 INIT_LIST_HEAD(&driver->diag_id_list);
3518 diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS");
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003519 pr_debug("diagchar initialized now");
3520 ret = diagfwd_bridge_init();
3521 if (ret)
3522 diagfwd_bridge_exit();
3523 return 0;
3524
3525fail:
3526 pr_err("diagchar is not initialized, ret: %d\n", ret);
3527 diag_debugfs_cleanup();
3528 diagchar_cleanup();
3529 diag_mux_exit();
3530 diagfwd_peripheral_exit();
3531 diagfwd_bridge_exit();
3532 diagfwd_exit();
3533 diagfwd_cntl_exit();
3534 diag_dci_exit();
3535 diag_masks_exit();
3536 diag_remote_exit();
3537 return ret;
3538
3539}
3540
/*
 * diagchar_exit - module exit path.
 *
 * Tears down the memory pools, MUX/forwarding layers, DCI, masks,
 * sessions, remote/bridge support, debugfs entries, and finally the
 * character device itself via diagchar_cleanup().
 */
static void diagchar_exit(void)
{
	pr_info("diagchar exiting...\n");
	diag_mempool_exit();
	diag_mux_exit();
	diagfwd_peripheral_exit();
	diagfwd_exit();
	diagfwd_cntl_exit();
	diag_dci_exit();
	diag_masks_exit();
	diag_md_session_exit();
	diag_remote_exit();
	diag_debugfs_cleanup();
	diagchar_cleanup();
	pr_info("done diagchar exit\n");
}
3557
3558module_init(diagchar_init);
3559module_exit(diagchar_exit);