/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/cdev.h>
17#include <linux/fs.h>
18#include <linux/device.h>
19#include <linux/delay.h>
20#include <linux/uaccess.h>
21#include <linux/diagchar.h>
22#include <linux/sched.h>
23#include <linux/ratelimit.h>
24#include <linux/timer.h>
25#ifdef CONFIG_DIAG_OVER_USB
26#include <linux/usb/usbdiag.h>
27#endif
28#include <asm/current.h>
29#include "diagchar_hdlc.h"
30#include "diagmem.h"
31#include "diagchar.h"
32#include "diagfwd.h"
33#include "diagfwd_cntl.h"
34#include "diag_dci.h"
35#include "diag_debugfs.h"
36#include "diag_masks.h"
37#include "diagfwd_bridge.h"
38#include "diag_usb.h"
39#include "diag_memorydevice.h"
40#include "diag_mux.h"
41#include "diag_ipc_logging.h"
42#include "diagfwd_peripheral.h"
43
44#include <linux/coresight-stm.h>
45#include <linux/kernel.h>
46#ifdef CONFIG_COMPAT
47#include <linux/compat.h>
48#endif
49
50MODULE_DESCRIPTION("Diag Char Driver");
51MODULE_LICENSE("GPL v2");
52
53#define MIN_SIZ_ALLOW 4
54#define INIT 1
55#define EXIT -1
56struct diagchar_dev *driver;
57struct diagchar_priv {
58 int pid;
59};
60
61#define USER_SPACE_RAW_DATA 0
62#define USER_SPACE_HDLC_DATA 1
63
64/* Memory pool variables */
65/* Used for copying any incoming packet from user space clients. */
66static unsigned int poolsize = 12;
67module_param(poolsize, uint, 0000);
68
69/*
70 * Used for HDLC encoding packets coming from the user
71 * space.
72 */
73static unsigned int poolsize_hdlc = 10;
74module_param(poolsize_hdlc, uint, 0000);
75
76/*
77 * This is used for incoming DCI requests from the user space clients.
78 * Don't expose itemsize as it is internal.
79 */
80static unsigned int poolsize_user = 8;
81module_param(poolsize_user, uint, 0000);
82
83/*
84 * USB structures allocated for writing Diag data generated on the Apps to USB.
85 * Don't expose itemsize as it is constant.
86 */
87static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
88static unsigned int poolsize_usb_apps = 10;
89module_param(poolsize_usb_apps, uint, 0000);
90
91/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
92static unsigned int poolsize_dci = 10;
93module_param(poolsize_dci, uint, 0000);
94
95#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
96/* Used for reading data from the remote device. */
97static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
98static unsigned int poolsize_mdm = 18;
99module_param(itemsize_mdm, uint, 0000);
100module_param(poolsize_mdm, uint, 0000);
101
102/*
103 * Used for reading DCI data from the remote device.
104 * Don't expose poolsize for DCI data. There is only one read buffer
105 */
106static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
107static unsigned int poolsize_mdm_dci = 1;
108module_param(itemsize_mdm_dci, uint, 0000);
109
110/*
111 * Used for USB structues associated with a remote device.
112 * Don't expose the itemsize since it is constant.
113 */
114static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
115static unsigned int poolsize_mdm_usb = 18;
116module_param(poolsize_mdm_usb, uint, 0000);
117
118/*
119 * Used for writing read DCI data to remote peripherals. Don't
120 * expose poolsize for DCI data. There is only one read
121 * buffer. Add 6 bytes for DCI header information: Start (1),
122 * Version (1), Length (2), Tag (2)
123 */
124static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
125static unsigned int poolsize_mdm_dci_write = 1;
126module_param(itemsize_mdm_dci_write, uint, 0000);
127
128/*
129 * Used for USB structures associated with a remote SMUX
130 * device Don't expose the itemsize since it is constant
131 */
132static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
133static unsigned int poolsize_qsc_usb = 8;
134module_param(poolsize_qsc_usb, uint, 0000);
135#endif
136
137/* This is the max number of user-space clients supported at initialization*/
138static unsigned int max_clients = 15;
139static unsigned int threshold_client_limit = 50;
140module_param(max_clients, uint, 0000);
141
142/* Timer variables */
143static struct timer_list drain_timer;
144static int timer_in_progress;
145
146struct diag_apps_data_t {
147 void *buf;
148 uint32_t len;
149 int ctxt;
150};
151
152static struct diag_apps_data_t hdlc_data;
153static struct diag_apps_data_t non_hdlc_data;
154static struct mutex apps_data_mutex;
155
156#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
157
158#ifdef DIAG_DEBUG
159uint16_t diag_debug_mask;
160void *diag_ipc_log;
161#endif
162
163static void diag_md_session_close(struct diag_md_session_t *session_info);
164
165/*
166 * Returns the next delayed rsp id. If wrapping is enabled,
167 * wraps the delayed rsp id to DIAGPKT_MAX_DELAYED_RSP.
168 */
169static uint16_t diag_get_next_delayed_rsp_id(void)
170{
171 uint16_t rsp_id = 0;
172
173 mutex_lock(&driver->delayed_rsp_mutex);
174 rsp_id = driver->delayed_rsp_id;
175 if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
176 rsp_id++;
177 else {
178 if (wrap_enabled) {
179 rsp_id = 1;
180 wrap_count++;
181 } else
182 rsp_id = DIAGPKT_MAX_DELAYED_RSP;
183 }
184 driver->delayed_rsp_id = rsp_id;
185 mutex_unlock(&driver->delayed_rsp_mutex);
186
187 return rsp_id;
188}
189
190static int diag_switch_logging(struct diag_logging_mode_param_t *param);
191
/*
 * Copy "length" bytes of "data" to user buffer "buf" and advance the
 * running offset "ret" on success. On a short user buffer or a failed
 * copy_to_user(), "ret" becomes -EFAULT and is NOT advanced, so callers
 * can reliably test "if (ret == -EFAULT)" afterwards. (Previously the
 * increment ran unconditionally, turning -EFAULT into -EFAULT + length
 * and defeating every caller's error check.)
 */
#define COPY_USER_SPACE_OR_ERR(buf, data, length) \
do { \
	if ((count < ret+length) || (copy_to_user(buf, \
			(void *)&data, length))) { \
		ret = -EFAULT; \
	} else { \
		ret += length; \
	} \
} while (0)
200
/*
 * Drain-timer callback: timer context cannot block, so defer the actual
 * drain of buffered apps data to the diag workqueue.
 */
static void drain_timer_func(unsigned long data)
{
	queue_work(driver->diag_wq, &(driver->diag_drain_work));
}
205
206static void diag_drain_apps_data(struct diag_apps_data_t *data)
207{
208 int err = 0;
209
210 if (!data || !data->buf)
211 return;
212
213 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
214 data->ctxt);
215 if (err)
216 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
217
218 data->buf = NULL;
219 data->len = 0;
220}
221
/* Workqueue hook: notify user-space clients of an HDLC-support update. */
void diag_update_user_client_work_fn(struct work_struct *work)
{
	diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
}
226
/* Workqueue hook: notify memory-device clients of an HDLC-support update. */
static void diag_update_md_client_work_fn(struct work_struct *work)
{
	diag_update_md_clients(HDLC_SUPPORT_TYPE);
}
231
232void diag_drain_work_fn(struct work_struct *work)
233{
234 struct diag_md_session_t *session_info = NULL;
235 uint8_t hdlc_disabled = 0;
236
237 timer_in_progress = 0;
238 mutex_lock(&apps_data_mutex);
239 session_info = diag_md_session_get_peripheral(APPS_DATA);
240 if (session_info)
241 hdlc_disabled = session_info->hdlc_disabled;
242 else
243 hdlc_disabled = driver->hdlc_disabled;
244
245 if (!hdlc_disabled)
246 diag_drain_apps_data(&hdlc_data);
247 else
248 diag_drain_apps_data(&non_hdlc_data);
249 mutex_unlock(&apps_data_mutex);
250}
251
252void check_drain_timer(void)
253{
254 int ret = 0;
255
256 if (!timer_in_progress) {
257 timer_in_progress = 1;
258 ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
259 }
260}
261
/*
 * Record the calling process in client_map slot i and stash its pid in
 * the file's private_data so diag_remove_client_entry() can find the
 * slot again at close time.
 *
 * NOTE(review): if the kmalloc fails, file->private_data stays NULL
 * while client_map[i].pid is still claimed, and the caller is not told;
 * confirm that the close path tolerates this.
 */
void diag_add_client(int i, struct file *file)
{
	struct diagchar_priv *diagpriv_data;

	driver->client_map[i].pid = current->tgid;
	diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
							GFP_KERNEL);
	if (diagpriv_data)
		diagpriv_data->pid = current->tgid;
	file->private_data = diagpriv_data;
	/* strlcpy already NUL-terminates; the explicit write below is a
	 * belt-and-braces guard. */
	strlcpy(driver->client_map[i].name, current->comm, 20);
	driver->client_map[i].name[19] = '\0';
}
275
/*
 * Size and create the memory pools used for local (apps) diag traffic.
 * Invoked from the first diagchar_open(); diag_mempool_exit() is the
 * counterpart run when the last client closes.
 */
static void diag_mempool_init(void)
{
	uint32_t itemsize = DIAG_MAX_REQ_SIZE;
	uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
	uint32_t itemsize_dci = IN_BUF_SIZE;
	uint32_t itemsize_user = DCI_REQ_BUF_SIZE;

	/* Leave room for the larger of the DCI/callback headers that may
	 * be prepended to a copied request. */
	itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
				CALLBACK_HDR_SIZE);
	diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
	diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
	diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
	diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);

	diagmem_init(driver, POOL_TYPE_COPY);
	diagmem_init(driver, POOL_TYPE_HDLC);
	diagmem_init(driver, POOL_TYPE_USER);
	diagmem_init(driver, POOL_TYPE_DCI);
}
295
/*
 * Counterpart of diag_mempool_init(): release the local memory pools
 * once the last diag client has closed.
 */
static void diag_mempool_exit(void)
{
	diagmem_exit(driver, POOL_TYPE_COPY);
	diagmem_exit(driver, POOL_TYPE_HDLC);
	diagmem_exit(driver, POOL_TYPE_USER);
	diagmem_exit(driver, POOL_TYPE_DCI);
}
303
304static int diagchar_open(struct inode *inode, struct file *file)
305{
306 int i = 0;
307 void *temp;
308
309 if (driver) {
310 mutex_lock(&driver->diagchar_mutex);
311
312 for (i = 0; i < driver->num_clients; i++)
313 if (driver->client_map[i].pid == 0)
314 break;
315
316 if (i < driver->num_clients) {
317 diag_add_client(i, file);
318 } else {
319 if (i < threshold_client_limit) {
320 driver->num_clients++;
321 temp = krealloc(driver->client_map
322 , (driver->num_clients) * sizeof(struct
323 diag_client_map), GFP_KERNEL);
324 if (!temp)
325 goto fail;
326 else
327 driver->client_map = temp;
328 temp = krealloc(driver->data_ready
329 , (driver->num_clients) * sizeof(int),
330 GFP_KERNEL);
331 if (!temp)
332 goto fail;
333 else
334 driver->data_ready = temp;
335 diag_add_client(i, file);
336 } else {
337 mutex_unlock(&driver->diagchar_mutex);
338 pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
339 pr_err_ratelimited("diag: Cannot open handle %s %d",
340 current->comm, current->tgid);
341 for (i = 0; i < driver->num_clients; i++)
342 pr_debug("%d) %s PID=%d", i, driver->
343 client_map[i].name,
344 driver->client_map[i].pid);
345 return -ENOMEM;
346 }
347 }
348 driver->data_ready[i] = 0x0;
349 driver->data_ready[i] |= MSG_MASKS_TYPE;
350 driver->data_ready[i] |= EVENT_MASKS_TYPE;
351 driver->data_ready[i] |= LOG_MASKS_TYPE;
352 driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
353 driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
354
355 if (driver->ref_count == 0)
356 diag_mempool_init();
357 driver->ref_count++;
358 mutex_unlock(&driver->diagchar_mutex);
359 return 0;
360 }
361 return -ENOMEM;
362
363fail:
364 mutex_unlock(&driver->diagchar_mutex);
365 driver->num_clients--;
366 pr_err_ratelimited("diag: Insufficient memory for new client");
367 return -ENOMEM;
368}
369
370static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
371{
372 uint32_t ret = 0;
373
374 if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
375 ret |= DIAG_CON_APSS;
376 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
377 ret |= DIAG_CON_MPSS;
378 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
379 ret |= DIAG_CON_LPASS;
380 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
381 ret |= DIAG_CON_WCNSS;
382 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
383 ret |= DIAG_CON_SENSORS;
384 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
385 ret |= DIAG_CON_WDSP;
Sreelakshmi Gownipalli588a31d2016-11-02 13:33:43 -0700386 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
387 ret |= DIAG_CON_CDSP;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700388
389 return ret;
390}
391
392void diag_clear_masks(struct diag_md_session_t *info)
393{
394 int ret;
395 char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
396 char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
397 char cmd_disable_event_mask[] = { 0x60, 0};
398
399 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
400 "diag: %s: masks clear request upon %s\n", __func__,
401 ((info) ? "ODL exit" : "USB Disconnection"));
402
403 ret = diag_process_apps_masks(cmd_disable_log_mask,
404 sizeof(cmd_disable_log_mask), info);
405 ret = diag_process_apps_masks(cmd_disable_msg_mask,
406 sizeof(cmd_disable_msg_mask), info);
407 ret = diag_process_apps_masks(cmd_disable_event_mask,
408 sizeof(cmd_disable_event_mask), info);
409 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
410 "diag:%s: masks cleared successfully\n", __func__);
411}
412
/*
 * Tear down the memory-device (ODL) logging session owned by "pid":
 * clear the masks it set, close the session and the peripherals it
 * claimed, then route those peripherals back to USB logging. No-op when
 * the pid owns no session.
 */
static void diag_close_logging_process(const int pid)
{
	int i;
	int session_peripheral_mask;
	struct diag_md_session_t *session_info = NULL;
	struct diag_logging_mode_param_t params;

	session_info = diag_md_session_get_pid(pid);
	if (!session_info)
		return;

	diag_clear_masks(session_info);

	/* Flag mask-clearing in progress for the rest of the driver */
	mutex_lock(&driver->diag_maskclear_mutex);
	driver->mask_clear = 1;
	mutex_unlock(&driver->diag_maskclear_mutex);

	/* Capture the mask before the session struct goes away */
	session_peripheral_mask = session_info->peripheral_mask;
	diag_md_session_close(session_info);
	for (i = 0; i < NUM_MD_SESSIONS; i++)
		if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);

	/* Switch the released peripherals back to USB mode */
	params.req_mode = USB_MODE;
	params.mode_param = 0;
	params.peripheral_mask =
		diag_translate_kernel_to_user_mask(session_peripheral_mask);
	mutex_lock(&driver->diagchar_mutex);
	diag_switch_logging(&params);
	mutex_unlock(&driver->diagchar_mutex);
}
444
/*
 * Full close-path teardown for one diag client: release its DCI
 * registrations, its logging session, its command registrations, its
 * client_map slot, and (for the last client) the shared memory pools.
 *
 * Returns 0 on success or a negative errno for bad arguments.
 */
static int diag_remove_client_entry(struct file *file)
{
	int i = -1;
	struct diagchar_priv *diagpriv_data = NULL;
	struct diag_dci_client_tbl *dci_entry = NULL;

	if (!driver)
		return -ENOMEM;

	mutex_lock(&driver->diag_file_mutex);
	if (!file) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -ENOENT;
	}
	if (!(file->private_data)) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -EINVAL;
	}

	diagpriv_data = file->private_data;

	/*
	 * clean up any DCI registrations, if this is a DCI client
	 * This will specially help in case of ungraceful exit of any DCI client
	 * This call will remove any pending registrations of such client
	 */
	mutex_lock(&driver->dci_mutex);
	dci_entry = dci_lookup_client_entry_pid(current->tgid);
	if (dci_entry)
		diag_dci_deinit_client(dci_entry);
	mutex_unlock(&driver->dci_mutex);

	diag_close_logging_process(current->tgid);

	/* Delete the pkt response table entry for the exiting process */
	diag_cmd_remove_reg_by_pid(current->tgid);

	mutex_lock(&driver->diagchar_mutex);
	driver->ref_count--;
	/* Last client gone: tear down the shared memory pools */
	if (driver->ref_count == 0)
		diag_mempool_exit();

	/* Free the slot matching the pid recorded at open time */
	for (i = 0; i < driver->num_clients; i++) {
		if (diagpriv_data && diagpriv_data->pid ==
			driver->client_map[i].pid) {
			driver->client_map[i].pid = 0;
			kfree(diagpriv_data);
			diagpriv_data = NULL;
			file->private_data = 0;
			break;
		}
	}
	mutex_unlock(&driver->diagchar_mutex);
	mutex_unlock(&driver->diag_file_mutex);
	return 0;
}
503static int diagchar_close(struct inode *inode, struct file *file)
504{
505 int ret;
506
507 DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
508 current->comm);
509 ret = diag_remove_client_entry(file);
510 mutex_lock(&driver->diag_maskclear_mutex);
511 driver->mask_clear = 0;
512 mutex_unlock(&driver->diag_maskclear_mutex);
513 return ret;
514}
515
516void diag_record_stats(int type, int flag)
517{
518 struct diag_pkt_stats_t *pkt_stats = NULL;
519
520 switch (type) {
521 case DATA_TYPE_EVENT:
522 pkt_stats = &driver->event_stats;
523 break;
524 case DATA_TYPE_F3:
525 pkt_stats = &driver->msg_stats;
526 break;
527 case DATA_TYPE_LOG:
528 pkt_stats = &driver->log_stats;
529 break;
530 case DATA_TYPE_RESPONSE:
531 if (flag != PKT_DROP)
532 return;
533 pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
534 __func__);
535 return;
536 case DATA_TYPE_DELAYED_RESPONSE:
537 /* No counters to increase for Delayed responses */
538 return;
539 default:
540 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
541 __func__, type);
542 return;
543 }
544
545 switch (flag) {
546 case PKT_ALLOC:
547 atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
548 break;
549 case PKT_DROP:
550 atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
551 break;
552 case PKT_RESET:
553 atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
554 atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
555 break;
556 default:
557 pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
558 __func__, flag);
559 return;
560 }
561}
562
563void diag_get_timestamp(char *time_str)
564{
565 struct timeval t;
566 struct tm broken_tm;
567
568 do_gettimeofday(&t);
569 if (!time_str)
570 return;
571 time_to_tm(t.tv_sec, 0, &broken_tm);
572 scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
573 broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
574}
575
576int diag_get_remote(int remote_info)
577{
578 int val = (remote_info < 0) ? -remote_info : remote_info;
579 int remote_val;
580
581 switch (val) {
582 case MDM:
583 case MDM2:
584 case QSC:
585 remote_val = -remote_info;
586 break;
587 default:
588 remote_val = 0;
589 break;
590 }
591
592 return remote_val;
593}
594
595int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
596{
597 int polling = DIAG_CMD_NOT_POLLING;
598
599 if (!entry)
600 return -EIO;
601
602 if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
603 if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
604 entry->cmd_code_hi >= DIAG_CMD_STATUS &&
605 entry->cmd_code_lo <= DIAG_CMD_STATUS)
606 polling = DIAG_CMD_POLLING;
607 else if (entry->subsys_id == DIAG_SS_WCDMA &&
608 entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
609 entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
610 polling = DIAG_CMD_POLLING;
611 else if (entry->subsys_id == DIAG_SS_GSM &&
612 entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
613 entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
614 polling = DIAG_CMD_POLLING;
615 else if (entry->subsys_id == DIAG_SS_PARAMS &&
616 entry->cmd_code_hi >= DIAG_DIAG_POLL &&
617 entry->cmd_code_lo <= DIAG_DIAG_POLL)
618 polling = DIAG_CMD_POLLING;
619 else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
620 entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
621 entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
622 polling = DIAG_CMD_POLLING;
623 }
624
625 return polling;
626}
627
628static void diag_cmd_invalidate_polling(int change_flag)
629{
630 int polling = DIAG_CMD_NOT_POLLING;
631 struct list_head *start;
632 struct list_head *temp;
633 struct diag_cmd_reg_t *item = NULL;
634
635 if (change_flag == DIAG_CMD_ADD) {
636 if (driver->polling_reg_flag)
637 return;
638 }
639
640 driver->polling_reg_flag = 0;
641 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
642 item = list_entry(start, struct diag_cmd_reg_t, link);
643 polling = diag_cmd_chk_polling(&item->entry);
644 if (polling == DIAG_CMD_POLLING) {
645 driver->polling_reg_flag = 1;
646 break;
647 }
648 }
649}
650
651int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
652 int pid)
653{
654 struct diag_cmd_reg_t *new_item = NULL;
655
656 if (!new_entry) {
657 pr_err("diag: In %s, invalid new entry\n", __func__);
658 return -EINVAL;
659 }
660
661 if (proc > APPS_DATA) {
662 pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
663 return -EINVAL;
664 }
665
666 if (proc != APPS_DATA)
667 pid = INVALID_PID;
668
669 new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
670 if (!new_item)
671 return -ENOMEM;
672 kmemleak_not_leak(new_item);
673
674 new_item->pid = pid;
675 new_item->proc = proc;
676 memcpy(&new_item->entry, new_entry,
677 sizeof(struct diag_cmd_reg_entry_t));
678 INIT_LIST_HEAD(&new_item->link);
679
680 mutex_lock(&driver->cmd_reg_mutex);
681 list_add_tail(&new_item->link, &driver->cmd_reg_list);
682 driver->cmd_reg_count++;
683 diag_cmd_invalidate_polling(DIAG_CMD_ADD);
684 mutex_unlock(&driver->cmd_reg_mutex);
685
686 return 0;
687}
688
689struct diag_cmd_reg_entry_t *diag_cmd_search(
690 struct diag_cmd_reg_entry_t *entry, int proc)
691{
692 struct list_head *start;
693 struct list_head *temp;
694 struct diag_cmd_reg_t *item = NULL;
695 struct diag_cmd_reg_entry_t *temp_entry = NULL;
696
697 if (!entry) {
698 pr_err("diag: In %s, invalid entry\n", __func__);
699 return NULL;
700 }
701
702 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
703 item = list_entry(start, struct diag_cmd_reg_t, link);
Gopikrishna Mogasati9b332372016-11-10 20:03:46 +0530704 if (item == NULL || &item->entry == NULL) {
705 pr_err("diag: In %s, unable to search command\n",
706 __func__);
707 return NULL;
708 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700709 temp_entry = &item->entry;
710 if (temp_entry->cmd_code == entry->cmd_code &&
711 temp_entry->subsys_id == entry->subsys_id &&
712 temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
713 temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
714 (proc == item->proc || proc == ALL_PROC)) {
715 return &item->entry;
716 } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
717 entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
718 if (temp_entry->subsys_id == entry->subsys_id &&
719 temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
720 temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
721 (proc == item->proc || proc == ALL_PROC)) {
722 return &item->entry;
723 }
724 } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
725 temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
726 if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
727 (temp_entry->cmd_code_lo <= entry->cmd_code) &&
728 (proc == item->proc || proc == ALL_PROC)) {
729 if (entry->cmd_code == MODE_CMD) {
730 if (entry->subsys_id == RESET_ID &&
731 item->proc != APPS_DATA) {
732 continue;
733 }
734 if (entry->subsys_id != RESET_ID &&
735 item->proc == APPS_DATA) {
736 continue;
737 }
738 }
739 return &item->entry;
740 }
741 }
742 }
743
744 return NULL;
745}
746
/*
 * Remove the registration matching "entry" for processor "proc", if
 * present, then refresh the cached polling flag. Takes cmd_reg_mutex.
 */
void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
{
	struct diag_cmd_reg_t *item = NULL;
	struct diag_cmd_reg_entry_t *temp_entry;

	if (!entry) {
		pr_err("diag: In %s, invalid entry\n", __func__);
		return;
	}

	mutex_lock(&driver->cmd_reg_mutex);
	temp_entry = diag_cmd_search(entry, proc);
	if (temp_entry) {
		item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
		/* NOTE(review): container_of() on a non-NULL member pointer
		 * cannot yield NULL here; this guard looks like dead code. */
		if (!item) {
			mutex_unlock(&driver->cmd_reg_mutex);
			return;
		}
		list_del(&item->link);
		kfree(item);
		driver->cmd_reg_count--;
	}
	diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
	mutex_unlock(&driver->cmd_reg_mutex);
}
772
773void diag_cmd_remove_reg_by_pid(int pid)
774{
775 struct list_head *start;
776 struct list_head *temp;
777 struct diag_cmd_reg_t *item = NULL;
778
779 mutex_lock(&driver->cmd_reg_mutex);
780 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
781 item = list_entry(start, struct diag_cmd_reg_t, link);
782 if (item->pid == pid) {
783 list_del(&item->link);
784 kfree(item);
785 driver->cmd_reg_count--;
786 }
787 }
788 mutex_unlock(&driver->cmd_reg_mutex);
789}
790
791void diag_cmd_remove_reg_by_proc(int proc)
792{
793 struct list_head *start;
794 struct list_head *temp;
795 struct diag_cmd_reg_t *item = NULL;
796
797 mutex_lock(&driver->cmd_reg_mutex);
798 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
799 item = list_entry(start, struct diag_cmd_reg_t, link);
800 if (item->proc == proc) {
801 list_del(&item->link);
802 kfree(item);
803 driver->cmd_reg_count--;
804 }
805 }
806 diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
807 mutex_unlock(&driver->cmd_reg_mutex);
808}
809
/*
 * Drain the completed DCI write buffers of "entry" into the user buffer
 * "buf" (capacity "count"), starting at offset *pret. Once all buffers
 * are copied, the accumulated payload length is written at buf+8 and
 * *pret is updated for the caller.
 *
 * Returns 0 when data was delivered, 1 on bad arguments, -EINVAL when
 * the starting offset is already out of range. If the pending buffers
 * exceed "count", a follow-up drain is scheduled via dci_drain_data().
 */
static int diag_copy_dci(char __user *buf, size_t count,
			struct diag_dci_client_tbl *entry, int *pret)
{
	int total_data_len = 0;
	int ret = 0;
	int exit_stat = 1;
	uint8_t drain_again = 0;
	struct diag_dci_buffer_t *buf_entry, *temp;

	if (!buf || !entry || !pret)
		return exit_stat;

	ret = *pret;

	/* Reserve room for the total-length word copied at the end */
	ret += sizeof(int);
	if (ret >= count) {
		pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
			__func__, ret, count);
		return -EINVAL;
	}

	mutex_lock(&entry->write_buf_mutex);
	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
								buf_track) {

		/* Out of user-buffer space; deliver the rest next time */
		if ((ret + buf_entry->data_len) > count) {
			drain_again = 1;
			break;
		}

		list_del(&buf_entry->buf_track);
		mutex_lock(&buf_entry->data_mutex);
		if ((buf_entry->data_len > 0) &&
			(buf_entry->in_busy) &&
			(buf_entry->data)) {
			if (copy_to_user(buf+ret, (void *)buf_entry->data,
					buf_entry->data_len))
				goto drop;
			ret += buf_entry->data_len;
			total_data_len += buf_entry->data_len;
			diag_ws_on_copy(DIAG_WS_DCI);
drop:
			/* The buffer is recycled even when the copy failed */
			buf_entry->in_busy = 0;
			buf_entry->data_len = 0;
			buf_entry->in_list = 0;
			if (buf_entry->buf_type == DCI_BUF_CMD) {
				mutex_unlock(&buf_entry->data_mutex);
				continue;
			} else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
				/* Secondary buffers own both the pool data
				 * and the tracking struct itself */
				diagmem_free(driver, buf_entry->data,
						POOL_TYPE_DCI);
				buf_entry->data = NULL;
				mutex_unlock(&buf_entry->data_mutex);
				kfree(buf_entry);
				continue;
			}

		}
		mutex_unlock(&buf_entry->data_mutex);
	}

	if (total_data_len > 0) {
		/* Copy the total data length */
		COPY_USER_SPACE_OR_ERR(buf+8, total_data_len, 4);
		if (ret == -EFAULT)
			goto exit;
		/* The length word at buf+8 does not consume payload space;
		 * undo the macro's advance of ret. */
		ret -= 4;
	} else {
		pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
			__func__, total_data_len);
	}

	exit_stat = 0;
exit:
	entry->in_service = 0;
	mutex_unlock(&entry->write_buf_mutex);
	*pret = ret;
	if (drain_again)
		dci_drain_data(0);

	return exit_stat;
}
892
893#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Set up bridge support: size the MDM/MDM2/QSC memory pools (data, DCI
 * and USB request structs) and allocate the scratch buffer used to
 * stage HDLC-encoded apps data bound for a remote processor.
 *
 * Returns 0 on success, -ENOMEM if the encode buffer allocation fails.
 */
static int diag_remote_init(void)
{
	diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
			poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
			poolsize_qsc_usb);
	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
	if (!driver->hdlc_encode_buf)
		return -ENOMEM;
	driver->hdlc_encode_buf_len = 0;
	return 0;
}
915
916static void diag_remote_exit(void)
917{
918 kfree(driver->hdlc_encode_buf);
919}
920
921static int diag_send_raw_data_remote(int proc, void *buf, int len,
922 uint8_t hdlc_flag)
923{
924 int err = 0;
925 int max_len = 0;
926 uint8_t retry_count = 0;
927 uint8_t max_retries = 3;
928 uint16_t payload = 0;
929 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
930 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
931 int bridge_index = proc - 1;
932 struct diag_md_session_t *session_info = NULL;
933 uint8_t hdlc_disabled = 0;
934
935 if (!buf)
936 return -EINVAL;
937
938 if (len <= 0) {
939 pr_err("diag: In %s, invalid len: %d", __func__, len);
940 return -EBADMSG;
941 }
942
943 if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
944 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
945 bridge_index);
946 return -EINVAL;
947 }
948
949 do {
950 if (driver->hdlc_encode_buf_len == 0)
951 break;
952 usleep_range(10000, 10100);
953 retry_count++;
954 } while (retry_count < max_retries);
955
956 if (driver->hdlc_encode_buf_len != 0)
957 return -EAGAIN;
958 session_info = diag_md_session_get_peripheral(APPS_DATA);
959 if (session_info)
960 hdlc_disabled = session_info->hdlc_disabled;
961 else
962 hdlc_disabled = driver->hdlc_disabled;
963 if (hdlc_disabled) {
964 payload = *(uint16_t *)(buf + 2);
965 driver->hdlc_encode_buf_len = payload;
966 /*
967 * Adding 4 bytes for start (1 byte), version (1 byte) and
968 * payload (2 bytes)
969 */
970 memcpy(driver->hdlc_encode_buf, buf + 4, payload);
971 goto send_data;
972 }
973
974 if (hdlc_flag) {
975 if (len > DIAG_MAX_HDLC_BUF_SIZE) {
976 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
977 len);
978 return -EBADMSG;
979 }
980 driver->hdlc_encode_buf_len = len;
981 memcpy(driver->hdlc_encode_buf, buf, len);
982 goto send_data;
983 }
984
985 /*
986 * The worst case length will be twice as the incoming packet length.
987 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
988 */
989 max_len = (2 * len) + 3;
990 if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
991 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
992 max_len);
993 return -EBADMSG;
994 }
995
996 /* Perform HDLC encoding on incoming data */
997 send.state = DIAG_STATE_START;
998 send.pkt = (void *)(buf);
999 send.last = (void *)(buf + len - 1);
1000 send.terminate = 1;
1001
1002 enc.dest = driver->hdlc_encode_buf;
1003 enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
1004 diag_hdlc_encode(&send, &enc);
1005 driver->hdlc_encode_buf_len = (int)(enc.dest -
1006 (void *)driver->hdlc_encode_buf);
1007
1008send_data:
1009 err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
1010 driver->hdlc_encode_buf_len);
1011 if (err) {
1012 pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
1013 proc, err);
1014 driver->hdlc_encode_buf_len = 0;
1015 }
1016
1017 return err;
1018}
1019
1020static int diag_process_userspace_remote(int proc, void *buf, int len)
1021{
1022 int bridge_index = proc - 1;
1023
1024 if (!buf || len < 0) {
1025 pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
1026 __func__, buf, len);
1027 return -EINVAL;
1028 }
1029
1030 if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
1031 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
1032 bridge_index);
1033 return -EINVAL;
1034 }
1035
1036 driver->user_space_data_busy = 1;
1037 return diagfwd_bridge_write(bridge_index, buf, len);
1038}
1039#else
/* Stub for builds without CONFIG_DIAGFWD_BRIDGE_CODE: nothing to set up. */
static int diag_remote_init(void)
{
	return 0;
}
1044
/* Stub for builds without bridge support: nothing to tear down. */
static void diag_remote_exit(void)
{
}
1048
/* Stub for builds without bridge support: report success with no work. */
int diagfwd_bridge_init(void)
{
	return 0;
}
1053
/* Stub for builds without bridge support: nothing to tear down. */
void diagfwd_bridge_exit(void)
{
}
1057
/* No bridge support compiled in: report that no remote devices exist. */
uint16_t diag_get_remote_device_mask(void)
{
	return 0;
}
1062
/* No bridge support compiled in: raw writes to a remote cannot succeed. */
static int diag_send_raw_data_remote(int proc, void *buf, int len,
			uint8_t hdlc_flag)
{
	return -EINVAL;
}
1068
/* No bridge support compiled in: treat remote user-space data as a
 * successful no-op. */
static int diag_process_userspace_remote(int proc, void *buf, int len)
{
	return 0;
}
1073#endif
1074
/*
 * Allow-list validation for command/mask requests arriving from user
 * space. Returns 1 when the packet may be forwarded, 0 otherwise.
 */
static int mask_request_validate(unsigned char mask_buf[])
{
	uint8_t packet_id;
	uint8_t subsys_id;
	uint16_t ss_cmd;

	packet_id = mask_buf[0];

	if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
		subsys_id = mask_buf[1];
		ss_cmd = *(uint16_t *)(mask_buf + 2);
		switch (subsys_id) {
		case DIAG_SS_DIAG:
			/* Only the peripheral/apps file-read commands */
			if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
				(ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
				(ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
				(ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
				(ss_cmd == DIAG_SS_FILE_READ_APPS))
				return 1;
			break;
		default:
			return 0;
		}
	} else if (packet_id == 0x4B) {
		/* 0x4B: subsystem dispatch -- presumably
		 * DIAG_CMD_DIAG_SUBSYS; confirm against diagchar.h. */
		subsys_id = mask_buf[1];
		ss_cmd = *(uint16_t *)(mask_buf + 2);
		/* Packets with SSID which are allowed */
		switch (subsys_id) {
		case 0x04: /* DIAG_SUBSYS_WCDMA */
			if ((ss_cmd == 0) || (ss_cmd == 0xF))
				return 1;
			break;
		case 0x08: /* DIAG_SUBSYS_GSM */
			if ((ss_cmd == 0) || (ss_cmd == 0x1))
				return 1;
			break;
		case 0x09: /* DIAG_SUBSYS_UMTS */
		case 0x0F: /* DIAG_SUBSYS_CM */
			if (ss_cmd == 0)
				return 1;
			break;
		case 0x0C: /* DIAG_SUBSYS_OS */
			if ((ss_cmd == 2) || (ss_cmd == 0x100))
				return 1; /* MPU and APU */
			break;
		case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
			if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
				return 1;
			else if (ss_cmd == 0x218) /* HDLC Disabled Command*/
				return 0;
			else if (ss_cmd == DIAG_GET_TIME_API)
				return 1;
			else if (ss_cmd == DIAG_SET_TIME_API)
				return 1;
			else if (ss_cmd == DIAG_SWITCH_COMMAND)
				return 1;
			else if (ss_cmd == DIAG_BUFFERING_MODE)
				return 1;
			break;
		case 0x13: /* DIAG_SUBSYS_FS */
			if ((ss_cmd == 0) || (ss_cmd == 0x1))
				return 1;
			break;
		default:
			return 0;
		}
	} else {
		/* Plain (non-subsystem) command codes */
		switch (packet_id) {
		case 0x00: /* Version Number */
		case 0x0C: /* CDMA status packet */
		case 0x1C: /* Diag Version */
		case 0x1D: /* Time Stamp */
		case 0x60: /* Event Report Control */
		case 0x63: /* Status snapshot */
		case 0x73: /* Logging Configuration */
		case 0x7C: /* Extended build ID */
		case 0x7D: /* Extended Message configuration */
		case 0x81: /* Event get mask */
		case 0x82: /* Set the event mask */
			return 1;
		default:
			return 0;
		}
	}
	return 0;
}
1161
1162static void diag_md_session_init(void)
1163{
1164 int i;
1165
1166 mutex_init(&driver->md_session_lock);
1167 driver->md_session_mask = 0;
1168 driver->md_session_mode = DIAG_MD_NONE;
1169 for (i = 0; i < NUM_MD_SESSIONS; i++)
1170 driver->md_session_map[i] = NULL;
1171}
1172
1173static void diag_md_session_exit(void)
1174{
1175 int i;
1176 struct diag_md_session_t *session_info = NULL;
1177
1178 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1179 if (driver->md_session_map[i]) {
1180 session_info = driver->md_session_map[i];
1181 diag_log_mask_free(session_info->log_mask);
1182 kfree(session_info->log_mask);
1183 session_info->log_mask = NULL;
1184 diag_msg_mask_free(session_info->msg_mask);
1185 kfree(session_info->msg_mask);
1186 session_info->msg_mask = NULL;
1187 diag_event_mask_free(session_info->event_mask);
1188 kfree(session_info->event_mask);
1189 session_info->event_mask = NULL;
1190 kfree(session_info);
1191 session_info = NULL;
1192 driver->md_session_map[i] = NULL;
1193 }
1194 }
1195 mutex_destroy(&driver->md_session_lock);
1196 driver->md_session_mask = 0;
1197 driver->md_session_mode = DIAG_MD_NONE;
1198}
1199
1200int diag_md_session_create(int mode, int peripheral_mask, int proc)
1201{
1202 int i;
1203 int err = 0;
1204 struct diag_md_session_t *new_session = NULL;
1205
1206 /*
1207 * If a session is running with a peripheral mask and a new session
1208 * request comes in with same peripheral mask value then return
1209 * invalid param
1210 */
1211 if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
1212 (driver->md_session_mask & peripheral_mask) != 0)
1213 return -EINVAL;
1214
1215 mutex_lock(&driver->md_session_lock);
1216 new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
1217 if (!new_session) {
1218 mutex_unlock(&driver->md_session_lock);
1219 return -ENOMEM;
1220 }
1221
1222 new_session->peripheral_mask = 0;
1223 new_session->pid = current->tgid;
1224 new_session->task = current;
1225
1226 new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
1227 GFP_KERNEL);
1228 if (!new_session->log_mask) {
1229 err = -ENOMEM;
1230 goto fail_peripheral;
1231 }
1232 new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
1233 GFP_KERNEL);
1234 if (!new_session->event_mask) {
1235 err = -ENOMEM;
1236 goto fail_peripheral;
1237 }
1238 new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
1239 GFP_KERNEL);
1240 if (!new_session->msg_mask) {
1241 err = -ENOMEM;
1242 goto fail_peripheral;
1243 }
1244
1245 err = diag_log_mask_copy(new_session->log_mask, &log_mask);
1246 if (err) {
1247 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1248 "return value of log copy. err %d\n", err);
1249 goto fail_peripheral;
1250 }
1251 err = diag_event_mask_copy(new_session->event_mask, &event_mask);
1252 if (err) {
1253 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1254 "return value of event copy. err %d\n", err);
1255 goto fail_peripheral;
1256 }
1257 err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
1258 if (err) {
1259 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1260 "return value of msg copy. err %d\n", err);
1261 goto fail_peripheral;
1262 }
1263 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1264 if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
1265 continue;
1266 if (driver->md_session_map[i] != NULL) {
1267 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1268 "another instance present for %d\n", i);
1269 err = -EEXIST;
1270 goto fail_peripheral;
1271 }
1272 new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
1273 driver->md_session_map[i] = new_session;
1274 driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
1275 }
1276 setup_timer(&new_session->hdlc_reset_timer,
1277 diag_md_hdlc_reset_timer_func,
1278 new_session->pid);
1279
1280 driver->md_session_mode = DIAG_MD_PERIPHERAL;
1281 mutex_unlock(&driver->md_session_lock);
1282 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1283 "created session in peripheral mode\n");
1284 return 0;
1285
1286fail_peripheral:
1287 diag_log_mask_free(new_session->log_mask);
1288 kfree(new_session->log_mask);
1289 new_session->log_mask = NULL;
1290 diag_event_mask_free(new_session->event_mask);
1291 kfree(new_session->event_mask);
1292 new_session->event_mask = NULL;
1293 diag_msg_mask_free(new_session->msg_mask);
1294 kfree(new_session->msg_mask);
1295 new_session->msg_mask = NULL;
1296 kfree(new_session);
1297 new_session = NULL;
1298 mutex_unlock(&driver->md_session_lock);
1299 return err;
1300}
1301
/*
 * Tear down a memory-device session: detach it from every slot of
 * md_session_map, drop its bits from md_session_mask, free its
 * log/msg/event mask copies, stop its HDLC reset timer and free the
 * session itself. Recomputes md_session_mode from whatever sessions
 * remain. Safe to call with NULL. All bookkeeping is done under
 * md_session_lock.
 */
static void diag_md_session_close(struct diag_md_session_t *session_info)
{
	int i;
	uint8_t found = 0;

	if (!session_info)
		return;

	mutex_lock(&driver->md_session_lock);
	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		if (driver->md_session_map[i] != session_info)
			continue;
		driver->md_session_map[i] = NULL;
		driver->md_session_mask &= ~session_info->peripheral_mask;
	}
	/* Release the per-session copies of the masks. */
	diag_log_mask_free(session_info->log_mask);
	kfree(session_info->log_mask);
	session_info->log_mask = NULL;
	diag_msg_mask_free(session_info->msg_mask);
	kfree(session_info->msg_mask);
	session_info->msg_mask = NULL;
	diag_event_mask_free(session_info->event_mask);
	kfree(session_info->event_mask);
	session_info->event_mask = NULL;
	del_timer(&session_info->hdlc_reset_timer);

	/* Any session left in the map keeps the driver in peripheral mode. */
	for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
		if (driver->md_session_map[i] != NULL)
			found = 1;
	}

	driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
	kfree(session_info);
	session_info = NULL;
	mutex_unlock(&driver->md_session_lock);
	DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
}
1339
1340struct diag_md_session_t *diag_md_session_get_pid(int pid)
1341{
1342 int i;
1343
1344 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1345 if (driver->md_session_map[i] &&
1346 driver->md_session_map[i]->pid == pid)
1347 return driver->md_session_map[i];
1348 }
1349 return NULL;
1350}
1351
1352struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
1353{
1354 if (peripheral >= NUM_MD_SESSIONS)
1355 return NULL;
1356 return driver->md_session_map[peripheral];
1357}
1358
1359static int diag_md_peripheral_switch(struct diag_md_session_t *session_info,
1360 int peripheral_mask, int req_mode) {
1361 int i, bit = 0;
1362
1363 if (!session_info)
1364 return -EINVAL;
1365 if (req_mode != DIAG_USB_MODE || req_mode != DIAG_MEMORY_DEVICE_MODE)
1366 return -EINVAL;
1367
1368 /*
1369 * check that md_session_map for i == session_info,
1370 * if not then race condition occurred and bail
1371 */
1372 mutex_lock(&driver->md_session_lock);
1373 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1374 bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
1375 if (!bit)
1376 continue;
1377 if (req_mode == DIAG_USB_MODE) {
1378 if (driver->md_session_map[i] != session_info) {
1379 mutex_unlock(&driver->md_session_lock);
1380 return -EINVAL;
1381 }
1382 driver->md_session_map[i] = NULL;
1383 driver->md_session_mask &= ~bit;
1384 session_info->peripheral_mask &= ~bit;
1385
1386 } else {
1387 if (driver->md_session_map[i] != NULL) {
1388 mutex_unlock(&driver->md_session_lock);
1389 return -EINVAL;
1390 }
1391 driver->md_session_map[i] = session_info;
1392 driver->md_session_mask |= bit;
1393 session_info->peripheral_mask |= bit;
1394
1395 }
1396 }
1397
1398 driver->md_session_mode = DIAG_MD_PERIPHERAL;
1399 mutex_unlock(&driver->md_session_lock);
1400 DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
1401 peripheral_mask, req_mode);
1402}
1403
/*
 * Decide whether a requested logging-mode switch (USB <-> memory
 * device) is permitted given the current mode and any active md
 * sessions, and perform the required session bookkeeping (create,
 * close, or partial peripheral switch). On success *change_mode is set
 * to 1 when the caller must also switch the mux, 0 when no mux change
 * is needed. Returns 0 on success or a negative errno.
 */
static int diag_md_session_check(int curr_mode, int req_mode,
				 const struct diag_logging_mode_param_t *param,
				 uint8_t *change_mode)
{
	int i, bit = 0, err = 0;
	int change_mask = 0;
	struct diag_md_session_t *session_info = NULL;

	if (!param || !change_mode)
		return -EIO;

	*change_mode = 0;

	switch (curr_mode) {
	case DIAG_USB_MODE:
	case DIAG_MEMORY_DEVICE_MODE:
	case DIAG_MULTI_MODE:
		break;
	default:
		return -EINVAL;
	}

	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
		return -EINVAL;

	if (req_mode == DIAG_USB_MODE) {
		if (curr_mode == DIAG_USB_MODE)
			return 0;
		/* No md session active at all: a plain mux switch suffices. */
		if (driver->md_session_mode == DIAG_MD_NONE
		    && driver->md_session_mask == 0 && driver->logging_mask) {
			*change_mode = 1;
			return 0;
		}

		/*
		 * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
		 * Check if requested peripherals are already in usb mode
		 */
		for (i = 0; i < NUM_MD_SESSIONS; i++) {
			bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
			if (!bit)
				continue;
			if (bit & driver->logging_mask)
				change_mask |= bit;
		}
		if (!change_mask)
			return 0;

		/*
		 * Change is needed. Check if this md_session has set all the
		 * requested peripherals. If another md session set a requested
		 * peripheral then we cannot switch that peripheral to USB.
		 * If this session owns all the requested peripherals, then
		 * call function to switch the modes/masks for the md_session
		 */
		session_info = diag_md_session_get_pid(current->tgid);
		if (!session_info) {
			*change_mode = 1;
			return 0;
		}
		if ((change_mask & session_info->peripheral_mask)
							!= change_mask) {
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
			    "Another MD Session owns a requested peripheral\n");
			return -EINVAL;
		}
		*change_mode = 1;

		/* If all peripherals are being set to USB Mode, call close */
		/*
		 * (When the session still owns peripherals outside
		 * change_mask, only those in change_mask are switched;
		 * otherwise the whole session moves to USB and is closed.)
		 */
		if (~change_mask & session_info->peripheral_mask) {
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else
			diag_md_session_close(session_info);

		return err;

	} else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
		/*
		 * Get bit mask that represents what peripherals already have
		 * been set. Check that requested peripherals already set are
		 * owned by this md session
		 */
		change_mask = driver->md_session_mask & param->peripheral_mask;
		session_info = diag_md_session_get_pid(current->tgid);

		if (session_info) {
			if ((session_info->peripheral_mask & change_mask)
							!= change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
				    "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else {
			if (change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
				    "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			err = diag_md_session_create(DIAG_MD_PERIPHERAL,
				param->peripheral_mask, DIAG_LOCAL_PROC);
		}
		*change_mode = 1;
		return err;
	}
	return -EINVAL;
}
1513
1514static uint32_t diag_translate_mask(uint32_t peripheral_mask)
1515{
1516 uint32_t ret = 0;
1517
1518 if (peripheral_mask & DIAG_CON_APSS)
1519 ret |= (1 << APPS_DATA);
1520 if (peripheral_mask & DIAG_CON_MPSS)
1521 ret |= (1 << PERIPHERAL_MODEM);
1522 if (peripheral_mask & DIAG_CON_LPASS)
1523 ret |= (1 << PERIPHERAL_LPASS);
1524 if (peripheral_mask & DIAG_CON_WCNSS)
1525 ret |= (1 << PERIPHERAL_WCNSS);
1526 if (peripheral_mask & DIAG_CON_SENSORS)
1527 ret |= (1 << PERIPHERAL_SENSORS);
1528 if (peripheral_mask & DIAG_CON_WDSP)
1529 ret |= (1 << PERIPHERAL_WDSP);
Sreelakshmi Gownipalli588a31d2016-11-02 13:33:43 -07001530 if (peripheral_mask & DIAG_CON_CDSP)
1531 ret |= (1 << PERIPHERAL_CDSP);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001532
1533 return ret;
1534}
1535
/*
 * Handle DIAG_IOCTL_SWITCH_LOGGING: translate the requested mode into a
 * mux mode (USB or memory device), reconcile it with any existing md
 * sessions via diag_md_session_check(), then switch the mux and update
 * real-time voting. Called with diagchar_mutex held by the ioctl path.
 * Returns 0 on success or a negative errno.
 */
static int diag_switch_logging(struct diag_logging_mode_param_t *param)
{
	int new_mode;
	int curr_mode;
	int err = 0;
	uint8_t do_switch = 1;
	uint32_t peripheral_mask = 0;

	if (!param)
		return -EINVAL;

	if (!param->peripheral_mask) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			"asking for mode switch with no peripheral mask set\n");
		return -EINVAL;
	}

	/* Convert DIAG_CON_* bits into internal peripheral bit positions. */
	peripheral_mask = diag_translate_mask(param->peripheral_mask);
	param->peripheral_mask = peripheral_mask;

	/* Callback/UART/socket modes are all served by the md mux path. */
	switch (param->req_mode) {
	case CALLBACK_MODE:
	case UART_MODE:
	case SOCKET_MODE:
	case MEMORY_DEVICE_MODE:
		new_mode = DIAG_MEMORY_DEVICE_MODE;
		break;
	case USB_MODE:
		new_mode = DIAG_USB_MODE;
		break;
	default:
		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
		       __func__, param->req_mode);
		return -EINVAL;
	}

	curr_mode = driver->logging_mode;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "request to switch logging from %d mask:%0x to %d mask:%0x\n",
		 curr_mode, driver->md_session_mask, new_mode, peripheral_mask);

	err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "err from diag_md_session_check, err: %d\n", err);
		return err;
	}

	if (do_switch == 0) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "not switching modes c: %d n: %d\n",
			 curr_mode, new_mode);
		return 0;
	}

	diag_ws_reset(DIAG_WS_MUX);
	err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
	if (err) {
		pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
		       __func__, curr_mode, new_mode, err);
		/* Mux switch failed: restore the previous logging mode. */
		driver->logging_mode = curr_mode;
		goto fail;
	}
	driver->logging_mode = new_mode;
	driver->logging_mask = peripheral_mask;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);

	/* Update to take peripheral_mask */
	if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
		diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
					   MODE_REALTIME, ALL_PROC);
	} else {
		diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
				      ALL_PROC);
	}

	if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
	      curr_mode == DIAG_USB_MODE)) {
		queue_work(driver->diag_real_time_wq,
			   &driver->diag_real_time_work);
	}

	return 0;
fail:
	return err;
}
1623
1624static int diag_ioctl_dci_reg(unsigned long ioarg)
1625{
1626 int result = -EINVAL;
1627 struct diag_dci_reg_tbl_t dci_reg_params;
1628
1629 if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
1630 sizeof(struct diag_dci_reg_tbl_t)))
1631 return -EFAULT;
1632
1633 result = diag_dci_register_client(&dci_reg_params);
1634
1635 return result;
1636}
1637
1638static int diag_ioctl_dci_health_stats(unsigned long ioarg)
1639{
1640 int result = -EINVAL;
1641 struct diag_dci_health_stats_proc stats;
1642
1643 if (copy_from_user(&stats, (void __user *)ioarg,
1644 sizeof(struct diag_dci_health_stats_proc)))
1645 return -EFAULT;
1646
1647 result = diag_dci_copy_health_stats(&stats);
1648 if (result == DIAG_DCI_NO_ERROR) {
1649 if (copy_to_user((void __user *)ioarg, &stats,
1650 sizeof(struct diag_dci_health_stats_proc)))
1651 return -EFAULT;
1652 }
1653
1654 return result;
1655}
1656
1657static int diag_ioctl_dci_log_status(unsigned long ioarg)
1658{
1659 struct diag_log_event_stats le_stats;
1660 struct diag_dci_client_tbl *dci_client = NULL;
1661
1662 if (copy_from_user(&le_stats, (void __user *)ioarg,
1663 sizeof(struct diag_log_event_stats)))
1664 return -EFAULT;
1665
1666 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1667 if (!dci_client)
1668 return DIAG_DCI_NOT_SUPPORTED;
1669 le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
1670 if (copy_to_user((void __user *)ioarg, &le_stats,
1671 sizeof(struct diag_log_event_stats)))
1672 return -EFAULT;
1673
1674 return DIAG_DCI_NO_ERROR;
1675}
1676
1677static int diag_ioctl_dci_event_status(unsigned long ioarg)
1678{
1679 struct diag_log_event_stats le_stats;
1680 struct diag_dci_client_tbl *dci_client = NULL;
1681
1682 if (copy_from_user(&le_stats, (void __user *)ioarg,
1683 sizeof(struct diag_log_event_stats)))
1684 return -EFAULT;
1685
1686 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1687 if (!dci_client)
1688 return DIAG_DCI_NOT_SUPPORTED;
1689
1690 le_stats.is_set = diag_dci_query_event_mask(dci_client, le_stats.code);
1691 if (copy_to_user((void __user *)ioarg, &le_stats,
1692 sizeof(struct diag_log_event_stats)))
1693 return -EFAULT;
1694
1695 return DIAG_DCI_NO_ERROR;
1696}
1697
1698static int diag_ioctl_lsm_deinit(void)
1699{
1700 int i;
1701
1702 for (i = 0; i < driver->num_clients; i++)
1703 if (driver->client_map[i].pid == current->tgid)
1704 break;
1705
1706 if (i == driver->num_clients)
1707 return -EINVAL;
1708
1709 driver->data_ready[i] |= DEINIT_TYPE;
1710 wake_up_interruptible(&driver->wait_q);
1711
1712 return 1;
1713}
1714
1715static int diag_ioctl_vote_real_time(unsigned long ioarg)
1716{
1717 int real_time = 0;
1718 int temp_proc = ALL_PROC;
1719 struct real_time_vote_t vote;
1720 struct diag_dci_client_tbl *dci_client = NULL;
1721
1722 if (copy_from_user(&vote, (void __user *)ioarg,
1723 sizeof(struct real_time_vote_t)))
1724 return -EFAULT;
1725
1726 if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
1727 vote.real_time_vote > MODE_UNKNOWN ||
1728 vote.client_id < 0) {
1729 pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
1730 __func__, vote.proc, vote.real_time_vote,
1731 vote.client_id);
1732 return -EINVAL;
1733 }
1734
1735 driver->real_time_update_busy++;
1736 if (vote.proc == DIAG_PROC_DCI) {
1737 dci_client = diag_dci_get_client_entry(vote.client_id);
1738 if (!dci_client) {
1739 driver->real_time_update_busy--;
1740 return DIAG_DCI_NOT_SUPPORTED;
1741 }
1742 diag_dci_set_real_time(dci_client, vote.real_time_vote);
1743 real_time = diag_dci_get_cumulative_real_time(
1744 dci_client->client_info.token);
1745 diag_update_real_time_vote(vote.proc, real_time,
1746 dci_client->client_info.token);
1747 } else {
1748 real_time = vote.real_time_vote;
1749 temp_proc = vote.client_id;
1750 diag_update_real_time_vote(vote.proc, real_time,
1751 temp_proc);
1752 }
1753 queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
1754 return 0;
1755}
1756
/*
 * DIAG_IOCTL_GET_REAL_TIME: report the current real-time mode for the
 * queried processor. Waits (bounded retries with sleeps) for any
 * in-flight vote processing to settle before sampling; returns -EAGAIN
 * if it never settles.
 */
static int diag_ioctl_get_real_time(unsigned long ioarg)
{
	int i;
	int retry_count = 0;
	int timer = 0;
	struct real_time_query_t rt_query;

	if (copy_from_user(&rt_query, (void __user *)ioarg,
			   sizeof(struct real_time_query_t)))
		return -EFAULT;
	while (retry_count < 3) {
		if (driver->real_time_update_busy > 0) {
			retry_count++;
			/*
			 * The value 10000 was chosen empirically as an
			 * optimum value in order to give the work in
			 * diag_real_time_wq to complete processing.
			 */
			for (timer = 0; timer < 5; timer++)
				usleep_range(10000, 10100);
		} else {
			break;
		}
	}

	if (driver->real_time_update_busy > 0)
		return -EAGAIN;

	if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
		pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
		       __func__);
		return -EINVAL;
	}
	rt_query.real_time = driver->real_time_mode[rt_query.proc];
	/*
	 * For the local processor, if any of the peripherals is in buffering
	 * mode, overwrite the value of real time with UNKNOWN_MODE
	 */
	if (rt_query.proc == DIAG_LOCAL_PROC) {
		for (i = 0; i < NUM_PERIPHERALS; i++) {
			if (!driver->feature[i].peripheral_buffering)
				continue;
			switch (driver->buffering_mode[i].mode) {
			case DIAG_BUFFERING_MODE_CIRCULAR:
			case DIAG_BUFFERING_MODE_THRESHOLD:
				rt_query.real_time = MODE_UNKNOWN;
				break;
			}
		}
	}

	if (copy_to_user((void __user *)ioarg, &rt_query,
			 sizeof(struct real_time_query_t)))
		return -EFAULT;

	return 0;
}
1814
1815static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
1816{
1817 struct diag_buffering_mode_t params;
1818
1819 if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
1820 return -EFAULT;
1821
1822 if (params.peripheral >= NUM_PERIPHERALS)
1823 return -EINVAL;
1824
1825 mutex_lock(&driver->mode_lock);
1826 driver->buffering_flag[params.peripheral] = 1;
1827 mutex_unlock(&driver->mode_lock);
1828
1829 return diag_send_peripheral_buffering_mode(&params);
1830}
1831
1832static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
1833{
1834 uint8_t peripheral;
1835
1836 if (copy_from_user(&peripheral, (void __user *)ioarg, sizeof(uint8_t)))
1837 return -EFAULT;
1838
1839 if (peripheral >= NUM_PERIPHERALS) {
1840 pr_err("diag: In %s, invalid peripheral %d\n", __func__,
1841 peripheral);
1842 return -EINVAL;
1843 }
1844
1845 if (!driver->feature[peripheral].peripheral_buffering) {
1846 pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
1847 __func__, peripheral);
1848 return -EIO;
1849 }
1850
1851 return diag_send_peripheral_drain_immediate(peripheral);
1852}
1853
1854static int diag_ioctl_dci_support(unsigned long ioarg)
1855{
1856 struct diag_dci_peripherals_t dci_support;
1857 int result = -EINVAL;
1858
1859 if (copy_from_user(&dci_support, (void __user *)ioarg,
1860 sizeof(struct diag_dci_peripherals_t)))
1861 return -EFAULT;
1862
1863 result = diag_dci_get_support_list(&dci_support);
1864 if (result == DIAG_DCI_NO_ERROR)
1865 if (copy_to_user((void __user *)ioarg, &dci_support,
1866 sizeof(struct diag_dci_peripherals_t)))
1867 return -EFAULT;
1868
1869 return result;
1870}
1871
/*
 * DIAG_IOCTL_HDLC_TOGGLE: enable/disable HDLC encoding either for the
 * caller's md session (when one exists) or globally, then notify md
 * clients of the change.
 * NOTE(review): session_info is looked up before md_session_lock is
 * taken; a session closing concurrently could free it between the
 * lookup and the locked write — confirm ordering against
 * diag_md_session_close().
 */
static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
{
	uint8_t hdlc_support;
	struct diag_md_session_t *session_info = NULL;

	session_info = diag_md_session_get_pid(current->tgid);
	if (copy_from_user(&hdlc_support, (void __user *)ioarg,
				sizeof(uint8_t)))
		return -EFAULT;
	mutex_lock(&driver->hdlc_disable_mutex);
	if (session_info) {
		mutex_lock(&driver->md_session_lock);
		session_info->hdlc_disabled = hdlc_support;
		mutex_unlock(&driver->md_session_lock);
	} else
		driver->hdlc_disabled = hdlc_support;
	mutex_unlock(&driver->hdlc_disable_mutex);
	diag_update_md_clients(HDLC_SUPPORT_TYPE);

	return 0;
}
1893
1894static int diag_ioctl_register_callback(unsigned long ioarg)
1895{
1896 int err = 0;
1897 struct diag_callback_reg_t reg;
1898
1899 if (copy_from_user(&reg, (void __user *)ioarg,
1900 sizeof(struct diag_callback_reg_t))) {
1901 return -EFAULT;
1902 }
1903
1904 if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
1905 pr_err("diag: In %s, invalid proc %d for callback registration\n",
1906 __func__, reg.proc);
1907 return -EINVAL;
1908 }
1909
1910 if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
1911 return -EIO;
1912
1913 return err;
1914}
1915
1916static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
1917{
1918 int i;
1919 int err = 0;
1920 uint32_t count = 0;
1921 struct diag_cmd_reg_entry_t *entries = NULL;
1922 const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
1923
1924
1925 if (!reg_tbl) {
1926 pr_err("diag: In %s, invalid registration table\n", __func__);
1927 return -EINVAL;
1928 }
1929
1930 count = reg_tbl->count;
1931 if ((UINT_MAX / entry_len) < count) {
1932 pr_warn("diag: In %s, possbile integer overflow.\n", __func__);
1933 return -EFAULT;
1934 }
1935
1936 entries = kzalloc(count * entry_len, GFP_KERNEL);
1937 if (!entries)
1938 return -ENOMEM;
1939
1940
1941 err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
1942 if (err) {
1943 pr_err("diag: In %s, error copying data from userspace, err: %d\n",
1944 __func__, err);
1945 kfree(entries);
1946 return -EFAULT;
1947 }
1948
1949 for (i = 0; i < count; i++) {
1950 err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
1951 if (err) {
1952 pr_err("diag: In %s, unable to register command, err: %d\n",
1953 __func__, err);
1954 break;
1955 }
1956 }
1957
1958 kfree(entries);
1959 return err;
1960}
1961
1962static int diag_ioctl_cmd_reg(unsigned long ioarg)
1963{
1964 struct diag_cmd_reg_tbl_t reg_tbl;
1965
1966 if (copy_from_user(&reg_tbl, (void __user *)ioarg,
1967 sizeof(struct diag_cmd_reg_tbl_t))) {
1968 return -EFAULT;
1969 }
1970
1971 return diag_cmd_register_tbl(&reg_tbl);
1972}
1973
/* Remove every command registration owned by the calling process. */
static int diag_ioctl_cmd_dereg(void)
{
	diag_cmd_remove_reg_by_pid(current->tgid);
	return 0;
}
1979
1980#ifdef CONFIG_COMPAT
1981/*
1982 * @sync_obj_name: name of the synchronization object associated with this proc
1983 * @count: number of entries in the bind
1984 * @params: the actual packet registrations
1985 */
struct diag_cmd_reg_tbl_compat_t {
	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE]; /* sync object name */
	uint32_t count; /* number of entries pointed to by 'entries' */
	compat_uptr_t entries; /* 32-bit user pointer to the entries */
};
1991
1992static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
1993{
1994 struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
1995 struct diag_cmd_reg_tbl_t reg_tbl;
1996
1997 if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
1998 sizeof(struct diag_cmd_reg_tbl_compat_t))) {
1999 return -EFAULT;
2000 }
2001
2002 strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
2003 MAX_SYNC_OBJ_NAME_SIZE);
2004 reg_tbl.count = reg_tbl_compat.count;
2005 reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
2006 (uintptr_t)reg_tbl_compat.entries;
2007
2008 return diag_cmd_register_tbl(&reg_tbl);
2009}
2010
/*
 * 32-bit userspace ioctl entry point (CONFIG_COMPAT). Mirrors
 * diagchar_ioctl(); only DIAG_IOCTL_COMMAND_REG needs a compat
 * translation of the registration-table layout.
 */
long diagchar_compat_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int result = -EINVAL;
	int client_id = 0;
	uint16_t delayed_rsp_id = 0;
	uint16_t remote_dev;
	struct diag_dci_client_tbl *dci_client = NULL;
	struct diag_logging_mode_param_t mode_param;

	switch (iocmd) {
	case DIAG_IOCTL_COMMAND_REG:
		result = diag_ioctl_cmd_reg_compat(ioarg);
		break;
	case DIAG_IOCTL_COMMAND_DEREG:
		result = diag_ioctl_cmd_dereg();
		break;
	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
		delayed_rsp_id = diag_get_next_delayed_rsp_id();
		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
				 sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 0;
		break;
	case DIAG_IOCTL_DCI_REG:
		result = diag_ioctl_dci_reg(ioarg);
		break;
	case DIAG_IOCTL_DCI_DEINIT:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		dci_client = diag_dci_get_client_entry(client_id);
		if (!dci_client) {
			mutex_unlock(&driver->dci_mutex);
			return DIAG_DCI_NOT_SUPPORTED;
		}
		result = diag_dci_deinit_client(dci_client);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_SUPPORT:
		result = diag_ioctl_dci_support(ioarg);
		break;
	case DIAG_IOCTL_DCI_HEALTH_STATS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_health_stats(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_LOG_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_log_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_EVENT_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_event_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_LOGS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_log_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user(&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_event_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_LSM_DEINIT:
		result = diag_ioctl_lsm_deinit();
		break;
	case DIAG_IOCTL_SWITCH_LOGGING:
		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
				   sizeof(mode_param)))
			return -EFAULT;
		mutex_lock(&driver->diagchar_mutex);
		result = diag_switch_logging(&mode_param);
		mutex_unlock(&driver->diagchar_mutex);
		break;
	case DIAG_IOCTL_REMOTE_DEV:
		remote_dev = diag_get_remote_device_mask();
		if (copy_to_user((void __user *)ioarg, &remote_dev,
				 sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 1; /* this ioctl reports success as 1 */
		break;
	case DIAG_IOCTL_VOTE_REAL_TIME:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_vote_real_time(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_GET_REAL_TIME:
		result = diag_ioctl_get_real_time(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
		result = diag_ioctl_set_buffering_mode(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
		result = diag_ioctl_peripheral_drain_immediate(ioarg);
		break;
	case DIAG_IOCTL_REGISTER_CALLBACK:
		result = diag_ioctl_register_callback(ioarg);
		break;
	case DIAG_IOCTL_HDLC_TOGGLE:
		result = diag_ioctl_hdlc_toggle(ioarg);
		break;
	}
	return result;
}
2134#endif
2135
/*
 * diagchar_ioctl - native (non-compat) ioctl entry point for the diag
 * char device.
 *
 * Dispatches each DIAG_IOCTL_* command to its helper.  DCI-related
 * commands are serialized under driver->dci_mutex; the logging-mode
 * switch is serialized under driver->diagchar_mutex.  Returns 0 or a
 * command-specific positive value on success, negative errno on failure
 * (-EINVAL for unknown commands).
 */
long diagchar_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int result = -EINVAL;
	int client_id = 0;
	uint16_t delayed_rsp_id;
	uint16_t remote_dev;
	struct diag_dci_client_tbl *dci_client = NULL;
	struct diag_logging_mode_param_t mode_param;

	switch (iocmd) {
	case DIAG_IOCTL_COMMAND_REG:
		result = diag_ioctl_cmd_reg(ioarg);
		break;
	case DIAG_IOCTL_COMMAND_DEREG:
		result = diag_ioctl_cmd_dereg();
		break;
	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
		/* Hand the next delayed-response id back to userspace. */
		delayed_rsp_id = diag_get_next_delayed_rsp_id();
		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
				 sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 0;
		break;
	case DIAG_IOCTL_DCI_REG:
		result = diag_ioctl_dci_reg(ioarg);
		break;
	case DIAG_IOCTL_DCI_DEINIT:
		/*
		 * Look up and tear down the DCI client entry for the id
		 * passed from userspace; the whole sequence must stay
		 * under dci_mutex so the entry cannot vanish in between.
		 */
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		dci_client = diag_dci_get_client_entry(client_id);
		if (!dci_client) {
			mutex_unlock(&driver->dci_mutex);
			return DIAG_DCI_NOT_SUPPORTED;
		}
		result = diag_dci_deinit_client(dci_client);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_SUPPORT:
		result = diag_ioctl_dci_support(ioarg);
		break;
	case DIAG_IOCTL_DCI_HEALTH_STATS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_health_stats(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_LOG_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_log_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_EVENT_STATUS:
		result = diag_ioctl_dci_event_status(ioarg);
		break;
	case DIAG_IOCTL_DCI_CLEAR_LOGS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_log_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user(&client_id, (void __user *)ioarg,
			sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_event_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_LSM_DEINIT:
		result = diag_ioctl_lsm_deinit();
		break;
	case DIAG_IOCTL_SWITCH_LOGGING:
		/* Copy the requested mode before taking the mutex. */
		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
				   sizeof(mode_param)))
			return -EFAULT;
		mutex_lock(&driver->diagchar_mutex);
		result = diag_switch_logging(&mode_param);
		mutex_unlock(&driver->diagchar_mutex);
		break;
	case DIAG_IOCTL_REMOTE_DEV:
		/* NOTE: success is reported as 1 here, not 0. */
		remote_dev = diag_get_remote_device_mask();
		if (copy_to_user((void __user *)ioarg, &remote_dev,
			sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 1;
		break;
	case DIAG_IOCTL_VOTE_REAL_TIME:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_vote_real_time(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_GET_REAL_TIME:
		result = diag_ioctl_get_real_time(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
		result = diag_ioctl_set_buffering_mode(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
		result = diag_ioctl_peripheral_drain_immediate(ioarg);
		break;
	case DIAG_IOCTL_REGISTER_CALLBACK:
		result = diag_ioctl_register_callback(ioarg);
		break;
	case DIAG_IOCTL_HDLC_TOGGLE:
		result = diag_ioctl_hdlc_toggle(ioarg);
		break;
	}
	return result;
}
2257
/*
 * diag_process_apps_data_hdlc - HDLC-encode an apps packet into the
 * shared aggregation buffer (hdlc_data) and flush it to the mux when
 * the buffer fills up or the packet is a response.
 *
 * @buf:      raw (unencoded) packet
 * @len:      length of @buf in bytes, must be > 0
 * @pkt_type: DATA_TYPE_* of the packet; DATA_TYPE_RESPONSE forces an
 *            immediate flush
 *
 * Returns PKT_ALLOC on success, PKT_DROP when no pool buffer is
 * available, -EIO/-EBADMSG on invalid input or mux write failure.
 * Caller (diag_user_process_apps_data) holds apps_data_mutex and
 * hdlc_disable_mutex, which serialize access to hdlc_data.
 */
static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
				       int pkt_type)
{
	int err = 0;
	int ret = PKT_DROP;
	struct diag_apps_data_t *data = &hdlc_data;
	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
	/*
	 * The maximum encoded size of the buffer can be atmost twice the length
	 * of the packet. Add three bytes for footer - 16 bit CRC (2 bytes) +
	 * delimiter (1 byte).
	 */
	const uint32_t max_encoded_size = ((2 * len) + 3);

	if (!buf || len <= 0) {
		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
		       __func__, buf, len);
		return -EIO;
	}

	/* A packet that can never fit in the HDLC buffer is rejected. */
	if (max_encoded_size > DIAG_MAX_HDLC_BUF_SIZE) {
		pr_err_ratelimited("diag: In %s, encoded data is larger %d than the buffer size %d\n",
		       __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
		return -EBADMSG;
	}

	send.state = DIAG_STATE_START;
	send.pkt = buf;
	send.last = (void *)(buf + len - 1);
	send.terminate = 1;

	/* Lazily allocate the aggregation buffer on first use. */
	if (!data->buf)
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
	if (!data->buf) {
		ret = PKT_DROP;
		goto fail_ret;
	}

	/*
	 * Not enough room left for the worst-case encoding: flush the
	 * currently aggregated data and start over in a fresh buffer.
	 */
	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}
	}

	enc.dest = data->buf + data->len;
	enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
	diag_hdlc_encode(&send, &enc);

	/*
	 * This is to check if after HDLC encoding, we are still within
	 * the limits of aggregation buffer. If not, we write out the
	 * current buffer and start aggregation in a newly allocated
	 * buffer.
	 */
	if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
					       DIAG_MAX_HDLC_BUF_SIZE)) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}

		/* Re-encode the same packet into the new, empty buffer. */
		enc.dest = data->buf + data->len;
		enc.dest_last = (void *)(data->buf + data->len +
					 max_encoded_size);
		diag_hdlc_encode(&send, &enc);
	}

	/* Clamp the new aggregate length to the buffer size. */
	data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
			DIAG_MAX_HDLC_BUF_SIZE) ?
			((uintptr_t)enc.dest - (uintptr_t)data->buf) :
			DIAG_MAX_HDLC_BUF_SIZE;

	/* Responses must not be delayed by aggregation: flush now. */
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
	}

	return PKT_ALLOC;

fail_free_buf:
	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
	data->buf = NULL;
	data->len = 0;

fail_ret:
	return ret;
}
2377
2378static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
2379 int pkt_type)
2380{
2381 int err = 0;
2382 int ret = PKT_DROP;
2383 struct diag_pkt_frame_t header;
2384 struct diag_apps_data_t *data = &non_hdlc_data;
2385 /*
2386 * The maximum packet size, when the data is non hdlc encoded is equal
2387 * to the size of the packet frame header and the length. Add 1 for the
2388 * delimiter 0x7E at the end.
2389 */
2390 const uint32_t max_pkt_size = sizeof(header) + len + 1;
2391
2392 if (!buf || len <= 0) {
2393 pr_err("diag: In %s, invalid buf: %pK len: %d\n",
2394 __func__, buf, len);
2395 return -EIO;
2396 }
2397
2398 if (!data->buf) {
2399 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2400 APF_DIAG_PADDING,
2401 POOL_TYPE_HDLC);
2402 if (!data->buf) {
2403 ret = PKT_DROP;
2404 goto fail_ret;
2405 }
2406 }
2407
2408 if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
2409 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2410 data->ctxt);
2411 if (err) {
2412 ret = -EIO;
2413 goto fail_free_buf;
2414 }
2415 data->buf = NULL;
2416 data->len = 0;
2417 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2418 APF_DIAG_PADDING,
2419 POOL_TYPE_HDLC);
2420 if (!data->buf) {
2421 ret = PKT_DROP;
2422 goto fail_ret;
2423 }
2424 }
2425
2426 header.start = CONTROL_CHAR;
2427 header.version = 1;
2428 header.length = len;
2429 memcpy(data->buf + data->len, &header, sizeof(header));
2430 data->len += sizeof(header);
2431 memcpy(data->buf + data->len, buf, len);
2432 data->len += len;
2433 *(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
2434 data->len += sizeof(uint8_t);
2435 if (pkt_type == DATA_TYPE_RESPONSE) {
2436 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2437 data->ctxt);
2438 if (err) {
2439 ret = -EIO;
2440 goto fail_free_buf;
2441 }
2442 data->buf = NULL;
2443 data->len = 0;
2444 }
2445
2446 return PKT_ALLOC;
2447
2448fail_free_buf:
2449 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
2450 data->buf = NULL;
2451 data->len = 0;
2452
2453fail_ret:
2454 return ret;
2455}
2456
2457static int diag_user_process_dci_data(const char __user *buf, int len)
2458{
2459 int err = 0;
2460 const int mempool = POOL_TYPE_USER;
2461 unsigned char *user_space_data = NULL;
2462
2463 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2464 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2465 __func__, buf, len);
2466 return -EBADMSG;
2467 }
2468
2469 user_space_data = diagmem_alloc(driver, len, mempool);
2470 if (!user_space_data)
2471 return -ENOMEM;
2472
2473 err = copy_from_user(user_space_data, buf, len);
2474 if (err) {
2475 pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
2476 __func__, err);
2477 err = DIAG_DCI_SEND_DATA_FAIL;
2478 goto fail;
2479 }
2480
2481 err = diag_process_dci_transaction(user_space_data, len);
2482fail:
2483 diagmem_free(driver, user_space_data, mempool);
2484 user_space_data = NULL;
2485 return err;
2486}
2487
2488static int diag_user_process_dci_apps_data(const char __user *buf, int len,
2489 int pkt_type)
2490{
2491 int err = 0;
2492 const int mempool = POOL_TYPE_COPY;
2493 unsigned char *user_space_data = NULL;
2494
2495 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2496 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2497 __func__, buf, len);
2498 return -EBADMSG;
2499 }
2500
2501 pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
2502 if (!pkt_type) {
2503 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
2504 __func__, pkt_type);
2505 return -EBADMSG;
2506 }
2507
2508 user_space_data = diagmem_alloc(driver, len, mempool);
2509 if (!user_space_data)
2510 return -ENOMEM;
2511
2512 err = copy_from_user(user_space_data, buf, len);
2513 if (err) {
2514 pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
2515 __func__, err);
2516 goto fail;
2517 }
2518
2519 diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
2520fail:
2521 diagmem_free(driver, user_space_data, mempool);
2522 user_space_data = NULL;
2523 return err;
2524}
2525
2526static int diag_user_process_raw_data(const char __user *buf, int len)
2527{
2528 int err = 0;
2529 int ret = 0;
2530 int token_offset = 0;
2531 int remote_proc = 0;
2532 const int mempool = POOL_TYPE_COPY;
2533 unsigned char *user_space_data = NULL;
2534 struct diag_md_session_t *info = NULL;
2535
2536 if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
2537 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2538 __func__, buf, len);
2539 return -EBADMSG;
2540 }
2541
2542 user_space_data = diagmem_alloc(driver, len, mempool);
2543 if (!user_space_data)
2544 return -ENOMEM;
2545
2546 err = copy_from_user(user_space_data, buf, len);
2547 if (err) {
2548 pr_err("diag: copy failed for user space data\n");
2549 goto fail;
2550 }
2551
2552 /* Check for proc_type */
2553 remote_proc = diag_get_remote(*(int *)user_space_data);
2554 if (remote_proc) {
2555 token_offset = sizeof(int);
2556 if (len <= MIN_SIZ_ALLOW) {
2557 pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
2558 __func__, len);
2559 diagmem_free(driver, user_space_data, mempool);
2560 user_space_data = NULL;
2561 return -EBADMSG;
2562 }
2563 len -= sizeof(int);
2564 }
2565 if (driver->mask_check) {
2566 if (!mask_request_validate(user_space_data +
2567 token_offset)) {
2568 pr_alert("diag: mask request Invalid\n");
2569 diagmem_free(driver, user_space_data, mempool);
2570 user_space_data = NULL;
2571 return -EFAULT;
2572 }
2573 }
2574 if (remote_proc) {
2575 ret = diag_send_raw_data_remote(remote_proc,
2576 (void *)(user_space_data + token_offset),
2577 len, USER_SPACE_RAW_DATA);
2578 if (ret) {
2579 pr_err("diag: Error sending data to remote proc %d, err: %d\n",
2580 remote_proc, ret);
2581 }
2582 } else {
2583 wait_event_interruptible(driver->wait_q,
2584 (driver->in_busy_pktdata == 0));
2585 info = diag_md_session_get_pid(current->tgid);
2586 ret = diag_process_apps_pkt(user_space_data, len, info);
2587 if (ret == 1)
2588 diag_send_error_rsp((void *)(user_space_data), len);
2589 }
2590fail:
2591 diagmem_free(driver, user_space_data, mempool);
2592 user_space_data = NULL;
2593 return ret;
2594}
2595
2596static int diag_user_process_userspace_data(const char __user *buf, int len)
2597{
2598 int err = 0;
2599 int max_retries = 3;
2600 int retry_count = 0;
2601 int remote_proc = 0;
2602 int token_offset = 0;
2603 struct diag_md_session_t *session_info = NULL;
2604 uint8_t hdlc_disabled;
2605
2606 if (!buf || len <= 0 || len > USER_SPACE_DATA) {
2607 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2608 __func__, buf, len);
2609 return -EBADMSG;
2610 }
2611
2612 do {
2613 if (!driver->user_space_data_busy)
2614 break;
2615 retry_count++;
2616 usleep_range(10000, 10100);
2617 } while (retry_count < max_retries);
2618
2619 if (driver->user_space_data_busy)
2620 return -EAGAIN;
2621
2622 err = copy_from_user(driver->user_space_data_buf, buf, len);
2623 if (err) {
2624 pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
2625 __func__, err);
2626 return -EIO;
2627 }
2628
2629 /* Check for proc_type */
2630 remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
2631 if (remote_proc) {
2632 if (len <= MIN_SIZ_ALLOW) {
2633 pr_err("diag: Integer underflow in %s, payload size: %d",
2634 __func__, len);
2635 return -EBADMSG;
2636 }
2637 token_offset = sizeof(int);
2638 len -= sizeof(int);
2639 }
2640
2641 /* Check masks for On-Device logging */
2642 if (driver->mask_check) {
2643 if (!mask_request_validate(driver->user_space_data_buf +
2644 token_offset)) {
2645 pr_alert("diag: mask request Invalid\n");
2646 return -EFAULT;
2647 }
2648 }
2649
2650 /* send masks to local processor now */
2651 if (!remote_proc) {
2652 session_info = diag_md_session_get_pid(current->tgid);
2653 if (!session_info) {
2654 pr_err("diag:In %s request came from invalid md session pid:%d",
2655 __func__, current->tgid);
2656 return -EINVAL;
2657 }
2658 if (session_info)
2659 hdlc_disabled = session_info->hdlc_disabled;
2660 else
2661 hdlc_disabled = driver->hdlc_disabled;
2662 if (!hdlc_disabled)
2663 diag_process_hdlc_pkt((void *)
2664 (driver->user_space_data_buf),
2665 len, session_info);
2666 else
2667 diag_process_non_hdlc_pkt((char *)
2668 (driver->user_space_data_buf),
2669 len, session_info);
2670 return 0;
2671 }
2672
2673 err = diag_process_userspace_remote(remote_proc,
2674 driver->user_space_data_buf +
2675 token_offset, len);
2676 if (err) {
2677 driver->user_space_data_busy = 0;
2678 pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
2679 remote_proc, err);
2680 }
2681
2682 return err;
2683}
2684
/*
 * diag_user_process_apps_data - handle an apps-generated log, event,
 * F3 message or (delayed) response written by a userspace client.
 *
 * @buf:      userspace buffer, bounded by DIAG_MAX_RSP_SIZE
 * @len:      length of @buf
 * @pkt_type: one of DATA_TYPE_{EVENT,F3,LOG,RESPONSE,DELAYED_RESPONSE}
 *
 * If STM logging is active for APPS_DATA and the type is in the
 * EVENT..LOG range, the data is diverted to STM; otherwise it is
 * framed (HDLC or non-HDLC, per the session setting) and aggregated
 * for the mux.  Returns 0 on success, negative errno on failure.
 */
static int diag_user_process_apps_data(const char __user *buf, int len,
				       int pkt_type)
{
	int ret = 0;
	int stm_size = 0;
	const int mempool = POOL_TYPE_COPY;
	unsigned char *user_space_data = NULL;
	struct diag_md_session_t *session_info = NULL;
	uint8_t hdlc_disabled;

	if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
				   __func__, buf, len);
		return -EBADMSG;
	}

	/* Only these data types are valid on this path. */
	switch (pkt_type) {
	case DATA_TYPE_EVENT:
	case DATA_TYPE_F3:
	case DATA_TYPE_LOG:
	case DATA_TYPE_RESPONSE:
	case DATA_TYPE_DELAYED_RESPONSE:
		break;
	default:
		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
				   __func__, pkt_type);
		return -EBADMSG;
	}

	user_space_data = diagmem_alloc(driver, len, mempool);
	if (!user_space_data) {
		diag_record_stats(pkt_type, PKT_DROP);
		return -ENOMEM;
	}

	ret = copy_from_user(user_space_data, buf, len);
	if (ret) {
		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
			 __func__, ret);
		diagmem_free(driver, user_space_data, mempool);
		user_space_data = NULL;
		diag_record_stats(pkt_type, PKT_DROP);
		return -EBADMSG;
	}

	/*
	 * STM diversion: relies on DATA_TYPE_EVENT..DATA_TYPE_LOG being
	 * a contiguous range of type values.
	 */
	if (driver->stm_state[APPS_DATA] &&
	    (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
		stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
					  len);
		if (stm_size == 0) {
			pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
				 __func__);
		}
		diagmem_free(driver, user_space_data, mempool);
		user_space_data = NULL;

		return 0;
	}

	/*
	 * Lock order: apps_data_mutex before hdlc_disable_mutex; both
	 * protect the shared aggregation state used by the framing
	 * helpers below.
	 */
	mutex_lock(&apps_data_mutex);
	mutex_lock(&driver->hdlc_disable_mutex);
	session_info = diag_md_session_get_peripheral(APPS_DATA);
	if (session_info)
		hdlc_disabled = session_info->hdlc_disabled;
	else
		hdlc_disabled = driver->hdlc_disabled;
	if (hdlc_disabled)
		ret = diag_process_apps_data_non_hdlc(user_space_data, len,
						      pkt_type);
	else
		ret = diag_process_apps_data_hdlc(user_space_data, len,
						  pkt_type);
	mutex_unlock(&driver->hdlc_disable_mutex);
	mutex_unlock(&apps_data_mutex);

	diagmem_free(driver, user_space_data, mempool);
	user_space_data = NULL;

	/* Arm the periodic drain so aggregated data eventually flushes. */
	check_drain_timer();

	if (ret == PKT_DROP)
		diag_record_stats(pkt_type, PKT_DROP);
	else if (ret == PKT_ALLOC)
		diag_record_stats(pkt_type, PKT_ALLOC);
	else
		return ret;

	return 0;
}
2774
/*
 * diagchar_read - read() entry point for the diag char device.
 *
 * Blocks until some data_ready bit is set for the calling client, then
 * copies exactly one category of pending data to userspace, in fixed
 * priority order (memory-device data, HDLC support, deinit, masks,
 * packets, DCI masks, then DCI data).  The payload is always prefixed
 * with an int describing the data type.  Returns the number of bytes
 * written to @buf, or a negative errno.
 *
 * driver->diagchar_mutex is held across the non-DCI copies; the DCI
 * section runs under driver->dci_mutex after the main unlock.
 */
static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct diag_dci_client_tbl *entry;
	struct list_head *start, *temp;
	int index = -1, i = 0, ret = 0;
	int data_type;
	int copy_dci_data = 0;
	int exit_stat = 0;
	int write_len = 0;
	struct diag_md_session_t *session_info = NULL;

	/* Locate the calling process in the registered-client table. */
	for (i = 0; i < driver->num_clients; i++)
		if (driver->client_map[i].pid == current->tgid)
			index = i;

	if (index == -1) {
		pr_err("diag: Client PID not found in table");
		return -EINVAL;
	}
	if (!buf) {
		pr_err("diag: bad address from user side\n");
		return -EFAULT;
	}
	/* Sleep until any data category becomes ready for this client. */
	wait_event_interruptible(driver->wait_q, driver->data_ready[index]);

	mutex_lock(&driver->diagchar_mutex);

	if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
	    (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
	     driver->logging_mode == DIAG_MULTI_MODE)) {
		pr_debug("diag: process woken up\n");
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;
		/* place holder for number of data field */
		ret += sizeof(int);
		session_info = diag_md_session_get_pid(current->tgid);
		exit_stat = diag_md_copy_to_user(buf, &ret, count,
						 session_info);
		goto exit;
	} else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
		/* In case, the thread wakes up and the logging mode is not
		 * memory device any more, the condition needs to be cleared.
		 */
		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
	}

	if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
		/* Report the session's hdlc_disabled setting to the client. */
		data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
		driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;

		session_info = diag_md_session_get_pid(current->tgid);
		if (session_info) {
			COPY_USER_SPACE_OR_ERR(buf+4,
					session_info->hdlc_disabled,
					sizeof(uint8_t));
			if (ret == -EFAULT)
				goto exit;
		}
		goto exit;
	}

	if (driver->data_ready[index] & DEINIT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DEINIT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= DEINIT_TYPE;
		/* Drop the mutex before tearing down this client's entry. */
		mutex_unlock(&driver->diagchar_mutex);
		diag_remove_client_entry(file);
		return ret;
	}

	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;
		write_len = diag_copy_to_user_msg_mask(buf + ret, count,
						       session_info);
		if (write_len > 0)
			ret += write_len;
		driver->data_ready[index] ^= MSG_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		/* Prefer the session's private event mask when present. */
		if (session_info && session_info->event_mask &&
		    session_info->event_mask->ptr) {
			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
					*(session_info->event_mask->ptr),
					session_info->event_mask->mask_len);
			if (ret == -EFAULT)
				goto exit;
		} else {
			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
						*(event_mask.ptr),
						event_mask.mask_len);
			if (ret == -EFAULT)
				goto exit;
		}
		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;

		write_len = diag_copy_to_user_log_mask(buf + ret, count,
						       session_info);
		if (write_len > 0)
			ret += write_len;
		driver->data_ready[index] ^= LOG_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & PKT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & PKT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(data_type));
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf + sizeof(data_type),
					*(driver->apps_req_buf),
					driver->apps_req_buf_len);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= PKT_TYPE;
		/* Request buffer consumed; allow the next apps packet. */
		driver->in_busy_pktdata = 0;
		goto exit;
	}

	if (driver->data_ready[index] & DCI_PKT_TYPE) {
		/* Copy the type of data being passed */
		data_type = driver->data_ready[index] & DCI_PKT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, *(driver->dci_pkt_buf),
					driver->dci_pkt_length);
		if (ret == -EFAULT)
			goto exit;

		driver->data_ready[index] ^= DCI_PKT_TYPE;
		driver->in_busy_dcipktdata = 0;
		goto exit;
	}

	if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
		if (ret == -EFAULT)
			goto exit;

		/* Layout: type (4) | client count (4) | composite mask. */
		COPY_USER_SPACE_OR_ERR(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
				event_mask_composite), DCI_EVENT_MASK_SIZE);
		if (ret == -EFAULT)
			goto exit;

		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
				log_mask_composite), DCI_LOG_MASK_SIZE);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
		goto exit;
	}

exit:
	mutex_unlock(&driver->diagchar_mutex);
	/*
	 * DCI data is copied after diagchar_mutex is dropped, under
	 * dci_mutex, iterating every DCI client entry owned by this
	 * process.
	 */
	if (driver->data_ready[index] & DCI_DATA_TYPE) {
		mutex_lock(&driver->dci_mutex);
		/* Copy the type of data being passed */
		data_type = driver->data_ready[index] & DCI_DATA_TYPE;
		list_for_each_safe(start, temp, &driver->dci_client_list) {
			entry = list_entry(start, struct diag_dci_client_tbl,
						track);
			if (entry->client->tgid != current->tgid)
				continue;
			if (!entry->in_service)
				continue;
			if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
			ret += sizeof(int);
			if (copy_to_user(buf + ret, &entry->client_info.token,
					 sizeof(int))) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
			ret += sizeof(int);
			copy_dci_data = 1;
			exit_stat = diag_copy_dci(buf, count, entry, &ret);
			mutex_lock(&driver->diagchar_mutex);
			driver->data_ready[index] ^= DCI_DATA_TYPE;
			mutex_unlock(&driver->diagchar_mutex);
			if (exit_stat == 1) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
		}
		mutex_unlock(&driver->dci_mutex);
		goto end;
	}
end:
	/*
	 * Flush any read that is currently pending on DCI data and
	 * command channnels. This will ensure that the next read is not
	 * missed.
	 */
	if (copy_dci_data) {
		diag_ws_on_copy_complete(DIAG_WS_DCI);
		flush_workqueue(driver->diag_dci_wq);
	}
	return ret;
}
3035
/*
 * diagchar_write - write() entry point for the diag char device.
 *
 * The first int of @buf is the packet type; the remainder is the
 * payload.  The type selects one of the diag_user_process_* handlers.
 * DCI log/event bits may be combined with regular data types, in which
 * case the DCI portion is handled first and the remaining bits fall
 * through to the apps-data path.  Returns 0 or a handler-specific
 * value on success, negative errno on failure.
 */
static ssize_t diagchar_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int err = 0;
	int pkt_type = 0;
	int payload_len = 0;
	const char __user *payload_buf = NULL;

	/*
	 * The data coming from the user space should at least have the
	 * packet type header.
	 */
	if (count < sizeof(int)) {
		pr_err("diag: In %s, client is sending short data, len: %d\n",
		       __func__, (int)count);
		return -EBADMSG;
	}

	err = copy_from_user((&pkt_type), buf, sizeof(int));
	if (err) {
		pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
				   __func__, err);
		return -EIO;
	}

	/*
	 * With USB logging selected but the cable disconnected, only
	 * DCI traffic is accepted; everything else is dropped.
	 */
	if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
		if (!((pkt_type == DCI_DATA_TYPE) ||
		    (pkt_type == DCI_PKT_TYPE) ||
		    (pkt_type & DATA_TYPE_DCI_LOG) ||
		    (pkt_type & DATA_TYPE_DCI_EVENT))) {
			pr_debug("diag: In %s, Dropping non DCI packet type\n",
				 __func__);
			return -EIO;
		}
	}

	/* Payload starts right after the leading packet-type int. */
	payload_buf = buf + sizeof(int);
	payload_len = count - sizeof(int);

	if (pkt_type == DCI_PKT_TYPE)
		return diag_user_process_dci_apps_data(payload_buf,
						       payload_len,
						       pkt_type);
	else if (pkt_type == DCI_DATA_TYPE)
		return diag_user_process_dci_data(payload_buf, payload_len);
	else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
		return diag_user_process_raw_data(payload_buf,
							payload_len);
	else if (pkt_type == USER_SPACE_DATA_TYPE)
		return diag_user_process_userspace_data(payload_buf,
							payload_len);
	if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
		/* Handle the DCI bits, then strip them from pkt_type. */
		err = diag_user_process_dci_apps_data(payload_buf, payload_len,
						      pkt_type);
		if (pkt_type & DATA_TYPE_DCI_LOG)
			pkt_type ^= DATA_TYPE_DCI_LOG;
		if (pkt_type & DATA_TYPE_DCI_EVENT)
			pkt_type ^= DATA_TYPE_DCI_EVENT;
		/*
		 * Check if the log or event is selected even on the regular
		 * stream. If USB is not connected and we are not in memory
		 * device mode, we should not process these logs/events.
		 */
		if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
		    !driver->usb_connected)
			return err;
	}

	switch (pkt_type) {
	case DATA_TYPE_EVENT:
	case DATA_TYPE_F3:
	case DATA_TYPE_LOG:
	case DATA_TYPE_DELAYED_RESPONSE:
	case DATA_TYPE_RESPONSE:
		return diag_user_process_apps_data(payload_buf, payload_len,
						   pkt_type);
	default:
		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
				   __func__, pkt_type);
		return -EINVAL;
	}

	return err;
}
3120
3121void diag_ws_init(void)
3122{
3123 driver->dci_ws.ref_count = 0;
3124 driver->dci_ws.copy_count = 0;
3125 spin_lock_init(&driver->dci_ws.lock);
3126
3127 driver->md_ws.ref_count = 0;
3128 driver->md_ws.copy_count = 0;
3129 spin_lock_init(&driver->md_ws.lock);
3130}
3131
3132static void diag_stats_init(void)
3133{
3134 if (!driver)
3135 return;
3136
3137 driver->msg_stats.alloc_count = 0;
3138 driver->msg_stats.drop_count = 0;
3139
3140 driver->log_stats.alloc_count = 0;
3141 driver->log_stats.drop_count = 0;
3142
3143 driver->event_stats.alloc_count = 0;
3144 driver->event_stats.drop_count = 0;
3145}
3146
/*
 * diag_ws_on_notify - keep the device awake when incoming data is
 * signalled, before any reference counting happens.
 */
void diag_ws_on_notify(void)
{
	/*
	 * Do not deal with reference count here as there can be spurious
	 * interrupts.
	 */
	pm_stay_awake(driver->diag_dev);
}
3155
3156void diag_ws_on_read(int type, int pkt_len)
3157{
3158 unsigned long flags;
3159 struct diag_ws_ref_t *ws_ref = NULL;
3160
3161 switch (type) {
3162 case DIAG_WS_DCI:
3163 ws_ref = &driver->dci_ws;
3164 break;
3165 case DIAG_WS_MUX:
3166 ws_ref = &driver->md_ws;
3167 break;
3168 default:
3169 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3170 __func__, type);
3171 return;
3172 }
3173
3174 spin_lock_irqsave(&ws_ref->lock, flags);
3175 if (pkt_len > 0) {
3176 ws_ref->ref_count++;
3177 } else {
3178 if (ws_ref->ref_count < 1) {
3179 ws_ref->ref_count = 0;
3180 ws_ref->copy_count = 0;
3181 }
3182 diag_ws_release();
3183 }
3184 spin_unlock_irqrestore(&ws_ref->lock, flags);
3185}
3186
3187
3188void diag_ws_on_copy(int type)
3189{
3190 unsigned long flags;
3191 struct diag_ws_ref_t *ws_ref = NULL;
3192
3193 switch (type) {
3194 case DIAG_WS_DCI:
3195 ws_ref = &driver->dci_ws;
3196 break;
3197 case DIAG_WS_MUX:
3198 ws_ref = &driver->md_ws;
3199 break;
3200 default:
3201 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3202 __func__, type);
3203 return;
3204 }
3205
3206 spin_lock_irqsave(&ws_ref->lock, flags);
3207 ws_ref->copy_count++;
3208 spin_unlock_irqrestore(&ws_ref->lock, flags);
3209}
3210
3211void diag_ws_on_copy_fail(int type)
3212{
3213 unsigned long flags;
3214 struct diag_ws_ref_t *ws_ref = NULL;
3215
3216 switch (type) {
3217 case DIAG_WS_DCI:
3218 ws_ref = &driver->dci_ws;
3219 break;
3220 case DIAG_WS_MUX:
3221 ws_ref = &driver->md_ws;
3222 break;
3223 default:
3224 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3225 __func__, type);
3226 return;
3227 }
3228
3229 spin_lock_irqsave(&ws_ref->lock, flags);
3230 ws_ref->ref_count--;
3231 spin_unlock_irqrestore(&ws_ref->lock, flags);
3232
3233 diag_ws_release();
3234}
3235
3236void diag_ws_on_copy_complete(int type)
3237{
3238 unsigned long flags;
3239 struct diag_ws_ref_t *ws_ref = NULL;
3240
3241 switch (type) {
3242 case DIAG_WS_DCI:
3243 ws_ref = &driver->dci_ws;
3244 break;
3245 case DIAG_WS_MUX:
3246 ws_ref = &driver->md_ws;
3247 break;
3248 default:
3249 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3250 __func__, type);
3251 return;
3252 }
3253
3254 spin_lock_irqsave(&ws_ref->lock, flags);
3255 ws_ref->ref_count -= ws_ref->copy_count;
3256 if (ws_ref->ref_count < 1)
3257 ws_ref->ref_count = 0;
3258 ws_ref->copy_count = 0;
3259 spin_unlock_irqrestore(&ws_ref->lock, flags);
3260
3261 diag_ws_release();
3262}
3263
3264void diag_ws_reset(int type)
3265{
3266 unsigned long flags;
3267 struct diag_ws_ref_t *ws_ref = NULL;
3268
3269 switch (type) {
3270 case DIAG_WS_DCI:
3271 ws_ref = &driver->dci_ws;
3272 break;
3273 case DIAG_WS_MUX:
3274 ws_ref = &driver->md_ws;
3275 break;
3276 default:
3277 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3278 __func__, type);
3279 return;
3280 }
3281
3282 spin_lock_irqsave(&ws_ref->lock, flags);
3283 ws_ref->ref_count = 0;
3284 ws_ref->copy_count = 0;
3285 spin_unlock_irqrestore(&ws_ref->lock, flags);
3286
3287 diag_ws_release();
3288}
3289
3290void diag_ws_release(void)
3291{
3292 if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
3293 pm_relax(driver->diag_dev);
3294}
3295
#ifdef DIAG_DEBUG
/*
 * diag_debug_init() - create the IPC logging context used by the driver's
 * debug logging and enable a default set of debug categories.
 *
 * Failure to create the context is non-fatal: an error is printed and the
 * driver continues without IPC debug logging.
 */
static void diag_debug_init(void)
{
	diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
	if (!diag_ipc_log)
		pr_err("diag: Failed to create IPC logging context\n");
	/*
	 * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
	 * to be logged to IPC
	 */
	diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
				DIAG_DEBUG_BRIDGE;
}
#else
/* Stub used when DIAG_DEBUG is not defined: debug logging is compiled out. */
static void diag_debug_init(void)
{

}
#endif
3315
3316static int diag_real_time_info_init(void)
3317{
3318 int i;
3319
3320 if (!driver)
3321 return -EIO;
3322 for (i = 0; i < DIAG_NUM_PROC; i++) {
3323 driver->real_time_mode[i] = 1;
3324 driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
3325 driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
3326 }
3327 driver->real_time_update_busy = 0;
3328 driver->proc_active_mask = 0;
3329 driver->diag_real_time_wq = create_singlethread_workqueue(
3330 "diag_real_time_wq");
3331 if (!driver->diag_real_time_wq)
3332 return -ENOMEM;
3333 INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
3334 mutex_init(&driver->real_time_mutex);
3335 return 0;
3336}
3337
/*
 * File operations for the diag character device.  User-space clients read
 * diag data, write packets, and drive session control through the ioctl
 * handlers; compat_ioctl serves 32-bit clients on 64-bit kernels.
 */
static const struct file_operations diagcharfops = {
	.owner = THIS_MODULE,
	.read = diagchar_read,
	.write = diagchar_write,
#ifdef CONFIG_COMPAT
	.compat_ioctl = diagchar_compat_ioctl,
#endif
	.unlocked_ioctl = diagchar_ioctl,
	.open = diagchar_open,
	.release = diagchar_close
};
3349
3350static int diagchar_setup_cdev(dev_t devno)
3351{
3352
3353 int err;
3354
3355 cdev_init(driver->cdev, &diagcharfops);
3356
3357 driver->cdev->owner = THIS_MODULE;
3358 driver->cdev->ops = &diagcharfops;
3359
3360 err = cdev_add(driver->cdev, devno, 1);
3361
3362 if (err) {
3363 pr_info("diagchar cdev registration failed !\n");
3364 return err;
3365 }
3366
3367 driver->diagchar_class = class_create(THIS_MODULE, "diag");
3368
3369 if (IS_ERR(driver->diagchar_class)) {
3370 pr_err("Error creating diagchar class.\n");
3371 return PTR_ERR(driver->diagchar_class);
3372 }
3373
3374 driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
3375 (void *)driver, "diag");
3376
3377 if (!driver->diag_dev)
3378 return -EIO;
3379
3380 driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
3381 return 0;
3382
3383}
3384
3385static int diagchar_cleanup(void)
3386{
3387 if (driver) {
3388 if (driver->cdev) {
3389 /* TODO - Check if device exists before deleting */
3390 device_destroy(driver->diagchar_class,
3391 MKDEV(driver->major,
3392 driver->minor_start));
3393 cdev_del(driver->cdev);
3394 }
3395 if (!IS_ERR(driver->diagchar_class))
3396 class_destroy(driver->diagchar_class);
3397 kfree(driver);
3398 }
3399 return 0;
3400}
3401
3402static int __init diagchar_init(void)
3403{
3404 dev_t dev;
3405 int ret;
3406
3407 pr_debug("diagfwd initializing ..\n");
3408 ret = 0;
3409 driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
3410 if (!driver)
3411 return -ENOMEM;
3412 kmemleak_not_leak(driver);
3413
3414 timer_in_progress = 0;
3415 driver->delayed_rsp_id = 0;
3416 driver->hdlc_disabled = 0;
3417 driver->dci_state = DIAG_DCI_NO_ERROR;
3418 setup_timer(&drain_timer, drain_timer_func, 1234);
3419 driver->supports_sockets = 1;
3420 driver->time_sync_enabled = 0;
3421 driver->uses_time_api = 0;
3422 driver->poolsize = poolsize;
3423 driver->poolsize_hdlc = poolsize_hdlc;
3424 driver->poolsize_dci = poolsize_dci;
3425 driver->poolsize_user = poolsize_user;
3426 /*
3427 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
3428 * The number of buffers encompasses Diag data generated on
3429 * the Apss processor + 1 for the responses generated exclusively on
3430 * the Apps processor + data from data channels (4 channels per
3431 * peripheral) + data from command channels (2)
3432 */
3433 diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
3434 poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
3435 driver->num_clients = max_clients;
3436 driver->logging_mode = DIAG_USB_MODE;
3437 driver->mask_check = 0;
3438 driver->in_busy_pktdata = 0;
3439 driver->in_busy_dcipktdata = 0;
3440 driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
3441 hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3442 hdlc_data.len = 0;
3443 non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3444 non_hdlc_data.len = 0;
3445 mutex_init(&driver->hdlc_disable_mutex);
3446 mutex_init(&driver->diagchar_mutex);
3447 mutex_init(&driver->diag_maskclear_mutex);
3448 mutex_init(&driver->diag_file_mutex);
3449 mutex_init(&driver->delayed_rsp_mutex);
3450 mutex_init(&apps_data_mutex);
3451 mutex_init(&driver->diagfwd_channel_mutex);
3452 init_waitqueue_head(&driver->wait_q);
3453 INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
3454 INIT_WORK(&(driver->update_user_clients),
3455 diag_update_user_client_work_fn);
3456 INIT_WORK(&(driver->update_md_clients),
3457 diag_update_md_client_work_fn);
3458 diag_ws_init();
3459 diag_stats_init();
3460 diag_debug_init();
3461 diag_md_session_init();
3462
3463 driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
3464 driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
3465 if (!driver->incoming_pkt.data) {
3466 ret = -ENOMEM;
3467 goto fail;
3468 }
3469 kmemleak_not_leak(driver->incoming_pkt.data);
3470 driver->incoming_pkt.processing = 0;
3471 driver->incoming_pkt.read_len = 0;
3472 driver->incoming_pkt.remaining = 0;
3473 driver->incoming_pkt.total_len = 0;
3474
3475 ret = diag_real_time_info_init();
3476 if (ret)
3477 goto fail;
3478 ret = diag_debugfs_init();
3479 if (ret)
3480 goto fail;
3481 ret = diag_masks_init();
3482 if (ret)
3483 goto fail;
3484 ret = diag_remote_init();
3485 if (ret)
3486 goto fail;
3487 ret = diag_mux_init();
3488 if (ret)
3489 goto fail;
3490 ret = diagfwd_init();
3491 if (ret)
3492 goto fail;
3493 ret = diagfwd_cntl_init();
3494 if (ret)
3495 goto fail;
3496 driver->dci_state = diag_dci_init();
3497 ret = diagfwd_peripheral_init();
3498 if (ret)
3499 goto fail;
3500 diagfwd_cntl_channel_init();
3501 if (driver->dci_state == DIAG_DCI_NO_ERROR)
3502 diag_dci_channel_init();
3503 pr_debug("diagchar initializing ..\n");
3504 driver->num = 1;
3505 driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
3506 strlcpy(driver->name, "diag", 4);
3507 /* Get major number from kernel and initialize */
3508 ret = alloc_chrdev_region(&dev, driver->minor_start,
3509 driver->num, driver->name);
3510 if (!ret) {
3511 driver->major = MAJOR(dev);
3512 driver->minor_start = MINOR(dev);
3513 } else {
3514 pr_err("diag: Major number not allocated\n");
3515 goto fail;
3516 }
3517 driver->cdev = cdev_alloc();
3518 ret = diagchar_setup_cdev(dev);
3519 if (ret)
3520 goto fail;
Sreelakshmi Gownipalli8d477d32017-02-08 19:49:06 -08003521 mutex_init(&driver->diag_id_mutex);
3522 INIT_LIST_HEAD(&driver->diag_id_list);
3523 diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS");
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003524 pr_debug("diagchar initialized now");
3525 ret = diagfwd_bridge_init();
3526 if (ret)
3527 diagfwd_bridge_exit();
3528 return 0;
3529
3530fail:
3531 pr_err("diagchar is not initialized, ret: %d\n", ret);
3532 diag_debugfs_cleanup();
3533 diagchar_cleanup();
3534 diag_mux_exit();
3535 diagfwd_peripheral_exit();
3536 diagfwd_bridge_exit();
3537 diagfwd_exit();
3538 diagfwd_cntl_exit();
3539 diag_dci_exit();
3540 diag_masks_exit();
3541 diag_remote_exit();
3542 return ret;
3543
3544}
3545
/*
 * diagchar_exit() - module unload path.
 *
 * Tears down the subsystems roughly in reverse order of diagchar_init():
 * memory pools, MUX, peripheral forwarding, control channels, DCI, masks,
 * md sessions, remote bridges, debugfs, and finally the character device
 * and driver state via diagchar_cleanup().
 */
static void diagchar_exit(void)
{
	pr_info("diagchar exiting...\n");
	diag_mempool_exit();
	diag_mux_exit();
	diagfwd_peripheral_exit();
	diagfwd_exit();
	diagfwd_cntl_exit();
	diag_dci_exit();
	diag_masks_exit();
	diag_md_session_exit();
	diag_remote_exit();
	diag_debugfs_cleanup();
	diagchar_cleanup();
	pr_info("done diagchar exit\n");
}
3562
/* Module entry/exit registration. */
module_init(diagchar_init);
module_exit(diagchar_exit);