/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/diagchar.h>
#include <linux/sched.h>
#include <linux/ratelimit.h>
#include <linux/timer.h>
#ifdef CONFIG_DIAG_OVER_USB
#include <linux/usb/usbdiag.h>
#endif
#include <asm/current.h>
#include "diagchar_hdlc.h"
#include "diagmem.h"
#include "diagchar.h"
#include "diagfwd.h"
#include "diagfwd_cntl.h"
#include "diag_dci.h"
#include "diag_debugfs.h"
#include "diag_masks.h"
#include "diagfwd_bridge.h"
#include "diag_usb.h"
#include "diag_memorydevice.h"
#include "diag_mux.h"
#include "diag_ipc_logging.h"
#include "diagfwd_peripheral.h"

#include <linux/coresight-stm.h>
#include <linux/kernel.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

MODULE_DESCRIPTION("Diag Char Driver");
MODULE_LICENSE("GPL v2");

#define MIN_SIZ_ALLOW 4
#define INIT	1
#define EXIT	-1
struct diagchar_dev *driver;
struct diagchar_priv {
	int pid;
};

#define USER_SPACE_RAW_DATA	0
#define USER_SPACE_HDLC_DATA	1

/* Memory pool variables */
/* Used for copying any incoming packet from user space clients. */
static unsigned int poolsize = 12;
module_param(poolsize, uint, 0000);

/*
 * Used for HDLC encoding packets coming from the user
 * space.
 */
static unsigned int poolsize_hdlc = 10;
module_param(poolsize_hdlc, uint, 0000);

/*
 * This is used for incoming DCI requests from the user space clients.
 * Don't expose itemsize as it is internal.
 */
static unsigned int poolsize_user = 8;
module_param(poolsize_user, uint, 0000);

/*
 * USB structures allocated for writing Diag data generated on the Apps to USB.
 * Don't expose itemsize as it is constant.
 */
static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
static unsigned int poolsize_usb_apps = 10;
module_param(poolsize_usb_apps, uint, 0000);

/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
static unsigned int poolsize_dci = 10;
module_param(poolsize_dci, uint, 0000);

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/* Used for reading data from the remote device. */
static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
static unsigned int poolsize_mdm = 18;
module_param(itemsize_mdm, uint, 0000);
module_param(poolsize_mdm, uint, 0000);

/*
 * Used for reading DCI data from the remote device.
 * Don't expose poolsize for DCI data. There is only one read buffer.
 */
static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
static unsigned int poolsize_mdm_dci = 1;
module_param(itemsize_mdm_dci, uint, 0000);

/*
 * Used for USB structures associated with a remote device.
 * Don't expose the itemsize since it is constant.
 */
static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
static unsigned int poolsize_mdm_usb = 18;
module_param(poolsize_mdm_usb, uint, 0000);

/*
 * Used for writing read DCI data to remote peripherals. Don't
 * expose poolsize for DCI data. There is only one read
 * buffer. Add 6 bytes for DCI header information: Start (1),
 * Version (1), Length (2), Tag (2).
 */
static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
static unsigned int poolsize_mdm_dci_write = 1;
module_param(itemsize_mdm_dci_write, uint, 0000);

/*
 * Used for USB structures associated with a remote SMUX device.
 * Don't expose the itemsize since it is constant.
 */
static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
static unsigned int poolsize_qsc_usb = 8;
module_param(poolsize_qsc_usb, uint, 0000);
#endif

/* This is the max number of user-space clients supported at initialization */
static unsigned int max_clients = 15;
static unsigned int threshold_client_limit = 50;
module_param(max_clients, uint, 0000);

/* Timer variables */
static struct timer_list drain_timer;
static int timer_in_progress;

/*
 * Diag Mask clear variable
 * Used for clearing masks upon
 * USB disconnection and stopping ODL
 */
static int diag_mask_clear_param = 1;
module_param(diag_mask_clear_param, int, 0644);

struct diag_apps_data_t {
	void *buf;
	uint32_t len;
	int ctxt;
};

static struct diag_apps_data_t hdlc_data;
static struct diag_apps_data_t non_hdlc_data;
static struct mutex apps_data_mutex;

#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF

#ifdef DIAG_DEBUG
uint16_t diag_debug_mask;
void *diag_ipc_log;
#endif

static void diag_md_session_close(struct diag_md_session_t *session_info);

/*
 * Returns the next delayed rsp id. If wrapping is enabled,
 * wraps the delayed rsp id to DIAGPKT_MAX_DELAYED_RSP.
 */
static uint16_t diag_get_next_delayed_rsp_id(void)
{
	uint16_t rsp_id = 0;

	mutex_lock(&driver->delayed_rsp_mutex);
	rsp_id = driver->delayed_rsp_id;
	if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
		rsp_id++;
	else {
		if (wrap_enabled) {
			rsp_id = 1;
			wrap_count++;
		} else
			rsp_id = DIAGPKT_MAX_DELAYED_RSP;
	}
	driver->delayed_rsp_id = rsp_id;
	mutex_unlock(&driver->delayed_rsp_mutex);

	return rsp_id;
}

static int diag_switch_logging(struct diag_logging_mode_param_t *param);
#define COPY_USER_SPACE_OR_ERR(buf, data, length)		\
do {								\
	if ((count < ret+length) || (copy_to_user(buf,		\
			(void *)&data, length))) {		\
		ret = -EFAULT;					\
	} else {						\
		ret += length;					\
	}							\
} while (0)

static void drain_timer_func(unsigned long data)
{
	queue_work(driver->diag_wq, &(driver->diag_drain_work));
}

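/*
 * Flush a pending apps-data buffer (HDLC or non-HDLC) to the mux and
 * release it back to the HDLC pool if the write fails.
 */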
static void diag_drain_apps_data(struct diag_apps_data_t *data)
{
	int err = 0;

	if (!data || !data->buf)
		return;

	err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
			     data->ctxt);
	if (err)
		diagmem_free(driver, data->buf, POOL_TYPE_HDLC);

	data->buf = NULL;
	data->len = 0;
}

void diag_update_user_client_work_fn(struct work_struct *work)
{
	diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
}

static void diag_update_md_client_work_fn(struct work_struct *work)
{
	diag_update_md_clients(HDLC_SUPPORT_TYPE);
}

void diag_drain_work_fn(struct work_struct *work)
{
	struct diag_md_session_t *session_info = NULL;
	uint8_t hdlc_disabled = 0;

	timer_in_progress = 0;
	mutex_lock(&apps_data_mutex);
	session_info = diag_md_session_get_peripheral(APPS_DATA);
	if (session_info)
		hdlc_disabled = session_info->hdlc_disabled;
	else
		hdlc_disabled = driver->hdlc_disabled;

	if (!hdlc_disabled)
		diag_drain_apps_data(&hdlc_data);
	else
		diag_drain_apps_data(&non_hdlc_data);
	mutex_unlock(&apps_data_mutex);
}

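/* Arm the 200 ms apps-data drain timer if it is not already pending. */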
void check_drain_timer(void)
{
	int ret = 0;

	if (!timer_in_progress) {
		timer_in_progress = 1;
		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
	}
}

void diag_add_client(int i, struct file *file)
{
	struct diagchar_priv *diagpriv_data;

	driver->client_map[i].pid = current->tgid;
	diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
				GFP_KERNEL);
	if (diagpriv_data)
		diagpriv_data->pid = current->tgid;
	file->private_data = diagpriv_data;
	strlcpy(driver->client_map[i].name, current->comm, 20);
	driver->client_map[i].name[19] = '\0';
}

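/*
 * Size and initialize the apps-side memory pools (copy, HDLC encode,
 * user-space DCI requests and DCI data). Called when the first client
 * opens the device.
 */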
static void diag_mempool_init(void)
{
	uint32_t itemsize = DIAG_MAX_REQ_SIZE;
	uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
	uint32_t itemsize_dci = IN_BUF_SIZE;
	uint32_t itemsize_user = DCI_REQ_BUF_SIZE;

	itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
		     CALLBACK_HDR_SIZE);
	diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
	diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
	diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
	diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);

	diagmem_init(driver, POOL_TYPE_COPY);
	diagmem_init(driver, POOL_TYPE_HDLC);
	diagmem_init(driver, POOL_TYPE_USER);
	diagmem_init(driver, POOL_TYPE_DCI);
}

static void diag_mempool_exit(void)
{
	diagmem_exit(driver, POOL_TYPE_COPY);
	diagmem_exit(driver, POOL_TYPE_HDLC);
	diagmem_exit(driver, POOL_TYPE_USER);
	diagmem_exit(driver, POOL_TYPE_DCI);
}

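/*
 * Open handler for the diag char device: reuse a free client_map slot
 * if one exists, otherwise grow the client_map and data_ready tables up
 * to threshold_client_limit. The memory pools are set up on the first
 * open.
 */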
static int diagchar_open(struct inode *inode, struct file *file)
{
	int i = 0;
	void *temp;

	if (driver) {
		mutex_lock(&driver->diagchar_mutex);

		for (i = 0; i < driver->num_clients; i++)
			if (driver->client_map[i].pid == 0)
				break;

		if (i < driver->num_clients) {
			diag_add_client(i, file);
		} else {
			if (i < threshold_client_limit) {
				driver->num_clients++;
				temp = krealloc(driver->client_map,
					(driver->num_clients) * sizeof(struct
						diag_client_map), GFP_KERNEL);
				if (!temp)
					goto fail;
				else
					driver->client_map = temp;
				temp = krealloc(driver->data_ready,
					(driver->num_clients) * sizeof(int),
					GFP_KERNEL);
				if (!temp)
					goto fail;
				else
					driver->data_ready = temp;
				diag_add_client(i, file);
			} else {
				mutex_unlock(&driver->diagchar_mutex);
				pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
				pr_err_ratelimited("diag: Cannot open handle %s %d",
						   current->comm, current->tgid);
				for (i = 0; i < driver->num_clients; i++)
					pr_debug("%d) %s PID=%d", i, driver->
						client_map[i].name,
						driver->client_map[i].pid);
				return -ENOMEM;
			}
		}
		driver->data_ready[i] = 0x0;
		driver->data_ready[i] |= MSG_MASKS_TYPE;
		driver->data_ready[i] |= EVENT_MASKS_TYPE;
		driver->data_ready[i] |= LOG_MASKS_TYPE;
		driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
		driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;

		if (driver->ref_count == 0)
			diag_mempool_init();
		driver->ref_count++;
		mutex_unlock(&driver->diagchar_mutex);
		return 0;
	}
	return -ENOMEM;

fail:
	mutex_unlock(&driver->diagchar_mutex);
	driver->num_clients--;
	pr_err_ratelimited("diag: Insufficient memory for new client");
	return -ENOMEM;
}

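/*
 * Convert an internal MD_PERIPHERAL_MASK() bitmap into the DIAG_CON_*
 * mask that is exchanged with user space.
 */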
static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
{
	uint32_t ret = 0;

	if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
		ret |= DIAG_CON_APSS;
	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
		ret |= DIAG_CON_MPSS;
	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
		ret |= DIAG_CON_LPASS;
	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
		ret |= DIAG_CON_WCNSS;
	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
		ret |= DIAG_CON_SENSORS;
	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
		ret |= DIAG_CON_WDSP;
	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
		ret |= DIAG_CON_CDSP;
	if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
		ret |= DIAG_CON_UPD_WLAN;
	return ret;
}

uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask)
{
	uint8_t upd = 0;
	uint32_t pd_mask = 0;

	pd_mask = diag_translate_kernel_to_user_mask(peripheral_mask);
	switch (pd_mask) {
	case DIAG_CON_UPD_WLAN:
		upd = UPD_WLAN;
		break;
	default:
		DIAG_LOG(DIAG_DEBUG_MASKS,
			 "asking for mask update with no pd mask set\n");
	}
	return upd;
}

int diag_mask_param(void)
{
	return diag_mask_clear_param;
}

void diag_clear_masks(struct diag_md_session_t *info)
{
	int ret;
	char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
	char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
	char cmd_disable_event_mask[] = { 0x60, 0};

	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
		 "diag: %s: masks clear request upon %s\n", __func__,
		 ((info) ? "ODL exit" : "USB Disconnection"));

	ret = diag_process_apps_masks(cmd_disable_log_mask,
				      sizeof(cmd_disable_log_mask), info);
	ret = diag_process_apps_masks(cmd_disable_msg_mask,
				      sizeof(cmd_disable_msg_mask), info);
	ret = diag_process_apps_masks(cmd_disable_event_mask,
				      sizeof(cmd_disable_event_mask), info);
	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
		 "diag:%s: masks cleared successfully\n", __func__);
}

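/*
 * Clean up the memory-device logging session owned by an exiting
 * process: optionally clear the apps masks, close the session, drop any
 * user-PD bookkeeping and switch the affected peripherals back to USB
 * logging.
 */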
static void diag_close_logging_process(const int pid)
{
	int i, j;
	int session_mask;
	uint32_t p_mask;
	struct diag_md_session_t *session_info = NULL;
	struct diag_logging_mode_param_t params;

	session_info = diag_md_session_get_pid(pid);
	if (!session_info)
		return;

	if (diag_mask_clear_param)
		diag_clear_masks(session_info);

	mutex_lock(&driver->diag_maskclear_mutex);
	driver->mask_clear = 1;
	mutex_unlock(&driver->diag_maskclear_mutex);

	mutex_lock(&driver->diagchar_mutex);
	session_mask = session_info->peripheral_mask;
	diag_md_session_close(session_info);

	p_mask = diag_translate_kernel_to_user_mask(session_mask);

	for (i = 0; i < NUM_MD_SESSIONS; i++)
		if (MD_PERIPHERAL_MASK(i) & session_mask)
			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);

	params.req_mode = USB_MODE;
	params.mode_param = 0;
	params.pd_mask = 0;
	params.peripheral_mask = p_mask;

	if (driver->num_pd_session > 0) {
		for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) {
			if (session_mask & MD_PERIPHERAL_MASK(i)) {
				j = i - UPD_WLAN;
				driver->pd_session_clear[j] = 1;
				driver->pd_logging_mode[j] = 0;
				driver->num_pd_session -= 1;
				params.pd_mask = p_mask;
			}
		}
	}

	diag_switch_logging(&params);
	mutex_unlock(&driver->diagchar_mutex);
}

static int diag_remove_client_entry(struct file *file)
{
	int i = -1;
	struct diagchar_priv *diagpriv_data = NULL;
	struct diag_dci_client_tbl *dci_entry = NULL;

	if (!driver)
		return -ENOMEM;

	mutex_lock(&driver->diag_file_mutex);
	if (!file) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -ENOENT;
	}
	if (!(file->private_data)) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -EINVAL;
	}

	diagpriv_data = file->private_data;

	/*
	 * Clean up any DCI registrations if this is a DCI client.
	 * This especially helps in case of an ungraceful exit of a DCI
	 * client: the call removes any pending registrations of such a
	 * client.
	 */
	mutex_lock(&driver->dci_mutex);
	dci_entry = dci_lookup_client_entry_pid(current->tgid);
	if (dci_entry)
		diag_dci_deinit_client(dci_entry);
	mutex_unlock(&driver->dci_mutex);

	diag_close_logging_process(current->tgid);

	/* Delete the pkt response table entry for the exiting process */
	diag_cmd_remove_reg_by_pid(current->tgid);

	mutex_lock(&driver->diagchar_mutex);
	driver->ref_count--;
	if (driver->ref_count == 0)
		diag_mempool_exit();

	for (i = 0; i < driver->num_clients; i++) {
		if (diagpriv_data && diagpriv_data->pid ==
			driver->client_map[i].pid) {
			driver->client_map[i].pid = 0;
			kfree(diagpriv_data);
			diagpriv_data = NULL;
			file->private_data = 0;
			break;
		}
	}
	mutex_unlock(&driver->diagchar_mutex);
	mutex_unlock(&driver->diag_file_mutex);
	return 0;
}

static int diagchar_close(struct inode *inode, struct file *file)
{
	int ret;

	DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
		 current->comm);
	ret = diag_remove_client_entry(file);
	mutex_lock(&driver->diag_maskclear_mutex);
	driver->mask_clear = 0;
	mutex_unlock(&driver->diag_maskclear_mutex);
	return ret;
}

void diag_record_stats(int type, int flag)
{
	struct diag_pkt_stats_t *pkt_stats = NULL;

	switch (type) {
	case DATA_TYPE_EVENT:
		pkt_stats = &driver->event_stats;
		break;
	case DATA_TYPE_F3:
		pkt_stats = &driver->msg_stats;
		break;
	case DATA_TYPE_LOG:
		pkt_stats = &driver->log_stats;
		break;
	case DATA_TYPE_RESPONSE:
		if (flag != PKT_DROP)
			return;
		pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
				   __func__);
		return;
	case DATA_TYPE_DELAYED_RESPONSE:
		/* No counters to increase for Delayed responses */
		return;
	default:
		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
				   __func__, type);
		return;
	}

	switch (flag) {
	case PKT_ALLOC:
		atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
		break;
	case PKT_DROP:
		atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
		break;
	case PKT_RESET:
		atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
		atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
		break;
	default:
		pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
				   __func__, flag);
		return;
	}
}

void diag_get_timestamp(char *time_str)
{
	struct timeval t;
	struct tm broken_tm;

	do_gettimeofday(&t);
	if (!time_str)
		return;
	time_to_tm(t.tv_sec, 0, &broken_tm);
	scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
		  broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
}

int diag_get_remote(int remote_info)
{
	int val = (remote_info < 0) ? -remote_info : remote_info;
	int remote_val;

	switch (val) {
	case MDM:
	case MDM2:
	case QSC:
		remote_val = -remote_info;
		break;
	default:
		remote_val = 0;
		break;
	}

	return remote_val;
}

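/*
 * Check whether a command registration covers one of the known polling
 * commands (status, call/TMC queries, diag poll, TDSCDMA status).
 */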
int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
{
	int polling = DIAG_CMD_NOT_POLLING;

	if (!entry)
		return -EIO;

	if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
		if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
		    entry->cmd_code_hi >= DIAG_CMD_STATUS &&
		    entry->cmd_code_lo <= DIAG_CMD_STATUS)
			polling = DIAG_CMD_POLLING;
		else if (entry->subsys_id == DIAG_SS_WCDMA &&
			 entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
			 entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
			polling = DIAG_CMD_POLLING;
		else if (entry->subsys_id == DIAG_SS_GSM &&
			 entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
			 entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
			polling = DIAG_CMD_POLLING;
		else if (entry->subsys_id == DIAG_SS_PARAMS &&
			 entry->cmd_code_hi >= DIAG_DIAG_POLL &&
			 entry->cmd_code_lo <= DIAG_DIAG_POLL)
			polling = DIAG_CMD_POLLING;
		else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
			 entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
			 entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
			polling = DIAG_CMD_POLLING;
	}

	return polling;
}

static void diag_cmd_invalidate_polling(int change_flag)
{
	int polling = DIAG_CMD_NOT_POLLING;
	struct list_head *start;
	struct list_head *temp;
	struct diag_cmd_reg_t *item = NULL;

	if (change_flag == DIAG_CMD_ADD) {
		if (driver->polling_reg_flag)
			return;
	}

	driver->polling_reg_flag = 0;
	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
		item = list_entry(start, struct diag_cmd_reg_t, link);
		if (!item) {
			pr_err("diag: In %s, unable to search command\n",
			       __func__);
			return;
		}
		polling = diag_cmd_chk_polling(&item->entry);
		if (polling == DIAG_CMD_POLLING) {
			driver->polling_reg_flag = 1;
			break;
		}
	}
}

int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
		     int pid)
{
	struct diag_cmd_reg_t *new_item = NULL;

	if (!new_entry) {
		pr_err("diag: In %s, invalid new entry\n", __func__);
		return -EINVAL;
	}

	if (proc > APPS_DATA) {
		pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
		return -EINVAL;
	}

	if (proc != APPS_DATA)
		pid = INVALID_PID;

	new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
	if (!new_item)
		return -ENOMEM;
	kmemleak_not_leak(new_item);

	new_item->pid = pid;
	new_item->proc = proc;
	memcpy(&new_item->entry, new_entry,
	       sizeof(struct diag_cmd_reg_entry_t));
	INIT_LIST_HEAD(&new_item->link);

	mutex_lock(&driver->cmd_reg_mutex);
	list_add_tail(&new_item->link, &driver->cmd_reg_list);
	driver->cmd_reg_count++;
	diag_cmd_invalidate_polling(DIAG_CMD_ADD);
	mutex_unlock(&driver->cmd_reg_mutex);

	return 0;
}

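/*
 * Find a command registration that matches the given entry for a
 * specific proc (or ALL_PROC). Registrations using DIAG_CMD_NO_SUBSYS
 * act as wildcards; for MODE commands, the reset subcommand is matched
 * only against the apps registration and other subcommands only against
 * peripheral registrations.
 */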
struct diag_cmd_reg_entry_t *diag_cmd_search(
			struct diag_cmd_reg_entry_t *entry, int proc)
{
	struct list_head *start;
	struct list_head *temp;
	struct diag_cmd_reg_t *item = NULL;
	struct diag_cmd_reg_entry_t *temp_entry = NULL;

	if (!entry) {
		pr_err("diag: In %s, invalid entry\n", __func__);
		return NULL;
	}

	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
		item = list_entry(start, struct diag_cmd_reg_t, link);
		if (!item) {
			pr_err("diag: In %s, unable to search command\n",
			       __func__);
			return NULL;
		}
		temp_entry = &item->entry;
		if (temp_entry->cmd_code == entry->cmd_code &&
		    temp_entry->subsys_id == entry->subsys_id &&
		    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
		    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
		    (proc == item->proc || proc == ALL_PROC)) {
			return &item->entry;
		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
			   entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
			if (temp_entry->subsys_id == entry->subsys_id &&
			    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
			    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
			    (proc == item->proc || proc == ALL_PROC)) {
				return &item->entry;
			}
		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
			   temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
			if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
			    (temp_entry->cmd_code_lo <= entry->cmd_code) &&
			    (proc == item->proc || proc == ALL_PROC)) {
				if (entry->cmd_code == MODE_CMD) {
					if (entry->subsys_id == RESET_ID &&
					    item->proc != APPS_DATA) {
						continue;
					}
					if (entry->subsys_id != RESET_ID &&
					    item->proc == APPS_DATA) {
						continue;
					}
				}
				return &item->entry;
			}
		}
	}

	return NULL;
}

void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
{
	struct diag_cmd_reg_t *item = NULL;
	struct diag_cmd_reg_entry_t *temp_entry;

	if (!entry) {
		pr_err("diag: In %s, invalid entry\n", __func__);
		return;
	}

	mutex_lock(&driver->cmd_reg_mutex);
	temp_entry = diag_cmd_search(entry, proc);
	if (temp_entry) {
		item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
		if (!item) {
			mutex_unlock(&driver->cmd_reg_mutex);
			return;
		}
		list_del(&item->link);
		kfree(item);
		driver->cmd_reg_count--;
	}
	diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
	mutex_unlock(&driver->cmd_reg_mutex);
}

void diag_cmd_remove_reg_by_pid(int pid)
{
	struct list_head *start;
	struct list_head *temp;
	struct diag_cmd_reg_t *item = NULL;

	mutex_lock(&driver->cmd_reg_mutex);
	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
		item = list_entry(start, struct diag_cmd_reg_t, link);
		if (!item) {
			pr_err("diag: In %s, unable to search command\n",
			       __func__);
			mutex_unlock(&driver->cmd_reg_mutex);
			return;
		}
		if (item->pid == pid) {
			list_del(&item->link);
			kfree(item);
			driver->cmd_reg_count--;
		}
	}
	mutex_unlock(&driver->cmd_reg_mutex);
}

void diag_cmd_remove_reg_by_proc(int proc)
{
	struct list_head *start;
	struct list_head *temp;
	struct diag_cmd_reg_t *item = NULL;

	mutex_lock(&driver->cmd_reg_mutex);
	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
		item = list_entry(start, struct diag_cmd_reg_t, link);
		if (!item) {
			pr_err("diag: In %s, unable to search command\n",
			       __func__);
			mutex_unlock(&driver->cmd_reg_mutex);
			return;
		}
		if (item->proc == proc) {
			list_del(&item->link);
			kfree(item);
			driver->cmd_reg_count--;
		}
	}
	diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
	mutex_unlock(&driver->cmd_reg_mutex);
}

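/*
 * Drain the queued DCI write buffers of a client into its user-space
 * read buffer. The accumulated payload length is written at offset 8 of
 * the user buffer; if more data is pending than fits in "count", a
 * further drain is scheduled.
 */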
static int diag_copy_dci(char __user *buf, size_t count,
			 struct diag_dci_client_tbl *entry, int *pret)
{
	int total_data_len = 0;
	int ret = 0;
	int exit_stat = 1;
	uint8_t drain_again = 0;
	struct diag_dci_buffer_t *buf_entry, *temp;

	if (!buf || !entry || !pret)
		return exit_stat;

	ret = *pret;

	ret += sizeof(int);
	if (ret >= count) {
		pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
		       __func__, ret, count);
		return -EINVAL;
	}

	mutex_lock(&entry->write_buf_mutex);
	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
				 buf_track) {

		if ((ret + buf_entry->data_len) > count) {
			drain_again = 1;
			break;
		}

		list_del(&buf_entry->buf_track);
		mutex_lock(&buf_entry->data_mutex);
		if ((buf_entry->data_len > 0) &&
		    (buf_entry->in_busy) &&
		    (buf_entry->data)) {
			if (copy_to_user(buf+ret, (void *)buf_entry->data,
					 buf_entry->data_len))
				goto drop;
			ret += buf_entry->data_len;
			total_data_len += buf_entry->data_len;
			diag_ws_on_copy(DIAG_WS_DCI);
drop:
			buf_entry->in_busy = 0;
			buf_entry->data_len = 0;
			buf_entry->in_list = 0;
			if (buf_entry->buf_type == DCI_BUF_CMD) {
				mutex_unlock(&buf_entry->data_mutex);
				continue;
			} else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
				diagmem_free(driver, buf_entry->data,
					     POOL_TYPE_DCI);
				buf_entry->data = NULL;
				mutex_unlock(&buf_entry->data_mutex);
				kfree(buf_entry);
				continue;
			}

		}
		mutex_unlock(&buf_entry->data_mutex);
	}

	if (total_data_len > 0) {
		/* Copy the total data length */
		COPY_USER_SPACE_OR_ERR(buf+8, total_data_len, 4);
		if (ret == -EFAULT)
			goto exit;
		ret -= 4;
	} else {
		pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
			 __func__, total_data_len);
	}

	exit_stat = 0;
exit:
	entry->in_service = 0;
	mutex_unlock(&entry->write_buf_mutex);
	*pret = ret;
	if (drain_again)
		dci_drain_data(0);

	return exit_stat;
}

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
static int diag_remote_init(void)
{
	diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
			poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
			poolsize_qsc_usb);
	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
	if (!driver->hdlc_encode_buf)
		return -ENOMEM;
	driver->hdlc_encode_buf_len = 0;
	return 0;
}

static void diag_remote_exit(void)
{
	kfree(driver->hdlc_encode_buf);
}

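/*
 * Send a packet from user space to a remote (bridged) processor. In
 * non-HDLC mode the 4-byte framing header is stripped and the raw
 * payload forwarded; packets flagged as already HDLC encoded are copied
 * through; everything else is HDLC encoded into hdlc_encode_buf before
 * being written to the bridge.
 */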
static int diag_send_raw_data_remote(int proc, void *buf, int len,
				     uint8_t hdlc_flag)
{
	int err = 0;
	int max_len = 0;
	uint8_t retry_count = 0;
	uint8_t max_retries = 3;
	uint16_t payload = 0;
	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
	int bridge_index = proc - 1;
	struct diag_md_session_t *session_info = NULL;
	uint8_t hdlc_disabled = 0;

	if (!buf)
		return -EINVAL;

	if (len <= 0) {
		pr_err("diag: In %s, invalid len: %d", __func__, len);
		return -EBADMSG;
	}

	if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
		pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
		       bridge_index);
		return -EINVAL;
	}

	do {
		if (driver->hdlc_encode_buf_len == 0)
			break;
		usleep_range(10000, 10100);
		retry_count++;
	} while (retry_count < max_retries);

	if (driver->hdlc_encode_buf_len != 0)
		return -EAGAIN;
	session_info = diag_md_session_get_peripheral(APPS_DATA);
	if (session_info)
		hdlc_disabled = session_info->hdlc_disabled;
	else
		hdlc_disabled = driver->hdlc_disabled;
	if (hdlc_disabled) {
		if (len < 4) {
			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
			       __func__, len);
			return -EBADMSG;
		}
		payload = *(uint16_t *)(buf + 2);
		if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
			pr_err("diag: Dropping packet, payload size is %d\n",
			       payload);
			return -EBADMSG;
		}
		driver->hdlc_encode_buf_len = payload;
		/*
		 * Adding 5 bytes for start (1 byte), version (1 byte),
		 * payload (2 bytes) and end (1 byte)
		 */
		if (len == (payload + 5)) {
			/*
			 * Adding 4 bytes for start (1 byte), version (1 byte)
			 * and payload (2 bytes)
			 */
			memcpy(driver->hdlc_encode_buf, buf + 4, payload);
			goto send_data;
		} else {
			pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
			       __func__, len);
			return -EBADMSG;
		}
	}

	if (hdlc_flag) {
		if (len > DIAG_MAX_HDLC_BUF_SIZE) {
			pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
			       len);
			return -EBADMSG;
		}
		driver->hdlc_encode_buf_len = len;
		memcpy(driver->hdlc_encode_buf, buf, len);
		goto send_data;
	}

	/*
	 * The worst case length will be twice the incoming packet length.
	 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
	 */
	max_len = (2 * len) + 3;
	if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
		pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
		       max_len);
		return -EBADMSG;
	}

	/* Perform HDLC encoding on incoming data */
	send.state = DIAG_STATE_START;
	send.pkt = (void *)(buf);
	send.last = (void *)(buf + len - 1);
	send.terminate = 1;

	enc.dest = driver->hdlc_encode_buf;
	enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
	diag_hdlc_encode(&send, &enc);
	driver->hdlc_encode_buf_len = (int)(enc.dest -
					(void *)driver->hdlc_encode_buf);

send_data:
	err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
				   driver->hdlc_encode_buf_len);
	if (err) {
		pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
				   proc, err);
		driver->hdlc_encode_buf_len = 0;
	}

	return err;
}

static int diag_process_userspace_remote(int proc, void *buf, int len)
{
	int bridge_index = proc - 1;

	if (!buf || len < 0) {
		pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
		       __func__, buf, len);
		return -EINVAL;
	}

	if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
		pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
		       bridge_index);
		return -EINVAL;
	}

	driver->user_space_data_busy = 1;
	return diagfwd_bridge_write(bridge_index, buf, len);
}
#else
static int diag_remote_init(void)
{
	return 0;
}

static void diag_remote_exit(void)
{
}

int diagfwd_bridge_init(void)
{
	return 0;
}

void diagfwd_bridge_exit(void)
{
}

uint16_t diag_get_remote_device_mask(void)
{
	return 0;
}

static int diag_send_raw_data_remote(int proc, void *buf, int len,
				     uint8_t hdlc_flag)
{
	return -EINVAL;
}

static int diag_process_userspace_remote(int proc, void *buf, int len)
{
	return 0;
}
#endif

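/*
 * Validate a request injected from user space against the list of
 * command codes and subsystem commands the driver allows; returns 1 if
 * the packet may be processed.
 */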
static int mask_request_validate(unsigned char mask_buf[])
{
	uint8_t packet_id;
	uint8_t subsys_id;
	uint16_t ss_cmd;

	packet_id = mask_buf[0];

	if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
		subsys_id = mask_buf[1];
		ss_cmd = *(uint16_t *)(mask_buf + 2);
		switch (subsys_id) {
		case DIAG_SS_DIAG:
			if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
			    (ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
			    (ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
			    (ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
			    (ss_cmd == DIAG_SS_FILE_READ_APPS))
				return 1;
			break;
		default:
			return 0;
		}
	} else if (packet_id == 0x4B) {
		subsys_id = mask_buf[1];
		ss_cmd = *(uint16_t *)(mask_buf + 2);
		/* Packets with SSID which are allowed */
		switch (subsys_id) {
		case 0x04: /* DIAG_SUBSYS_WCDMA */
			if ((ss_cmd == 0) || (ss_cmd == 0xF))
				return 1;
			break;
		case 0x08: /* DIAG_SUBSYS_GSM */
			if ((ss_cmd == 0) || (ss_cmd == 0x1))
				return 1;
			break;
		case 0x09: /* DIAG_SUBSYS_UMTS */
		case 0x0F: /* DIAG_SUBSYS_CM */
			if (ss_cmd == 0)
				return 1;
			break;
		case 0x0C: /* DIAG_SUBSYS_OS */
			if ((ss_cmd == 2) || (ss_cmd == 0x100))
				return 1; /* MPU and APU */
			break;
		case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
			if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
				return 1;
			else if (ss_cmd == 0x218) /* HDLC Disabled Command */
				return 0;
			else if (ss_cmd == DIAG_GET_TIME_API)
				return 1;
			else if (ss_cmd == DIAG_SET_TIME_API)
				return 1;
			else if (ss_cmd == DIAG_SWITCH_COMMAND)
				return 1;
			else if (ss_cmd == DIAG_BUFFERING_MODE)
				return 1;
			break;
		case 0x13: /* DIAG_SUBSYS_FS */
			if ((ss_cmd == 0) || (ss_cmd == 0x1))
				return 1;
			break;
		default:
			return 0;
		}
	} else {
		switch (packet_id) {
		case 0x00:	/* Version Number */
		case 0x0C:	/* CDMA status packet */
		case 0x1C:	/* Diag Version */
		case 0x1D:	/* Time Stamp */
		case 0x60:	/* Event Report Control */
		case 0x63:	/* Status snapshot */
		case 0x73:	/* Logging Configuration */
		case 0x7C:	/* Extended build ID */
		case 0x7D:	/* Extended Message configuration */
		case 0x81:	/* Event get mask */
		case 0x82:	/* Set the event mask */
			return 1;
		default:
			return 0;
		}
	}
	return 0;
}

static void diag_md_session_init(void)
{
	int i;

	mutex_init(&driver->md_session_lock);
	driver->md_session_mask = 0;
	driver->md_session_mode = DIAG_MD_NONE;
	for (i = 0; i < NUM_MD_SESSIONS; i++)
		driver->md_session_map[i] = NULL;
}

static void diag_md_session_exit(void)
{
	int i;
	struct diag_md_session_t *session_info = NULL;

	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		if (driver->md_session_map[i]) {
			session_info = driver->md_session_map[i];
			diag_log_mask_free(session_info->log_mask);
			kfree(session_info->log_mask);
			session_info->log_mask = NULL;
			diag_msg_mask_free(session_info->msg_mask);
			kfree(session_info->msg_mask);
			session_info->msg_mask = NULL;
			diag_event_mask_free(session_info->event_mask);
			kfree(session_info->event_mask);
			session_info->event_mask = NULL;
			kfree(session_info);
			session_info = NULL;
			driver->md_session_map[i] = NULL;
		}
	}
	mutex_destroy(&driver->md_session_lock);
	driver->md_session_mask = 0;
	driver->md_session_mode = DIAG_MD_NONE;
}

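/*
 * Create a memory-device logging session for the calling process and
 * claim the requested peripherals. The session gets its own copies of
 * the log, event and msg masks; creation fails if any requested
 * peripheral is already owned by another session.
 */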
int diag_md_session_create(int mode, int peripheral_mask, int proc)
{
	int i;
	int err = 0;
	struct diag_md_session_t *new_session = NULL;

	/*
	 * If a session is running with a peripheral mask and a new session
	 * request comes in with same peripheral mask value then return
	 * invalid param
	 */
	if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
	    (driver->md_session_mask & peripheral_mask) != 0)
		return -EINVAL;

	mutex_lock(&driver->md_session_lock);
	new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
	if (!new_session) {
		mutex_unlock(&driver->md_session_lock);
		return -ENOMEM;
	}

	new_session->peripheral_mask = 0;
	new_session->pid = current->tgid;
	new_session->task = current;

	new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
					GFP_KERNEL);
	if (!new_session->log_mask) {
		err = -ENOMEM;
		goto fail_peripheral;
	}
	new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
					  GFP_KERNEL);
	if (!new_session->event_mask) {
		err = -ENOMEM;
		goto fail_peripheral;
	}
	new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
					GFP_KERNEL);
	if (!new_session->msg_mask) {
		err = -ENOMEM;
		goto fail_peripheral;
	}

	err = diag_log_mask_copy(new_session->log_mask, &log_mask);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "return value of log copy. err %d\n", err);
		goto fail_peripheral;
	}
	err = diag_event_mask_copy(new_session->event_mask, &event_mask);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "return value of event copy. err %d\n", err);
		goto fail_peripheral;
	}
	err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "return value of msg copy. err %d\n", err);
		goto fail_peripheral;
	}
	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
			continue;
		if (driver->md_session_map[i] != NULL) {
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
				 "another instance present for %d\n", i);
			err = -EEXIST;
			goto fail_peripheral;
		}
		new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
		driver->md_session_map[i] = new_session;
		driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
	}
	setup_timer(&new_session->hdlc_reset_timer,
		    diag_md_hdlc_reset_timer_func,
		    new_session->pid);

	driver->md_session_mode = DIAG_MD_PERIPHERAL;
	mutex_unlock(&driver->md_session_lock);
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "created session in peripheral mode\n");
	return 0;

fail_peripheral:
	diag_log_mask_free(new_session->log_mask);
	kfree(new_session->log_mask);
	new_session->log_mask = NULL;
	diag_event_mask_free(new_session->event_mask);
	kfree(new_session->event_mask);
	new_session->event_mask = NULL;
	diag_msg_mask_free(new_session->msg_mask);
	kfree(new_session->msg_mask);
	new_session->msg_mask = NULL;
	kfree(new_session);
	new_session = NULL;
	mutex_unlock(&driver->md_session_lock);
	return err;
}

static void diag_md_session_close(struct diag_md_session_t *session_info)
{
	int i;
	uint8_t found = 0;

	if (!session_info)
		return;

	mutex_lock(&driver->md_session_lock);
	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		if (driver->md_session_map[i] != session_info)
			continue;
		driver->md_session_map[i] = NULL;
		driver->md_session_mask &= ~session_info->peripheral_mask;
	}
	diag_log_mask_free(session_info->log_mask);
	kfree(session_info->log_mask);
	session_info->log_mask = NULL;
	diag_msg_mask_free(session_info->msg_mask);
	kfree(session_info->msg_mask);
	session_info->msg_mask = NULL;
	diag_event_mask_free(session_info->event_mask);
	kfree(session_info->event_mask);
	session_info->event_mask = NULL;
	del_timer(&session_info->hdlc_reset_timer);

	for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
		if (driver->md_session_map[i] != NULL)
			found = 1;
	}

	driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
	kfree(session_info);
	session_info = NULL;
	mutex_unlock(&driver->md_session_lock);
	DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
}

struct diag_md_session_t *diag_md_session_get_pid(int pid)
{
	int i;

	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		if (driver->md_session_map[i] &&
		    driver->md_session_map[i]->pid == pid)
			return driver->md_session_map[i];
	}
	return NULL;
}

struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
{
	if (peripheral >= NUM_MD_SESSIONS)
		return NULL;
	return driver->md_session_map[peripheral];
}

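/*
 * Move the peripherals in peripheral_mask between the caller's session
 * and USB logging: DIAG_USB_MODE releases them from the session,
 * DIAG_MEMORY_DEVICE_MODE claims them for it.
 */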
static int diag_md_peripheral_switch(struct diag_md_session_t *session_info,
				     int peripheral_mask, int req_mode)
{
	int i, bit = 0;

	if (!session_info)
		return -EINVAL;
	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
		return -EINVAL;

	/*
	 * check that md_session_map for i == session_info,
	 * if not then race condition occurred and bail
	 */
	mutex_lock(&driver->md_session_lock);
	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
		if (!bit)
			continue;
		if (req_mode == DIAG_USB_MODE) {
			if (driver->md_session_map[i] != session_info) {
				mutex_unlock(&driver->md_session_lock);
				return -EINVAL;
			}
			driver->md_session_map[i] = NULL;
			driver->md_session_mask &= ~bit;
			session_info->peripheral_mask &= ~bit;

		} else {
			if (driver->md_session_map[i] != NULL) {
				mutex_unlock(&driver->md_session_lock);
				return -EINVAL;
			}
			driver->md_session_map[i] = session_info;
			driver->md_session_mask |= bit;
			session_info->peripheral_mask |= bit;

		}
	}

	driver->md_session_mode = DIAG_MD_PERIPHERAL;
	mutex_unlock(&driver->md_session_lock);
	DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
		 peripheral_mask, req_mode);
	return 0;
}

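/*
 * Validate a requested logging mode switch against the current mode and
 * any existing memory-device sessions. Sets *change_mode when the mux
 * layer actually has to switch, and creates, trims or closes md
 * sessions as required.
 */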
static int diag_md_session_check(int curr_mode, int req_mode,
				 const struct diag_logging_mode_param_t *param,
				 uint8_t *change_mode)
{
	int i, bit = 0, err = 0;
	int change_mask = 0;
	struct diag_md_session_t *session_info = NULL;

	if (!param || !change_mode)
		return -EIO;

	*change_mode = 0;

	switch (curr_mode) {
	case DIAG_USB_MODE:
	case DIAG_MEMORY_DEVICE_MODE:
	case DIAG_MULTI_MODE:
		break;
	default:
		return -EINVAL;
	}

	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
		return -EINVAL;

	if (req_mode == DIAG_USB_MODE) {
		if (curr_mode == DIAG_USB_MODE)
			return 0;
		if (driver->md_session_mode == DIAG_MD_NONE
		    && driver->md_session_mask == 0 && driver->logging_mask) {
			*change_mode = 1;
			return 0;
		}

		/*
		 * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
		 * Check if requested peripherals are already in usb mode
		 */
		for (i = 0; i < NUM_MD_SESSIONS; i++) {
			bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
			if (!bit)
				continue;
			if (bit & driver->logging_mask)
				change_mask |= bit;
		}
		if (!change_mask)
			return 0;

		/*
		 * Change is needed. Check if this md_session has set all the
		 * requested peripherals. If another md session set a requested
		 * peripheral then we cannot switch that peripheral to USB.
		 * If this session owns all the requested peripherals, then
		 * call function to switch the modes/masks for the md_session
		 */
		session_info = diag_md_session_get_pid(current->tgid);
		if (!session_info) {
			*change_mode = 1;
			return 0;
		}
		if ((change_mask & session_info->peripheral_mask)
		    != change_mask) {
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
				 "Another MD Session owns a requested peripheral\n");
			return -EINVAL;
		}
		*change_mode = 1;

		/* If all peripherals are being set to USB Mode, call close */
		if (~change_mask & session_info->peripheral_mask) {
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else
			diag_md_session_close(session_info);

		return err;

	} else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
		/*
		 * Get bit mask that represents what peripherals already have
		 * been set. Check that requested peripherals already set are
		 * owned by this md session
		 */
		change_mask = driver->md_session_mask & param->peripheral_mask;
		session_info = diag_md_session_get_pid(current->tgid);

		if (session_info) {
			if ((session_info->peripheral_mask & change_mask)
			    != change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
					 "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else {
			if (change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
					 "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			err = diag_md_session_create(DIAG_MD_PERIPHERAL,
				param->peripheral_mask, DIAG_LOCAL_PROC);
		}
		*change_mode = 1;
		return err;
	}
	return -EINVAL;
}

static uint32_t diag_translate_mask(uint32_t peripheral_mask)
{
	uint32_t ret = 0;

	if (peripheral_mask & DIAG_CON_APSS)
		ret |= (1 << APPS_DATA);
	if (peripheral_mask & DIAG_CON_MPSS)
		ret |= (1 << PERIPHERAL_MODEM);
	if (peripheral_mask & DIAG_CON_LPASS)
		ret |= (1 << PERIPHERAL_LPASS);
	if (peripheral_mask & DIAG_CON_WCNSS)
		ret |= (1 << PERIPHERAL_WCNSS);
	if (peripheral_mask & DIAG_CON_SENSORS)
		ret |= (1 << PERIPHERAL_SENSORS);
	if (peripheral_mask & DIAG_CON_WDSP)
		ret |= (1 << PERIPHERAL_WDSP);
	if (peripheral_mask & DIAG_CON_CDSP)
		ret |= (1 << PERIPHERAL_CDSP);
	if (peripheral_mask & DIAG_CON_UPD_WLAN)
		ret |= (1 << UPD_WLAN);

	return ret;
}

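/*
 * Handle a logging mode switch request from user space: translate the
 * user-visible peripheral/PD masks, validate the request against
 * existing md sessions and, when needed, ask the mux layer to move
 * between USB and memory-device logging.
 */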
static int diag_switch_logging(struct diag_logging_mode_param_t *param)
{
	int new_mode, i = 0;
	int curr_mode;
	int err = 0;
	uint8_t do_switch = 1;
	uint32_t peripheral_mask = 0;
	uint8_t peripheral, upd;

	if (!param)
		return -EINVAL;

	if (!param->peripheral_mask) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "asking for mode switch with no peripheral mask set\n");
		return -EINVAL;
	}

	if (param->pd_mask) {
		switch (param->pd_mask) {
		case DIAG_CON_UPD_WLAN:
			peripheral = PERIPHERAL_MODEM;
			upd = UPD_WLAN;
			break;
		default:
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
				 "asking for mode switch with no pd mask set\n");
			return -EINVAL;
		}

		if (driver->md_session_map[peripheral] &&
		    (MD_PERIPHERAL_MASK(peripheral) &
		     diag_mux->mux_mask)) {
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
				 "diag_fr: User PD is already logging onto active peripheral logging\n");
			i = upd - UPD_WLAN;
			driver->pd_session_clear[i] = 0;
			return -EINVAL;
		}
		peripheral_mask = diag_translate_mask(param->pd_mask);
		param->peripheral_mask = peripheral_mask;
		i = upd - UPD_WLAN;
		if (!driver->pd_session_clear[i]) {
			driver->pd_logging_mode[i] = 1;
			driver->num_pd_session += 1;
		}
		driver->pd_session_clear[i] = 0;
	} else {
		peripheral_mask = diag_translate_mask(param->peripheral_mask);
		param->peripheral_mask = peripheral_mask;
	}

	switch (param->req_mode) {
	case CALLBACK_MODE:
	case UART_MODE:
	case SOCKET_MODE:
	case MEMORY_DEVICE_MODE:
		new_mode = DIAG_MEMORY_DEVICE_MODE;
		break;
	case USB_MODE:
		new_mode = DIAG_USB_MODE;
		break;
	default:
		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
		       __func__, param->req_mode);
		return -EINVAL;
	}

	curr_mode = driver->logging_mode;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "request to switch logging from %d mask:%0x to new_mode %d mask:%0x\n",
		 curr_mode, driver->md_session_mask, new_mode, peripheral_mask);

	err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "err from diag_md_session_check, err: %d\n", err);
		return err;
	}

	if (do_switch == 0) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "not switching modes c: %d n: %d\n",
			 curr_mode, new_mode);
		return 0;
	}

	diag_ws_reset(DIAG_WS_MUX);
	err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
	if (err) {
		pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
		       __func__, curr_mode, new_mode, err);
		driver->logging_mode = curr_mode;
		goto fail;
	}
	driver->logging_mode = new_mode;
	driver->logging_mask = peripheral_mask;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);

	/* Update to take peripheral_mask */
	if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
		diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
					   MODE_REALTIME, ALL_PROC);
	} else {
		diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
				      ALL_PROC);
	}

	if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
	      curr_mode == DIAG_USB_MODE)) {
		queue_work(driver->diag_real_time_wq,
			   &driver->diag_real_time_work);
	}

	return 0;
fail:
	return err;
}

1746static int diag_ioctl_dci_reg(unsigned long ioarg)
1747{
1748 int result = -EINVAL;
1749 struct diag_dci_reg_tbl_t dci_reg_params;
1750
1751 if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
1752 sizeof(struct diag_dci_reg_tbl_t)))
1753 return -EFAULT;
1754
1755 result = diag_dci_register_client(&dci_reg_params);
1756
1757 return result;
1758}
1759
1760static int diag_ioctl_dci_health_stats(unsigned long ioarg)
1761{
1762 int result = -EINVAL;
1763 struct diag_dci_health_stats_proc stats;
1764
1765 if (copy_from_user(&stats, (void __user *)ioarg,
1766 sizeof(struct diag_dci_health_stats_proc)))
1767 return -EFAULT;
1768
1769 result = diag_dci_copy_health_stats(&stats);
1770 if (result == DIAG_DCI_NO_ERROR) {
1771 if (copy_to_user((void __user *)ioarg, &stats,
1772 sizeof(struct diag_dci_health_stats_proc)))
1773 return -EFAULT;
1774 }
1775
1776 return result;
1777}
1778
1779static int diag_ioctl_dci_log_status(unsigned long ioarg)
1780{
1781 struct diag_log_event_stats le_stats;
1782 struct diag_dci_client_tbl *dci_client = NULL;
1783
1784 if (copy_from_user(&le_stats, (void __user *)ioarg,
1785 sizeof(struct diag_log_event_stats)))
1786 return -EFAULT;
1787
1788 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1789 if (!dci_client)
1790 return DIAG_DCI_NOT_SUPPORTED;
1791 le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
1792 if (copy_to_user((void __user *)ioarg, &le_stats,
1793 sizeof(struct diag_log_event_stats)))
1794 return -EFAULT;
1795
1796 return DIAG_DCI_NO_ERROR;
1797}
1798
1799static int diag_ioctl_dci_event_status(unsigned long ioarg)
1800{
1801 struct diag_log_event_stats le_stats;
1802 struct diag_dci_client_tbl *dci_client = NULL;
1803
1804 if (copy_from_user(&le_stats, (void __user *)ioarg,
1805 sizeof(struct diag_log_event_stats)))
1806 return -EFAULT;
1807
1808 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1809 if (!dci_client)
1810 return DIAG_DCI_NOT_SUPPORTED;
1811
1812 le_stats.is_set = diag_dci_query_event_mask(dci_client, le_stats.code);
1813 if (copy_to_user((void __user *)ioarg, &le_stats,
1814 sizeof(struct diag_log_event_stats)))
1815 return -EFAULT;
1816
1817 return DIAG_DCI_NO_ERROR;
1818}
1819
1820static int diag_ioctl_lsm_deinit(void)
1821{
1822 int i;
1823
Mohit Aggarwal9f694302017-07-06 10:16:52 +05301824 mutex_lock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001825 for (i = 0; i < driver->num_clients; i++)
1826 if (driver->client_map[i].pid == current->tgid)
1827 break;
1828
Mohit Aggarwal9f694302017-07-06 10:16:52 +05301829 if (i == driver->num_clients) {
1830 mutex_unlock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001831 return -EINVAL;
Mohit Aggarwal9f694302017-07-06 10:16:52 +05301832 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001833
1834 driver->data_ready[i] |= DEINIT_TYPE;
Mohit Aggarwal9f694302017-07-06 10:16:52 +05301835 mutex_unlock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001836 wake_up_interruptible(&driver->wait_q);
1837
1838 return 1;
1839}
1840
1841static int diag_ioctl_vote_real_time(unsigned long ioarg)
1842{
1843 int real_time = 0;
1844 int temp_proc = ALL_PROC;
1845 struct real_time_vote_t vote;
1846 struct diag_dci_client_tbl *dci_client = NULL;
1847
1848 if (copy_from_user(&vote, (void __user *)ioarg,
1849 sizeof(struct real_time_vote_t)))
1850 return -EFAULT;
1851
1852 if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
1853 vote.real_time_vote > MODE_UNKNOWN ||
1854 vote.client_id < 0) {
1855 pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
1856 __func__, vote.proc, vote.real_time_vote,
1857 vote.client_id);
1858 return -EINVAL;
1859 }
1860
1861 driver->real_time_update_busy++;
1862 if (vote.proc == DIAG_PROC_DCI) {
1863 dci_client = diag_dci_get_client_entry(vote.client_id);
1864 if (!dci_client) {
1865 driver->real_time_update_busy--;
1866 return DIAG_DCI_NOT_SUPPORTED;
1867 }
1868 diag_dci_set_real_time(dci_client, vote.real_time_vote);
1869 real_time = diag_dci_get_cumulative_real_time(
1870 dci_client->client_info.token);
1871 diag_update_real_time_vote(vote.proc, real_time,
1872 dci_client->client_info.token);
1873 } else {
1874 real_time = vote.real_time_vote;
1875 temp_proc = vote.client_id;
1876 diag_update_real_time_vote(vote.proc, real_time,
1877 temp_proc);
1878 }
1879 queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
1880 return 0;
1881}
1882
1883static int diag_ioctl_get_real_time(unsigned long ioarg)
1884{
1885 int i;
1886 int retry_count = 0;
1887 int timer = 0;
1888 struct real_time_query_t rt_query;
1889
1890 if (copy_from_user(&rt_query, (void __user *)ioarg,
1891 sizeof(struct real_time_query_t)))
1892 return -EFAULT;
1893 while (retry_count < 3) {
1894 if (driver->real_time_update_busy > 0) {
1895 retry_count++;
1896 /*
1897			 * The 10 ms sleep below was chosen empirically to give
1898			 * the work queued on diag_real_time_wq enough time to
1899			 * finish processing.
1900 */
1901 for (timer = 0; timer < 5; timer++)
1902 usleep_range(10000, 10100);
1903 } else {
1904 break;
1905 }
1906 }
1907
1908 if (driver->real_time_update_busy > 0)
1909 return -EAGAIN;
1910
1911 if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
1912 pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
1913 __func__);
1914 return -EINVAL;
1915 }
1916 rt_query.real_time = driver->real_time_mode[rt_query.proc];
1917 /*
1918 * For the local processor, if any of the peripherals is in buffering
1919	 * mode, overwrite the value of real time with MODE_UNKNOWN
1920 */
1921 if (rt_query.proc == DIAG_LOCAL_PROC) {
1922 for (i = 0; i < NUM_PERIPHERALS; i++) {
1923 if (!driver->feature[i].peripheral_buffering)
1924 continue;
1925 switch (driver->buffering_mode[i].mode) {
1926 case DIAG_BUFFERING_MODE_CIRCULAR:
1927 case DIAG_BUFFERING_MODE_THRESHOLD:
1928 rt_query.real_time = MODE_UNKNOWN;
1929 break;
1930 }
1931 }
1932 }
1933
1934 if (copy_to_user((void __user *)ioarg, &rt_query,
1935 sizeof(struct real_time_query_t)))
1936 return -EFAULT;
1937
1938 return 0;
1939}
1940
1941static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
1942{
1943 struct diag_buffering_mode_t params;
1944
1945 if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
1946 return -EFAULT;
1947
1948 if (params.peripheral >= NUM_PERIPHERALS)
1949 return -EINVAL;
1950
1951 mutex_lock(&driver->mode_lock);
1952 driver->buffering_flag[params.peripheral] = 1;
1953 mutex_unlock(&driver->mode_lock);
1954
1955 return diag_send_peripheral_buffering_mode(&params);
1956}
1957
1958static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
1959{
1960 uint8_t peripheral;
1961
1962 if (copy_from_user(&peripheral, (void __user *)ioarg, sizeof(uint8_t)))
1963 return -EFAULT;
1964
1965 if (peripheral >= NUM_PERIPHERALS) {
1966 pr_err("diag: In %s, invalid peripheral %d\n", __func__,
1967 peripheral);
1968 return -EINVAL;
1969 }
1970
1971 if (!driver->feature[peripheral].peripheral_buffering) {
1972 pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
1973 __func__, peripheral);
1974 return -EIO;
1975 }
1976
1977 return diag_send_peripheral_drain_immediate(peripheral);
1978}
1979
1980static int diag_ioctl_dci_support(unsigned long ioarg)
1981{
1982 struct diag_dci_peripherals_t dci_support;
1983 int result = -EINVAL;
1984
1985 if (copy_from_user(&dci_support, (void __user *)ioarg,
1986 sizeof(struct diag_dci_peripherals_t)))
1987 return -EFAULT;
1988
1989 result = diag_dci_get_support_list(&dci_support);
1990 if (result == DIAG_DCI_NO_ERROR)
1991 if (copy_to_user((void __user *)ioarg, &dci_support,
1992 sizeof(struct diag_dci_peripherals_t)))
1993 return -EFAULT;
1994
1995 return result;
1996}
1997
1998static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
1999{
2000 uint8_t hdlc_support;
2001 struct diag_md_session_t *session_info = NULL;
2002
2003 session_info = diag_md_session_get_pid(current->tgid);
2004 if (copy_from_user(&hdlc_support, (void __user *)ioarg,
2005 sizeof(uint8_t)))
2006 return -EFAULT;
2007 mutex_lock(&driver->hdlc_disable_mutex);
2008 if (session_info) {
2009 mutex_lock(&driver->md_session_lock);
2010 session_info->hdlc_disabled = hdlc_support;
2011 mutex_unlock(&driver->md_session_lock);
2012 } else
2013 driver->hdlc_disabled = hdlc_support;
2014 mutex_unlock(&driver->hdlc_disable_mutex);
2015 diag_update_md_clients(HDLC_SUPPORT_TYPE);
2016
2017 return 0;
2018}
2019
Manoj Prabhu B571cf422017-08-08 19:01:41 +05302020static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
2021{
2022 int ret = -EINVAL;
2023 int peripheral;
2024 char *p_str = NULL;
2025
2026 if (!param)
2027 return -EINVAL;
2028
2029 if (!param->pd_mask) {
2030 DIAG_LOG(DIAG_DEBUG_USERSPACE,
2031 "query with no pd mask set, returning error\n");
2032 return -EINVAL;
2033 }
2034
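	/*
	 * Only the WLAN user PD, which is hosted on the modem, can be queried
	 * at present; any other pd_mask is rejected.
	 */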
2035 switch (param->pd_mask) {
2036 case DIAG_CON_UPD_WLAN:
2037 peripheral = PERIPHERAL_MODEM;
2038 p_str = "MODEM";
2039 break;
2040 default:
2041 DIAG_LOG(DIAG_DEBUG_USERSPACE,
2042 "Invalid pd mask, returning EINVAL\n");
2043 return -EINVAL;
2044 }
2045
2046 mutex_lock(&driver->diag_cntl_mutex);
2047 DIAG_LOG(DIAG_DEBUG_USERSPACE,
2048 "diag: %s: Untagging support on APPS is %s\n", __func__,
2049 ((driver->supports_apps_header_untagging) ?
2050 "present" : "absent"));
2051
2052 DIAG_LOG(DIAG_DEBUG_USERSPACE,
2053 "diag: %s: Tagging support on %s is %s\n",
2054 __func__, p_str,
2055 (driver->feature[peripheral].untag_header ?
2056 "present" : "absent"));
2057
2058 if (driver->supports_apps_header_untagging &&
2059 driver->feature[peripheral].untag_header)
2060 ret = 0;
2061
2062 mutex_unlock(&driver->diag_cntl_mutex);
2063 return ret;
2064}
2065
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002066static int diag_ioctl_register_callback(unsigned long ioarg)
2067{
2068 int err = 0;
2069 struct diag_callback_reg_t reg;
2070
2071 if (copy_from_user(&reg, (void __user *)ioarg,
2072 sizeof(struct diag_callback_reg_t))) {
2073 return -EFAULT;
2074 }
2075
2076 if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
2077 pr_err("diag: In %s, invalid proc %d for callback registration\n",
2078 __func__, reg.proc);
2079 return -EINVAL;
2080 }
2081
2082 if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
2083 return -EIO;
2084
2085 return err;
2086}
2087
2088static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
2089{
2090 int i;
2091 int err = 0;
2092 uint32_t count = 0;
2093 struct diag_cmd_reg_entry_t *entries = NULL;
2094 const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
2095
2096
2097 if (!reg_tbl) {
2098 pr_err("diag: In %s, invalid registration table\n", __func__);
2099 return -EINVAL;
2100 }
2101
2102 count = reg_tbl->count;
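	/*
	 * Reject a count that would overflow the count * entry_len
	 * multiplication used for the allocation below.
	 */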
2103 if ((UINT_MAX / entry_len) < count) {
2104		pr_warn("diag: In %s, possible integer overflow.\n", __func__);
2105 return -EFAULT;
2106 }
2107
2108 entries = kzalloc(count * entry_len, GFP_KERNEL);
2109 if (!entries)
2110 return -ENOMEM;
2111
2112
2113 err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
2114 if (err) {
2115 pr_err("diag: In %s, error copying data from userspace, err: %d\n",
2116 __func__, err);
2117 kfree(entries);
2118 return -EFAULT;
2119 }
2120
2121 for (i = 0; i < count; i++) {
2122 err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
2123 if (err) {
2124 pr_err("diag: In %s, unable to register command, err: %d\n",
2125 __func__, err);
2126 break;
2127 }
2128 }
2129
2130 kfree(entries);
2131 return err;
2132}
2133
2134static int diag_ioctl_cmd_reg(unsigned long ioarg)
2135{
2136 struct diag_cmd_reg_tbl_t reg_tbl;
2137
2138 if (copy_from_user(&reg_tbl, (void __user *)ioarg,
2139 sizeof(struct diag_cmd_reg_tbl_t))) {
2140 return -EFAULT;
2141 }
2142
2143 return diag_cmd_register_tbl(&reg_tbl);
2144}
2145
2146static int diag_ioctl_cmd_dereg(void)
2147{
2148 diag_cmd_remove_reg_by_pid(current->tgid);
2149 return 0;
2150}
2151
2152#ifdef CONFIG_COMPAT
2153/*
2154 * @sync_obj_name: name of the synchronization object associated with this proc
2155 * @count: number of entries in the bind
2156 * @params: the actual packet registrations
2157 */
2158struct diag_cmd_reg_tbl_compat_t {
2159 char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
2160 uint32_t count;
2161 compat_uptr_t entries;
2162};
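/*
 * The entries field is a compat_uptr_t so that a pointer coming from 32-bit
 * user space is carried correctly; diag_ioctl_cmd_reg_compat() widens it back
 * to a regular pointer before handing the table to diag_cmd_register_tbl().
 */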
2163
2164static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
2165{
2166 struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
2167 struct diag_cmd_reg_tbl_t reg_tbl;
2168
2169 if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
2170 sizeof(struct diag_cmd_reg_tbl_compat_t))) {
2171 return -EFAULT;
2172 }
2173
2174 strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
2175 MAX_SYNC_OBJ_NAME_SIZE);
2176 reg_tbl.count = reg_tbl_compat.count;
2177 reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
2178 (uintptr_t)reg_tbl_compat.entries;
2179
2180 return diag_cmd_register_tbl(&reg_tbl);
2181}
2182
2183long diagchar_compat_ioctl(struct file *filp,
2184 unsigned int iocmd, unsigned long ioarg)
2185{
2186 int result = -EINVAL;
2187 int client_id = 0;
2188 uint16_t delayed_rsp_id = 0;
2189 uint16_t remote_dev;
2190 struct diag_dci_client_tbl *dci_client = NULL;
2191 struct diag_logging_mode_param_t mode_param;
2192
2193 switch (iocmd) {
2194 case DIAG_IOCTL_COMMAND_REG:
2195 result = diag_ioctl_cmd_reg_compat(ioarg);
2196 break;
2197 case DIAG_IOCTL_COMMAND_DEREG:
2198 result = diag_ioctl_cmd_dereg();
2199 break;
2200 case DIAG_IOCTL_GET_DELAYED_RSP_ID:
2201 delayed_rsp_id = diag_get_next_delayed_rsp_id();
2202 if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
2203 sizeof(uint16_t)))
2204 result = -EFAULT;
2205 else
2206 result = 0;
2207 break;
2208 case DIAG_IOCTL_DCI_REG:
2209 result = diag_ioctl_dci_reg(ioarg);
2210 break;
2211 case DIAG_IOCTL_DCI_DEINIT:
2212 mutex_lock(&driver->dci_mutex);
2213 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2214 sizeof(int))) {
2215 mutex_unlock(&driver->dci_mutex);
2216 return -EFAULT;
2217 }
2218 dci_client = diag_dci_get_client_entry(client_id);
2219 if (!dci_client) {
2220 mutex_unlock(&driver->dci_mutex);
2221 return DIAG_DCI_NOT_SUPPORTED;
2222 }
2223 result = diag_dci_deinit_client(dci_client);
2224 mutex_unlock(&driver->dci_mutex);
2225 break;
2226 case DIAG_IOCTL_DCI_SUPPORT:
2227 result = diag_ioctl_dci_support(ioarg);
2228 break;
2229 case DIAG_IOCTL_DCI_HEALTH_STATS:
2230 mutex_lock(&driver->dci_mutex);
2231 result = diag_ioctl_dci_health_stats(ioarg);
2232 mutex_unlock(&driver->dci_mutex);
2233 break;
2234 case DIAG_IOCTL_DCI_LOG_STATUS:
2235 mutex_lock(&driver->dci_mutex);
2236 result = diag_ioctl_dci_log_status(ioarg);
2237 mutex_unlock(&driver->dci_mutex);
2238 break;
2239 case DIAG_IOCTL_DCI_EVENT_STATUS:
2240 mutex_lock(&driver->dci_mutex);
2241 result = diag_ioctl_dci_event_status(ioarg);
2242 mutex_unlock(&driver->dci_mutex);
2243 break;
2244 case DIAG_IOCTL_DCI_CLEAR_LOGS:
2245 mutex_lock(&driver->dci_mutex);
2246 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2247 sizeof(int))) {
2248 mutex_unlock(&driver->dci_mutex);
2249 return -EFAULT;
2250 }
2251 result = diag_dci_clear_log_mask(client_id);
2252 mutex_unlock(&driver->dci_mutex);
2253 break;
2254 case DIAG_IOCTL_DCI_CLEAR_EVENTS:
2255 mutex_lock(&driver->dci_mutex);
2256 if (copy_from_user(&client_id, (void __user *)ioarg,
2257 sizeof(int))) {
2258 mutex_unlock(&driver->dci_mutex);
2259 return -EFAULT;
2260 }
2261 result = diag_dci_clear_event_mask(client_id);
2262 mutex_unlock(&driver->dci_mutex);
2263 break;
2264 case DIAG_IOCTL_LSM_DEINIT:
2265 result = diag_ioctl_lsm_deinit();
2266 break;
2267 case DIAG_IOCTL_SWITCH_LOGGING:
2268 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2269 sizeof(mode_param)))
2270 return -EFAULT;
2271 mutex_lock(&driver->diagchar_mutex);
2272 result = diag_switch_logging(&mode_param);
2273 mutex_unlock(&driver->diagchar_mutex);
2274 break;
2275 case DIAG_IOCTL_REMOTE_DEV:
2276 remote_dev = diag_get_remote_device_mask();
2277 if (copy_to_user((void __user *)ioarg, &remote_dev,
2278 sizeof(uint16_t)))
2279 result = -EFAULT;
2280 else
2281 result = 1;
2282 break;
2283 case DIAG_IOCTL_VOTE_REAL_TIME:
2284 mutex_lock(&driver->dci_mutex);
2285 result = diag_ioctl_vote_real_time(ioarg);
2286 mutex_unlock(&driver->dci_mutex);
2287 break;
2288 case DIAG_IOCTL_GET_REAL_TIME:
2289 result = diag_ioctl_get_real_time(ioarg);
2290 break;
2291 case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
2292 result = diag_ioctl_set_buffering_mode(ioarg);
2293 break;
2294 case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
2295 result = diag_ioctl_peripheral_drain_immediate(ioarg);
2296 break;
2297 case DIAG_IOCTL_REGISTER_CALLBACK:
2298 result = diag_ioctl_register_callback(ioarg);
2299 break;
2300 case DIAG_IOCTL_HDLC_TOGGLE:
2301 result = diag_ioctl_hdlc_toggle(ioarg);
2302 break;
Manoj Prabhu B571cf422017-08-08 19:01:41 +05302303 case DIAG_IOCTL_QUERY_PD_LOGGING:
2304 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2305 sizeof(mode_param)))
2306 return -EFAULT;
2307 result = diag_ioctl_query_pd_logging(&mode_param);
2308 break;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002309 }
2310 return result;
2311}
2312#endif
2313
2314long diagchar_ioctl(struct file *filp,
2315 unsigned int iocmd, unsigned long ioarg)
2316{
2317 int result = -EINVAL;
2318 int client_id = 0;
2319 uint16_t delayed_rsp_id;
2320 uint16_t remote_dev;
2321 struct diag_dci_client_tbl *dci_client = NULL;
2322 struct diag_logging_mode_param_t mode_param;
2323
2324 switch (iocmd) {
2325 case DIAG_IOCTL_COMMAND_REG:
2326 result = diag_ioctl_cmd_reg(ioarg);
2327 break;
2328 case DIAG_IOCTL_COMMAND_DEREG:
2329 result = diag_ioctl_cmd_dereg();
2330 break;
2331 case DIAG_IOCTL_GET_DELAYED_RSP_ID:
2332 delayed_rsp_id = diag_get_next_delayed_rsp_id();
2333 if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
2334 sizeof(uint16_t)))
2335 result = -EFAULT;
2336 else
2337 result = 0;
2338 break;
2339 case DIAG_IOCTL_DCI_REG:
2340 result = diag_ioctl_dci_reg(ioarg);
2341 break;
2342 case DIAG_IOCTL_DCI_DEINIT:
2343 mutex_lock(&driver->dci_mutex);
2344 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2345 sizeof(int))) {
2346 mutex_unlock(&driver->dci_mutex);
2347 return -EFAULT;
2348 }
2349 dci_client = diag_dci_get_client_entry(client_id);
2350 if (!dci_client) {
2351 mutex_unlock(&driver->dci_mutex);
2352 return DIAG_DCI_NOT_SUPPORTED;
2353 }
2354 result = diag_dci_deinit_client(dci_client);
2355 mutex_unlock(&driver->dci_mutex);
2356 break;
2357 case DIAG_IOCTL_DCI_SUPPORT:
2358 result = diag_ioctl_dci_support(ioarg);
2359 break;
2360 case DIAG_IOCTL_DCI_HEALTH_STATS:
2361 mutex_lock(&driver->dci_mutex);
2362 result = diag_ioctl_dci_health_stats(ioarg);
2363 mutex_unlock(&driver->dci_mutex);
2364 break;
2365 case DIAG_IOCTL_DCI_LOG_STATUS:
2366 mutex_lock(&driver->dci_mutex);
2367 result = diag_ioctl_dci_log_status(ioarg);
2368 mutex_unlock(&driver->dci_mutex);
2369 break;
2370 case DIAG_IOCTL_DCI_EVENT_STATUS:
Mohit Aggarwal91199ae2017-04-22 10:49:18 +05302371 mutex_lock(&driver->dci_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002372 result = diag_ioctl_dci_event_status(ioarg);
Mohit Aggarwal91199ae2017-04-22 10:49:18 +05302373 mutex_unlock(&driver->dci_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002374 break;
2375 case DIAG_IOCTL_DCI_CLEAR_LOGS:
2376 mutex_lock(&driver->dci_mutex);
2377 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2378 sizeof(int))) {
2379 mutex_unlock(&driver->dci_mutex);
2380 return -EFAULT;
2381 }
2382 result = diag_dci_clear_log_mask(client_id);
2383 mutex_unlock(&driver->dci_mutex);
2384 break;
2385 case DIAG_IOCTL_DCI_CLEAR_EVENTS:
2386 mutex_lock(&driver->dci_mutex);
2387 if (copy_from_user(&client_id, (void __user *)ioarg,
2388 sizeof(int))) {
2389 mutex_unlock(&driver->dci_mutex);
2390 return -EFAULT;
2391 }
2392 result = diag_dci_clear_event_mask(client_id);
2393 mutex_unlock(&driver->dci_mutex);
2394 break;
2395 case DIAG_IOCTL_LSM_DEINIT:
2396 result = diag_ioctl_lsm_deinit();
2397 break;
2398 case DIAG_IOCTL_SWITCH_LOGGING:
2399 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2400 sizeof(mode_param)))
2401 return -EFAULT;
2402 mutex_lock(&driver->diagchar_mutex);
2403 result = diag_switch_logging(&mode_param);
2404 mutex_unlock(&driver->diagchar_mutex);
2405 break;
2406 case DIAG_IOCTL_REMOTE_DEV:
2407 remote_dev = diag_get_remote_device_mask();
2408 if (copy_to_user((void __user *)ioarg, &remote_dev,
2409 sizeof(uint16_t)))
2410 result = -EFAULT;
2411 else
2412 result = 1;
2413 break;
2414 case DIAG_IOCTL_VOTE_REAL_TIME:
2415 mutex_lock(&driver->dci_mutex);
2416 result = diag_ioctl_vote_real_time(ioarg);
2417 mutex_unlock(&driver->dci_mutex);
2418 break;
2419 case DIAG_IOCTL_GET_REAL_TIME:
2420 result = diag_ioctl_get_real_time(ioarg);
2421 break;
2422 case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
2423 result = diag_ioctl_set_buffering_mode(ioarg);
2424 break;
2425 case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
2426 result = diag_ioctl_peripheral_drain_immediate(ioarg);
2427 break;
2428 case DIAG_IOCTL_REGISTER_CALLBACK:
2429 result = diag_ioctl_register_callback(ioarg);
2430 break;
2431 case DIAG_IOCTL_HDLC_TOGGLE:
2432 result = diag_ioctl_hdlc_toggle(ioarg);
2433 break;
Manoj Prabhu B571cf422017-08-08 19:01:41 +05302434 case DIAG_IOCTL_QUERY_PD_LOGGING:
2435 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2436 sizeof(mode_param)))
2437 return -EFAULT;
2438 result = diag_ioctl_query_pd_logging(&mode_param);
2439 break;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002440 }
2441 return result;
2442}
2443
2444static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
2445 int pkt_type)
2446{
2447 int err = 0;
2448 int ret = PKT_DROP;
2449 struct diag_apps_data_t *data = &hdlc_data;
2450 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
2451 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
2452 /*
2453	 * The maximum encoded size of the buffer can be at most twice the
2454	 * length of the packet. Add three bytes for the footer - 16 bit CRC
2455	 * (2 bytes) + delimiter (1 byte).
2456 */
2457 const uint32_t max_encoded_size = ((2 * len) + 3);
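	/* For example, a 100 byte packet can expand to at most 2 * 100 + 3 = 203 bytes. */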
2458
2459 if (!buf || len <= 0) {
2460 pr_err("diag: In %s, invalid buf: %pK len: %d\n",
2461 __func__, buf, len);
2462 return -EIO;
2463 }
2464
2465 if (max_encoded_size > DIAG_MAX_HDLC_BUF_SIZE) {
2466 pr_err_ratelimited("diag: In %s, encoded data is larger %d than the buffer size %d\n",
2467 __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
2468 return -EBADMSG;
2469 }
2470
2471 send.state = DIAG_STATE_START;
2472 send.pkt = buf;
2473 send.last = (void *)(buf + len - 1);
2474 send.terminate = 1;
2475
2476 if (!data->buf)
2477 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2478 APF_DIAG_PADDING,
2479 POOL_TYPE_HDLC);
2480 if (!data->buf) {
2481 ret = PKT_DROP;
2482 goto fail_ret;
2483 }
2484
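	/*
	 * If the worst-case encoded size does not fit in the space left in the
	 * aggregation buffer, flush what has been collected so far and start
	 * over with a freshly allocated buffer.
	 */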
2485 if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
2486 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2487 data->ctxt);
2488 if (err) {
2489 ret = -EIO;
2490 goto fail_free_buf;
2491 }
2492 data->buf = NULL;
2493 data->len = 0;
2494 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2495 APF_DIAG_PADDING,
2496 POOL_TYPE_HDLC);
2497 if (!data->buf) {
2498 ret = PKT_DROP;
2499 goto fail_ret;
2500 }
2501 }
2502
2503 enc.dest = data->buf + data->len;
2504 enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
2505 diag_hdlc_encode(&send, &enc);
2506
2507 /*
2508 * This is to check if after HDLC encoding, we are still within
2509	 * the limits of the aggregation buffer. If not, we write out the
2510 * current buffer and start aggregation in a newly allocated
2511 * buffer.
2512 */
2513 if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
2514 DIAG_MAX_HDLC_BUF_SIZE)) {
2515 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2516 data->ctxt);
2517 if (err) {
2518 ret = -EIO;
2519 goto fail_free_buf;
2520 }
2521 data->buf = NULL;
2522 data->len = 0;
2523 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2524 APF_DIAG_PADDING,
2525 POOL_TYPE_HDLC);
2526 if (!data->buf) {
2527 ret = PKT_DROP;
2528 goto fail_ret;
2529 }
2530
2531 enc.dest = data->buf + data->len;
2532 enc.dest_last = (void *)(data->buf + data->len +
2533 max_encoded_size);
2534 diag_hdlc_encode(&send, &enc);
2535 }
2536
2537 data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
2538 DIAG_MAX_HDLC_BUF_SIZE) ?
2539 ((uintptr_t)enc.dest - (uintptr_t)data->buf) :
2540 DIAG_MAX_HDLC_BUF_SIZE;
2541
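	/*
	 * Command responses are written out to the mux layer immediately rather
	 * than being left behind in the aggregation buffer.
	 */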
2542 if (pkt_type == DATA_TYPE_RESPONSE) {
2543 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2544 data->ctxt);
2545 if (err) {
2546 ret = -EIO;
2547 goto fail_free_buf;
2548 }
2549 data->buf = NULL;
2550 data->len = 0;
2551 }
2552
2553 return PKT_ALLOC;
2554
2555fail_free_buf:
2556 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
2557 data->buf = NULL;
2558 data->len = 0;
2559
2560fail_ret:
2561 return ret;
2562}
2563
2564static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
2565 int pkt_type)
2566{
2567 int err = 0;
2568 int ret = PKT_DROP;
2569 struct diag_pkt_frame_t header;
2570 struct diag_apps_data_t *data = &non_hdlc_data;
2571 /*
2572	 * The maximum packet size, when the data is not HDLC encoded, is equal
2573	 * to the size of the packet frame header plus the payload length. Add 1
2574	 * for the delimiter 0x7E at the end.
2575 */
2576 const uint32_t max_pkt_size = sizeof(header) + len + 1;
2577
2578 if (!buf || len <= 0) {
2579 pr_err("diag: In %s, invalid buf: %pK len: %d\n",
2580 __func__, buf, len);
2581 return -EIO;
2582 }
2583
2584 if (!data->buf) {
2585 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2586 APF_DIAG_PADDING,
2587 POOL_TYPE_HDLC);
2588 if (!data->buf) {
2589 ret = PKT_DROP;
2590 goto fail_ret;
2591 }
2592 }
2593
2594 if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
2595 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2596 data->ctxt);
2597 if (err) {
2598 ret = -EIO;
2599 goto fail_free_buf;
2600 }
2601 data->buf = NULL;
2602 data->len = 0;
2603 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2604 APF_DIAG_PADDING,
2605 POOL_TYPE_HDLC);
2606 if (!data->buf) {
2607 ret = PKT_DROP;
2608 goto fail_ret;
2609 }
2610 }
2611
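	/*
	 * Non-HDLC framing: a diag_pkt_frame_t header (CONTROL_CHAR start byte,
	 * version and payload length) followed by the raw payload and a closing
	 * CONTROL_CHAR delimiter.
	 */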
2612 header.start = CONTROL_CHAR;
2613 header.version = 1;
2614 header.length = len;
2615 memcpy(data->buf + data->len, &header, sizeof(header));
2616 data->len += sizeof(header);
2617 memcpy(data->buf + data->len, buf, len);
2618 data->len += len;
2619 *(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
2620 data->len += sizeof(uint8_t);
2621 if (pkt_type == DATA_TYPE_RESPONSE) {
2622 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2623 data->ctxt);
2624 if (err) {
2625 ret = -EIO;
2626 goto fail_free_buf;
2627 }
2628 data->buf = NULL;
2629 data->len = 0;
2630 }
2631
2632 return PKT_ALLOC;
2633
2634fail_free_buf:
2635 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
2636 data->buf = NULL;
2637 data->len = 0;
2638
2639fail_ret:
2640 return ret;
2641}
2642
2643static int diag_user_process_dci_data(const char __user *buf, int len)
2644{
2645 int err = 0;
2646 const int mempool = POOL_TYPE_USER;
2647 unsigned char *user_space_data = NULL;
2648
2649 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2650 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2651 __func__, buf, len);
2652 return -EBADMSG;
2653 }
2654
2655 user_space_data = diagmem_alloc(driver, len, mempool);
2656 if (!user_space_data)
2657 return -ENOMEM;
2658
2659 err = copy_from_user(user_space_data, buf, len);
2660 if (err) {
2661 pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
2662 __func__, err);
2663 err = DIAG_DCI_SEND_DATA_FAIL;
2664 goto fail;
2665 }
2666
2667 err = diag_process_dci_transaction(user_space_data, len);
2668fail:
2669 diagmem_free(driver, user_space_data, mempool);
2670 user_space_data = NULL;
2671 return err;
2672}
2673
2674static int diag_user_process_dci_apps_data(const char __user *buf, int len,
2675 int pkt_type)
2676{
2677 int err = 0;
2678 const int mempool = POOL_TYPE_COPY;
2679 unsigned char *user_space_data = NULL;
2680
2681 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2682 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2683 __func__, buf, len);
2684 return -EBADMSG;
2685 }
2686
2687 pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
2688 if (!pkt_type) {
2689 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
2690 __func__, pkt_type);
2691 return -EBADMSG;
2692 }
2693
2694 user_space_data = diagmem_alloc(driver, len, mempool);
2695 if (!user_space_data)
2696 return -ENOMEM;
2697
2698 err = copy_from_user(user_space_data, buf, len);
2699 if (err) {
2700 pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
2701 __func__, err);
2702 goto fail;
2703 }
2704
2705 diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
2706fail:
2707 diagmem_free(driver, user_space_data, mempool);
2708 user_space_data = NULL;
2709 return err;
2710}
2711
2712static int diag_user_process_raw_data(const char __user *buf, int len)
2713{
2714 int err = 0;
2715 int ret = 0;
2716 int token_offset = 0;
2717 int remote_proc = 0;
2718 const int mempool = POOL_TYPE_COPY;
2719 unsigned char *user_space_data = NULL;
2720 struct diag_md_session_t *info = NULL;
2721
2722 if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
2723 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2724 __func__, buf, len);
2725 return -EBADMSG;
2726 }
2727
2728 user_space_data = diagmem_alloc(driver, len, mempool);
2729 if (!user_space_data)
2730 return -ENOMEM;
2731
2732 err = copy_from_user(user_space_data, buf, len);
2733 if (err) {
2734 pr_err("diag: copy failed for user space data\n");
2735 goto fail;
2736 }
2737
2738 /* Check for proc_type */
2739 remote_proc = diag_get_remote(*(int *)user_space_data);
2740 if (remote_proc) {
2741 token_offset = sizeof(int);
2742 if (len <= MIN_SIZ_ALLOW) {
2743 pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
2744 __func__, len);
2745 diagmem_free(driver, user_space_data, mempool);
2746 user_space_data = NULL;
2747 return -EBADMSG;
2748 }
2749 len -= sizeof(int);
2750 }
2751 if (driver->mask_check) {
2752 if (!mask_request_validate(user_space_data +
2753 token_offset)) {
2754 pr_alert("diag: mask request Invalid\n");
2755 diagmem_free(driver, user_space_data, mempool);
2756 user_space_data = NULL;
2757 return -EFAULT;
2758 }
2759 }
2760 if (remote_proc) {
2761 ret = diag_send_raw_data_remote(remote_proc,
2762 (void *)(user_space_data + token_offset),
2763 len, USER_SPACE_RAW_DATA);
2764 if (ret) {
2765 pr_err("diag: Error sending data to remote proc %d, err: %d\n",
2766 remote_proc, ret);
2767 }
2768 } else {
2769 wait_event_interruptible(driver->wait_q,
2770 (driver->in_busy_pktdata == 0));
2771 info = diag_md_session_get_pid(current->tgid);
2772 ret = diag_process_apps_pkt(user_space_data, len, info);
2773 if (ret == 1)
2774 diag_send_error_rsp((void *)(user_space_data), len);
2775 }
2776fail:
2777 diagmem_free(driver, user_space_data, mempool);
2778 user_space_data = NULL;
2779 return ret;
2780}
2781
2782static int diag_user_process_userspace_data(const char __user *buf, int len)
2783{
2784 int err = 0;
2785 int max_retries = 3;
2786 int retry_count = 0;
2787 int remote_proc = 0;
2788 int token_offset = 0;
2789 struct diag_md_session_t *session_info = NULL;
2790 uint8_t hdlc_disabled;
2791
2792 if (!buf || len <= 0 || len > USER_SPACE_DATA) {
2793 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2794 __func__, buf, len);
2795 return -EBADMSG;
2796 }
2797
2798 do {
2799 if (!driver->user_space_data_busy)
2800 break;
2801 retry_count++;
2802 usleep_range(10000, 10100);
2803 } while (retry_count < max_retries);
2804
2805 if (driver->user_space_data_busy)
2806 return -EAGAIN;
2807
2808 err = copy_from_user(driver->user_space_data_buf, buf, len);
2809 if (err) {
2810 pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
2811 __func__, err);
2812 return -EIO;
2813 }
2814
2815 /* Check for proc_type */
2816 remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
2817 if (remote_proc) {
2818 if (len <= MIN_SIZ_ALLOW) {
2819			pr_err("diag: Integer underflow in %s, payload size: %d\n",
2820 __func__, len);
2821 return -EBADMSG;
2822 }
2823 token_offset = sizeof(int);
2824 len -= sizeof(int);
2825 }
2826
2827 /* Check masks for On-Device logging */
2828 if (driver->mask_check) {
2829 if (!mask_request_validate(driver->user_space_data_buf +
2830 token_offset)) {
2831 pr_alert("diag: mask request Invalid\n");
2832 return -EFAULT;
2833 }
2834 }
2835
2836 /* send masks to local processor now */
2837 if (!remote_proc) {
2838 session_info = diag_md_session_get_pid(current->tgid);
2839 if (!session_info) {
2840			pr_err("diag: In %s, request came from invalid md session pid: %d\n",
2841 __func__, current->tgid);
2842 return -EINVAL;
2843 }
2844 if (session_info)
2845 hdlc_disabled = session_info->hdlc_disabled;
2846 else
2847 hdlc_disabled = driver->hdlc_disabled;
2848 if (!hdlc_disabled)
2849 diag_process_hdlc_pkt((void *)
2850 (driver->user_space_data_buf),
2851 len, session_info);
2852 else
2853 diag_process_non_hdlc_pkt((char *)
2854 (driver->user_space_data_buf),
2855 len, session_info);
2856 return 0;
2857 }
2858
2859 err = diag_process_userspace_remote(remote_proc,
2860 driver->user_space_data_buf +
2861 token_offset, len);
2862 if (err) {
2863 driver->user_space_data_busy = 0;
2864 pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
2865 remote_proc, err);
2866 }
2867
2868 return err;
2869}
2870
2871static int diag_user_process_apps_data(const char __user *buf, int len,
2872 int pkt_type)
2873{
2874 int ret = 0;
2875 int stm_size = 0;
2876 const int mempool = POOL_TYPE_COPY;
2877 unsigned char *user_space_data = NULL;
2878 struct diag_md_session_t *session_info = NULL;
2879 uint8_t hdlc_disabled;
2880
2881 if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
2882 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2883 __func__, buf, len);
2884 return -EBADMSG;
2885 }
2886
2887 switch (pkt_type) {
2888 case DATA_TYPE_EVENT:
2889 case DATA_TYPE_F3:
2890 case DATA_TYPE_LOG:
2891 case DATA_TYPE_RESPONSE:
2892 case DATA_TYPE_DELAYED_RESPONSE:
2893 break;
2894 default:
2895 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
2896 __func__, pkt_type);
2897 return -EBADMSG;
2898 }
2899
2900 user_space_data = diagmem_alloc(driver, len, mempool);
2901 if (!user_space_data) {
2902 diag_record_stats(pkt_type, PKT_DROP);
2903 return -ENOMEM;
2904 }
2905
2906 ret = copy_from_user(user_space_data, buf, len);
2907 if (ret) {
2908 pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
2909 __func__, ret);
2910 diagmem_free(driver, user_space_data, mempool);
2911 user_space_data = NULL;
2912 diag_record_stats(pkt_type, PKT_DROP);
2913 return -EBADMSG;
2914 }
2915
2916 if (driver->stm_state[APPS_DATA] &&
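	/*
	 * When STM logging is enabled for APPS data, packets in the
	 * DATA_TYPE_EVENT..DATA_TYPE_LOG range are diverted to the STM/OST
	 * channel and skip the regular aggregation path below.
	 */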
2917 (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
2918 stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
2919 len);
2920 if (stm_size == 0) {
2921 pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
2922 __func__);
2923 }
2924 diagmem_free(driver, user_space_data, mempool);
2925 user_space_data = NULL;
2926
2927 return 0;
2928 }
2929
2930 mutex_lock(&apps_data_mutex);
2931 mutex_lock(&driver->hdlc_disable_mutex);
2932 session_info = diag_md_session_get_peripheral(APPS_DATA);
2933 if (session_info)
2934 hdlc_disabled = session_info->hdlc_disabled;
2935 else
2936 hdlc_disabled = driver->hdlc_disabled;
2937 if (hdlc_disabled)
2938 ret = diag_process_apps_data_non_hdlc(user_space_data, len,
2939 pkt_type);
2940 else
2941 ret = diag_process_apps_data_hdlc(user_space_data, len,
2942 pkt_type);
2943 mutex_unlock(&driver->hdlc_disable_mutex);
2944 mutex_unlock(&apps_data_mutex);
2945
2946 diagmem_free(driver, user_space_data, mempool);
2947 user_space_data = NULL;
2948
2949 check_drain_timer();
2950
2951 if (ret == PKT_DROP)
2952 diag_record_stats(pkt_type, PKT_DROP);
2953 else if (ret == PKT_ALLOC)
2954 diag_record_stats(pkt_type, PKT_ALLOC);
2955 else
2956 return ret;
2957
2958 return 0;
2959}
2960
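/*
 * Read data_ready[index] under diagchar_mutex; used as the wake-up condition
 * for diagchar_read() below.
 */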
Mohit Aggarwal99a06732017-07-28 15:40:27 +05302961static int check_data_ready(int index)
2962{
2963 int data_type = 0;
2964
2965 mutex_lock(&driver->diagchar_mutex);
2966 data_type = driver->data_ready[index];
2967 mutex_unlock(&driver->diagchar_mutex);
2968 return data_type;
2969}
2970
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002971static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
2972 loff_t *ppos)
2973{
2974 struct diag_dci_client_tbl *entry;
2975 struct list_head *start, *temp;
2976 int index = -1, i = 0, ret = 0;
2977 int data_type;
2978 int copy_dci_data = 0;
2979 int exit_stat = 0;
2980 int write_len = 0;
2981 struct diag_md_session_t *session_info = NULL;
2982
Mohit Aggarwal99a06732017-07-28 15:40:27 +05302983 mutex_lock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002984 for (i = 0; i < driver->num_clients; i++)
2985 if (driver->client_map[i].pid == current->tgid)
2986 index = i;
Mohit Aggarwal99a06732017-07-28 15:40:27 +05302987 mutex_unlock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002988
2989 if (index == -1) {
2990 pr_err("diag: Client PID not found in table");
2991 return -EINVAL;
2992 }
2993 if (!buf) {
2994 pr_err("diag: bad address from user side\n");
2995 return -EFAULT;
2996 }
Mohit Aggarwal99a06732017-07-28 15:40:27 +05302997 wait_event_interruptible(driver->wait_q, (check_data_ready(index)) > 0);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002998
2999 mutex_lock(&driver->diagchar_mutex);
3000
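	/*
	 * data_ready[index] is a bitmask of the data types pending for this
	 * client; each branch below copies one type to user space and then
	 * clears the corresponding bit.
	 */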
3001 if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
3002 (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
3003 driver->logging_mode == DIAG_MULTI_MODE)) {
3004 pr_debug("diag: process woken up\n");
3005 /*Copy the type of data being passed*/
3006 data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
3007 driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
3008 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
3009 if (ret == -EFAULT)
3010 goto exit;
3011 /* place holder for number of data field */
3012		/* placeholder for the number of data fields */
3013 session_info = diag_md_session_get_pid(current->tgid);
3014 exit_stat = diag_md_copy_to_user(buf, &ret, count,
3015 session_info);
3016 goto exit;
3017 } else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
3018 /* In case, the thread wakes up and the logging mode is not
3019		/* In case the thread wakes up and the logging mode is no
3020		 * longer memory device, the condition needs to be cleared.
3021 driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
3022 }
3023
3024 if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
3025 data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
3026 driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
3027 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
3028 if (ret == -EFAULT)
3029 goto exit;
3030
3031 session_info = diag_md_session_get_pid(current->tgid);
3032 if (session_info) {
3033 COPY_USER_SPACE_OR_ERR(buf+4,
3034 session_info->hdlc_disabled,
3035 sizeof(uint8_t));
3036 if (ret == -EFAULT)
3037 goto exit;
3038 }
3039 goto exit;
3040 }
3041
3042 if (driver->data_ready[index] & DEINIT_TYPE) {
3043 /*Copy the type of data being passed*/
3044 data_type = driver->data_ready[index] & DEINIT_TYPE;
3045 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3046 if (ret == -EFAULT)
3047 goto exit;
3048 driver->data_ready[index] ^= DEINIT_TYPE;
3049 mutex_unlock(&driver->diagchar_mutex);
3050 diag_remove_client_entry(file);
3051 return ret;
3052 }
3053
3054 if (driver->data_ready[index] & MSG_MASKS_TYPE) {
3055 /*Copy the type of data being passed*/
3056 data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
3057 session_info = diag_md_session_get_peripheral(APPS_DATA);
3058 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
3059 if (ret == -EFAULT)
3060 goto exit;
3061 write_len = diag_copy_to_user_msg_mask(buf + ret, count,
3062 session_info);
3063 if (write_len > 0)
3064 ret += write_len;
3065 driver->data_ready[index] ^= MSG_MASKS_TYPE;
3066 goto exit;
3067 }
3068
3069 if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
3070 /*Copy the type of data being passed*/
3071 data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
3072 session_info = diag_md_session_get_peripheral(APPS_DATA);
3073 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3074 if (ret == -EFAULT)
3075 goto exit;
3076
3077 if (session_info && session_info->event_mask &&
3078 session_info->event_mask->ptr) {
3079 COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
3080 *(session_info->event_mask->ptr),
3081 session_info->event_mask->mask_len);
3082 if (ret == -EFAULT)
3083 goto exit;
3084 } else {
3085 COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
3086 *(event_mask.ptr),
3087 event_mask.mask_len);
3088 if (ret == -EFAULT)
3089 goto exit;
3090 }
3091 driver->data_ready[index] ^= EVENT_MASKS_TYPE;
3092 goto exit;
3093 }
3094
3095 if (driver->data_ready[index] & LOG_MASKS_TYPE) {
3096 /*Copy the type of data being passed*/
3097 data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
3098 session_info = diag_md_session_get_peripheral(APPS_DATA);
3099 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
3100 if (ret == -EFAULT)
3101 goto exit;
3102
3103 write_len = diag_copy_to_user_log_mask(buf + ret, count,
3104 session_info);
3105 if (write_len > 0)
3106 ret += write_len;
3107 driver->data_ready[index] ^= LOG_MASKS_TYPE;
3108 goto exit;
3109 }
3110
3111 if (driver->data_ready[index] & PKT_TYPE) {
3112 /*Copy the type of data being passed*/
3113 data_type = driver->data_ready[index] & PKT_TYPE;
3114 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(data_type));
3115 if (ret == -EFAULT)
3116 goto exit;
3117
3118 COPY_USER_SPACE_OR_ERR(buf + sizeof(data_type),
3119 *(driver->apps_req_buf),
3120 driver->apps_req_buf_len);
3121 if (ret == -EFAULT)
3122 goto exit;
3123 driver->data_ready[index] ^= PKT_TYPE;
3124 driver->in_busy_pktdata = 0;
3125 goto exit;
3126 }
3127
3128 if (driver->data_ready[index] & DCI_PKT_TYPE) {
3129 /* Copy the type of data being passed */
3130 data_type = driver->data_ready[index] & DCI_PKT_TYPE;
3131 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3132 if (ret == -EFAULT)
3133 goto exit;
3134
3135 COPY_USER_SPACE_OR_ERR(buf+4, *(driver->dci_pkt_buf),
3136 driver->dci_pkt_length);
3137 if (ret == -EFAULT)
3138 goto exit;
3139
3140 driver->data_ready[index] ^= DCI_PKT_TYPE;
3141 driver->in_busy_dcipktdata = 0;
3142 goto exit;
3143 }
3144
3145 if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
3146 /*Copy the type of data being passed*/
3147 data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
3148 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3149 if (ret == -EFAULT)
3150 goto exit;
3151
3152 COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
3153 if (ret == -EFAULT)
3154 goto exit;
3155
3156 COPY_USER_SPACE_OR_ERR(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
3157 event_mask_composite), DCI_EVENT_MASK_SIZE);
3158 if (ret == -EFAULT)
3159 goto exit;
3160
3161 driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
3162 goto exit;
3163 }
3164
3165 if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
3166 /*Copy the type of data being passed*/
3167 data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
3168 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3169 if (ret == -EFAULT)
3170 goto exit;
3171
3172 COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
3173 if (ret == -EFAULT)
3174 goto exit;
3175
3176 COPY_USER_SPACE_OR_ERR(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
3177 log_mask_composite), DCI_LOG_MASK_SIZE);
3178 if (ret == -EFAULT)
3179 goto exit;
3180 driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
3181 goto exit;
3182 }
3183
3184exit:
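	/*
	 * DCI data is handled after the exit label so that, when present, it is
	 * appended to whatever data type was copied above before returning.
	 */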
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003185 if (driver->data_ready[index] & DCI_DATA_TYPE) {
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003186 data_type = driver->data_ready[index] & DCI_DATA_TYPE;
Mohit Aggarwal99a06732017-07-28 15:40:27 +05303187 mutex_unlock(&driver->diagchar_mutex);
3188 /* Copy the type of data being passed */
3189 mutex_lock(&driver->dci_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003190 list_for_each_safe(start, temp, &driver->dci_client_list) {
3191 entry = list_entry(start, struct diag_dci_client_tbl,
3192 track);
3193 if (entry->client->tgid != current->tgid)
3194 continue;
3195 if (!entry->in_service)
3196 continue;
3197 if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
3198 mutex_unlock(&driver->dci_mutex);
3199 goto end;
3200 }
3201 ret += sizeof(int);
3202 if (copy_to_user(buf + ret, &entry->client_info.token,
3203 sizeof(int))) {
3204 mutex_unlock(&driver->dci_mutex);
3205 goto end;
3206 }
3207 ret += sizeof(int);
3208 copy_dci_data = 1;
3209 exit_stat = diag_copy_dci(buf, count, entry, &ret);
3210 mutex_lock(&driver->diagchar_mutex);
3211 driver->data_ready[index] ^= DCI_DATA_TYPE;
3212 mutex_unlock(&driver->diagchar_mutex);
3213 if (exit_stat == 1) {
3214 mutex_unlock(&driver->dci_mutex);
3215 goto end;
3216 }
3217 }
3218 mutex_unlock(&driver->dci_mutex);
3219 goto end;
3220 }
Mohit Aggarwal99a06732017-07-28 15:40:27 +05303221 mutex_unlock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003222end:
3223 /*
3224 * Flush any read that is currently pending on DCI data and
3225	 * command channels. This will ensure that the next read is not
3226 * missed.
3227 */
3228 if (copy_dci_data) {
3229 diag_ws_on_copy_complete(DIAG_WS_DCI);
3230 flush_workqueue(driver->diag_dci_wq);
3231 }
3232 return ret;
3233}
3234
3235static ssize_t diagchar_write(struct file *file, const char __user *buf,
3236 size_t count, loff_t *ppos)
3237{
3238 int err = 0;
3239 int pkt_type = 0;
3240 int payload_len = 0;
3241 const char __user *payload_buf = NULL;
3242
3243 /*
3244	 * The data coming from the user space should at least have the
3245	 * packet type header.
3246 */
3247 if (count < sizeof(int)) {
3248 pr_err("diag: In %s, client is sending short data, len: %d\n",
3249 __func__, (int)count);
3250 return -EBADMSG;
3251 }
3252
3253 err = copy_from_user((&pkt_type), buf, sizeof(int));
3254 if (err) {
3255 pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
3256 __func__, err);
3257 return -EIO;
3258 }
3259
3260 if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
3261 if (!((pkt_type == DCI_DATA_TYPE) ||
3262 (pkt_type == DCI_PKT_TYPE) ||
3263 (pkt_type & DATA_TYPE_DCI_LOG) ||
3264 (pkt_type & DATA_TYPE_DCI_EVENT))) {
3265 pr_debug("diag: In %s, Dropping non DCI packet type\n",
3266 __func__);
3267 return -EIO;
3268 }
3269 }
3270
3271 payload_buf = buf + sizeof(int);
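	/*
	 * The leading pkt_type word has been consumed above; the rest of the
	 * write is the payload handed to the type-specific handlers below.
	 */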
3272 payload_len = count - sizeof(int);
3273
3274 if (pkt_type == DCI_PKT_TYPE)
3275 return diag_user_process_dci_apps_data(payload_buf,
3276 payload_len,
3277 pkt_type);
3278 else if (pkt_type == DCI_DATA_TYPE)
3279 return diag_user_process_dci_data(payload_buf, payload_len);
3280 else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
3281 return diag_user_process_raw_data(payload_buf,
3282 payload_len);
3283 else if (pkt_type == USER_SPACE_DATA_TYPE)
3284 return diag_user_process_userspace_data(payload_buf,
3285 payload_len);
3286 if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
3287 err = diag_user_process_dci_apps_data(payload_buf, payload_len,
3288 pkt_type);
3289 if (pkt_type & DATA_TYPE_DCI_LOG)
3290 pkt_type ^= DATA_TYPE_DCI_LOG;
3291 if (pkt_type & DATA_TYPE_DCI_EVENT)
3292 pkt_type ^= DATA_TYPE_DCI_EVENT;
3293 /*
3294 * Check if the log or event is selected even on the regular
3295 * stream. If USB is not connected and we are not in memory
3296 * device mode, we should not process these logs/events.
3297 */
3298 if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
3299 !driver->usb_connected)
3300 return err;
3301 }
3302
3303 switch (pkt_type) {
3304 case DATA_TYPE_EVENT:
3305 case DATA_TYPE_F3:
3306 case DATA_TYPE_LOG:
3307 case DATA_TYPE_DELAYED_RESPONSE:
3308 case DATA_TYPE_RESPONSE:
3309 return diag_user_process_apps_data(payload_buf, payload_len,
3310 pkt_type);
3311 default:
3312 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
3313 __func__, pkt_type);
3314 return -EINVAL;
3315 }
3316
3317 return err;
3318}
3319
3320void diag_ws_init(void)
3321{
3322 driver->dci_ws.ref_count = 0;
3323 driver->dci_ws.copy_count = 0;
3324 spin_lock_init(&driver->dci_ws.lock);
3325
3326 driver->md_ws.ref_count = 0;
3327 driver->md_ws.copy_count = 0;
3328 spin_lock_init(&driver->md_ws.lock);
3329}
3330
3331static void diag_stats_init(void)
3332{
3333 if (!driver)
3334 return;
3335
3336 driver->msg_stats.alloc_count = 0;
3337 driver->msg_stats.drop_count = 0;
3338
3339 driver->log_stats.alloc_count = 0;
3340 driver->log_stats.drop_count = 0;
3341
3342 driver->event_stats.alloc_count = 0;
3343 driver->event_stats.drop_count = 0;
3344}
3345
3346void diag_ws_on_notify(void)
3347{
3348 /*
3349 * Do not deal with reference count here as there can be spurious
3350 * interrupts.
3351 */
3352 pm_stay_awake(driver->diag_dev);
3353}
3354
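/*
 * Wakeup source accounting: diag_ws_on_read() takes a reference for every
 * packet read in, diag_ws_on_copy() counts the copies handed to user space,
 * and diag_ws_on_copy_complete()/diag_ws_on_copy_fail() drop the references
 * again. diag_ws_release() lets the device sleep (pm_relax) only once both
 * the DCI and MD reference counts have dropped to zero.
 */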
3355void diag_ws_on_read(int type, int pkt_len)
3356{
3357 unsigned long flags;
3358 struct diag_ws_ref_t *ws_ref = NULL;
3359
3360 switch (type) {
3361 case DIAG_WS_DCI:
3362 ws_ref = &driver->dci_ws;
3363 break;
3364 case DIAG_WS_MUX:
3365 ws_ref = &driver->md_ws;
3366 break;
3367 default:
3368 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3369 __func__, type);
3370 return;
3371 }
3372
3373 spin_lock_irqsave(&ws_ref->lock, flags);
3374 if (pkt_len > 0) {
3375 ws_ref->ref_count++;
3376 } else {
3377 if (ws_ref->ref_count < 1) {
3378 ws_ref->ref_count = 0;
3379 ws_ref->copy_count = 0;
3380 }
3381 diag_ws_release();
3382 }
3383 spin_unlock_irqrestore(&ws_ref->lock, flags);
3384}
3385
3386
3387void diag_ws_on_copy(int type)
3388{
3389 unsigned long flags;
3390 struct diag_ws_ref_t *ws_ref = NULL;
3391
3392 switch (type) {
3393 case DIAG_WS_DCI:
3394 ws_ref = &driver->dci_ws;
3395 break;
3396 case DIAG_WS_MUX:
3397 ws_ref = &driver->md_ws;
3398 break;
3399 default:
3400 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3401 __func__, type);
3402 return;
3403 }
3404
3405 spin_lock_irqsave(&ws_ref->lock, flags);
3406 ws_ref->copy_count++;
3407 spin_unlock_irqrestore(&ws_ref->lock, flags);
3408}
3409
3410void diag_ws_on_copy_fail(int type)
3411{
3412 unsigned long flags;
3413 struct diag_ws_ref_t *ws_ref = NULL;
3414
3415 switch (type) {
3416 case DIAG_WS_DCI:
3417 ws_ref = &driver->dci_ws;
3418 break;
3419 case DIAG_WS_MUX:
3420 ws_ref = &driver->md_ws;
3421 break;
3422 default:
3423 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3424 __func__, type);
3425 return;
3426 }
3427
3428 spin_lock_irqsave(&ws_ref->lock, flags);
3429 ws_ref->ref_count--;
3430 spin_unlock_irqrestore(&ws_ref->lock, flags);
3431
3432 diag_ws_release();
3433}
3434
3435void diag_ws_on_copy_complete(int type)
3436{
3437 unsigned long flags;
3438 struct diag_ws_ref_t *ws_ref = NULL;
3439
3440 switch (type) {
3441 case DIAG_WS_DCI:
3442 ws_ref = &driver->dci_ws;
3443 break;
3444 case DIAG_WS_MUX:
3445 ws_ref = &driver->md_ws;
3446 break;
3447 default:
3448 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3449 __func__, type);
3450 return;
3451 }
3452
3453 spin_lock_irqsave(&ws_ref->lock, flags);
3454 ws_ref->ref_count -= ws_ref->copy_count;
3455 if (ws_ref->ref_count < 1)
3456 ws_ref->ref_count = 0;
3457 ws_ref->copy_count = 0;
3458 spin_unlock_irqrestore(&ws_ref->lock, flags);
3459
3460 diag_ws_release();
3461}
3462
3463void diag_ws_reset(int type)
3464{
3465 unsigned long flags;
3466 struct diag_ws_ref_t *ws_ref = NULL;
3467
3468 switch (type) {
3469 case DIAG_WS_DCI:
3470 ws_ref = &driver->dci_ws;
3471 break;
3472 case DIAG_WS_MUX:
3473 ws_ref = &driver->md_ws;
3474 break;
3475 default:
3476 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3477 __func__, type);
3478 return;
3479 }
3480
3481 spin_lock_irqsave(&ws_ref->lock, flags);
3482 ws_ref->ref_count = 0;
3483 ws_ref->copy_count = 0;
3484 spin_unlock_irqrestore(&ws_ref->lock, flags);
3485
3486 diag_ws_release();
3487}
3488
3489void diag_ws_release(void)
3490{
3491 if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
3492 pm_relax(driver->diag_dev);
3493}
3494
3495#ifdef DIAG_DEBUG
3496static void diag_debug_init(void)
3497{
3498 diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
3499 if (!diag_ipc_log)
3500 pr_err("diag: Failed to create IPC logging context\n");
3501 /*
3502 * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
3503 * to be logged to IPC
3504 */
3505 diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
Manoj Prabhu B571cf422017-08-08 19:01:41 +05303506 DIAG_DEBUG_USERSPACE | DIAG_DEBUG_BRIDGE;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003507}
3508#else
3509static void diag_debug_init(void)
3510{
3511
3512}
3513#endif
3514
3515static int diag_real_time_info_init(void)
3516{
3517 int i;
3518
3519 if (!driver)
3520 return -EIO;
3521 for (i = 0; i < DIAG_NUM_PROC; i++) {
3522 driver->real_time_mode[i] = 1;
3523 driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
3524 driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
3525 }
3526 driver->real_time_update_busy = 0;
3527 driver->proc_active_mask = 0;
3528 driver->diag_real_time_wq = create_singlethread_workqueue(
3529 "diag_real_time_wq");
3530 if (!driver->diag_real_time_wq)
3531 return -ENOMEM;
3532 INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
3533 mutex_init(&driver->real_time_mutex);
3534 return 0;
3535}
3536
3537static const struct file_operations diagcharfops = {
3538 .owner = THIS_MODULE,
3539 .read = diagchar_read,
3540 .write = diagchar_write,
3541#ifdef CONFIG_COMPAT
3542 .compat_ioctl = diagchar_compat_ioctl,
3543#endif
3544 .unlocked_ioctl = diagchar_ioctl,
3545 .open = diagchar_open,
3546 .release = diagchar_close
3547};
3548
3549static int diagchar_setup_cdev(dev_t devno)
3550{
3551
3552 int err;
3553
3554 cdev_init(driver->cdev, &diagcharfops);
3555
3556 driver->cdev->owner = THIS_MODULE;
3557 driver->cdev->ops = &diagcharfops;
3558
3559 err = cdev_add(driver->cdev, devno, 1);
3560
3561 if (err) {
3562		pr_info("diagchar cdev registration failed!\n");
3563 return err;
3564 }
3565
3566 driver->diagchar_class = class_create(THIS_MODULE, "diag");
3567
3568 if (IS_ERR(driver->diagchar_class)) {
3569 pr_err("Error creating diagchar class.\n");
3570 return PTR_ERR(driver->diagchar_class);
3571 }
3572
3573 driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
3574 (void *)driver, "diag");
3575
3576 if (!driver->diag_dev)
3577 return -EIO;
3578
3579 driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
3580 return 0;
3581
3582}
3583
3584static int diagchar_cleanup(void)
3585{
3586 if (driver) {
3587 if (driver->cdev) {
3588 /* TODO - Check if device exists before deleting */
3589 device_destroy(driver->diagchar_class,
3590 MKDEV(driver->major,
3591 driver->minor_start));
3592 cdev_del(driver->cdev);
3593 }
3594 if (!IS_ERR(driver->diagchar_class))
3595 class_destroy(driver->diagchar_class);
3596 kfree(driver);
3597 }
3598 return 0;
3599}
3600
3601static int __init diagchar_init(void)
3602{
3603 dev_t dev;
Manoj Prabhu B98325462017-01-10 20:19:28 +05303604 int ret, i;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003605
3606 pr_debug("diagfwd initializing ..\n");
3607 ret = 0;
3608 driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
3609 if (!driver)
3610 return -ENOMEM;
3611 kmemleak_not_leak(driver);
3612
3613 timer_in_progress = 0;
3614 driver->delayed_rsp_id = 0;
3615 driver->hdlc_disabled = 0;
3616 driver->dci_state = DIAG_DCI_NO_ERROR;
3617 setup_timer(&drain_timer, drain_timer_func, 1234);
3618 driver->supports_sockets = 1;
3619 driver->time_sync_enabled = 0;
3620 driver->uses_time_api = 0;
3621 driver->poolsize = poolsize;
3622 driver->poolsize_hdlc = poolsize_hdlc;
3623 driver->poolsize_dci = poolsize_dci;
3624 driver->poolsize_user = poolsize_user;
3625 /*
3626 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
3627 * The number of buffers encompasses Diag data generated on
3628	 * the Apps processor + 1 for the responses generated exclusively on
3629 * the Apps processor + data from data channels (4 channels per
3630 * peripheral) + data from command channels (2)
3631 */
3632 diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
3633 poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
3634 driver->num_clients = max_clients;
3635 driver->logging_mode = DIAG_USB_MODE;
Manoj Prabhu B571cf422017-08-08 19:01:41 +05303636 for (i = 0; i < NUM_UPD; i++) {
3637 driver->pd_logging_mode[i] = 0;
3638 driver->pd_session_clear[i] = 0;
3639 }
3640 driver->num_pd_session = 0;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003641 driver->mask_check = 0;
3642 driver->in_busy_pktdata = 0;
3643 driver->in_busy_dcipktdata = 0;
3644 driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
3645 hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3646 hdlc_data.len = 0;
3647 non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3648 non_hdlc_data.len = 0;
3649 mutex_init(&driver->hdlc_disable_mutex);
3650 mutex_init(&driver->diagchar_mutex);
3651 mutex_init(&driver->diag_maskclear_mutex);
Manoj Prabhu B2a428272016-12-22 15:22:03 +05303652 mutex_init(&driver->diag_notifier_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003653 mutex_init(&driver->diag_file_mutex);
3654 mutex_init(&driver->delayed_rsp_mutex);
3655 mutex_init(&apps_data_mutex);
Gopikrishna Mogasati9a44d8d2017-05-05 16:04:35 +05303656 mutex_init(&driver->msg_mask_lock);
Hardik Arya62dce9f2017-06-15 10:39:34 +05303657 mutex_init(&driver->hdlc_recovery_mutex);
Manoj Prabhu B571cf422017-08-08 19:01:41 +05303658 for (i = 0; i < NUM_PERIPHERALS; i++) {
Manoj Prabhu B98325462017-01-10 20:19:28 +05303659 mutex_init(&driver->diagfwd_channel_mutex[i]);
Manoj Prabhu B571cf422017-08-08 19:01:41 +05303660 driver->diag_id_sent[i] = 0;
3661 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003662 init_waitqueue_head(&driver->wait_q);
3663 INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
3664 INIT_WORK(&(driver->update_user_clients),
3665 diag_update_user_client_work_fn);
3666 INIT_WORK(&(driver->update_md_clients),
3667 diag_update_md_client_work_fn);
3668 diag_ws_init();
3669 diag_stats_init();
3670 diag_debug_init();
3671 diag_md_session_init();
3672
3673 driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
3674 driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
3675 if (!driver->incoming_pkt.data) {
3676 ret = -ENOMEM;
3677 goto fail;
3678 }
3679 kmemleak_not_leak(driver->incoming_pkt.data);
3680 driver->incoming_pkt.processing = 0;
3681 driver->incoming_pkt.read_len = 0;
3682 driver->incoming_pkt.remaining = 0;
3683 driver->incoming_pkt.total_len = 0;
3684
3685 ret = diag_real_time_info_init();
3686 if (ret)
3687 goto fail;
3688 ret = diag_debugfs_init();
3689 if (ret)
3690 goto fail;
3691 ret = diag_masks_init();
3692 if (ret)
3693 goto fail;
3694 ret = diag_remote_init();
3695 if (ret)
3696 goto fail;
3697 ret = diag_mux_init();
3698 if (ret)
3699 goto fail;
3700 ret = diagfwd_init();
3701 if (ret)
3702 goto fail;
3703 ret = diagfwd_cntl_init();
3704 if (ret)
3705 goto fail;
3706 driver->dci_state = diag_dci_init();
3707 ret = diagfwd_peripheral_init();
3708 if (ret)
3709 goto fail;
3710 diagfwd_cntl_channel_init();
3711 if (driver->dci_state == DIAG_DCI_NO_ERROR)
3712 diag_dci_channel_init();
3713 pr_debug("diagchar initializing ..\n");
3714 driver->num = 1;
3715 driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
3716 strlcpy(driver->name, "diag", 4);
3717 /* Get major number from kernel and initialize */
3718 ret = alloc_chrdev_region(&dev, driver->minor_start,
3719 driver->num, driver->name);
3720 if (!ret) {
3721 driver->major = MAJOR(dev);
3722 driver->minor_start = MINOR(dev);
3723 } else {
3724 pr_err("diag: Major number not allocated\n");
3725 goto fail;
3726 }
3727 driver->cdev = cdev_alloc();
3728 ret = diagchar_setup_cdev(dev);
3729 if (ret)
3730 goto fail;
Sreelakshmi Gownipalli8d477d32017-02-08 19:49:06 -08003731 mutex_init(&driver->diag_id_mutex);
3732 INIT_LIST_HEAD(&driver->diag_id_list);
3733 diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS");
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003734 pr_debug("diagchar initialized now");
3735 ret = diagfwd_bridge_init();
3736 if (ret)
3737 diagfwd_bridge_exit();
3738 return 0;
3739
3740fail:
3741 pr_err("diagchar is not initialized, ret: %d\n", ret);
3742 diag_debugfs_cleanup();
3743 diagchar_cleanup();
3744 diag_mux_exit();
3745 diagfwd_peripheral_exit();
3746 diagfwd_bridge_exit();
3747 diagfwd_exit();
3748 diagfwd_cntl_exit();
3749 diag_dci_exit();
3750 diag_masks_exit();
3751 diag_remote_exit();
3752 return ret;
3753
3754}
3755
3756static void diagchar_exit(void)
3757{
3758 pr_info("diagchar exiting...\n");
3759 diag_mempool_exit();
3760 diag_mux_exit();
3761 diagfwd_peripheral_exit();
3762 diagfwd_exit();
3763 diagfwd_cntl_exit();
3764 diag_dci_exit();
3765 diag_masks_exit();
3766 diag_md_session_exit();
3767 diag_remote_exit();
3768 diag_debugfs_cleanup();
3769 diagchar_cleanup();
3770 pr_info("done diagchar exit\n");
3771}
3772
3773module_init(diagchar_init);
3774module_exit(diagchar_exit);