    1/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
    2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/cdev.h>
17#include <linux/fs.h>
18#include <linux/device.h>
19#include <linux/delay.h>
20#include <linux/uaccess.h>
21#include <linux/diagchar.h>
22#include <linux/sched.h>
23#include <linux/ratelimit.h>
24#include <linux/timer.h>
25#ifdef CONFIG_DIAG_OVER_USB
26#include <linux/usb/usbdiag.h>
27#endif
28#include <asm/current.h>
29#include "diagchar_hdlc.h"
30#include "diagmem.h"
31#include "diagchar.h"
32#include "diagfwd.h"
33#include "diagfwd_cntl.h"
34#include "diag_dci.h"
35#include "diag_debugfs.h"
36#include "diag_masks.h"
37#include "diagfwd_bridge.h"
38#include "diag_usb.h"
39#include "diag_memorydevice.h"
40#include "diag_mux.h"
41#include "diag_ipc_logging.h"
42#include "diagfwd_peripheral.h"
43
44#include <linux/coresight-stm.h>
45#include <linux/kernel.h>
46#ifdef CONFIG_COMPAT
47#include <linux/compat.h>
48#endif
49
50MODULE_DESCRIPTION("Diag Char Driver");
51MODULE_LICENSE("GPL v2");
52
53#define MIN_SIZ_ALLOW 4
54#define INIT 1
55#define EXIT -1
56struct diagchar_dev *driver;
57struct diagchar_priv {
58 int pid;
59};
60
61#define USER_SPACE_RAW_DATA 0
62#define USER_SPACE_HDLC_DATA 1
63
64/* Memory pool variables */
65/* Used for copying any incoming packet from user space clients. */
66static unsigned int poolsize = 12;
67module_param(poolsize, uint, 0000);
68
69/*
70 * Used for HDLC encoding packets coming from the user
71 * space.
72 */
73static unsigned int poolsize_hdlc = 10;
74module_param(poolsize_hdlc, uint, 0000);
75
76/*
77 * This is used for incoming DCI requests from the user space clients.
78 * Don't expose itemsize as it is internal.
79 */
80static unsigned int poolsize_user = 8;
81module_param(poolsize_user, uint, 0000);
82
83/*
84 * USB structures allocated for writing Diag data generated on the Apps to USB.
85 * Don't expose itemsize as it is constant.
86 */
87static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
88static unsigned int poolsize_usb_apps = 10;
89module_param(poolsize_usb_apps, uint, 0000);
90
91/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
92static unsigned int poolsize_dci = 10;
93module_param(poolsize_dci, uint, 0000);
94
95#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
96/* Used for reading data from the remote device. */
97static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
98static unsigned int poolsize_mdm = 18;
99module_param(itemsize_mdm, uint, 0000);
100module_param(poolsize_mdm, uint, 0000);
101
102/*
103 * Used for reading DCI data from the remote device.
104 * Don't expose poolsize for DCI data. There is only one read buffer
105 */
106static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
107static unsigned int poolsize_mdm_dci = 1;
108module_param(itemsize_mdm_dci, uint, 0000);
109
110/*
    111 * Used for USB structures associated with a remote device.
112 * Don't expose the itemsize since it is constant.
113 */
114static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
115static unsigned int poolsize_mdm_usb = 18;
116module_param(poolsize_mdm_usb, uint, 0000);
117
118/*
119 * Used for writing read DCI data to remote peripherals. Don't
120 * expose poolsize for DCI data. There is only one read
121 * buffer. Add 6 bytes for DCI header information: Start (1),
122 * Version (1), Length (2), Tag (2)
123 */
124static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
125static unsigned int poolsize_mdm_dci_write = 1;
126module_param(itemsize_mdm_dci_write, uint, 0000);
127
128/*
129 * Used for USB structures associated with a remote SMUX
    130 * device. Don't expose the itemsize since it is constant.
131 */
132static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
133static unsigned int poolsize_qsc_usb = 8;
134module_param(poolsize_qsc_usb, uint, 0000);
135#endif
136
    137/* This is the max number of user-space clients supported at initialization */
138static unsigned int max_clients = 15;
139static unsigned int threshold_client_limit = 50;
140module_param(max_clients, uint, 0000);
141
142/* Timer variables */
143static struct timer_list drain_timer;
144static int timer_in_progress;
145
    146/*
147 * Diag Mask clear variable
148 * Used for clearing masks upon
149 * USB disconnection and stopping ODL
150 */
151static int diag_mask_clear_param = 1;
152module_param(diag_mask_clear_param, int, 0644);
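/*
 * Note: permissions 0644 also expose this parameter at run time, typically as
 * /sys/module/diagchar/parameters/diag_mask_clear_param (assuming the driver
 * is built as the "diagchar" module).
 */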
153
    154struct diag_apps_data_t {
155 void *buf;
156 uint32_t len;
157 int ctxt;
158};
159
160static struct diag_apps_data_t hdlc_data;
161static struct diag_apps_data_t non_hdlc_data;
162static struct mutex apps_data_mutex;
163
164#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
165
166#ifdef DIAG_DEBUG
167uint16_t diag_debug_mask;
168void *diag_ipc_log;
169#endif
170
171static void diag_md_session_close(struct diag_md_session_t *session_info);
172
173/*
    174 * Returns the next delayed rsp id. If wrapping is enabled, the id wraps
    175 * back to 1 after reaching DIAGPKT_MAX_DELAYED_RSP; otherwise it saturates there.
176 */
177static uint16_t diag_get_next_delayed_rsp_id(void)
178{
179 uint16_t rsp_id = 0;
180
181 mutex_lock(&driver->delayed_rsp_mutex);
182 rsp_id = driver->delayed_rsp_id;
183 if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
184 rsp_id++;
185 else {
186 if (wrap_enabled) {
187 rsp_id = 1;
188 wrap_count++;
189 } else
190 rsp_id = DIAGPKT_MAX_DELAYED_RSP;
191 }
192 driver->delayed_rsp_id = rsp_id;
193 mutex_unlock(&driver->delayed_rsp_mutex);
194
195 return rsp_id;
196}
197
198static int diag_switch_logging(struct diag_logging_mode_param_t *param);
199
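/*
 * Copy 'length' bytes of 'data' into the user buffer 'buf' and advance 'ret'.
 * Relies on 'count' and 'ret' being in scope at the call site; 'ret' is set
 * to -EFAULT when the user buffer is too small or copy_to_user() fails.
 */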
200#define COPY_USER_SPACE_OR_ERR(buf, data, length) \
201do { \
202 if ((count < ret+length) || (copy_to_user(buf, \
203 (void *)&data, length))) { \
    204 ret = -EFAULT; \
        break; \
205 } \
206 ret += length; \
207} while (0)
208
209static void drain_timer_func(unsigned long data)
210{
211 queue_work(driver->diag_wq, &(driver->diag_drain_work));
212}
213
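/*
 * Flush a buffered Apps data packet (HDLC or non-HDLC) to the mux layer.
 * On a failed mux write the buffer is returned to the HDLC mempool; in
 * either case the local reference is dropped.
 */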
214static void diag_drain_apps_data(struct diag_apps_data_t *data)
215{
216 int err = 0;
217
218 if (!data || !data->buf)
219 return;
220
221 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
222 data->ctxt);
223 if (err)
224 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
225
226 data->buf = NULL;
227 data->len = 0;
228}
229
230void diag_update_user_client_work_fn(struct work_struct *work)
231{
232 diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
233}
234
235static void diag_update_md_client_work_fn(struct work_struct *work)
236{
237 diag_update_md_clients(HDLC_SUPPORT_TYPE);
238}
239
240void diag_drain_work_fn(struct work_struct *work)
241{
242 struct diag_md_session_t *session_info = NULL;
243 uint8_t hdlc_disabled = 0;
244
245 timer_in_progress = 0;
246 mutex_lock(&apps_data_mutex);
247 session_info = diag_md_session_get_peripheral(APPS_DATA);
248 if (session_info)
249 hdlc_disabled = session_info->hdlc_disabled;
250 else
251 hdlc_disabled = driver->hdlc_disabled;
252
253 if (!hdlc_disabled)
254 diag_drain_apps_data(&hdlc_data);
255 else
256 diag_drain_apps_data(&non_hdlc_data);
257 mutex_unlock(&apps_data_mutex);
258}
259
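/*
 * Arm the 200 ms drain timer if it is not already pending; when it expires,
 * drain_timer_func() queues diag_drain_work to flush any buffered Apps data.
 */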
260void check_drain_timer(void)
261{
262 int ret = 0;
263
264 if (!timer_in_progress) {
265 timer_in_progress = 1;
266 ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
267 }
268}
269
270void diag_add_client(int i, struct file *file)
271{
272 struct diagchar_priv *diagpriv_data;
273
274 driver->client_map[i].pid = current->tgid;
275 diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
276 GFP_KERNEL);
277 if (diagpriv_data)
278 diagpriv_data->pid = current->tgid;
279 file->private_data = diagpriv_data;
280 strlcpy(driver->client_map[i].name, current->comm, 20);
281 driver->client_map[i].name[19] = '\0';
282}
283
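/*
 * Size and initialize the Apps-side memory pools. The copy pool item size is
 * padded with the larger of the DCI and callback header sizes.
 */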
284static void diag_mempool_init(void)
285{
286 uint32_t itemsize = DIAG_MAX_REQ_SIZE;
287 uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
288 uint32_t itemsize_dci = IN_BUF_SIZE;
289 uint32_t itemsize_user = DCI_REQ_BUF_SIZE;
290
291 itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
292 CALLBACK_HDR_SIZE);
293 diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
294 diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
295 diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
296 diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);
297
298 diagmem_init(driver, POOL_TYPE_COPY);
299 diagmem_init(driver, POOL_TYPE_HDLC);
300 diagmem_init(driver, POOL_TYPE_USER);
301 diagmem_init(driver, POOL_TYPE_DCI);
302}
303
304static void diag_mempool_exit(void)
305{
306 diagmem_exit(driver, POOL_TYPE_COPY);
307 diagmem_exit(driver, POOL_TYPE_HDLC);
308 diagmem_exit(driver, POOL_TYPE_USER);
309 diagmem_exit(driver, POOL_TYPE_DCI);
310}
311
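/*
 * Open handler: allocate a client slot for the calling process. When the
 * table is full it is grown (client_map and data_ready are krealloc'd) up to
 * threshold_client_limit, beyond which the open fails. New clients start with
 * all mask-update bits set in data_ready, and the memory pools are
 * initialized on the first open.
 */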
312static int diagchar_open(struct inode *inode, struct file *file)
313{
314 int i = 0;
315 void *temp;
316
317 if (driver) {
318 mutex_lock(&driver->diagchar_mutex);
319
320 for (i = 0; i < driver->num_clients; i++)
321 if (driver->client_map[i].pid == 0)
322 break;
323
324 if (i < driver->num_clients) {
325 diag_add_client(i, file);
326 } else {
327 if (i < threshold_client_limit) {
328 driver->num_clients++;
329 temp = krealloc(driver->client_map
330 , (driver->num_clients) * sizeof(struct
331 diag_client_map), GFP_KERNEL);
332 if (!temp)
333 goto fail;
334 else
335 driver->client_map = temp;
336 temp = krealloc(driver->data_ready
337 , (driver->num_clients) * sizeof(int),
338 GFP_KERNEL);
339 if (!temp)
340 goto fail;
341 else
342 driver->data_ready = temp;
343 diag_add_client(i, file);
344 } else {
345 mutex_unlock(&driver->diagchar_mutex);
346 pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
347 pr_err_ratelimited("diag: Cannot open handle %s %d",
348 current->comm, current->tgid);
349 for (i = 0; i < driver->num_clients; i++)
350 pr_debug("%d) %s PID=%d", i, driver->
351 client_map[i].name,
352 driver->client_map[i].pid);
353 return -ENOMEM;
354 }
355 }
356 driver->data_ready[i] = 0x0;
357 driver->data_ready[i] |= MSG_MASKS_TYPE;
358 driver->data_ready[i] |= EVENT_MASKS_TYPE;
359 driver->data_ready[i] |= LOG_MASKS_TYPE;
360 driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
361 driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
362
363 if (driver->ref_count == 0)
364 diag_mempool_init();
365 driver->ref_count++;
366 mutex_unlock(&driver->diagchar_mutex);
367 return 0;
368 }
369 return -ENOMEM;
370
371fail:
372 mutex_unlock(&driver->diagchar_mutex);
373 driver->num_clients--;
374 pr_err_ratelimited("diag: Insufficient memory for new client");
375 return -ENOMEM;
376}
377
378static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
379{
380 uint32_t ret = 0;
381
382 if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
383 ret |= DIAG_CON_APSS;
384 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
385 ret |= DIAG_CON_MPSS;
386 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
387 ret |= DIAG_CON_LPASS;
388 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
389 ret |= DIAG_CON_WCNSS;
390 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
391 ret |= DIAG_CON_SENSORS;
392 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
393 ret |= DIAG_CON_WDSP;
    394 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
    395 ret |= DIAG_CON_CDSP;
    396 if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
    397 ret |= DIAG_CON_UPD_WLAN;
    398 return ret;
    399}
    400
401uint8_t diag_mask_to_pd_value(uint32_t peripheral_mask)
402{
403 uint8_t upd = 0;
404 uint32_t pd_mask = 0;
405
406 pd_mask = diag_translate_kernel_to_user_mask(peripheral_mask);
407 switch (pd_mask) {
408 case DIAG_CON_UPD_WLAN:
409 upd = UPD_WLAN;
410 break;
411 default:
412 DIAG_LOG(DIAG_DEBUG_MASKS,
413 "asking for mask update with no pd mask set\n");
414 }
415 return upd;
416}
417
    418int diag_mask_param(void)
419{
420 return diag_mask_clear_param;
421}
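/*
 * Push "disable" log, msg and event mask commands for the given session
 * (global masks when info is NULL); called on USB disconnect or when an ODL
 * session stops.
 */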
    422void diag_clear_masks(struct diag_md_session_t *info)
423{
424 int ret;
425 char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
426 char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
427 char cmd_disable_event_mask[] = { 0x60, 0};
428
429 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
430 "diag: %s: masks clear request upon %s\n", __func__,
431 ((info) ? "ODL exit" : "USB Disconnection"));
432
433 ret = diag_process_apps_masks(cmd_disable_log_mask,
434 sizeof(cmd_disable_log_mask), info);
435 ret = diag_process_apps_masks(cmd_disable_msg_mask,
436 sizeof(cmd_disable_msg_mask), info);
437 ret = diag_process_apps_masks(cmd_disable_event_mask,
438 sizeof(cmd_disable_event_mask), info);
439 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
440 "diag:%s: masks cleared successfully\n", __func__);
441}
442
443static void diag_close_logging_process(const int pid)
444{
    445 int i, j;
    446 int session_mask;
    447 uint32_t p_mask;
    448 struct diag_md_session_t *session_info = NULL;
449 struct diag_logging_mode_param_t params;
450
451 session_info = diag_md_session_get_pid(pid);
452 if (!session_info)
453 return;
454
    455 if (diag_mask_clear_param)
    456 diag_clear_masks(session_info);
    457
458 mutex_lock(&driver->diag_maskclear_mutex);
459 driver->mask_clear = 1;
460 mutex_unlock(&driver->diag_maskclear_mutex);
461
    462 mutex_lock(&driver->diagchar_mutex);
    463 session_mask = session_info->peripheral_mask;
    464 diag_md_session_close(session_info);
    465
466 p_mask =
467 diag_translate_kernel_to_user_mask(session_mask);
468
    469 for (i = 0; i < NUM_MD_SESSIONS; i++)
    470 if (MD_PERIPHERAL_MASK(i) & session_mask)
    471 diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
472
473 params.req_mode = USB_MODE;
474 params.mode_param = 0;
    475 params.pd_mask = 0;
476 params.peripheral_mask = p_mask;
477
478 if (driver->num_pd_session > 0) {
479 for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) {
480 if (session_mask & MD_PERIPHERAL_MASK(i)) {
481 j = i - UPD_WLAN;
482 driver->pd_session_clear[j] = 1;
483 driver->pd_logging_mode[j] = 0;
484 driver->num_pd_session -= 1;
485 params.pd_mask = p_mask;
486 }
487 }
488 }
489
    490 diag_switch_logging(&params);
491 mutex_unlock(&driver->diagchar_mutex);
492}
493
494static int diag_remove_client_entry(struct file *file)
495{
496 int i = -1;
497 struct diagchar_priv *diagpriv_data = NULL;
498 struct diag_dci_client_tbl *dci_entry = NULL;
499
500 if (!driver)
501 return -ENOMEM;
502
503 mutex_lock(&driver->diag_file_mutex);
504 if (!file) {
505 DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
506 mutex_unlock(&driver->diag_file_mutex);
507 return -ENOENT;
508 }
509 if (!(file->private_data)) {
510 DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
511 mutex_unlock(&driver->diag_file_mutex);
512 return -EINVAL;
513 }
514
515 diagpriv_data = file->private_data;
516
517 /*
    518 * Clean up any DCI registrations if this is a DCI client.
    519 * This is especially important for an ungraceful exit of a DCI client,
    520 * since it removes any pending registrations of that client.
521 */
522 mutex_lock(&driver->dci_mutex);
523 dci_entry = dci_lookup_client_entry_pid(current->tgid);
524 if (dci_entry)
525 diag_dci_deinit_client(dci_entry);
526 mutex_unlock(&driver->dci_mutex);
527
528 diag_close_logging_process(current->tgid);
529
530 /* Delete the pkt response table entry for the exiting process */
531 diag_cmd_remove_reg_by_pid(current->tgid);
532
533 mutex_lock(&driver->diagchar_mutex);
534 driver->ref_count--;
535 if (driver->ref_count == 0)
536 diag_mempool_exit();
537
538 for (i = 0; i < driver->num_clients; i++) {
539 if (diagpriv_data && diagpriv_data->pid ==
540 driver->client_map[i].pid) {
541 driver->client_map[i].pid = 0;
542 kfree(diagpriv_data);
543 diagpriv_data = NULL;
544 file->private_data = 0;
545 break;
546 }
547 }
548 mutex_unlock(&driver->diagchar_mutex);
549 mutex_unlock(&driver->diag_file_mutex);
550 return 0;
551}
552static int diagchar_close(struct inode *inode, struct file *file)
553{
554 int ret;
555
556 DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
557 current->comm);
558 ret = diag_remove_client_entry(file);
559 mutex_lock(&driver->diag_maskclear_mutex);
560 driver->mask_clear = 0;
561 mutex_unlock(&driver->diag_maskclear_mutex);
562 return ret;
563}
564
565void diag_record_stats(int type, int flag)
566{
567 struct diag_pkt_stats_t *pkt_stats = NULL;
568
569 switch (type) {
570 case DATA_TYPE_EVENT:
571 pkt_stats = &driver->event_stats;
572 break;
573 case DATA_TYPE_F3:
574 pkt_stats = &driver->msg_stats;
575 break;
576 case DATA_TYPE_LOG:
577 pkt_stats = &driver->log_stats;
578 break;
579 case DATA_TYPE_RESPONSE:
580 if (flag != PKT_DROP)
581 return;
582 pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
583 __func__);
584 return;
585 case DATA_TYPE_DELAYED_RESPONSE:
586 /* No counters to increase for Delayed responses */
587 return;
588 default:
589 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
590 __func__, type);
591 return;
592 }
593
594 switch (flag) {
595 case PKT_ALLOC:
596 atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
597 break;
598 case PKT_DROP:
599 atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
600 break;
601 case PKT_RESET:
602 atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
603 atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
604 break;
605 default:
606 pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
607 __func__, flag);
608 return;
609 }
610}
611
612void diag_get_timestamp(char *time_str)
613{
614 struct timeval t;
615 struct tm broken_tm;
616
617 do_gettimeofday(&t);
618 if (!time_str)
619 return;
620 time_to_tm(t.tv_sec, 0, &broken_tm);
621 scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
622 broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
623}
624
625int diag_get_remote(int remote_info)
626{
627 int val = (remote_info < 0) ? -remote_info : remote_info;
628 int remote_val;
629
630 switch (val) {
631 case MDM:
632 case MDM2:
633 case QSC:
634 remote_val = -remote_info;
635 break;
636 default:
637 remote_val = 0;
638 break;
639 }
640
641 return remote_val;
642}
643
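/*
 * Returns DIAG_CMD_POLLING if the registration entry covers one of the known
 * polling commands (status, query call, query TMC, diag poll, TDSCDMA
 * status), DIAG_CMD_NOT_POLLING otherwise.
 */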
644int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
645{
646 int polling = DIAG_CMD_NOT_POLLING;
647
648 if (!entry)
649 return -EIO;
650
651 if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
652 if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
653 entry->cmd_code_hi >= DIAG_CMD_STATUS &&
654 entry->cmd_code_lo <= DIAG_CMD_STATUS)
655 polling = DIAG_CMD_POLLING;
656 else if (entry->subsys_id == DIAG_SS_WCDMA &&
657 entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
658 entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
659 polling = DIAG_CMD_POLLING;
660 else if (entry->subsys_id == DIAG_SS_GSM &&
661 entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
662 entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
663 polling = DIAG_CMD_POLLING;
664 else if (entry->subsys_id == DIAG_SS_PARAMS &&
665 entry->cmd_code_hi >= DIAG_DIAG_POLL &&
666 entry->cmd_code_lo <= DIAG_DIAG_POLL)
667 polling = DIAG_CMD_POLLING;
668 else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
669 entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
670 entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
671 polling = DIAG_CMD_POLLING;
672 }
673
674 return polling;
675}
676
677static void diag_cmd_invalidate_polling(int change_flag)
678{
679 int polling = DIAG_CMD_NOT_POLLING;
680 struct list_head *start;
681 struct list_head *temp;
682 struct diag_cmd_reg_t *item = NULL;
683
684 if (change_flag == DIAG_CMD_ADD) {
685 if (driver->polling_reg_flag)
686 return;
687 }
688
689 driver->polling_reg_flag = 0;
690 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
691 item = list_entry(start, struct diag_cmd_reg_t, link);
    692 if (&item->entry == NULL) {
693 pr_err("diag: In %s, unable to search command\n",
694 __func__);
695 return;
696 }
    697 polling = diag_cmd_chk_polling(&item->entry);
698 if (polling == DIAG_CMD_POLLING) {
699 driver->polling_reg_flag = 1;
700 break;
701 }
702 }
703}
704
705int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
706 int pid)
707{
708 struct diag_cmd_reg_t *new_item = NULL;
709
710 if (!new_entry) {
711 pr_err("diag: In %s, invalid new entry\n", __func__);
712 return -EINVAL;
713 }
714
715 if (proc > APPS_DATA) {
716 pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
717 return -EINVAL;
718 }
719
720 if (proc != APPS_DATA)
721 pid = INVALID_PID;
722
723 new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
724 if (!new_item)
725 return -ENOMEM;
726 kmemleak_not_leak(new_item);
727
728 new_item->pid = pid;
729 new_item->proc = proc;
730 memcpy(&new_item->entry, new_entry,
731 sizeof(struct diag_cmd_reg_entry_t));
732 INIT_LIST_HEAD(&new_item->link);
733
734 mutex_lock(&driver->cmd_reg_mutex);
735 list_add_tail(&new_item->link, &driver->cmd_reg_list);
736 driver->cmd_reg_count++;
737 diag_cmd_invalidate_polling(DIAG_CMD_ADD);
738 mutex_unlock(&driver->cmd_reg_mutex);
739
740 return 0;
741}
742
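/*
 * Find a registration whose command/subsystem matches 'entry', treating
 * cmd_code_hi/cmd_code_lo as an inclusive range. Entries registered with
 * DIAG_CMD_NO_SUBSYS act as wildcards; MODE_CMD is special-cased so that a
 * mode reset matches only the Apps registration while other mode requests
 * skip it.
 */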
743struct diag_cmd_reg_entry_t *diag_cmd_search(
744 struct diag_cmd_reg_entry_t *entry, int proc)
745{
746 struct list_head *start;
747 struct list_head *temp;
748 struct diag_cmd_reg_t *item = NULL;
749 struct diag_cmd_reg_entry_t *temp_entry = NULL;
750
751 if (!entry) {
752 pr_err("diag: In %s, invalid entry\n", __func__);
753 return NULL;
754 }
755
756 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
757 item = list_entry(start, struct diag_cmd_reg_t, link);
    758 if (&item->entry == NULL) {
    759 pr_err("diag: In %s, unable to search command\n",
760 __func__);
761 return NULL;
762 }
    763 temp_entry = &item->entry;
764 if (temp_entry->cmd_code == entry->cmd_code &&
765 temp_entry->subsys_id == entry->subsys_id &&
766 temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
767 temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
768 (proc == item->proc || proc == ALL_PROC)) {
769 return &item->entry;
770 } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
771 entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
772 if (temp_entry->subsys_id == entry->subsys_id &&
773 temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
774 temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
775 (proc == item->proc || proc == ALL_PROC)) {
776 return &item->entry;
777 }
778 } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
779 temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
780 if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
781 (temp_entry->cmd_code_lo <= entry->cmd_code) &&
782 (proc == item->proc || proc == ALL_PROC)) {
783 if (entry->cmd_code == MODE_CMD) {
784 if (entry->subsys_id == RESET_ID &&
785 item->proc != APPS_DATA) {
786 continue;
787 }
788 if (entry->subsys_id != RESET_ID &&
789 item->proc == APPS_DATA) {
790 continue;
791 }
792 }
793 return &item->entry;
794 }
795 }
796 }
797
798 return NULL;
799}
800
801void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
802{
803 struct diag_cmd_reg_t *item = NULL;
804 struct diag_cmd_reg_entry_t *temp_entry;
805
806 if (!entry) {
807 pr_err("diag: In %s, invalid entry\n", __func__);
808 return;
809 }
810
811 mutex_lock(&driver->cmd_reg_mutex);
812 temp_entry = diag_cmd_search(entry, proc);
813 if (temp_entry) {
814 item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
815 if (!item) {
816 mutex_unlock(&driver->cmd_reg_mutex);
817 return;
818 }
819 list_del(&item->link);
820 kfree(item);
821 driver->cmd_reg_count--;
822 }
823 diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
824 mutex_unlock(&driver->cmd_reg_mutex);
825}
826
827void diag_cmd_remove_reg_by_pid(int pid)
828{
829 struct list_head *start;
830 struct list_head *temp;
831 struct diag_cmd_reg_t *item = NULL;
832
833 mutex_lock(&driver->cmd_reg_mutex);
834 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
835 item = list_entry(start, struct diag_cmd_reg_t, link);
    836 if (&item->entry == NULL) {
837 pr_err("diag: In %s, unable to search command\n",
838 __func__);
839 mutex_unlock(&driver->cmd_reg_mutex);
840 return;
841 }
    842 if (item->pid == pid) {
843 list_del(&item->link);
844 kfree(item);
845 driver->cmd_reg_count--;
846 }
847 }
848 mutex_unlock(&driver->cmd_reg_mutex);
849}
850
851void diag_cmd_remove_reg_by_proc(int proc)
852{
853 struct list_head *start;
854 struct list_head *temp;
855 struct diag_cmd_reg_t *item = NULL;
856
857 mutex_lock(&driver->cmd_reg_mutex);
858 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
859 item = list_entry(start, struct diag_cmd_reg_t, link);
    860 if (&item->entry == NULL) {
861 pr_err("diag: In %s, unable to search command\n",
862 __func__);
863 mutex_unlock(&driver->cmd_reg_mutex);
864 return;
865 }
    866 if (item->proc == proc) {
867 list_del(&item->link);
868 kfree(item);
869 driver->cmd_reg_count--;
870 }
871 }
872 diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
873 mutex_unlock(&driver->cmd_reg_mutex);
874}
875
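/*
 * Copy queued DCI write buffers for this client into the user buffer.
 * Buffers that do not fit in the remaining space stay queued and
 * dci_drain_data() is kicked so they are delivered on a later read; the
 * total payload length is written back at offset 8 of the user buffer.
 */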
876static int diag_copy_dci(char __user *buf, size_t count,
877 struct diag_dci_client_tbl *entry, int *pret)
878{
879 int total_data_len = 0;
880 int ret = 0;
881 int exit_stat = 1;
882 uint8_t drain_again = 0;
883 struct diag_dci_buffer_t *buf_entry, *temp;
884
885 if (!buf || !entry || !pret)
886 return exit_stat;
887
888 ret = *pret;
889
890 ret += sizeof(int);
891 if (ret >= count) {
892 pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
893 __func__, ret, count);
894 return -EINVAL;
895 }
896
897 mutex_lock(&entry->write_buf_mutex);
898 list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
899 buf_track) {
900
901 if ((ret + buf_entry->data_len) > count) {
902 drain_again = 1;
903 break;
904 }
905
906 list_del(&buf_entry->buf_track);
907 mutex_lock(&buf_entry->data_mutex);
908 if ((buf_entry->data_len > 0) &&
909 (buf_entry->in_busy) &&
910 (buf_entry->data)) {
911 if (copy_to_user(buf+ret, (void *)buf_entry->data,
912 buf_entry->data_len))
913 goto drop;
914 ret += buf_entry->data_len;
915 total_data_len += buf_entry->data_len;
916 diag_ws_on_copy(DIAG_WS_DCI);
917drop:
918 buf_entry->in_busy = 0;
919 buf_entry->data_len = 0;
920 buf_entry->in_list = 0;
921 if (buf_entry->buf_type == DCI_BUF_CMD) {
922 mutex_unlock(&buf_entry->data_mutex);
923 continue;
924 } else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
925 diagmem_free(driver, buf_entry->data,
926 POOL_TYPE_DCI);
927 buf_entry->data = NULL;
928 mutex_unlock(&buf_entry->data_mutex);
929 kfree(buf_entry);
930 continue;
931 }
932
933 }
934 mutex_unlock(&buf_entry->data_mutex);
935 }
936
937 if (total_data_len > 0) {
938 /* Copy the total data length */
939 COPY_USER_SPACE_OR_ERR(buf+8, total_data_len, 4);
940 if (ret == -EFAULT)
941 goto exit;
942 ret -= 4;
943 } else {
944 pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
945 __func__, total_data_len);
946 }
947
948 exit_stat = 0;
949exit:
950 entry->in_service = 0;
951 mutex_unlock(&entry->write_buf_mutex);
952 *pret = ret;
953 if (drain_again)
954 dci_drain_data(0);
955
956 return exit_stat;
957}
958
959#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
960static int diag_remote_init(void)
961{
962 diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
963 diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
964 diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
965 diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
966 poolsize_mdm_dci);
967 diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
968 diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
969 diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
970 poolsize_mdm_dci_write);
971 diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
972 poolsize_mdm_dci_write);
973 diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
974 poolsize_qsc_usb);
975 driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
976 if (!driver->hdlc_encode_buf)
977 return -ENOMEM;
978 driver->hdlc_encode_buf_len = 0;
979 return 0;
980}
981
982static void diag_remote_exit(void)
983{
984 kfree(driver->hdlc_encode_buf);
985}
986
987static int diag_send_raw_data_remote(int proc, void *buf, int len,
988 uint8_t hdlc_flag)
989{
990 int err = 0;
991 int max_len = 0;
992 uint8_t retry_count = 0;
993 uint8_t max_retries = 3;
994 uint16_t payload = 0;
995 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
996 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
997 int bridge_index = proc - 1;
998 struct diag_md_session_t *session_info = NULL;
999 uint8_t hdlc_disabled = 0;
1000
1001 if (!buf)
1002 return -EINVAL;
1003
1004 if (len <= 0) {
1005 pr_err("diag: In %s, invalid len: %d", __func__, len);
1006 return -EBADMSG;
1007 }
1008
    1009 if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
1010 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
1011 bridge_index);
1012 return -EINVAL;
1013 }
1014
1015 do {
1016 if (driver->hdlc_encode_buf_len == 0)
1017 break;
1018 usleep_range(10000, 10100);
1019 retry_count++;
1020 } while (retry_count < max_retries);
1021
1022 if (driver->hdlc_encode_buf_len != 0)
1023 return -EAGAIN;
1024 session_info = diag_md_session_get_peripheral(APPS_DATA);
1025 if (session_info)
1026 hdlc_disabled = session_info->hdlc_disabled;
1027 else
1028 hdlc_disabled = driver->hdlc_disabled;
1029 if (hdlc_disabled) {
    1030 if (len < 4) {
1031 pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
1032 __func__, len);
1033 return -EBADMSG;
1034 }
    1035 payload = *(uint16_t *)(buf + 2);
    1036 if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
1037 pr_err("diag: Dropping packet, payload size is %d\n",
1038 payload);
1039 return -EBADMSG;
1040 }
    1041 driver->hdlc_encode_buf_len = payload;
    1042 /*
    1043 * Adding 5 bytes for start (1 byte), version (1 byte),
    1044 * payload (2 bytes) and end (1 byte)
    1045 */
    1046 if (len == (payload + 5)) {
1047 /*
1048 * Adding 4 bytes for start (1 byte), version (1 byte)
1049 * and payload (2 bytes)
1050 */
1051 memcpy(driver->hdlc_encode_buf, buf + 4, payload);
1052 goto send_data;
1053 } else {
1054 pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
1055 __func__, len);
1056 return -EBADMSG;
1057 }
    1058 }
1059
1060 if (hdlc_flag) {
1061 if (len > DIAG_MAX_HDLC_BUF_SIZE) {
1062 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
1063 len);
1064 return -EBADMSG;
1065 }
1066 driver->hdlc_encode_buf_len = len;
1067 memcpy(driver->hdlc_encode_buf, buf, len);
1068 goto send_data;
1069 }
1070
1071 /*
    1072 * The worst case length will be twice the incoming packet length.
1073 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
1074 */
1075 max_len = (2 * len) + 3;
1076 if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
1077 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
1078 max_len);
1079 return -EBADMSG;
1080 }
1081
1082 /* Perform HDLC encoding on incoming data */
1083 send.state = DIAG_STATE_START;
1084 send.pkt = (void *)(buf);
1085 send.last = (void *)(buf + len - 1);
1086 send.terminate = 1;
1087
1088 enc.dest = driver->hdlc_encode_buf;
1089 enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
1090 diag_hdlc_encode(&send, &enc);
1091 driver->hdlc_encode_buf_len = (int)(enc.dest -
1092 (void *)driver->hdlc_encode_buf);
1093
1094send_data:
1095 err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
1096 driver->hdlc_encode_buf_len);
1097 if (err) {
1098 pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
1099 proc, err);
1100 driver->hdlc_encode_buf_len = 0;
1101 }
1102
1103 return err;
1104}
1105
1106static int diag_process_userspace_remote(int proc, void *buf, int len)
1107{
1108 int bridge_index = proc - 1;
1109
1110 if (!buf || len < 0) {
1111 pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
1112 __func__, buf, len);
1113 return -EINVAL;
1114 }
1115
    1116 if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
1117 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
1118 bridge_index);
1119 return -EINVAL;
1120 }
1121
1122 driver->user_space_data_busy = 1;
1123 return diagfwd_bridge_write(bridge_index, buf, len);
1124}
1125#else
1126static int diag_remote_init(void)
1127{
1128 return 0;
1129}
1130
1131static void diag_remote_exit(void)
1132{
1133}
1134
1135int diagfwd_bridge_init(void)
1136{
1137 return 0;
1138}
1139
1140void diagfwd_bridge_exit(void)
1141{
1142}
1143
1144uint16_t diag_get_remote_device_mask(void)
1145{
1146 return 0;
1147}
1148
1149static int diag_send_raw_data_remote(int proc, void *buf, int len,
1150 uint8_t hdlc_flag)
1151{
1152 return -EINVAL;
1153}
1154
1155static int diag_process_userspace_remote(int proc, void *buf, int len)
1156{
1157 return 0;
1158}
1159#endif
1160
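/*
 * Whitelist of diag commands that user-space mask/callback clients may send:
 * returns 1 only for the packet and subsystem command codes listed below,
 * 0 for everything else.
 */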
1161static int mask_request_validate(unsigned char mask_buf[])
1162{
1163 uint8_t packet_id;
1164 uint8_t subsys_id;
1165 uint16_t ss_cmd;
1166
1167 packet_id = mask_buf[0];
1168
1169 if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
1170 subsys_id = mask_buf[1];
1171 ss_cmd = *(uint16_t *)(mask_buf + 2);
1172 switch (subsys_id) {
1173 case DIAG_SS_DIAG:
1174 if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
1175 (ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
1176 (ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
1177 (ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
1178 (ss_cmd == DIAG_SS_FILE_READ_APPS))
1179 return 1;
1180 break;
1181 default:
1182 return 0;
1183 }
1184 } else if (packet_id == 0x4B) {
1185 subsys_id = mask_buf[1];
1186 ss_cmd = *(uint16_t *)(mask_buf + 2);
1187 /* Packets with SSID which are allowed */
1188 switch (subsys_id) {
1189 case 0x04: /* DIAG_SUBSYS_WCDMA */
1190 if ((ss_cmd == 0) || (ss_cmd == 0xF))
1191 return 1;
1192 break;
1193 case 0x08: /* DIAG_SUBSYS_GSM */
1194 if ((ss_cmd == 0) || (ss_cmd == 0x1))
1195 return 1;
1196 break;
1197 case 0x09: /* DIAG_SUBSYS_UMTS */
1198 case 0x0F: /* DIAG_SUBSYS_CM */
1199 if (ss_cmd == 0)
1200 return 1;
1201 break;
1202 case 0x0C: /* DIAG_SUBSYS_OS */
1203 if ((ss_cmd == 2) || (ss_cmd == 0x100))
1204 return 1; /* MPU and APU */
1205 break;
1206 case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
1207 if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
1208 return 1;
1209 else if (ss_cmd == 0x218) /* HDLC Disabled Command*/
1210 return 0;
1211 else if (ss_cmd == DIAG_GET_TIME_API)
1212 return 1;
1213 else if (ss_cmd == DIAG_SET_TIME_API)
1214 return 1;
1215 else if (ss_cmd == DIAG_SWITCH_COMMAND)
1216 return 1;
1217 else if (ss_cmd == DIAG_BUFFERING_MODE)
1218 return 1;
1219 break;
1220 case 0x13: /* DIAG_SUBSYS_FS */
1221 if ((ss_cmd == 0) || (ss_cmd == 0x1))
1222 return 1;
1223 break;
1224 default:
1225 return 0;
1226 }
1227 } else {
1228 switch (packet_id) {
1229 case 0x00: /* Version Number */
1230 case 0x0C: /* CDMA status packet */
1231 case 0x1C: /* Diag Version */
1232 case 0x1D: /* Time Stamp */
1233 case 0x60: /* Event Report Control */
1234 case 0x63: /* Status snapshot */
1235 case 0x73: /* Logging Configuration */
1236 case 0x7C: /* Extended build ID */
1237 case 0x7D: /* Extended Message configuration */
1238 case 0x81: /* Event get mask */
1239 case 0x82: /* Set the event mask */
1240 return 1;
1241 default:
1242 return 0;
1243 }
1244 }
1245 return 0;
1246}
1247
1248static void diag_md_session_init(void)
1249{
1250 int i;
1251
1252 mutex_init(&driver->md_session_lock);
1253 driver->md_session_mask = 0;
1254 driver->md_session_mode = DIAG_MD_NONE;
1255 for (i = 0; i < NUM_MD_SESSIONS; i++)
1256 driver->md_session_map[i] = NULL;
1257}
1258
1259static void diag_md_session_exit(void)
1260{
1261 int i;
1262 struct diag_md_session_t *session_info = NULL;
1263
1264 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1265 if (driver->md_session_map[i]) {
1266 session_info = driver->md_session_map[i];
1267 diag_log_mask_free(session_info->log_mask);
1268 kfree(session_info->log_mask);
1269 session_info->log_mask = NULL;
1270 diag_msg_mask_free(session_info->msg_mask);
1271 kfree(session_info->msg_mask);
1272 session_info->msg_mask = NULL;
1273 diag_event_mask_free(session_info->event_mask);
1274 kfree(session_info->event_mask);
1275 session_info->event_mask = NULL;
1276 kfree(session_info);
1277 session_info = NULL;
1278 driver->md_session_map[i] = NULL;
1279 }
1280 }
1281 mutex_destroy(&driver->md_session_lock);
1282 driver->md_session_mask = 0;
1283 driver->md_session_mode = DIAG_MD_NONE;
1284}
1285
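/*
 * Create a memory-device (ODL) session owned by the calling process for the
 * requested peripherals. The session gets its own copies of the log, msg and
 * event masks; creation fails with -EEXIST if any requested peripheral is
 * already owned by another session.
 */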
1286int diag_md_session_create(int mode, int peripheral_mask, int proc)
1287{
1288 int i;
1289 int err = 0;
1290 struct diag_md_session_t *new_session = NULL;
1291
1292 /*
1293 * If a session is running with a peripheral mask and a new session
1294 * request comes in with same peripheral mask value then return
1295 * invalid param
1296 */
1297 if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
1298 (driver->md_session_mask & peripheral_mask) != 0)
1299 return -EINVAL;
1300
1301 mutex_lock(&driver->md_session_lock);
1302 new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
1303 if (!new_session) {
1304 mutex_unlock(&driver->md_session_lock);
1305 return -ENOMEM;
1306 }
1307
1308 new_session->peripheral_mask = 0;
1309 new_session->pid = current->tgid;
1310 new_session->task = current;
1311
1312 new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
1313 GFP_KERNEL);
1314 if (!new_session->log_mask) {
1315 err = -ENOMEM;
1316 goto fail_peripheral;
1317 }
1318 new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
1319 GFP_KERNEL);
1320 if (!new_session->event_mask) {
1321 err = -ENOMEM;
1322 goto fail_peripheral;
1323 }
1324 new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
1325 GFP_KERNEL);
1326 if (!new_session->msg_mask) {
1327 err = -ENOMEM;
1328 goto fail_peripheral;
1329 }
1330
1331 err = diag_log_mask_copy(new_session->log_mask, &log_mask);
1332 if (err) {
1333 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1334 "return value of log copy. err %d\n", err);
1335 goto fail_peripheral;
1336 }
1337 err = diag_event_mask_copy(new_session->event_mask, &event_mask);
1338 if (err) {
1339 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1340 "return value of event copy. err %d\n", err);
1341 goto fail_peripheral;
1342 }
1343 err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
1344 if (err) {
1345 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1346 "return value of msg copy. err %d\n", err);
1347 goto fail_peripheral;
1348 }
1349 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1350 if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
1351 continue;
1352 if (driver->md_session_map[i] != NULL) {
1353 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1354 "another instance present for %d\n", i);
1355 err = -EEXIST;
1356 goto fail_peripheral;
1357 }
1358 new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
1359 driver->md_session_map[i] = new_session;
1360 driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
1361 }
1362 setup_timer(&new_session->hdlc_reset_timer,
1363 diag_md_hdlc_reset_timer_func,
1364 new_session->pid);
1365
1366 driver->md_session_mode = DIAG_MD_PERIPHERAL;
1367 mutex_unlock(&driver->md_session_lock);
1368 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1369 "created session in peripheral mode\n");
1370 return 0;
1371
1372fail_peripheral:
1373 diag_log_mask_free(new_session->log_mask);
1374 kfree(new_session->log_mask);
1375 new_session->log_mask = NULL;
1376 diag_event_mask_free(new_session->event_mask);
1377 kfree(new_session->event_mask);
1378 new_session->event_mask = NULL;
1379 diag_msg_mask_free(new_session->msg_mask);
1380 kfree(new_session->msg_mask);
1381 new_session->msg_mask = NULL;
1382 kfree(new_session);
1383 new_session = NULL;
1384 mutex_unlock(&driver->md_session_lock);
1385 return err;
1386}
1387
1388static void diag_md_session_close(struct diag_md_session_t *session_info)
1389{
1390 int i;
1391 uint8_t found = 0;
1392
1393 if (!session_info)
1394 return;
1395
1396 mutex_lock(&driver->md_session_lock);
1397 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1398 if (driver->md_session_map[i] != session_info)
1399 continue;
1400 driver->md_session_map[i] = NULL;
1401 driver->md_session_mask &= ~session_info->peripheral_mask;
1402 }
1403 diag_log_mask_free(session_info->log_mask);
1404 kfree(session_info->log_mask);
1405 session_info->log_mask = NULL;
1406 diag_msg_mask_free(session_info->msg_mask);
1407 kfree(session_info->msg_mask);
1408 session_info->msg_mask = NULL;
1409 diag_event_mask_free(session_info->event_mask);
1410 kfree(session_info->event_mask);
1411 session_info->event_mask = NULL;
1412 del_timer(&session_info->hdlc_reset_timer);
1413
1414 for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
1415 if (driver->md_session_map[i] != NULL)
1416 found = 1;
1417 }
1418
1419 driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
1420 kfree(session_info);
1421 session_info = NULL;
1422 mutex_unlock(&driver->md_session_lock);
1423 DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
1424}
1425
1426struct diag_md_session_t *diag_md_session_get_pid(int pid)
1427{
1428 int i;
1429
1430 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1431 if (driver->md_session_map[i] &&
1432 driver->md_session_map[i]->pid == pid)
1433 return driver->md_session_map[i];
1434 }
1435 return NULL;
1436}
1437
1438struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
1439{
1440 if (peripheral >= NUM_MD_SESSIONS)
1441 return NULL;
1442 return driver->md_session_map[peripheral];
1443}
1444
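/*
 * Move the given peripherals of an existing session between USB and
 * memory-device ownership, updating md_session_map, md_session_mask and the
 * session's peripheral_mask accordingly.
 */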
1445static int diag_md_peripheral_switch(struct diag_md_session_t *session_info,
1446 int peripheral_mask, int req_mode) {
1447 int i, bit = 0;
1448
1449 if (!session_info)
1450 return -EINVAL;
    1451 if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
1452 return -EINVAL;
1453
1454 /*
    1455 * Check that md_session_map[i] == session_info; if it is not,
    1456 * a race condition has occurred, so bail out.
1457 */
1458 mutex_lock(&driver->md_session_lock);
1459 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1460 bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
1461 if (!bit)
1462 continue;
1463 if (req_mode == DIAG_USB_MODE) {
1464 if (driver->md_session_map[i] != session_info) {
1465 mutex_unlock(&driver->md_session_lock);
1466 return -EINVAL;
1467 }
1468 driver->md_session_map[i] = NULL;
1469 driver->md_session_mask &= ~bit;
1470 session_info->peripheral_mask &= ~bit;
1471
1472 } else {
1473 if (driver->md_session_map[i] != NULL) {
1474 mutex_unlock(&driver->md_session_lock);
1475 return -EINVAL;
1476 }
1477 driver->md_session_map[i] = session_info;
1478 driver->md_session_mask |= bit;
1479 session_info->peripheral_mask |= bit;
1480
1481 }
1482 }
1483
1484 driver->md_session_mode = DIAG_MD_PERIPHERAL;
1485 mutex_unlock(&driver->md_session_lock);
1486 DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
1487 peripheral_mask, req_mode);
        return 0;
    1488}
1489
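/*
 * Validate a requested logging-mode switch against existing md sessions and
 * report (via *change_mode) whether the mux actually has to change. A switch
 * to USB is allowed only for peripherals owned by the calling session; a
 * switch to memory-device mode creates or extends the caller's session as
 * long as no other session owns the requested peripherals.
 */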
1490static int diag_md_session_check(int curr_mode, int req_mode,
1491 const struct diag_logging_mode_param_t *param,
1492 uint8_t *change_mode)
1493{
1494 int i, bit = 0, err = 0;
1495 int change_mask = 0;
1496 struct diag_md_session_t *session_info = NULL;
1497
1498 if (!param || !change_mode)
1499 return -EIO;
1500
1501 *change_mode = 0;
1502
1503 switch (curr_mode) {
1504 case DIAG_USB_MODE:
1505 case DIAG_MEMORY_DEVICE_MODE:
1506 case DIAG_MULTI_MODE:
1507 break;
1508 default:
1509 return -EINVAL;
1510 }
1511
1512 if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
1513 return -EINVAL;
1514
1515 if (req_mode == DIAG_USB_MODE) {
1516 if (curr_mode == DIAG_USB_MODE)
1517 return 0;
1518 if (driver->md_session_mode == DIAG_MD_NONE
1519 && driver->md_session_mask == 0 && driver->logging_mask) {
1520 *change_mode = 1;
1521 return 0;
1522 }
1523
1524 /*
1525 * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
1526 * Check if requested peripherals are already in usb mode
1527 */
1528 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1529 bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
1530 if (!bit)
1531 continue;
1532 if (bit & driver->logging_mask)
1533 change_mask |= bit;
1534 }
1535 if (!change_mask)
1536 return 0;
1537
1538 /*
1539 * Change is needed. Check if this md_session has set all the
1540 * requested peripherals. If another md session set a requested
1541 * peripheral then we cannot switch that peripheral to USB.
1542 * If this session owns all the requested peripherals, then
1543 * call function to switch the modes/masks for the md_session
1544 */
1545 session_info = diag_md_session_get_pid(current->tgid);
1546 if (!session_info) {
1547 *change_mode = 1;
1548 return 0;
1549 }
1550 if ((change_mask & session_info->peripheral_mask)
1551 != change_mask) {
1552 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1553 "Another MD Session owns a requested peripheral\n");
1554 return -EINVAL;
1555 }
1556 *change_mode = 1;
1557
1558 /* If all peripherals are being set to USB Mode, call close */
1559 if (~change_mask & session_info->peripheral_mask) {
1560 err = diag_md_peripheral_switch(session_info,
1561 change_mask, DIAG_USB_MODE);
1562 } else
1563 diag_md_session_close(session_info);
1564
1565 return err;
1566
1567 } else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
1568 /*
1569 * Get bit mask that represents what peripherals already have
1570 * been set. Check that requested peripherals already set are
1571 * owned by this md session
1572 */
1573 change_mask = driver->md_session_mask & param->peripheral_mask;
1574 session_info = diag_md_session_get_pid(current->tgid);
1575
1576 if (session_info) {
1577 if ((session_info->peripheral_mask & change_mask)
1578 != change_mask) {
1579 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1580 "Another MD Session owns a requested peripheral\n");
1581 return -EINVAL;
1582 }
1583 err = diag_md_peripheral_switch(session_info,
1584 change_mask, DIAG_USB_MODE);
1585 } else {
1586 if (change_mask) {
1587 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1588 "Another MD Session owns a requested peripheral\n");
1589 return -EINVAL;
1590 }
1591 err = diag_md_session_create(DIAG_MD_PERIPHERAL,
1592 param->peripheral_mask, DIAG_LOCAL_PROC);
1593 }
1594 *change_mode = 1;
1595 return err;
1596 }
1597 return -EINVAL;
1598}
1599
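/*
 * Convert a user-space DIAG_CON_* connection bitmask into the kernel's
 * peripheral bit positions (the inverse of
 * diag_translate_kernel_to_user_mask()).
 */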
1600static uint32_t diag_translate_mask(uint32_t peripheral_mask)
1601{
1602 uint32_t ret = 0;
1603
1604 if (peripheral_mask & DIAG_CON_APSS)
1605 ret |= (1 << APPS_DATA);
1606 if (peripheral_mask & DIAG_CON_MPSS)
1607 ret |= (1 << PERIPHERAL_MODEM);
1608 if (peripheral_mask & DIAG_CON_LPASS)
1609 ret |= (1 << PERIPHERAL_LPASS);
1610 if (peripheral_mask & DIAG_CON_WCNSS)
1611 ret |= (1 << PERIPHERAL_WCNSS);
1612 if (peripheral_mask & DIAG_CON_SENSORS)
1613 ret |= (1 << PERIPHERAL_SENSORS);
1614 if (peripheral_mask & DIAG_CON_WDSP)
1615 ret |= (1 << PERIPHERAL_WDSP);
    1616 if (peripheral_mask & DIAG_CON_CDSP)
    1617 ret |= (1 << PERIPHERAL_CDSP);
    1618 if (peripheral_mask & DIAG_CON_UPD_WLAN)
    1619 ret |= (1 << UPD_WLAN);
    1620
1621 return ret;
1622}
1623
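/*
 * Handle a logging mode switch request: translate the peripheral/PD mask,
 * validate it against existing md sessions via diag_md_session_check(), and
 * ask the mux layer to move the selected peripherals between USB and
 * memory-device logging, updating the real-time vote afterwards.
 */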
1624static int diag_switch_logging(struct diag_logging_mode_param_t *param)
1625{
    1626 int new_mode, i = 0;
    1627 int curr_mode;
    1628 int err = 0;
    1629 uint8_t do_switch = 1;
    1630 uint32_t peripheral_mask = 0;
    1631 uint8_t peripheral, upd;
    1632
1633 if (!param)
1634 return -EINVAL;
1635
1636 if (!param->peripheral_mask) {
1637 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1638 "asking for mode switch with no peripheral mask set\n");
1639 return -EINVAL;
1640 }
1641
    1642 if (param->pd_mask) {
1643 switch (param->pd_mask) {
1644 case DIAG_CON_UPD_WLAN:
1645 peripheral = PERIPHERAL_MODEM;
1646 upd = UPD_WLAN;
1647 break;
1648 default:
1649 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1650 "asking for mode switch with no pd mask set\n");
1651 return -EINVAL;
1652 }
1653
1654 if (driver->md_session_map[peripheral] &&
1655 (MD_PERIPHERAL_MASK(peripheral) &
1656 diag_mux->mux_mask)) {
1657 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1658 "diag_fr: User PD is already logging onto active peripheral logging\n");
1659 i = upd - UPD_WLAN;
1660 driver->pd_session_clear[i] = 0;
1661 return -EINVAL;
1662 }
1663 peripheral_mask =
1664 diag_translate_mask(param->pd_mask);
1665 param->peripheral_mask = peripheral_mask;
1666 i = upd - UPD_WLAN;
1667 if (!driver->pd_session_clear[i]) {
1668 driver->pd_logging_mode[i] = 1;
1669 driver->num_pd_session += 1;
1670 }
1671 driver->pd_session_clear[i] = 0;
1672 } else {
1673 peripheral_mask =
1674 diag_translate_mask(param->peripheral_mask);
1675 param->peripheral_mask = peripheral_mask;
1676 }
    1677
1678 switch (param->req_mode) {
1679 case CALLBACK_MODE:
1680 case UART_MODE:
1681 case SOCKET_MODE:
1682 case MEMORY_DEVICE_MODE:
1683 new_mode = DIAG_MEMORY_DEVICE_MODE;
1684 break;
1685 case USB_MODE:
1686 new_mode = DIAG_USB_MODE;
1687 break;
1688 default:
1689 pr_err("diag: In %s, request to switch to invalid mode: %d\n",
1690 __func__, param->req_mode);
1691 return -EINVAL;
1692 }
1693
1694 curr_mode = driver->logging_mode;
1695 DIAG_LOG(DIAG_DEBUG_USERSPACE,
    1696 "request to switch logging from %d mask:%0x to new_mode %d mask:%0x\n",
    1697 curr_mode, driver->md_session_mask, new_mode, peripheral_mask);
1698
1699 err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
1700 if (err) {
1701 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1702 "err from diag_md_session_check, err: %d\n", err);
1703 return err;
1704 }
1705
1706 if (do_switch == 0) {
1707 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1708 "not switching modes c: %d n: %d\n",
1709 curr_mode, new_mode);
1710 return 0;
1711 }
1712
1713 diag_ws_reset(DIAG_WS_MUX);
1714 err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
1715 if (err) {
1716 pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
1717 __func__, curr_mode, new_mode, err);
1718 driver->logging_mode = curr_mode;
1719 goto fail;
1720 }
1721 driver->logging_mode = new_mode;
1722 driver->logging_mask = peripheral_mask;
1723 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1724 "Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);
1725
1726 /* Update to take peripheral_mask */
1727 if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
1728 diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
1729 MODE_REALTIME, ALL_PROC);
1730 } else {
1731 diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
1732 ALL_PROC);
1733 }
1734
1735 if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
1736 curr_mode == DIAG_USB_MODE)) {
1737 queue_work(driver->diag_real_time_wq,
1738 &driver->diag_real_time_work);
1739 }
1740
1741 return 0;
1742fail:
1743 return err;
1744}
1745
1746static int diag_ioctl_dci_reg(unsigned long ioarg)
1747{
1748 int result = -EINVAL;
1749 struct diag_dci_reg_tbl_t dci_reg_params;
1750
1751 if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
1752 sizeof(struct diag_dci_reg_tbl_t)))
1753 return -EFAULT;
1754
1755 result = diag_dci_register_client(&dci_reg_params);
1756
1757 return result;
1758}
1759
1760static int diag_ioctl_dci_health_stats(unsigned long ioarg)
1761{
1762 int result = -EINVAL;
1763 struct diag_dci_health_stats_proc stats;
1764
1765 if (copy_from_user(&stats, (void __user *)ioarg,
1766 sizeof(struct diag_dci_health_stats_proc)))
1767 return -EFAULT;
1768
1769 result = diag_dci_copy_health_stats(&stats);
1770 if (result == DIAG_DCI_NO_ERROR) {
1771 if (copy_to_user((void __user *)ioarg, &stats,
1772 sizeof(struct diag_dci_health_stats_proc)))
1773 return -EFAULT;
1774 }
1775
1776 return result;
1777}
1778
1779static int diag_ioctl_dci_log_status(unsigned long ioarg)
1780{
1781 struct diag_log_event_stats le_stats;
1782 struct diag_dci_client_tbl *dci_client = NULL;
1783
1784 if (copy_from_user(&le_stats, (void __user *)ioarg,
1785 sizeof(struct diag_log_event_stats)))
1786 return -EFAULT;
1787
1788 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1789 if (!dci_client)
1790 return DIAG_DCI_NOT_SUPPORTED;
1791 le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
1792 if (copy_to_user((void __user *)ioarg, &le_stats,
1793 sizeof(struct diag_log_event_stats)))
1794 return -EFAULT;
1795
1796 return DIAG_DCI_NO_ERROR;
1797}
1798
1799static int diag_ioctl_dci_event_status(unsigned long ioarg)
1800{
1801 struct diag_log_event_stats le_stats;
1802 struct diag_dci_client_tbl *dci_client = NULL;
1803
1804 if (copy_from_user(&le_stats, (void __user *)ioarg,
1805 sizeof(struct diag_log_event_stats)))
1806 return -EFAULT;
1807
1808 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1809 if (!dci_client)
1810 return DIAG_DCI_NOT_SUPPORTED;
1811
1812 le_stats.is_set = diag_dci_query_event_mask(dci_client, le_stats.code);
1813 if (copy_to_user((void __user *)ioarg, &le_stats,
1814 sizeof(struct diag_log_event_stats)))
1815 return -EFAULT;
1816
1817 return DIAG_DCI_NO_ERROR;
1818}
1819
1820static int diag_ioctl_lsm_deinit(void)
1821{
1822 int i;
1823
    1824 mutex_lock(&driver->diagchar_mutex);
    1825 for (i = 0; i < driver->num_clients; i++)
1826 if (driver->client_map[i].pid == current->tgid)
1827 break;
1828
    1829 if (i == driver->num_clients) {
    1830 mutex_unlock(&driver->diagchar_mutex);
    1831 return -EINVAL;
    1832 }
    1833
    1834 driver->data_ready[i] |= DEINIT_TYPE;
    1835 mutex_unlock(&driver->diagchar_mutex);
    1836 wake_up_interruptible(&driver->wait_q);
1837
1838 return 1;
1839}
1840
1841static int diag_ioctl_vote_real_time(unsigned long ioarg)
1842{
1843 int real_time = 0;
1844 int temp_proc = ALL_PROC;
1845 struct real_time_vote_t vote;
1846 struct diag_dci_client_tbl *dci_client = NULL;
1847
1848 if (copy_from_user(&vote, (void __user *)ioarg,
1849 sizeof(struct real_time_vote_t)))
1850 return -EFAULT;
1851
1852 if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
1853 vote.real_time_vote > MODE_UNKNOWN ||
1854 vote.client_id < 0) {
1855 pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
1856 __func__, vote.proc, vote.real_time_vote,
1857 vote.client_id);
1858 return -EINVAL;
1859 }
1860
1861 driver->real_time_update_busy++;
1862 if (vote.proc == DIAG_PROC_DCI) {
1863 dci_client = diag_dci_get_client_entry(vote.client_id);
1864 if (!dci_client) {
1865 driver->real_time_update_busy--;
1866 return DIAG_DCI_NOT_SUPPORTED;
1867 }
1868 diag_dci_set_real_time(dci_client, vote.real_time_vote);
1869 real_time = diag_dci_get_cumulative_real_time(
1870 dci_client->client_info.token);
1871 diag_update_real_time_vote(vote.proc, real_time,
1872 dci_client->client_info.token);
1873 } else {
1874 real_time = vote.real_time_vote;
1875 temp_proc = vote.client_id;
1876 diag_update_real_time_vote(vote.proc, real_time,
1877 temp_proc);
1878 }
1879 queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
1880 return 0;
1881}
1882
1883static int diag_ioctl_get_real_time(unsigned long ioarg)
1884{
1885 int i;
1886 int retry_count = 0;
1887 int timer = 0;
1888 struct real_time_query_t rt_query;
1889
1890 if (copy_from_user(&rt_query, (void __user *)ioarg,
1891 sizeof(struct real_time_query_t)))
1892 return -EFAULT;
1893 while (retry_count < 3) {
1894 if (driver->real_time_update_busy > 0) {
1895 retry_count++;
1896 /*
1897			 * The value 10000 was chosen empirically as an
1898			 * optimum value in order to give the work queued on
1899			 * diag_real_time_wq enough time to complete processing.
1900 */
1901 for (timer = 0; timer < 5; timer++)
1902 usleep_range(10000, 10100);
1903 } else {
1904 break;
1905 }
1906 }
1907
1908 if (driver->real_time_update_busy > 0)
1909 return -EAGAIN;
1910
1911 if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
1912 pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
1913 __func__);
1914 return -EINVAL;
1915 }
1916 rt_query.real_time = driver->real_time_mode[rt_query.proc];
1917 /*
1918 * For the local processor, if any of the peripherals is in buffering
1919	 * mode, overwrite the value of real time with MODE_UNKNOWN
1920 */
1921 if (rt_query.proc == DIAG_LOCAL_PROC) {
1922 for (i = 0; i < NUM_PERIPHERALS; i++) {
1923 if (!driver->feature[i].peripheral_buffering)
1924 continue;
1925 switch (driver->buffering_mode[i].mode) {
1926 case DIAG_BUFFERING_MODE_CIRCULAR:
1927 case DIAG_BUFFERING_MODE_THRESHOLD:
1928 rt_query.real_time = MODE_UNKNOWN;
1929 break;
1930 }
1931 }
1932 }
1933
1934 if (copy_to_user((void __user *)ioarg, &rt_query,
1935 sizeof(struct real_time_query_t)))
1936 return -EFAULT;
1937
1938 return 0;
1939}
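
/*
 * Illustrative userspace sketch for DIAG_IOCTL_GET_REAL_TIME (userspace
 * declarations assumed to mirror the kernel definitions):
 *
 *	struct real_time_query_t query = { .proc = DIAG_LOCAL_PROC };
 *	int err = ioctl(diag_fd, DIAG_IOCTL_GET_REAL_TIME, &query);
 *	// On success query.real_time holds the current mode for the
 *	// requested proc, or MODE_UNKNOWN if a local peripheral is in a
 *	// buffering mode.
 */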
1940
1941static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
1942{
1943 struct diag_buffering_mode_t params;
1944
1945 if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
1946 return -EFAULT;
1947
1948 if (params.peripheral >= NUM_PERIPHERALS)
1949 return -EINVAL;
1950
1951 mutex_lock(&driver->mode_lock);
1952 driver->buffering_flag[params.peripheral] = 1;
1953 mutex_unlock(&driver->mode_lock);
1954
1955 return diag_send_peripheral_buffering_mode(&params);
1956}
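
/*
 * Illustrative userspace sketch for DIAG_IOCTL_PERIPHERAL_BUF_CONFIG.
 * Only the peripheral field is validated here; the mode value shown
 * below is an assumption about the struct diag_buffering_mode_t layout
 * consumed by diag_send_peripheral_buffering_mode().
 *
 *	struct diag_buffering_mode_t buf_mode = {
 *		.peripheral = PERIPHERAL_MODEM,
 *		.mode = DIAG_BUFFERING_MODE_THRESHOLD,
 *	};
 *	ioctl(diag_fd, DIAG_IOCTL_PERIPHERAL_BUF_CONFIG, &buf_mode);
 */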
1957
1958static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
1959{
1960 uint8_t peripheral;
1961
1962 if (copy_from_user(&peripheral, (void __user *)ioarg, sizeof(uint8_t)))
1963 return -EFAULT;
1964
1965 if (peripheral >= NUM_PERIPHERALS) {
1966 pr_err("diag: In %s, invalid peripheral %d\n", __func__,
1967 peripheral);
1968 return -EINVAL;
1969 }
1970
1971 if (!driver->feature[peripheral].peripheral_buffering) {
1972 pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
1973 __func__, peripheral);
1974 return -EIO;
1975 }
1976
1977 return diag_send_peripheral_drain_immediate(peripheral);
1978}
1979
1980static int diag_ioctl_dci_support(unsigned long ioarg)
1981{
1982 struct diag_dci_peripherals_t dci_support;
1983 int result = -EINVAL;
1984
1985 if (copy_from_user(&dci_support, (void __user *)ioarg,
1986 sizeof(struct diag_dci_peripherals_t)))
1987 return -EFAULT;
1988
1989 result = diag_dci_get_support_list(&dci_support);
1990 if (result == DIAG_DCI_NO_ERROR)
1991 if (copy_to_user((void __user *)ioarg, &dci_support,
1992 sizeof(struct diag_dci_peripherals_t)))
1993 return -EFAULT;
1994
1995 return result;
1996}
1997
1998static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
1999{
2000 uint8_t hdlc_support;
2001 struct diag_md_session_t *session_info = NULL;
2002
2003 session_info = diag_md_session_get_pid(current->tgid);
2004 if (copy_from_user(&hdlc_support, (void __user *)ioarg,
2005 sizeof(uint8_t)))
2006 return -EFAULT;
2007 mutex_lock(&driver->hdlc_disable_mutex);
2008 if (session_info) {
2009 mutex_lock(&driver->md_session_lock);
2010 session_info->hdlc_disabled = hdlc_support;
2011 mutex_unlock(&driver->md_session_lock);
2012 } else
2013 driver->hdlc_disabled = hdlc_support;
2014 mutex_unlock(&driver->hdlc_disable_mutex);
2015 diag_update_md_clients(HDLC_SUPPORT_TYPE);
2016
2017 return 0;
2018}
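
/*
 * Illustrative userspace sketch for DIAG_IOCTL_HDLC_TOGGLE: the argument
 * is a single uint8_t; a non-zero value disables HDLC encoding for the
 * caller's md session, or globally when the caller has no session.
 *
 *	uint8_t hdlc_disable = 1;
 *	ioctl(diag_fd, DIAG_IOCTL_HDLC_TOGGLE, &hdlc_disable);
 */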
2019
Manoj Prabhu B571cf422017-08-08 19:01:41 +05302020static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
2021{
2022 int ret = -EINVAL;
2023 int peripheral;
2024 char *p_str = NULL;
2025
2026 if (!param)
2027 return -EINVAL;
2028
2029 if (!param->pd_mask) {
2030 DIAG_LOG(DIAG_DEBUG_USERSPACE,
2031 "query with no pd mask set, returning error\n");
2032 return -EINVAL;
2033 }
2034
2035 switch (param->pd_mask) {
2036 case DIAG_CON_UPD_WLAN:
2037 peripheral = PERIPHERAL_MODEM;
2038 p_str = "MODEM";
2039 break;
2040 default:
2041 DIAG_LOG(DIAG_DEBUG_USERSPACE,
2042 "Invalid pd mask, returning EINVAL\n");
2043 return -EINVAL;
2044 }
2045
2046 mutex_lock(&driver->diag_cntl_mutex);
2047 DIAG_LOG(DIAG_DEBUG_USERSPACE,
2048 "diag: %s: Untagging support on APPS is %s\n", __func__,
2049 ((driver->supports_apps_header_untagging) ?
2050 "present" : "absent"));
2051
2052 DIAG_LOG(DIAG_DEBUG_USERSPACE,
2053 "diag: %s: Tagging support on %s is %s\n",
2054 __func__, p_str,
2055 (driver->feature[peripheral].untag_header ?
2056 "present" : "absent"));
2057
2058 if (driver->supports_apps_header_untagging &&
2059 driver->feature[peripheral].untag_header)
2060 ret = 0;
2061
2062 mutex_unlock(&driver->diag_cntl_mutex);
2063 return ret;
2064}
2065
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002066static int diag_ioctl_register_callback(unsigned long ioarg)
2067{
2068 int err = 0;
2069 struct diag_callback_reg_t reg;
2070
2071 if (copy_from_user(&reg, (void __user *)ioarg,
2072 sizeof(struct diag_callback_reg_t))) {
2073 return -EFAULT;
2074 }
2075
2076 if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
2077 pr_err("diag: In %s, invalid proc %d for callback registration\n",
2078 __func__, reg.proc);
2079 return -EINVAL;
2080 }
2081
2082 if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
2083 return -EIO;
2084
2085 return err;
2086}
2087
2088static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
2089{
2090 int i;
2091 int err = 0;
2092 uint32_t count = 0;
2093 struct diag_cmd_reg_entry_t *entries = NULL;
2094 const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
2095
2096
2097 if (!reg_tbl) {
2098 pr_err("diag: In %s, invalid registration table\n", __func__);
2099 return -EINVAL;
2100 }
2101
2102 count = reg_tbl->count;
2103 if ((UINT_MAX / entry_len) < count) {
2104		pr_warn("diag: In %s, possible integer overflow.\n", __func__);
2105 return -EFAULT;
2106 }
2107
2108 entries = kzalloc(count * entry_len, GFP_KERNEL);
2109 if (!entries)
2110 return -ENOMEM;
2111
2112
2113 err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
2114 if (err) {
2115 pr_err("diag: In %s, error copying data from userspace, err: %d\n",
2116 __func__, err);
2117 kfree(entries);
2118 return -EFAULT;
2119 }
2120
2121 for (i = 0; i < count; i++) {
2122 err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
2123 if (err) {
2124 pr_err("diag: In %s, unable to register command, err: %d\n",
2125 __func__, err);
2126 break;
2127 }
2128 }
2129
2130 kfree(entries);
2131 return err;
2132}
2133
2134static int diag_ioctl_cmd_reg(unsigned long ioarg)
2135{
2136 struct diag_cmd_reg_tbl_t reg_tbl;
2137
2138 if (copy_from_user(&reg_tbl, (void __user *)ioarg,
2139 sizeof(struct diag_cmd_reg_tbl_t))) {
2140 return -EFAULT;
2141 }
2142
2143 return diag_cmd_register_tbl(&reg_tbl);
2144}
2145
2146static int diag_ioctl_cmd_dereg(void)
2147{
2148 diag_cmd_remove_reg_by_pid(current->tgid);
2149 return 0;
2150}
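
/*
 * Illustrative userspace sketch for command (de)registration. Only the
 * count and entries members are dereferenced by diag_cmd_register_tbl();
 * the internal layout of struct diag_cmd_reg_entry_t is assumed to be
 * shared with userspace and is not spelled out here.
 *
 *	struct diag_cmd_reg_entry_t entry = { ... };
 *	struct diag_cmd_reg_tbl_t tbl = {
 *		.count = 1,
 *		.entries = &entry,
 *	};
 *	ioctl(diag_fd, DIAG_IOCTL_COMMAND_REG, &tbl);
 *	...
 *	ioctl(diag_fd, DIAG_IOCTL_COMMAND_DEREG, 0);
 */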
2151
2152#ifdef CONFIG_COMPAT
2153/*
2154 * @sync_obj_name: name of the synchronization object associated with this proc
2155 * @count: number of entries in the bind
2156 * @params: the actual packet registrations
2157 */
2158struct diag_cmd_reg_tbl_compat_t {
2159 char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
2160 uint32_t count;
2161 compat_uptr_t entries;
2162};
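
/*
 * Note: this mirrors struct diag_cmd_reg_tbl_t, but carries the entries
 * pointer as a compat_uptr_t so a 32-bit userspace pointer can be widened
 * explicitly before being handed to diag_cmd_register_tbl() below.
 */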
2163
2164static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
2165{
2166 struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
2167 struct diag_cmd_reg_tbl_t reg_tbl;
2168
2169 if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
2170 sizeof(struct diag_cmd_reg_tbl_compat_t))) {
2171 return -EFAULT;
2172 }
2173
2174 strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
2175 MAX_SYNC_OBJ_NAME_SIZE);
2176 reg_tbl.count = reg_tbl_compat.count;
2177 reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
2178 (uintptr_t)reg_tbl_compat.entries;
2179
2180 return diag_cmd_register_tbl(&reg_tbl);
2181}
2182
2183long diagchar_compat_ioctl(struct file *filp,
2184 unsigned int iocmd, unsigned long ioarg)
2185{
2186 int result = -EINVAL;
2187 int client_id = 0;
2188 uint16_t delayed_rsp_id = 0;
2189 uint16_t remote_dev;
2190 struct diag_dci_client_tbl *dci_client = NULL;
2191 struct diag_logging_mode_param_t mode_param;
2192
2193 switch (iocmd) {
2194 case DIAG_IOCTL_COMMAND_REG:
2195 result = diag_ioctl_cmd_reg_compat(ioarg);
2196 break;
2197 case DIAG_IOCTL_COMMAND_DEREG:
2198 result = diag_ioctl_cmd_dereg();
2199 break;
2200 case DIAG_IOCTL_GET_DELAYED_RSP_ID:
2201 delayed_rsp_id = diag_get_next_delayed_rsp_id();
2202 if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
2203 sizeof(uint16_t)))
2204 result = -EFAULT;
2205 else
2206 result = 0;
2207 break;
2208 case DIAG_IOCTL_DCI_REG:
2209 result = diag_ioctl_dci_reg(ioarg);
2210 break;
2211 case DIAG_IOCTL_DCI_DEINIT:
2212 mutex_lock(&driver->dci_mutex);
2213 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2214 sizeof(int))) {
2215 mutex_unlock(&driver->dci_mutex);
2216 return -EFAULT;
2217 }
2218 dci_client = diag_dci_get_client_entry(client_id);
2219 if (!dci_client) {
2220 mutex_unlock(&driver->dci_mutex);
2221 return DIAG_DCI_NOT_SUPPORTED;
2222 }
2223 result = diag_dci_deinit_client(dci_client);
2224 mutex_unlock(&driver->dci_mutex);
2225 break;
2226 case DIAG_IOCTL_DCI_SUPPORT:
2227 result = diag_ioctl_dci_support(ioarg);
2228 break;
2229 case DIAG_IOCTL_DCI_HEALTH_STATS:
2230 mutex_lock(&driver->dci_mutex);
2231 result = diag_ioctl_dci_health_stats(ioarg);
2232 mutex_unlock(&driver->dci_mutex);
2233 break;
2234 case DIAG_IOCTL_DCI_LOG_STATUS:
2235 mutex_lock(&driver->dci_mutex);
2236 result = diag_ioctl_dci_log_status(ioarg);
2237 mutex_unlock(&driver->dci_mutex);
2238 break;
2239 case DIAG_IOCTL_DCI_EVENT_STATUS:
2240 mutex_lock(&driver->dci_mutex);
2241 result = diag_ioctl_dci_event_status(ioarg);
2242 mutex_unlock(&driver->dci_mutex);
2243 break;
2244 case DIAG_IOCTL_DCI_CLEAR_LOGS:
2245 mutex_lock(&driver->dci_mutex);
2246 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2247 sizeof(int))) {
2248 mutex_unlock(&driver->dci_mutex);
2249 return -EFAULT;
2250 }
2251 result = diag_dci_clear_log_mask(client_id);
2252 mutex_unlock(&driver->dci_mutex);
2253 break;
2254 case DIAG_IOCTL_DCI_CLEAR_EVENTS:
2255 mutex_lock(&driver->dci_mutex);
2256 if (copy_from_user(&client_id, (void __user *)ioarg,
2257 sizeof(int))) {
2258 mutex_unlock(&driver->dci_mutex);
2259 return -EFAULT;
2260 }
2261 result = diag_dci_clear_event_mask(client_id);
2262 mutex_unlock(&driver->dci_mutex);
2263 break;
2264 case DIAG_IOCTL_LSM_DEINIT:
2265 result = diag_ioctl_lsm_deinit();
2266 break;
2267 case DIAG_IOCTL_SWITCH_LOGGING:
2268 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2269 sizeof(mode_param)))
2270 return -EFAULT;
2271 mutex_lock(&driver->diagchar_mutex);
2272 result = diag_switch_logging(&mode_param);
2273 mutex_unlock(&driver->diagchar_mutex);
2274 break;
2275 case DIAG_IOCTL_REMOTE_DEV:
2276 remote_dev = diag_get_remote_device_mask();
2277 if (copy_to_user((void __user *)ioarg, &remote_dev,
2278 sizeof(uint16_t)))
2279 result = -EFAULT;
2280 else
2281 result = 1;
2282 break;
2283 case DIAG_IOCTL_VOTE_REAL_TIME:
2284 mutex_lock(&driver->dci_mutex);
2285 result = diag_ioctl_vote_real_time(ioarg);
2286 mutex_unlock(&driver->dci_mutex);
2287 break;
2288 case DIAG_IOCTL_GET_REAL_TIME:
2289 result = diag_ioctl_get_real_time(ioarg);
2290 break;
2291 case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
2292 result = diag_ioctl_set_buffering_mode(ioarg);
2293 break;
2294 case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
2295 result = diag_ioctl_peripheral_drain_immediate(ioarg);
2296 break;
2297 case DIAG_IOCTL_REGISTER_CALLBACK:
2298 result = diag_ioctl_register_callback(ioarg);
2299 break;
2300 case DIAG_IOCTL_HDLC_TOGGLE:
2301 result = diag_ioctl_hdlc_toggle(ioarg);
2302 break;
Manoj Prabhu B571cf422017-08-08 19:01:41 +05302303 case DIAG_IOCTL_QUERY_PD_LOGGING:
2304 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2305 sizeof(mode_param)))
2306 return -EFAULT;
2307 result = diag_ioctl_query_pd_logging(&mode_param);
2308 break;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002309 }
2310 return result;
2311}
2312#endif
2313
2314long diagchar_ioctl(struct file *filp,
2315 unsigned int iocmd, unsigned long ioarg)
2316{
2317 int result = -EINVAL;
2318 int client_id = 0;
2319 uint16_t delayed_rsp_id;
2320 uint16_t remote_dev;
2321 struct diag_dci_client_tbl *dci_client = NULL;
2322 struct diag_logging_mode_param_t mode_param;
2323
2324 switch (iocmd) {
2325 case DIAG_IOCTL_COMMAND_REG:
2326 result = diag_ioctl_cmd_reg(ioarg);
2327 break;
2328 case DIAG_IOCTL_COMMAND_DEREG:
2329 result = diag_ioctl_cmd_dereg();
2330 break;
2331 case DIAG_IOCTL_GET_DELAYED_RSP_ID:
2332 delayed_rsp_id = diag_get_next_delayed_rsp_id();
2333 if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
2334 sizeof(uint16_t)))
2335 result = -EFAULT;
2336 else
2337 result = 0;
2338 break;
2339 case DIAG_IOCTL_DCI_REG:
2340 result = diag_ioctl_dci_reg(ioarg);
2341 break;
2342 case DIAG_IOCTL_DCI_DEINIT:
2343 mutex_lock(&driver->dci_mutex);
2344 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2345 sizeof(int))) {
2346 mutex_unlock(&driver->dci_mutex);
2347 return -EFAULT;
2348 }
2349 dci_client = diag_dci_get_client_entry(client_id);
2350 if (!dci_client) {
2351 mutex_unlock(&driver->dci_mutex);
2352 return DIAG_DCI_NOT_SUPPORTED;
2353 }
2354 result = diag_dci_deinit_client(dci_client);
2355 mutex_unlock(&driver->dci_mutex);
2356 break;
2357 case DIAG_IOCTL_DCI_SUPPORT:
2358 result = diag_ioctl_dci_support(ioarg);
2359 break;
2360 case DIAG_IOCTL_DCI_HEALTH_STATS:
2361 mutex_lock(&driver->dci_mutex);
2362 result = diag_ioctl_dci_health_stats(ioarg);
2363 mutex_unlock(&driver->dci_mutex);
2364 break;
2365 case DIAG_IOCTL_DCI_LOG_STATUS:
2366 mutex_lock(&driver->dci_mutex);
2367 result = diag_ioctl_dci_log_status(ioarg);
2368 mutex_unlock(&driver->dci_mutex);
2369 break;
2370 case DIAG_IOCTL_DCI_EVENT_STATUS:
2371 result = diag_ioctl_dci_event_status(ioarg);
2372 break;
2373 case DIAG_IOCTL_DCI_CLEAR_LOGS:
2374 mutex_lock(&driver->dci_mutex);
2375 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2376 sizeof(int))) {
2377 mutex_unlock(&driver->dci_mutex);
2378 return -EFAULT;
2379 }
2380 result = diag_dci_clear_log_mask(client_id);
2381 mutex_unlock(&driver->dci_mutex);
2382 break;
2383 case DIAG_IOCTL_DCI_CLEAR_EVENTS:
2384 mutex_lock(&driver->dci_mutex);
2385 if (copy_from_user(&client_id, (void __user *)ioarg,
2386 sizeof(int))) {
2387 mutex_unlock(&driver->dci_mutex);
2388 return -EFAULT;
2389 }
2390 result = diag_dci_clear_event_mask(client_id);
2391 mutex_unlock(&driver->dci_mutex);
2392 break;
2393 case DIAG_IOCTL_LSM_DEINIT:
2394 result = diag_ioctl_lsm_deinit();
2395 break;
2396 case DIAG_IOCTL_SWITCH_LOGGING:
2397 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2398 sizeof(mode_param)))
2399 return -EFAULT;
2400 mutex_lock(&driver->diagchar_mutex);
2401 result = diag_switch_logging(&mode_param);
2402 mutex_unlock(&driver->diagchar_mutex);
2403 break;
2404 case DIAG_IOCTL_REMOTE_DEV:
2405 remote_dev = diag_get_remote_device_mask();
2406 if (copy_to_user((void __user *)ioarg, &remote_dev,
2407 sizeof(uint16_t)))
2408 result = -EFAULT;
2409 else
2410 result = 1;
2411 break;
2412 case DIAG_IOCTL_VOTE_REAL_TIME:
2413 mutex_lock(&driver->dci_mutex);
2414 result = diag_ioctl_vote_real_time(ioarg);
2415 mutex_unlock(&driver->dci_mutex);
2416 break;
2417 case DIAG_IOCTL_GET_REAL_TIME:
2418 result = diag_ioctl_get_real_time(ioarg);
2419 break;
2420 case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
2421 result = diag_ioctl_set_buffering_mode(ioarg);
2422 break;
2423 case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
2424 result = diag_ioctl_peripheral_drain_immediate(ioarg);
2425 break;
2426 case DIAG_IOCTL_REGISTER_CALLBACK:
2427 result = diag_ioctl_register_callback(ioarg);
2428 break;
2429 case DIAG_IOCTL_HDLC_TOGGLE:
2430 result = diag_ioctl_hdlc_toggle(ioarg);
2431 break;
Manoj Prabhu B571cf422017-08-08 19:01:41 +05302432 case DIAG_IOCTL_QUERY_PD_LOGGING:
2433 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2434 sizeof(mode_param)))
2435 return -EFAULT;
2436 result = diag_ioctl_query_pd_logging(&mode_param);
2437 break;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002438 }
2439 return result;
2440}
2441
2442static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
2443 int pkt_type)
2444{
2445 int err = 0;
2446 int ret = PKT_DROP;
2447 struct diag_apps_data_t *data = &hdlc_data;
2448 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
2449 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
2450 /*
2451	 * The maximum encoded size of the buffer can be at most twice the length
2452	 * of the packet. Add three bytes for the footer - 16 bit CRC (2 bytes) +
2453	 * delimiter (1 byte).
2454 */
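	/*
	 * Worked example (illustrative): a 100 byte packet can grow to at
	 * most 2 * 100 + 3 = 203 bytes if every payload byte needs an HDLC
	 * escape, which is the worst case max_encoded_size accounts for.
	 */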
2455 const uint32_t max_encoded_size = ((2 * len) + 3);
2456
2457 if (!buf || len <= 0) {
2458 pr_err("diag: In %s, invalid buf: %pK len: %d\n",
2459 __func__, buf, len);
2460 return -EIO;
2461 }
2462
2463 if (max_encoded_size > DIAG_MAX_HDLC_BUF_SIZE) {
2464		pr_err_ratelimited("diag: In %s, encoded data (%d) is larger than the buffer size (%d)\n",
2465 __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
2466 return -EBADMSG;
2467 }
2468
2469 send.state = DIAG_STATE_START;
2470 send.pkt = buf;
2471 send.last = (void *)(buf + len - 1);
2472 send.terminate = 1;
2473
2474 if (!data->buf)
2475 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2476 APF_DIAG_PADDING,
2477 POOL_TYPE_HDLC);
2478 if (!data->buf) {
2479 ret = PKT_DROP;
2480 goto fail_ret;
2481 }
2482
2483 if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
2484 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2485 data->ctxt);
2486 if (err) {
2487 ret = -EIO;
2488 goto fail_free_buf;
2489 }
2490 data->buf = NULL;
2491 data->len = 0;
2492 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2493 APF_DIAG_PADDING,
2494 POOL_TYPE_HDLC);
2495 if (!data->buf) {
2496 ret = PKT_DROP;
2497 goto fail_ret;
2498 }
2499 }
2500
2501 enc.dest = data->buf + data->len;
2502 enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
2503 diag_hdlc_encode(&send, &enc);
2504
2505 /*
2506 * This is to check if after HDLC encoding, we are still within
2507 * the limits of aggregation buffer. If not, we write out the
2508 * current buffer and start aggregation in a newly allocated
2509 * buffer.
2510 */
2511 if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
2512 DIAG_MAX_HDLC_BUF_SIZE)) {
2513 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2514 data->ctxt);
2515 if (err) {
2516 ret = -EIO;
2517 goto fail_free_buf;
2518 }
2519 data->buf = NULL;
2520 data->len = 0;
2521 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2522 APF_DIAG_PADDING,
2523 POOL_TYPE_HDLC);
2524 if (!data->buf) {
2525 ret = PKT_DROP;
2526 goto fail_ret;
2527 }
2528
2529 enc.dest = data->buf + data->len;
2530 enc.dest_last = (void *)(data->buf + data->len +
2531 max_encoded_size);
2532 diag_hdlc_encode(&send, &enc);
2533 }
2534
2535 data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
2536 DIAG_MAX_HDLC_BUF_SIZE) ?
2537 ((uintptr_t)enc.dest - (uintptr_t)data->buf) :
2538 DIAG_MAX_HDLC_BUF_SIZE;
2539
2540 if (pkt_type == DATA_TYPE_RESPONSE) {
2541 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2542 data->ctxt);
2543 if (err) {
2544 ret = -EIO;
2545 goto fail_free_buf;
2546 }
2547 data->buf = NULL;
2548 data->len = 0;
2549 }
2550
2551 return PKT_ALLOC;
2552
2553fail_free_buf:
2554 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
2555 data->buf = NULL;
2556 data->len = 0;
2557
2558fail_ret:
2559 return ret;
2560}
2561
2562static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
2563 int pkt_type)
2564{
2565 int err = 0;
2566 int ret = PKT_DROP;
2567 struct diag_pkt_frame_t header;
2568 struct diag_apps_data_t *data = &non_hdlc_data;
2569 /*
2570 * The maximum packet size, when the data is non hdlc encoded is equal
2571 * to the size of the packet frame header and the length. Add 1 for the
2572 * delimiter 0x7E at the end.
2573 */
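	/*
	 * Resulting frame (illustrative), matching the copies performed
	 * below:
	 *   | CONTROL_CHAR | version | length | payload (len bytes) | CONTROL_CHAR |
	 * i.e. sizeof(struct diag_pkt_frame_t) header bytes, the raw payload
	 * and one trailing delimiter byte.
	 */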
2574 const uint32_t max_pkt_size = sizeof(header) + len + 1;
2575
2576 if (!buf || len <= 0) {
2577 pr_err("diag: In %s, invalid buf: %pK len: %d\n",
2578 __func__, buf, len);
2579 return -EIO;
2580 }
2581
2582 if (!data->buf) {
2583 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2584 APF_DIAG_PADDING,
2585 POOL_TYPE_HDLC);
2586 if (!data->buf) {
2587 ret = PKT_DROP;
2588 goto fail_ret;
2589 }
2590 }
2591
2592 if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
2593 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2594 data->ctxt);
2595 if (err) {
2596 ret = -EIO;
2597 goto fail_free_buf;
2598 }
2599 data->buf = NULL;
2600 data->len = 0;
2601 data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
2602 APF_DIAG_PADDING,
2603 POOL_TYPE_HDLC);
2604 if (!data->buf) {
2605 ret = PKT_DROP;
2606 goto fail_ret;
2607 }
2608 }
2609
2610 header.start = CONTROL_CHAR;
2611 header.version = 1;
2612 header.length = len;
2613 memcpy(data->buf + data->len, &header, sizeof(header));
2614 data->len += sizeof(header);
2615 memcpy(data->buf + data->len, buf, len);
2616 data->len += len;
2617 *(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
2618 data->len += sizeof(uint8_t);
2619 if (pkt_type == DATA_TYPE_RESPONSE) {
2620 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
2621 data->ctxt);
2622 if (err) {
2623 ret = -EIO;
2624 goto fail_free_buf;
2625 }
2626 data->buf = NULL;
2627 data->len = 0;
2628 }
2629
2630 return PKT_ALLOC;
2631
2632fail_free_buf:
2633 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
2634 data->buf = NULL;
2635 data->len = 0;
2636
2637fail_ret:
2638 return ret;
2639}
2640
2641static int diag_user_process_dci_data(const char __user *buf, int len)
2642{
2643 int err = 0;
2644 const int mempool = POOL_TYPE_USER;
2645 unsigned char *user_space_data = NULL;
2646
2647 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2648 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2649 __func__, buf, len);
2650 return -EBADMSG;
2651 }
2652
2653 user_space_data = diagmem_alloc(driver, len, mempool);
2654 if (!user_space_data)
2655 return -ENOMEM;
2656
2657 err = copy_from_user(user_space_data, buf, len);
2658 if (err) {
2659 pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
2660 __func__, err);
2661 err = DIAG_DCI_SEND_DATA_FAIL;
2662 goto fail;
2663 }
2664
2665 err = diag_process_dci_transaction(user_space_data, len);
2666fail:
2667 diagmem_free(driver, user_space_data, mempool);
2668 user_space_data = NULL;
2669 return err;
2670}
2671
2672static int diag_user_process_dci_apps_data(const char __user *buf, int len,
2673 int pkt_type)
2674{
2675 int err = 0;
2676 const int mempool = POOL_TYPE_COPY;
2677 unsigned char *user_space_data = NULL;
2678
2679 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2680 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2681 __func__, buf, len);
2682 return -EBADMSG;
2683 }
2684
2685 pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
2686 if (!pkt_type) {
2687 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
2688 __func__, pkt_type);
2689 return -EBADMSG;
2690 }
2691
2692 user_space_data = diagmem_alloc(driver, len, mempool);
2693 if (!user_space_data)
2694 return -ENOMEM;
2695
2696 err = copy_from_user(user_space_data, buf, len);
2697 if (err) {
2698 pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
2699 __func__, err);
2700 goto fail;
2701 }
2702
2703 diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
2704fail:
2705 diagmem_free(driver, user_space_data, mempool);
2706 user_space_data = NULL;
2707 return err;
2708}
2709
2710static int diag_user_process_raw_data(const char __user *buf, int len)
2711{
2712 int err = 0;
2713 int ret = 0;
2714 int token_offset = 0;
2715 int remote_proc = 0;
2716 const int mempool = POOL_TYPE_COPY;
2717 unsigned char *user_space_data = NULL;
2718 struct diag_md_session_t *info = NULL;
2719
2720 if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
2721 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2722 __func__, buf, len);
2723 return -EBADMSG;
2724 }
2725
2726 user_space_data = diagmem_alloc(driver, len, mempool);
2727 if (!user_space_data)
2728 return -ENOMEM;
2729
2730 err = copy_from_user(user_space_data, buf, len);
2731 if (err) {
2732 pr_err("diag: copy failed for user space data\n");
2733 goto fail;
2734 }
2735
2736 /* Check for proc_type */
2737 remote_proc = diag_get_remote(*(int *)user_space_data);
2738 if (remote_proc) {
2739 token_offset = sizeof(int);
2740 if (len <= MIN_SIZ_ALLOW) {
2741 pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
2742 __func__, len);
2743 diagmem_free(driver, user_space_data, mempool);
2744 user_space_data = NULL;
2745 return -EBADMSG;
2746 }
2747 len -= sizeof(int);
2748 }
2749 if (driver->mask_check) {
2750 if (!mask_request_validate(user_space_data +
2751 token_offset)) {
2752			pr_alert("diag: invalid mask request\n");
2753 diagmem_free(driver, user_space_data, mempool);
2754 user_space_data = NULL;
2755 return -EFAULT;
2756 }
2757 }
2758 if (remote_proc) {
2759 ret = diag_send_raw_data_remote(remote_proc,
2760 (void *)(user_space_data + token_offset),
2761 len, USER_SPACE_RAW_DATA);
2762 if (ret) {
2763 pr_err("diag: Error sending data to remote proc %d, err: %d\n",
2764 remote_proc, ret);
2765 }
2766 } else {
2767 wait_event_interruptible(driver->wait_q,
2768 (driver->in_busy_pktdata == 0));
2769 info = diag_md_session_get_pid(current->tgid);
2770 ret = diag_process_apps_pkt(user_space_data, len, info);
2771 if (ret == 1)
2772 diag_send_error_rsp((void *)(user_space_data), len);
2773 }
2774fail:
2775 diagmem_free(driver, user_space_data, mempool);
2776 user_space_data = NULL;
2777 return ret;
2778}
2779
2780static int diag_user_process_userspace_data(const char __user *buf, int len)
2781{
2782 int err = 0;
2783 int max_retries = 3;
2784 int retry_count = 0;
2785 int remote_proc = 0;
2786 int token_offset = 0;
2787 struct diag_md_session_t *session_info = NULL;
2788 uint8_t hdlc_disabled;
2789
2790 if (!buf || len <= 0 || len > USER_SPACE_DATA) {
2791 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2792 __func__, buf, len);
2793 return -EBADMSG;
2794 }
2795
2796 do {
2797 if (!driver->user_space_data_busy)
2798 break;
2799 retry_count++;
2800 usleep_range(10000, 10100);
2801 } while (retry_count < max_retries);
2802
2803 if (driver->user_space_data_busy)
2804 return -EAGAIN;
2805
2806 err = copy_from_user(driver->user_space_data_buf, buf, len);
2807 if (err) {
2808 pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
2809 __func__, err);
2810 return -EIO;
2811 }
2812
2813 /* Check for proc_type */
2814 remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
2815 if (remote_proc) {
2816 if (len <= MIN_SIZ_ALLOW) {
2817			pr_err("diag: Integer underflow in %s, payload size: %d\n",
2818 __func__, len);
2819 return -EBADMSG;
2820 }
2821 token_offset = sizeof(int);
2822 len -= sizeof(int);
2823 }
2824
2825 /* Check masks for On-Device logging */
2826 if (driver->mask_check) {
2827 if (!mask_request_validate(driver->user_space_data_buf +
2828 token_offset)) {
2829			pr_alert("diag: invalid mask request\n");
2830 return -EFAULT;
2831 }
2832 }
2833
2834 /* send masks to local processor now */
2835 if (!remote_proc) {
2836 session_info = diag_md_session_get_pid(current->tgid);
2837 if (!session_info) {
2838			pr_err("diag: In %s, request came from invalid md session pid: %d\n",
2839 __func__, current->tgid);
2840 return -EINVAL;
2841 }
2842		hdlc_disabled = session_info->hdlc_disabled;
2846 if (!hdlc_disabled)
2847 diag_process_hdlc_pkt((void *)
2848 (driver->user_space_data_buf),
2849 len, session_info);
2850 else
2851 diag_process_non_hdlc_pkt((char *)
2852 (driver->user_space_data_buf),
2853 len, session_info);
2854 return 0;
2855 }
2856
2857 err = diag_process_userspace_remote(remote_proc,
2858 driver->user_space_data_buf +
2859 token_offset, len);
2860 if (err) {
2861 driver->user_space_data_busy = 0;
2862 pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
2863 remote_proc, err);
2864 }
2865
2866 return err;
2867}
2868
2869static int diag_user_process_apps_data(const char __user *buf, int len,
2870 int pkt_type)
2871{
2872 int ret = 0;
2873 int stm_size = 0;
2874 const int mempool = POOL_TYPE_COPY;
2875 unsigned char *user_space_data = NULL;
2876 struct diag_md_session_t *session_info = NULL;
2877 uint8_t hdlc_disabled;
2878
2879 if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
2880 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2881 __func__, buf, len);
2882 return -EBADMSG;
2883 }
2884
2885 switch (pkt_type) {
2886 case DATA_TYPE_EVENT:
2887 case DATA_TYPE_F3:
2888 case DATA_TYPE_LOG:
2889 case DATA_TYPE_RESPONSE:
2890 case DATA_TYPE_DELAYED_RESPONSE:
2891 break;
2892 default:
2893 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
2894 __func__, pkt_type);
2895 return -EBADMSG;
2896 }
2897
2898 user_space_data = diagmem_alloc(driver, len, mempool);
2899 if (!user_space_data) {
2900 diag_record_stats(pkt_type, PKT_DROP);
2901 return -ENOMEM;
2902 }
2903
2904 ret = copy_from_user(user_space_data, buf, len);
2905 if (ret) {
2906 pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
2907 __func__, ret);
2908 diagmem_free(driver, user_space_data, mempool);
2909 user_space_data = NULL;
2910 diag_record_stats(pkt_type, PKT_DROP);
2911 return -EBADMSG;
2912 }
2913
2914 if (driver->stm_state[APPS_DATA] &&
2915 (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
2916 stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
2917 len);
2918 if (stm_size == 0) {
2919 pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
2920 __func__);
2921 }
2922 diagmem_free(driver, user_space_data, mempool);
2923 user_space_data = NULL;
2924
2925 return 0;
2926 }
2927
2928 mutex_lock(&apps_data_mutex);
2929 mutex_lock(&driver->hdlc_disable_mutex);
2930 session_info = diag_md_session_get_peripheral(APPS_DATA);
2931 if (session_info)
2932 hdlc_disabled = session_info->hdlc_disabled;
2933 else
2934 hdlc_disabled = driver->hdlc_disabled;
2935 if (hdlc_disabled)
2936 ret = diag_process_apps_data_non_hdlc(user_space_data, len,
2937 pkt_type);
2938 else
2939 ret = diag_process_apps_data_hdlc(user_space_data, len,
2940 pkt_type);
2941 mutex_unlock(&driver->hdlc_disable_mutex);
2942 mutex_unlock(&apps_data_mutex);
2943
2944 diagmem_free(driver, user_space_data, mempool);
2945 user_space_data = NULL;
2946
2947 check_drain_timer();
2948
2949 if (ret == PKT_DROP)
2950 diag_record_stats(pkt_type, PKT_DROP);
2951 else if (ret == PKT_ALLOC)
2952 diag_record_stats(pkt_type, PKT_ALLOC);
2953 else
2954 return ret;
2955
2956 return 0;
2957}
2958
Mohit Aggarwal99a06732017-07-28 15:40:27 +05302959static int check_data_ready(int index)
2960{
2961 int data_type = 0;
2962
2963 mutex_lock(&driver->diagchar_mutex);
2964 data_type = driver->data_ready[index];
2965 mutex_unlock(&driver->diagchar_mutex);
2966 return data_type;
2967}
2968
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002969static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
2970 loff_t *ppos)
2971{
2972 struct diag_dci_client_tbl *entry;
2973 struct list_head *start, *temp;
2974 int index = -1, i = 0, ret = 0;
2975 int data_type;
2976 int copy_dci_data = 0;
2977 int exit_stat = 0;
2978 int write_len = 0;
2979 struct diag_md_session_t *session_info = NULL;
2980
Mohit Aggarwal99a06732017-07-28 15:40:27 +05302981 mutex_lock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002982 for (i = 0; i < driver->num_clients; i++)
2983 if (driver->client_map[i].pid == current->tgid)
2984 index = i;
Mohit Aggarwal99a06732017-07-28 15:40:27 +05302985 mutex_unlock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002986
2987 if (index == -1) {
2988		pr_err("diag: Client PID not found in table\n");
2989 return -EINVAL;
2990 }
2991 if (!buf) {
2992 pr_err("diag: bad address from user side\n");
2993 return -EFAULT;
2994 }
Mohit Aggarwal99a06732017-07-28 15:40:27 +05302995 wait_event_interruptible(driver->wait_q, (check_data_ready(index)) > 0);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002996
2997 mutex_lock(&driver->diagchar_mutex);
2998
2999 if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
3000 (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
3001 driver->logging_mode == DIAG_MULTI_MODE)) {
3002 pr_debug("diag: process woken up\n");
3003 /*Copy the type of data being passed*/
3004 data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
3005 driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
3006 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
3007 if (ret == -EFAULT)
3008 goto exit;
3009 /* place holder for number of data field */
3010 ret += sizeof(int);
3011 session_info = diag_md_session_get_pid(current->tgid);
3012 exit_stat = diag_md_copy_to_user(buf, &ret, count,
3013 session_info);
3014 goto exit;
3015 } else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
3016 /* In case, the thread wakes up and the logging mode is not
3017 * memory device any more, the condition needs to be cleared.
3018 */
3019 driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
3020 }
3021
3022 if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
3023 data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
3024 driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
3025 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
3026 if (ret == -EFAULT)
3027 goto exit;
3028
3029 session_info = diag_md_session_get_pid(current->tgid);
3030 if (session_info) {
3031 COPY_USER_SPACE_OR_ERR(buf+4,
3032 session_info->hdlc_disabled,
3033 sizeof(uint8_t));
3034 if (ret == -EFAULT)
3035 goto exit;
3036 }
3037 goto exit;
3038 }
3039
3040 if (driver->data_ready[index] & DEINIT_TYPE) {
3041 /*Copy the type of data being passed*/
3042 data_type = driver->data_ready[index] & DEINIT_TYPE;
3043 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3044 if (ret == -EFAULT)
3045 goto exit;
3046 driver->data_ready[index] ^= DEINIT_TYPE;
3047 mutex_unlock(&driver->diagchar_mutex);
3048 diag_remove_client_entry(file);
3049 return ret;
3050 }
3051
3052 if (driver->data_ready[index] & MSG_MASKS_TYPE) {
3053 /*Copy the type of data being passed*/
3054 data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
3055 session_info = diag_md_session_get_peripheral(APPS_DATA);
3056 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
3057 if (ret == -EFAULT)
3058 goto exit;
3059 write_len = diag_copy_to_user_msg_mask(buf + ret, count,
3060 session_info);
3061 if (write_len > 0)
3062 ret += write_len;
3063 driver->data_ready[index] ^= MSG_MASKS_TYPE;
3064 goto exit;
3065 }
3066
3067 if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
3068 /*Copy the type of data being passed*/
3069 data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
3070 session_info = diag_md_session_get_peripheral(APPS_DATA);
3071 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3072 if (ret == -EFAULT)
3073 goto exit;
3074
3075 if (session_info && session_info->event_mask &&
3076 session_info->event_mask->ptr) {
3077 COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
3078 *(session_info->event_mask->ptr),
3079 session_info->event_mask->mask_len);
3080 if (ret == -EFAULT)
3081 goto exit;
3082 } else {
3083 COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
3084 *(event_mask.ptr),
3085 event_mask.mask_len);
3086 if (ret == -EFAULT)
3087 goto exit;
3088 }
3089 driver->data_ready[index] ^= EVENT_MASKS_TYPE;
3090 goto exit;
3091 }
3092
3093 if (driver->data_ready[index] & LOG_MASKS_TYPE) {
3094 /*Copy the type of data being passed*/
3095 data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
3096 session_info = diag_md_session_get_peripheral(APPS_DATA);
3097 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
3098 if (ret == -EFAULT)
3099 goto exit;
3100
3101 write_len = diag_copy_to_user_log_mask(buf + ret, count,
3102 session_info);
3103 if (write_len > 0)
3104 ret += write_len;
3105 driver->data_ready[index] ^= LOG_MASKS_TYPE;
3106 goto exit;
3107 }
3108
3109 if (driver->data_ready[index] & PKT_TYPE) {
3110 /*Copy the type of data being passed*/
3111 data_type = driver->data_ready[index] & PKT_TYPE;
3112 COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(data_type));
3113 if (ret == -EFAULT)
3114 goto exit;
3115
3116 COPY_USER_SPACE_OR_ERR(buf + sizeof(data_type),
3117 *(driver->apps_req_buf),
3118 driver->apps_req_buf_len);
3119 if (ret == -EFAULT)
3120 goto exit;
3121 driver->data_ready[index] ^= PKT_TYPE;
3122 driver->in_busy_pktdata = 0;
3123 goto exit;
3124 }
3125
3126 if (driver->data_ready[index] & DCI_PKT_TYPE) {
3127 /* Copy the type of data being passed */
3128 data_type = driver->data_ready[index] & DCI_PKT_TYPE;
3129 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3130 if (ret == -EFAULT)
3131 goto exit;
3132
3133 COPY_USER_SPACE_OR_ERR(buf+4, *(driver->dci_pkt_buf),
3134 driver->dci_pkt_length);
3135 if (ret == -EFAULT)
3136 goto exit;
3137
3138 driver->data_ready[index] ^= DCI_PKT_TYPE;
3139 driver->in_busy_dcipktdata = 0;
3140 goto exit;
3141 }
3142
3143 if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
3144 /*Copy the type of data being passed*/
3145 data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
3146 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3147 if (ret == -EFAULT)
3148 goto exit;
3149
3150 COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
3151 if (ret == -EFAULT)
3152 goto exit;
3153
3154 COPY_USER_SPACE_OR_ERR(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
3155 event_mask_composite), DCI_EVENT_MASK_SIZE);
3156 if (ret == -EFAULT)
3157 goto exit;
3158
3159 driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
3160 goto exit;
3161 }
3162
3163 if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
3164 /*Copy the type of data being passed*/
3165 data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
3166 COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
3167 if (ret == -EFAULT)
3168 goto exit;
3169
3170 COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
3171 if (ret == -EFAULT)
3172 goto exit;
3173
3174 COPY_USER_SPACE_OR_ERR(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
3175 log_mask_composite), DCI_LOG_MASK_SIZE);
3176 if (ret == -EFAULT)
3177 goto exit;
3178 driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
3179 goto exit;
3180 }
3181
3182exit:
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003183 if (driver->data_ready[index] & DCI_DATA_TYPE) {
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003184 data_type = driver->data_ready[index] & DCI_DATA_TYPE;
Mohit Aggarwal99a06732017-07-28 15:40:27 +05303185 mutex_unlock(&driver->diagchar_mutex);
3186 /* Copy the type of data being passed */
3187 mutex_lock(&driver->dci_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003188 list_for_each_safe(start, temp, &driver->dci_client_list) {
3189 entry = list_entry(start, struct diag_dci_client_tbl,
3190 track);
3191 if (entry->client->tgid != current->tgid)
3192 continue;
3193 if (!entry->in_service)
3194 continue;
3195 if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
3196 mutex_unlock(&driver->dci_mutex);
3197 goto end;
3198 }
3199 ret += sizeof(int);
3200 if (copy_to_user(buf + ret, &entry->client_info.token,
3201 sizeof(int))) {
3202 mutex_unlock(&driver->dci_mutex);
3203 goto end;
3204 }
3205 ret += sizeof(int);
3206 copy_dci_data = 1;
3207 exit_stat = diag_copy_dci(buf, count, entry, &ret);
3208 mutex_lock(&driver->diagchar_mutex);
3209 driver->data_ready[index] ^= DCI_DATA_TYPE;
3210 mutex_unlock(&driver->diagchar_mutex);
3211 if (exit_stat == 1) {
3212 mutex_unlock(&driver->dci_mutex);
3213 goto end;
3214 }
3215 }
3216 mutex_unlock(&driver->dci_mutex);
3217 goto end;
3218 }
Mohit Aggarwal99a06732017-07-28 15:40:27 +05303219 mutex_unlock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003220end:
3221 /*
3222 * Flush any read that is currently pending on DCI data and
3223	 * command channels. This will ensure that the next read is not
3224 * missed.
3225 */
3226 if (copy_dci_data) {
3227 diag_ws_on_copy_complete(DIAG_WS_DCI);
3228 flush_workqueue(driver->diag_dci_wq);
3229 }
3230 return ret;
3231}
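
/*
 * Read framing note: every successful read begins with an int that names
 * the data type being returned (USER_SPACE_DATA_TYPE, MSG_MASKS_TYPE,
 * PKT_TYPE, DCI_DATA_TYPE, ...), followed by a type specific payload, so
 * callers are expected to switch on that leading int before parsing the
 * remainder of the buffer.
 */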
3232
3233static ssize_t diagchar_write(struct file *file, const char __user *buf,
3234 size_t count, loff_t *ppos)
3235{
3236 int err = 0;
3237 int pkt_type = 0;
3238 int payload_len = 0;
3239 const char __user *payload_buf = NULL;
3240
3241 /*
3242	 * The data coming from the user space should at least have the
3243	 * packet type header.
3244 */
3245 if (count < sizeof(int)) {
3246 pr_err("diag: In %s, client is sending short data, len: %d\n",
3247 __func__, (int)count);
3248 return -EBADMSG;
3249 }
3250
3251 err = copy_from_user((&pkt_type), buf, sizeof(int));
3252 if (err) {
3253 pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
3254 __func__, err);
3255 return -EIO;
3256 }
3257
3258 if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
3259 if (!((pkt_type == DCI_DATA_TYPE) ||
3260 (pkt_type == DCI_PKT_TYPE) ||
3261 (pkt_type & DATA_TYPE_DCI_LOG) ||
3262 (pkt_type & DATA_TYPE_DCI_EVENT))) {
3263 pr_debug("diag: In %s, Dropping non DCI packet type\n",
3264 __func__);
3265 return -EIO;
3266 }
3267 }
3268
3269 payload_buf = buf + sizeof(int);
3270 payload_len = count - sizeof(int);
3271
3272 if (pkt_type == DCI_PKT_TYPE)
3273 return diag_user_process_dci_apps_data(payload_buf,
3274 payload_len,
3275 pkt_type);
3276 else if (pkt_type == DCI_DATA_TYPE)
3277 return diag_user_process_dci_data(payload_buf, payload_len);
3278 else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
3279 return diag_user_process_raw_data(payload_buf,
3280 payload_len);
3281 else if (pkt_type == USER_SPACE_DATA_TYPE)
3282 return diag_user_process_userspace_data(payload_buf,
3283 payload_len);
3284 if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
3285 err = diag_user_process_dci_apps_data(payload_buf, payload_len,
3286 pkt_type);
3287 if (pkt_type & DATA_TYPE_DCI_LOG)
3288 pkt_type ^= DATA_TYPE_DCI_LOG;
3289 if (pkt_type & DATA_TYPE_DCI_EVENT)
3290 pkt_type ^= DATA_TYPE_DCI_EVENT;
3291 /*
3292 * Check if the log or event is selected even on the regular
3293 * stream. If USB is not connected and we are not in memory
3294 * device mode, we should not process these logs/events.
3295 */
3296 if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
3297 !driver->usb_connected)
3298 return err;
3299 }
3300
3301 switch (pkt_type) {
3302 case DATA_TYPE_EVENT:
3303 case DATA_TYPE_F3:
3304 case DATA_TYPE_LOG:
3305 case DATA_TYPE_DELAYED_RESPONSE:
3306 case DATA_TYPE_RESPONSE:
3307 return diag_user_process_apps_data(payload_buf, payload_len,
3308 pkt_type);
3309 default:
3310 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
3311 __func__, pkt_type);
3312 return -EINVAL;
3313 }
3314
3315 return err;
3316}
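
/*
 * Write framing sketch (illustrative userspace side): the first int of
 * every write selects the packet type and the remainder is the payload
 * handed to the matching diag_user_process_*() helper, e.g.:
 *
 *	struct {
 *		int pkt_type;
 *		unsigned char payload[DIAG_MAX_RSP_SIZE];
 *	} req = { .pkt_type = DATA_TYPE_LOG };
 *	// fill req.payload with payload_len bytes, then:
 *	write(diag_fd, &req, sizeof(int) + payload_len);
 */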
3317
3318void diag_ws_init(void)
3319{
3320 driver->dci_ws.ref_count = 0;
3321 driver->dci_ws.copy_count = 0;
3322 spin_lock_init(&driver->dci_ws.lock);
3323
3324 driver->md_ws.ref_count = 0;
3325 driver->md_ws.copy_count = 0;
3326 spin_lock_init(&driver->md_ws.lock);
3327}
3328
3329static void diag_stats_init(void)
3330{
3331 if (!driver)
3332 return;
3333
3334 driver->msg_stats.alloc_count = 0;
3335 driver->msg_stats.drop_count = 0;
3336
3337 driver->log_stats.alloc_count = 0;
3338 driver->log_stats.drop_count = 0;
3339
3340 driver->event_stats.alloc_count = 0;
3341 driver->event_stats.drop_count = 0;
3342}
3343
3344void diag_ws_on_notify(void)
3345{
3346 /*
3347 * Do not deal with reference count here as there can be spurious
3348 * interrupts.
3349 */
3350 pm_stay_awake(driver->diag_dev);
3351}
3352
3353void diag_ws_on_read(int type, int pkt_len)
3354{
3355 unsigned long flags;
3356 struct diag_ws_ref_t *ws_ref = NULL;
3357
3358 switch (type) {
3359 case DIAG_WS_DCI:
3360 ws_ref = &driver->dci_ws;
3361 break;
3362 case DIAG_WS_MUX:
3363 ws_ref = &driver->md_ws;
3364 break;
3365 default:
3366 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3367 __func__, type);
3368 return;
3369 }
3370
3371 spin_lock_irqsave(&ws_ref->lock, flags);
3372 if (pkt_len > 0) {
3373 ws_ref->ref_count++;
3374 } else {
3375 if (ws_ref->ref_count < 1) {
3376 ws_ref->ref_count = 0;
3377 ws_ref->copy_count = 0;
3378 }
3379 diag_ws_release();
3380 }
3381 spin_unlock_irqrestore(&ws_ref->lock, flags);
3382}
3383
3384
3385void diag_ws_on_copy(int type)
3386{
3387 unsigned long flags;
3388 struct diag_ws_ref_t *ws_ref = NULL;
3389
3390 switch (type) {
3391 case DIAG_WS_DCI:
3392 ws_ref = &driver->dci_ws;
3393 break;
3394 case DIAG_WS_MUX:
3395 ws_ref = &driver->md_ws;
3396 break;
3397 default:
3398 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3399 __func__, type);
3400 return;
3401 }
3402
3403 spin_lock_irqsave(&ws_ref->lock, flags);
3404 ws_ref->copy_count++;
3405 spin_unlock_irqrestore(&ws_ref->lock, flags);
3406}
3407
3408void diag_ws_on_copy_fail(int type)
3409{
3410 unsigned long flags;
3411 struct diag_ws_ref_t *ws_ref = NULL;
3412
3413 switch (type) {
3414 case DIAG_WS_DCI:
3415 ws_ref = &driver->dci_ws;
3416 break;
3417 case DIAG_WS_MUX:
3418 ws_ref = &driver->md_ws;
3419 break;
3420 default:
3421 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3422 __func__, type);
3423 return;
3424 }
3425
3426 spin_lock_irqsave(&ws_ref->lock, flags);
3427 ws_ref->ref_count--;
3428 spin_unlock_irqrestore(&ws_ref->lock, flags);
3429
3430 diag_ws_release();
3431}
3432
3433void diag_ws_on_copy_complete(int type)
3434{
3435 unsigned long flags;
3436 struct diag_ws_ref_t *ws_ref = NULL;
3437
3438 switch (type) {
3439 case DIAG_WS_DCI:
3440 ws_ref = &driver->dci_ws;
3441 break;
3442 case DIAG_WS_MUX:
3443 ws_ref = &driver->md_ws;
3444 break;
3445 default:
3446 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3447 __func__, type);
3448 return;
3449 }
3450
3451 spin_lock_irqsave(&ws_ref->lock, flags);
3452 ws_ref->ref_count -= ws_ref->copy_count;
3453 if (ws_ref->ref_count < 1)
3454 ws_ref->ref_count = 0;
3455 ws_ref->copy_count = 0;
3456 spin_unlock_irqrestore(&ws_ref->lock, flags);
3457
3458 diag_ws_release();
3459}
3460
3461void diag_ws_reset(int type)
3462{
3463 unsigned long flags;
3464 struct diag_ws_ref_t *ws_ref = NULL;
3465
3466 switch (type) {
3467 case DIAG_WS_DCI:
3468 ws_ref = &driver->dci_ws;
3469 break;
3470 case DIAG_WS_MUX:
3471 ws_ref = &driver->md_ws;
3472 break;
3473 default:
3474 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3475 __func__, type);
3476 return;
3477 }
3478
3479 spin_lock_irqsave(&ws_ref->lock, flags);
3480 ws_ref->ref_count = 0;
3481 ws_ref->copy_count = 0;
3482 spin_unlock_irqrestore(&ws_ref->lock, flags);
3483
3484 diag_ws_release();
3485}
3486
3487void diag_ws_release(void)
3488{
3489 if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
3490 pm_relax(driver->diag_dev);
3491}
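
/*
 * Wakeup source bookkeeping summary: diag_ws_on_read() takes one
 * reference per packet read in, diag_ws_on_copy() counts packets copied
 * to userspace, diag_ws_on_copy_complete() drops that many references,
 * and diag_ws_release() lets the device relax only once both the DCI
 * and MUX reference counts have drained to zero.
 */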
3492
3493#ifdef DIAG_DEBUG
3494static void diag_debug_init(void)
3495{
3496 diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
3497 if (!diag_ipc_log)
3498 pr_err("diag: Failed to create IPC logging context\n");
3499 /*
3500 * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
3501 * to be logged to IPC
3502 */
3503 diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
Manoj Prabhu B571cf422017-08-08 19:01:41 +05303504 DIAG_DEBUG_USERSPACE | DIAG_DEBUG_BRIDGE;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003505}
3506#else
3507static void diag_debug_init(void)
3508{
3509
3510}
3511#endif
3512
3513static int diag_real_time_info_init(void)
3514{
3515 int i;
3516
3517 if (!driver)
3518 return -EIO;
3519 for (i = 0; i < DIAG_NUM_PROC; i++) {
3520 driver->real_time_mode[i] = 1;
3521 driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
3522 driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
3523 }
3524 driver->real_time_update_busy = 0;
3525 driver->proc_active_mask = 0;
3526 driver->diag_real_time_wq = create_singlethread_workqueue(
3527 "diag_real_time_wq");
3528 if (!driver->diag_real_time_wq)
3529 return -ENOMEM;
3530 INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
3531 mutex_init(&driver->real_time_mutex);
3532 return 0;
3533}
3534
3535static const struct file_operations diagcharfops = {
3536 .owner = THIS_MODULE,
3537 .read = diagchar_read,
3538 .write = diagchar_write,
3539#ifdef CONFIG_COMPAT
3540 .compat_ioctl = diagchar_compat_ioctl,
3541#endif
3542 .unlocked_ioctl = diagchar_ioctl,
3543 .open = diagchar_open,
3544 .release = diagchar_close
3545};
3546
3547static int diagchar_setup_cdev(dev_t devno)
3548{
3549
3550 int err;
3551
3552 cdev_init(driver->cdev, &diagcharfops);
3553
3554 driver->cdev->owner = THIS_MODULE;
3555 driver->cdev->ops = &diagcharfops;
3556
3557 err = cdev_add(driver->cdev, devno, 1);
3558
3559 if (err) {
3560		pr_info("diagchar cdev registration failed!\n");
3561 return err;
3562 }
3563
3564 driver->diagchar_class = class_create(THIS_MODULE, "diag");
3565
3566 if (IS_ERR(driver->diagchar_class)) {
3567 pr_err("Error creating diagchar class.\n");
3568 return PTR_ERR(driver->diagchar_class);
3569 }
3570
3571 driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
3572 (void *)driver, "diag");
3573
3574	if (IS_ERR(driver->diag_dev))
3575		return PTR_ERR(driver->diag_dev);
3576
3577 driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
3578 return 0;
3579
3580}
3581
3582static int diagchar_cleanup(void)
3583{
3584 if (driver) {
3585 if (driver->cdev) {
3586 /* TODO - Check if device exists before deleting */
3587 device_destroy(driver->diagchar_class,
3588 MKDEV(driver->major,
3589 driver->minor_start));
3590 cdev_del(driver->cdev);
3591 }
3592 if (!IS_ERR(driver->diagchar_class))
3593 class_destroy(driver->diagchar_class);
3594 kfree(driver);
3595 }
3596 return 0;
3597}
3598
3599static int __init diagchar_init(void)
3600{
3601 dev_t dev;
Manoj Prabhu B98325462017-01-10 20:19:28 +05303602 int ret, i;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003603
3604 pr_debug("diagfwd initializing ..\n");
3605 ret = 0;
3606 driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
3607 if (!driver)
3608 return -ENOMEM;
3609 kmemleak_not_leak(driver);
3610
3611 timer_in_progress = 0;
3612 driver->delayed_rsp_id = 0;
3613 driver->hdlc_disabled = 0;
3614 driver->dci_state = DIAG_DCI_NO_ERROR;
3615 setup_timer(&drain_timer, drain_timer_func, 1234);
3616 driver->supports_sockets = 1;
3617 driver->time_sync_enabled = 0;
3618 driver->uses_time_api = 0;
3619 driver->poolsize = poolsize;
3620 driver->poolsize_hdlc = poolsize_hdlc;
3621 driver->poolsize_dci = poolsize_dci;
3622 driver->poolsize_user = poolsize_user;
3623 /*
3624 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
3625 * The number of buffers encompasses Diag data generated on
3626	 * the Apps processor + 1 for the responses generated exclusively on
3627 * the Apps processor + data from data channels (4 channels per
3628 * peripheral) + data from command channels (2)
3629 */
3630 diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
3631 poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
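	/*
	 * i.e. poolsize_usb_apps buffers for Apps data, one dedicated
	 * response buffer and six per peripheral (four data channels plus
	 * two command channels), per the comment above.
	 */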
3632 driver->num_clients = max_clients;
3633 driver->logging_mode = DIAG_USB_MODE;
Manoj Prabhu B571cf422017-08-08 19:01:41 +05303634 for (i = 0; i < NUM_UPD; i++) {
3635 driver->pd_logging_mode[i] = 0;
3636 driver->pd_session_clear[i] = 0;
3637 }
3638 driver->num_pd_session = 0;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003639 driver->mask_check = 0;
3640 driver->in_busy_pktdata = 0;
3641 driver->in_busy_dcipktdata = 0;
3642 driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
3643 hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3644 hdlc_data.len = 0;
3645 non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3646 non_hdlc_data.len = 0;
3647 mutex_init(&driver->hdlc_disable_mutex);
3648 mutex_init(&driver->diagchar_mutex);
3649 mutex_init(&driver->diag_maskclear_mutex);
Manoj Prabhu B2a428272016-12-22 15:22:03 +05303650 mutex_init(&driver->diag_notifier_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003651 mutex_init(&driver->diag_file_mutex);
3652 mutex_init(&driver->delayed_rsp_mutex);
3653 mutex_init(&apps_data_mutex);
Gopikrishna Mogasati9a44d8d2017-05-05 16:04:35 +05303654 mutex_init(&driver->msg_mask_lock);
Hardik Arya62dce9f2017-06-15 10:39:34 +05303655 mutex_init(&driver->hdlc_recovery_mutex);
Manoj Prabhu B571cf422017-08-08 19:01:41 +05303656 for (i = 0; i < NUM_PERIPHERALS; i++) {
Manoj Prabhu B98325462017-01-10 20:19:28 +05303657 mutex_init(&driver->diagfwd_channel_mutex[i]);
Manoj Prabhu B571cf422017-08-08 19:01:41 +05303658 driver->diag_id_sent[i] = 0;
3659 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003660 init_waitqueue_head(&driver->wait_q);
3661 INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
3662 INIT_WORK(&(driver->update_user_clients),
3663 diag_update_user_client_work_fn);
3664 INIT_WORK(&(driver->update_md_clients),
3665 diag_update_md_client_work_fn);
3666 diag_ws_init();
3667 diag_stats_init();
3668 diag_debug_init();
3669 diag_md_session_init();
3670
3671 driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
3672 driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
3673 if (!driver->incoming_pkt.data) {
3674 ret = -ENOMEM;
3675 goto fail;
3676 }
3677 kmemleak_not_leak(driver->incoming_pkt.data);
3678 driver->incoming_pkt.processing = 0;
3679 driver->incoming_pkt.read_len = 0;
3680 driver->incoming_pkt.remaining = 0;
3681 driver->incoming_pkt.total_len = 0;
3682
3683 ret = diag_real_time_info_init();
3684 if (ret)
3685 goto fail;
3686 ret = diag_debugfs_init();
3687 if (ret)
3688 goto fail;
3689 ret = diag_masks_init();
3690 if (ret)
3691 goto fail;
3692 ret = diag_remote_init();
3693 if (ret)
3694 goto fail;
3695 ret = diag_mux_init();
3696 if (ret)
3697 goto fail;
3698 ret = diagfwd_init();
3699 if (ret)
3700 goto fail;
3701 ret = diagfwd_cntl_init();
3702 if (ret)
3703 goto fail;
3704 driver->dci_state = diag_dci_init();
3705 ret = diagfwd_peripheral_init();
3706 if (ret)
3707 goto fail;
3708 diagfwd_cntl_channel_init();
3709 if (driver->dci_state == DIAG_DCI_NO_ERROR)
3710 diag_dci_channel_init();
3711 pr_debug("diagchar initializing ..\n");
3712 driver->num = 1;
3713 driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
3714	strlcpy(driver->name, "diag", 5);
3715 /* Get major number from kernel and initialize */
3716 ret = alloc_chrdev_region(&dev, driver->minor_start,
3717 driver->num, driver->name);
3718 if (!ret) {
3719 driver->major = MAJOR(dev);
3720 driver->minor_start = MINOR(dev);
3721 } else {
3722 pr_err("diag: Major number not allocated\n");
3723 goto fail;
3724 }
3725 driver->cdev = cdev_alloc();
3726 ret = diagchar_setup_cdev(dev);
3727 if (ret)
3728 goto fail;
Sreelakshmi Gownipalli8d477d32017-02-08 19:49:06 -08003729 mutex_init(&driver->diag_id_mutex);
3730 INIT_LIST_HEAD(&driver->diag_id_list);
3731 diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS");
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003732 pr_debug("diagchar initialized now");
3733 ret = diagfwd_bridge_init();
3734 if (ret)
3735 diagfwd_bridge_exit();
3736 return 0;
3737
3738fail:
3739 pr_err("diagchar is not initialized, ret: %d\n", ret);
3740 diag_debugfs_cleanup();
3741 diagchar_cleanup();
3742 diag_mux_exit();
3743 diagfwd_peripheral_exit();
3744 diagfwd_bridge_exit();
3745 diagfwd_exit();
3746 diagfwd_cntl_exit();
3747 diag_dci_exit();
3748 diag_masks_exit();
3749 diag_remote_exit();
3750 return ret;
3751
3752}
3753
3754static void diagchar_exit(void)
3755{
3756 pr_info("diagchar exiting...\n");
3757 diag_mempool_exit();
3758 diag_mux_exit();
3759 diagfwd_peripheral_exit();
3760 diagfwd_exit();
3761 diagfwd_cntl_exit();
3762 diag_dci_exit();
3763 diag_masks_exit();
3764 diag_md_session_exit();
3765 diag_remote_exit();
3766 diag_debugfs_cleanup();
3767 diagchar_cleanup();
3768 pr_info("done diagchar exit\n");
3769}
3770
3771module_init(diagchar_init);
3772module_exit(diagchar_exit);