blob: 45376d8a0142e82742c08a3bc5352025941ba6d2 [file] [log] [blame]
Sreelakshmi Gownipalli8d477d32017-02-08 19:49:06 -08001/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/cdev.h>
17#include <linux/fs.h>
18#include <linux/device.h>
19#include <linux/delay.h>
20#include <linux/uaccess.h>
21#include <linux/diagchar.h>
22#include <linux/sched.h>
23#include <linux/ratelimit.h>
24#include <linux/timer.h>
25#ifdef CONFIG_DIAG_OVER_USB
26#include <linux/usb/usbdiag.h>
27#endif
28#include <asm/current.h>
29#include "diagchar_hdlc.h"
30#include "diagmem.h"
31#include "diagchar.h"
32#include "diagfwd.h"
33#include "diagfwd_cntl.h"
34#include "diag_dci.h"
35#include "diag_debugfs.h"
36#include "diag_masks.h"
37#include "diagfwd_bridge.h"
38#include "diag_usb.h"
39#include "diag_memorydevice.h"
40#include "diag_mux.h"
41#include "diag_ipc_logging.h"
42#include "diagfwd_peripheral.h"
43
44#include <linux/coresight-stm.h>
45#include <linux/kernel.h>
46#ifdef CONFIG_COMPAT
47#include <linux/compat.h>
48#endif
49
MODULE_DESCRIPTION("Diag Char Driver");
MODULE_LICENSE("GPL v2");

/* Smallest user-space write that can carry a parseable diag packet */
#define MIN_SIZ_ALLOW 4
#define INIT 1
#define EXIT -1

/* Global diag driver context, shared by every function in this file. */
struct diagchar_dev *driver;

/* Per-open-file private data; records the tgid of the opening process. */
struct diagchar_priv {
	int pid;
};

/* Framing of data arriving from user-space writes */
#define USER_SPACE_RAW_DATA 0
#define USER_SPACE_HDLC_DATA 1

/* Memory pool variables */
/* Used for copying any incoming packet from user space clients. */
static unsigned int poolsize = 12;
module_param(poolsize, uint, 0000);

/*
 * Used for HDLC encoding packets coming from the user
 * space.
 */
static unsigned int poolsize_hdlc = 10;
module_param(poolsize_hdlc, uint, 0000);

/*
 * This is used for incoming DCI requests from the user space clients.
 * Don't expose itemsize as it is internal.
 */
static unsigned int poolsize_user = 8;
module_param(poolsize_user, uint, 0000);

/*
 * USB structures allocated for writing Diag data generated on the Apps to USB.
 * Don't expose itemsize as it is constant.
 */
static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
static unsigned int poolsize_usb_apps = 10;
module_param(poolsize_usb_apps, uint, 0000);

/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
static unsigned int poolsize_dci = 10;
module_param(poolsize_dci, uint, 0000);

#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/* Used for reading data from the remote device. */
static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
static unsigned int poolsize_mdm = 18;
module_param(itemsize_mdm, uint, 0000);
module_param(poolsize_mdm, uint, 0000);

/*
 * Used for reading DCI data from the remote device.
 * Don't expose poolsize for DCI data. There is only one read buffer
 */
static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
static unsigned int poolsize_mdm_dci = 1;
module_param(itemsize_mdm_dci, uint, 0000);

/*
 * Used for USB structures associated with a remote device.
 * Don't expose the itemsize since it is constant.
 */
static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
static unsigned int poolsize_mdm_usb = 18;
module_param(poolsize_mdm_usb, uint, 0000);

/*
 * Used for writing read DCI data to remote peripherals. Don't
 * expose poolsize for DCI data. There is only one read
 * buffer. Add 6 bytes for DCI header information: Start (1),
 * Version (1), Length (2), Tag (2)
 */
static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
static unsigned int poolsize_mdm_dci_write = 1;
module_param(itemsize_mdm_dci_write, uint, 0000);

/*
 * Used for USB structures associated with a remote SMUX
 * device. Don't expose the itemsize since it is constant.
 */
static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
static unsigned int poolsize_qsc_usb = 8;
module_param(poolsize_qsc_usb, uint, 0000);
#endif

/* This is the max number of user-space clients supported at initialization*/
static unsigned int max_clients = 15;
/* Hard ceiling when growing the client table past max_clients */
static unsigned int threshold_client_limit = 50;
module_param(max_clients, uint, 0000);

/* Timer variables */
static struct timer_list drain_timer;
/* Non-zero while a drain timer is armed; cleared in diag_drain_work_fn */
static int timer_in_progress;

/*
 * Diag Mask clear variable
 * Used for clearing masks upon
 * USB disconnection and stopping ODL
 */
static int diag_mask_clear_param = 1;
module_param(diag_mask_clear_param, int, 0644);

/* Apps-generated data pending a mux write (one each for HDLC and raw) */
struct diag_apps_data_t {
	void *buf;
	uint32_t len;
	int ctxt;
};

static struct diag_apps_data_t hdlc_data;
static struct diag_apps_data_t non_hdlc_data;
/* Serializes access to hdlc_data and non_hdlc_data */
static struct mutex apps_data_mutex;

#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF

#ifdef DIAG_DEBUG
uint16_t diag_debug_mask;
void *diag_ipc_log;
#endif

static void diag_md_session_close(struct diag_md_session_t *session_info);
172
173/*
174 * Returns the next delayed rsp id. If wrapping is enabled,
175 * wraps the delayed rsp id to DIAGPKT_MAX_DELAYED_RSP.
176 */
177static uint16_t diag_get_next_delayed_rsp_id(void)
178{
179 uint16_t rsp_id = 0;
180
181 mutex_lock(&driver->delayed_rsp_mutex);
182 rsp_id = driver->delayed_rsp_id;
183 if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
184 rsp_id++;
185 else {
186 if (wrap_enabled) {
187 rsp_id = 1;
188 wrap_count++;
189 } else
190 rsp_id = DIAGPKT_MAX_DELAYED_RSP;
191 }
192 driver->delayed_rsp_id = rsp_id;
193 mutex_unlock(&driver->delayed_rsp_mutex);
194
195 return rsp_id;
196}
197
198static int diag_switch_logging(struct diag_logging_mode_param_t *param);
199
/*
 * Copy 'length' bytes of 'data' out to the user buffer 'buf', bounds-
 * checked against 'count' (the caller's total buffer size), using the
 * caller's local 'ret' as the running offset. On failure 'ret' is set
 * to -EFAULT and the 'break' exits the do/while immediately, so the
 * error code is preserved. (The previous version fell through to
 * 'ret += length' after setting -EFAULT, so callers checking
 * 'ret == -EFAULT' could never see the failure.)
 */
#define COPY_USER_SPACE_OR_ERR(buf, data, length)	\
do {							\
	if ((count < ret+length) || (copy_to_user(buf,	\
			(void *)&data, length))) {	\
		ret = -EFAULT;				\
		break;					\
	}						\
	ret += length;					\
} while (0)
208
/*
 * Drain-timer callback: defer the actual flush of buffered apps data
 * to diag_drain_work_fn, run on driver->diag_wq.
 */
static void drain_timer_func(unsigned long data)
{
	queue_work(driver->diag_wq, &(driver->diag_drain_work));
}
213
214static void diag_drain_apps_data(struct diag_apps_data_t *data)
215{
216 int err = 0;
217
218 if (!data || !data->buf)
219 return;
220
221 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
222 data->ctxt);
223 if (err)
224 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
225
226 data->buf = NULL;
227 data->len = 0;
228}
229
/* Work item: push HDLC-support updates to all user-space clients. */
void diag_update_user_client_work_fn(struct work_struct *work)
{
	diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
}
234
/* Work item: push HDLC-support updates to memory-device (ODL) clients. */
static void diag_update_md_client_work_fn(struct work_struct *work)
{
	diag_update_md_clients(HDLC_SUPPORT_TYPE);
}
239
240void diag_drain_work_fn(struct work_struct *work)
241{
242 struct diag_md_session_t *session_info = NULL;
243 uint8_t hdlc_disabled = 0;
244
245 timer_in_progress = 0;
246 mutex_lock(&apps_data_mutex);
247 session_info = diag_md_session_get_peripheral(APPS_DATA);
248 if (session_info)
249 hdlc_disabled = session_info->hdlc_disabled;
250 else
251 hdlc_disabled = driver->hdlc_disabled;
252
253 if (!hdlc_disabled)
254 diag_drain_apps_data(&hdlc_data);
255 else
256 diag_drain_apps_data(&non_hdlc_data);
257 mutex_unlock(&apps_data_mutex);
258}
259
260void check_drain_timer(void)
261{
262 int ret = 0;
263
264 if (!timer_in_progress) {
265 timer_in_progress = 1;
266 ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
267 }
268}
269
/*
 * Claim client_map slot i for the calling process and attach a
 * diagchar_priv (holding the tgid) to the file. If the kmalloc
 * fails, file->private_data is left NULL but the slot is still
 * claimed. Caller holds driver->diagchar_mutex.
 */
void diag_add_client(int i, struct file *file)
{
	struct diagchar_priv *diagpriv_data;

	driver->client_map[i].pid = current->tgid;
	diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
							GFP_KERNEL);
	if (diagpriv_data)
		diagpriv_data->pid = current->tgid;
	file->private_data = diagpriv_data;
	/* strlcpy bounds and terminates; the explicit NUL is belt-and-braces */
	strlcpy(driver->client_map[i].name, current->comm, 20);
	driver->client_map[i].name[19] = '\0';
}
283
284static void diag_mempool_init(void)
285{
286 uint32_t itemsize = DIAG_MAX_REQ_SIZE;
287 uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
288 uint32_t itemsize_dci = IN_BUF_SIZE;
289 uint32_t itemsize_user = DCI_REQ_BUF_SIZE;
290
291 itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
292 CALLBACK_HDR_SIZE);
293 diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
294 diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
295 diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
296 diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);
297
298 diagmem_init(driver, POOL_TYPE_COPY);
299 diagmem_init(driver, POOL_TYPE_HDLC);
300 diagmem_init(driver, POOL_TYPE_USER);
301 diagmem_init(driver, POOL_TYPE_DCI);
302}
303
304static void diag_mempool_exit(void)
305{
306 diagmem_exit(driver, POOL_TYPE_COPY);
307 diagmem_exit(driver, POOL_TYPE_HDLC);
308 diagmem_exit(driver, POOL_TYPE_USER);
309 diagmem_exit(driver, POOL_TYPE_DCI);
310}
311
312static int diagchar_open(struct inode *inode, struct file *file)
313{
314 int i = 0;
315 void *temp;
316
317 if (driver) {
318 mutex_lock(&driver->diagchar_mutex);
319
320 for (i = 0; i < driver->num_clients; i++)
321 if (driver->client_map[i].pid == 0)
322 break;
323
324 if (i < driver->num_clients) {
325 diag_add_client(i, file);
326 } else {
327 if (i < threshold_client_limit) {
328 driver->num_clients++;
329 temp = krealloc(driver->client_map
330 , (driver->num_clients) * sizeof(struct
331 diag_client_map), GFP_KERNEL);
332 if (!temp)
333 goto fail;
334 else
335 driver->client_map = temp;
336 temp = krealloc(driver->data_ready
337 , (driver->num_clients) * sizeof(int),
338 GFP_KERNEL);
339 if (!temp)
340 goto fail;
341 else
342 driver->data_ready = temp;
343 diag_add_client(i, file);
344 } else {
345 mutex_unlock(&driver->diagchar_mutex);
346 pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
347 pr_err_ratelimited("diag: Cannot open handle %s %d",
348 current->comm, current->tgid);
349 for (i = 0; i < driver->num_clients; i++)
350 pr_debug("%d) %s PID=%d", i, driver->
351 client_map[i].name,
352 driver->client_map[i].pid);
353 return -ENOMEM;
354 }
355 }
356 driver->data_ready[i] = 0x0;
357 driver->data_ready[i] |= MSG_MASKS_TYPE;
358 driver->data_ready[i] |= EVENT_MASKS_TYPE;
359 driver->data_ready[i] |= LOG_MASKS_TYPE;
360 driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
361 driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
362
363 if (driver->ref_count == 0)
364 diag_mempool_init();
365 driver->ref_count++;
366 mutex_unlock(&driver->diagchar_mutex);
367 return 0;
368 }
369 return -ENOMEM;
370
371fail:
372 mutex_unlock(&driver->diagchar_mutex);
373 driver->num_clients--;
374 pr_err_ratelimited("diag: Insufficient memory for new client");
375 return -ENOMEM;
376}
377
378static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
379{
380 uint32_t ret = 0;
381
382 if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
383 ret |= DIAG_CON_APSS;
384 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
385 ret |= DIAG_CON_MPSS;
386 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
387 ret |= DIAG_CON_LPASS;
388 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
389 ret |= DIAG_CON_WCNSS;
390 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
391 ret |= DIAG_CON_SENSORS;
392 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
393 ret |= DIAG_CON_WDSP;
Sreelakshmi Gownipalli588a31d2016-11-02 13:33:43 -0700394 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
395 ret |= DIAG_CON_CDSP;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700396
397 return ret;
398}
/* Accessor for the diag_mask_clear_param module parameter. */
int diag_mask_param(void)
{
	return diag_mask_clear_param;
}
/*
 * Disable all log, message and event masks by pushing the standard
 * "disable" commands through the apps mask processor. Called on USB
 * disconnect (info == NULL) or when an ODL session exits (info set).
 * The per-command return values are deliberately ignored: each
 * command is attempted regardless of earlier failures.
 */
void diag_clear_masks(struct diag_md_session_t *info)
{
	int ret;
	/* Raw diag opcodes for disabling log, msg and event masks */
	char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
	char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
	char cmd_disable_event_mask[] = { 0x60, 0};

	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
	"diag: %s: masks clear request upon %s\n", __func__,
	((info) ? "ODL exit" : "USB Disconnection"));

	ret = diag_process_apps_masks(cmd_disable_log_mask,
			sizeof(cmd_disable_log_mask), info);
	ret = diag_process_apps_masks(cmd_disable_msg_mask,
			sizeof(cmd_disable_msg_mask), info);
	ret = diag_process_apps_masks(cmd_disable_event_mask,
			sizeof(cmd_disable_event_mask), info);
	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
	"diag:%s: masks cleared successfully\n", __func__);
}
423
/*
 * Tear down the memory-device logging session owned by 'pid':
 * optionally clear all masks (gated by diag_mask_clear_param), close
 * the md session and its peripherals, then switch the affected
 * peripherals back to USB mode. No-op when the pid owns no session.
 */
static void diag_close_logging_process(const int pid)
{
	int i;
	int session_peripheral_mask;
	struct diag_md_session_t *session_info = NULL;
	struct diag_logging_mode_param_t params;

	session_info = diag_md_session_get_pid(pid);
	if (!session_info)
		return;

	if (diag_mask_clear_param)
		diag_clear_masks(session_info);

	/* Flag mask-clear in progress; cleared again in diagchar_close */
	mutex_lock(&driver->diag_maskclear_mutex);
	driver->mask_clear = 1;
	mutex_unlock(&driver->diag_maskclear_mutex);

	/* Snapshot the peripheral mask before the session goes away */
	mutex_lock(&driver->diagchar_mutex);
	session_peripheral_mask = session_info->peripheral_mask;
	diag_md_session_close(session_info);
	mutex_unlock(&driver->diagchar_mutex);
	for (i = 0; i < NUM_MD_SESSIONS; i++)
		if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);

	/* Fall back to USB logging for the peripherals this session held */
	params.req_mode = USB_MODE;
	params.mode_param = 0;
	params.peripheral_mask =
		diag_translate_kernel_to_user_mask(session_peripheral_mask);
	mutex_lock(&driver->diagchar_mutex);
	diag_switch_logging(&params);
	mutex_unlock(&driver->diagchar_mutex);
}
458
/*
 * Detach the calling process from the driver on close(): deregister
 * any DCI client state, shut down its memory-device logging session,
 * drop its command registrations, release the mempools when the last
 * reference goes away, and finally free the client_map slot and the
 * file's private data. Returns 0, or a negative errno for a bad
 * file/driver state.
 */
static int diag_remove_client_entry(struct file *file)
{
	int i = -1;
	struct diagchar_priv *diagpriv_data = NULL;
	struct diag_dci_client_tbl *dci_entry = NULL;

	if (!driver)
		return -ENOMEM;

	mutex_lock(&driver->diag_file_mutex);
	if (!file) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -ENOENT;
	}
	if (!(file->private_data)) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -EINVAL;
	}

	diagpriv_data = file->private_data;

	/*
	 * clean up any DCI registrations, if this is a DCI client
	 * This will specially help in case of ungraceful exit of any DCI client
	 * This call will remove any pending registrations of such client
	 */
	mutex_lock(&driver->dci_mutex);
	dci_entry = dci_lookup_client_entry_pid(current->tgid);
	if (dci_entry)
		diag_dci_deinit_client(dci_entry);
	mutex_unlock(&driver->dci_mutex);

	diag_close_logging_process(current->tgid);

	/* Delete the pkt response table entry for the exiting process */
	diag_cmd_remove_reg_by_pid(current->tgid);

	mutex_lock(&driver->diagchar_mutex);
	driver->ref_count--;
	/* Last close releases the shared memory pools */
	if (driver->ref_count == 0)
		diag_mempool_exit();

	/* Free the client_map slot matching this file's recorded pid */
	for (i = 0; i < driver->num_clients; i++) {
		if (diagpriv_data && diagpriv_data->pid ==
			driver->client_map[i].pid) {
			driver->client_map[i].pid = 0;
			kfree(diagpriv_data);
			diagpriv_data = NULL;
			file->private_data = 0;
			break;
		}
	}
	mutex_unlock(&driver->diagchar_mutex);
	mutex_unlock(&driver->diag_file_mutex);
	return 0;
}
/*
 * release() handler: remove the client entry for the exiting process
 * and clear the mask_clear flag set during session teardown.
 */
static int diagchar_close(struct inode *inode, struct file *file)
{
	int ret;

	DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
		current->comm);
	ret = diag_remove_client_entry(file);
	mutex_lock(&driver->diag_maskclear_mutex);
	driver->mask_clear = 0;
	mutex_unlock(&driver->diag_maskclear_mutex);
	return ret;
}
529
530void diag_record_stats(int type, int flag)
531{
532 struct diag_pkt_stats_t *pkt_stats = NULL;
533
534 switch (type) {
535 case DATA_TYPE_EVENT:
536 pkt_stats = &driver->event_stats;
537 break;
538 case DATA_TYPE_F3:
539 pkt_stats = &driver->msg_stats;
540 break;
541 case DATA_TYPE_LOG:
542 pkt_stats = &driver->log_stats;
543 break;
544 case DATA_TYPE_RESPONSE:
545 if (flag != PKT_DROP)
546 return;
547 pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
548 __func__);
549 return;
550 case DATA_TYPE_DELAYED_RESPONSE:
551 /* No counters to increase for Delayed responses */
552 return;
553 default:
554 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
555 __func__, type);
556 return;
557 }
558
559 switch (flag) {
560 case PKT_ALLOC:
561 atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
562 break;
563 case PKT_DROP:
564 atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
565 break;
566 case PKT_RESET:
567 atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
568 atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
569 break;
570 default:
571 pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
572 __func__, flag);
573 return;
574 }
575}
576
577void diag_get_timestamp(char *time_str)
578{
579 struct timeval t;
580 struct tm broken_tm;
581
582 do_gettimeofday(&t);
583 if (!time_str)
584 return;
585 time_to_tm(t.tv_sec, 0, &broken_tm);
586 scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
587 broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
588}
589
590int diag_get_remote(int remote_info)
591{
592 int val = (remote_info < 0) ? -remote_info : remote_info;
593 int remote_val;
594
595 switch (val) {
596 case MDM:
597 case MDM2:
598 case QSC:
599 remote_val = -remote_info;
600 break;
601 default:
602 remote_val = 0;
603 break;
604 }
605
606 return remote_val;
607}
608
/*
 * Classify a registration entry as a polling (keep-alive) command.
 * Only entries registered without a command code (cmd_code ==
 * DIAG_CMD_NO_SUBSYS) can be polling; the subsystem id and code range
 * must then cover one of the known poll commands (status, query-call,
 * query-TMC, diag poll, TDSCDMA status). Returns DIAG_CMD_POLLING or
 * DIAG_CMD_NOT_POLLING, or -EIO for a NULL entry.
 */
int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
{
	int polling = DIAG_CMD_NOT_POLLING;

	if (!entry)
		return -EIO;

	if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
		if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
		    entry->cmd_code_hi >= DIAG_CMD_STATUS &&
		    entry->cmd_code_lo <= DIAG_CMD_STATUS)
			polling = DIAG_CMD_POLLING;
		else if (entry->subsys_id == DIAG_SS_WCDMA &&
			 entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
			 entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
			polling = DIAG_CMD_POLLING;
		else if (entry->subsys_id == DIAG_SS_GSM &&
			 entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
			 entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
			polling = DIAG_CMD_POLLING;
		else if (entry->subsys_id == DIAG_SS_PARAMS &&
			 entry->cmd_code_hi >= DIAG_DIAG_POLL &&
			 entry->cmd_code_lo <= DIAG_DIAG_POLL)
			polling = DIAG_CMD_POLLING;
		else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
			 entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
			 entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
			polling = DIAG_CMD_POLLING;
	}

	return polling;
}
641
642static void diag_cmd_invalidate_polling(int change_flag)
643{
644 int polling = DIAG_CMD_NOT_POLLING;
645 struct list_head *start;
646 struct list_head *temp;
647 struct diag_cmd_reg_t *item = NULL;
648
649 if (change_flag == DIAG_CMD_ADD) {
650 if (driver->polling_reg_flag)
651 return;
652 }
653
654 driver->polling_reg_flag = 0;
655 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
656 item = list_entry(start, struct diag_cmd_reg_t, link);
657 polling = diag_cmd_chk_polling(&item->entry);
658 if (polling == DIAG_CMD_POLLING) {
659 driver->polling_reg_flag = 1;
660 break;
661 }
662 }
663}
664
/*
 * Register a command-code range for processor 'proc'. Peripheral
 * entries (proc != APPS_DATA) are not tied to a pid. The new node is
 * appended to cmd_reg_list under cmd_reg_mutex and the polling flag
 * refreshed. Returns 0 on success, -EINVAL for bad arguments,
 * -ENOMEM on allocation failure.
 */
int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
		     int pid)
{
	struct diag_cmd_reg_t *new_item = NULL;

	if (!new_entry) {
		pr_err("diag: In %s, invalid new entry\n", __func__);
		return -EINVAL;
	}

	if (proc > APPS_DATA) {
		pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
		return -EINVAL;
	}

	/* Only apps-side registrations track the owning pid */
	if (proc != APPS_DATA)
		pid = INVALID_PID;

	new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
	if (!new_item)
		return -ENOMEM;
	kmemleak_not_leak(new_item);

	new_item->pid = pid;
	new_item->proc = proc;
	memcpy(&new_item->entry, new_entry,
	       sizeof(struct diag_cmd_reg_entry_t));
	INIT_LIST_HEAD(&new_item->link);

	mutex_lock(&driver->cmd_reg_mutex);
	list_add_tail(&new_item->link, &driver->cmd_reg_list);
	driver->cmd_reg_count++;
	diag_cmd_invalidate_polling(DIAG_CMD_ADD);
	mutex_unlock(&driver->cmd_reg_mutex);

	return 0;
}
702
703struct diag_cmd_reg_entry_t *diag_cmd_search(
704 struct diag_cmd_reg_entry_t *entry, int proc)
705{
706 struct list_head *start;
707 struct list_head *temp;
708 struct diag_cmd_reg_t *item = NULL;
709 struct diag_cmd_reg_entry_t *temp_entry = NULL;
710
711 if (!entry) {
712 pr_err("diag: In %s, invalid entry\n", __func__);
713 return NULL;
714 }
715
716 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
717 item = list_entry(start, struct diag_cmd_reg_t, link);
Manoj Prabhu Bd9b3b622017-01-17 10:15:53 +0530718 if (&item->entry == NULL) {
Gopikrishna Mogasati9b332372016-11-10 20:03:46 +0530719 pr_err("diag: In %s, unable to search command\n",
720 __func__);
721 return NULL;
722 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700723 temp_entry = &item->entry;
724 if (temp_entry->cmd_code == entry->cmd_code &&
725 temp_entry->subsys_id == entry->subsys_id &&
726 temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
727 temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
728 (proc == item->proc || proc == ALL_PROC)) {
729 return &item->entry;
730 } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
731 entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
732 if (temp_entry->subsys_id == entry->subsys_id &&
733 temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
734 temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
735 (proc == item->proc || proc == ALL_PROC)) {
736 return &item->entry;
737 }
738 } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
739 temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
740 if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
741 (temp_entry->cmd_code_lo <= entry->cmd_code) &&
742 (proc == item->proc || proc == ALL_PROC)) {
743 if (entry->cmd_code == MODE_CMD) {
744 if (entry->subsys_id == RESET_ID &&
745 item->proc != APPS_DATA) {
746 continue;
747 }
748 if (entry->subsys_id != RESET_ID &&
749 item->proc == APPS_DATA) {
750 continue;
751 }
752 }
753 return &item->entry;
754 }
755 }
756 }
757
758 return NULL;
759}
760
761void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
762{
763 struct diag_cmd_reg_t *item = NULL;
764 struct diag_cmd_reg_entry_t *temp_entry;
765
766 if (!entry) {
767 pr_err("diag: In %s, invalid entry\n", __func__);
768 return;
769 }
770
771 mutex_lock(&driver->cmd_reg_mutex);
772 temp_entry = diag_cmd_search(entry, proc);
773 if (temp_entry) {
774 item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
775 if (!item) {
776 mutex_unlock(&driver->cmd_reg_mutex);
777 return;
778 }
779 list_del(&item->link);
780 kfree(item);
781 driver->cmd_reg_count--;
782 }
783 diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
784 mutex_unlock(&driver->cmd_reg_mutex);
785}
786
787void diag_cmd_remove_reg_by_pid(int pid)
788{
789 struct list_head *start;
790 struct list_head *temp;
791 struct diag_cmd_reg_t *item = NULL;
792
793 mutex_lock(&driver->cmd_reg_mutex);
794 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
795 item = list_entry(start, struct diag_cmd_reg_t, link);
796 if (item->pid == pid) {
797 list_del(&item->link);
798 kfree(item);
799 driver->cmd_reg_count--;
800 }
801 }
802 mutex_unlock(&driver->cmd_reg_mutex);
803}
804
805void diag_cmd_remove_reg_by_proc(int proc)
806{
807 struct list_head *start;
808 struct list_head *temp;
809 struct diag_cmd_reg_t *item = NULL;
810
811 mutex_lock(&driver->cmd_reg_mutex);
812 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
813 item = list_entry(start, struct diag_cmd_reg_t, link);
814 if (item->proc == proc) {
815 list_del(&item->link);
816 kfree(item);
817 driver->cmd_reg_count--;
818 }
819 }
820 diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
821 mutex_unlock(&driver->cmd_reg_mutex);
822}
823
/*
 * Drain one DCI client's queued write buffers into the user-space
 * read buffer 'buf' (total size 'count'), starting at offset *pret.
 * Drained secondary buffers are freed back to the DCI pool; command
 * buffers are recycled in place. The accumulated payload length is
 * written at buf+8 when anything was copied. On return *pret holds
 * the new offset. Returns 0 on success, 1 for bad arguments, -EINVAL
 * for an inconsistent offset. If more data remains than 'count' can
 * hold, another drain cycle is kicked off.
 */
static int diag_copy_dci(char __user *buf, size_t count,
			struct diag_dci_client_tbl *entry, int *pret)
{
	int total_data_len = 0;
	int ret = 0;
	int exit_stat = 1;
	uint8_t drain_again = 0;
	struct diag_dci_buffer_t *buf_entry, *temp;

	if (!buf || !entry || !pret)
		return exit_stat;

	ret = *pret;

	/* Reserve room for the total-length word copied at the end */
	ret += sizeof(int);
	if (ret >= count) {
		pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
		       __func__, ret, count);
		return -EINVAL;
	}

	mutex_lock(&entry->write_buf_mutex);
	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
				 buf_track) {

		/* Stop once the user buffer cannot hold the next entry */
		if ((ret + buf_entry->data_len) > count) {
			drain_again = 1;
			break;
		}

		list_del(&buf_entry->buf_track);
		mutex_lock(&buf_entry->data_mutex);
		if ((buf_entry->data_len > 0) &&
		    (buf_entry->in_busy) &&
		    (buf_entry->data)) {
			if (copy_to_user(buf+ret, (void *)buf_entry->data,
					 buf_entry->data_len))
				goto drop;
			ret += buf_entry->data_len;
			total_data_len += buf_entry->data_len;
			diag_ws_on_copy(DIAG_WS_DCI);
drop:
			/* Entry is consumed whether or not the copy worked */
			buf_entry->in_busy = 0;
			buf_entry->data_len = 0;
			buf_entry->in_list = 0;
			if (buf_entry->buf_type == DCI_BUF_CMD) {
				mutex_unlock(&buf_entry->data_mutex);
				continue;
			} else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
				diagmem_free(driver, buf_entry->data,
					     POOL_TYPE_DCI);
				buf_entry->data = NULL;
				mutex_unlock(&buf_entry->data_mutex);
				kfree(buf_entry);
				continue;
			}

		}
		mutex_unlock(&buf_entry->data_mutex);
	}

	if (total_data_len > 0) {
		/* Copy the total data length */
		COPY_USER_SPACE_OR_ERR(buf+8, total_data_len, 4);
		if (ret == -EFAULT)
			goto exit;
		/* Length word was already reserved up front; undo double count */
		ret -= 4;
	} else {
		pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
			 __func__, total_data_len);
	}

	exit_stat = 0;
exit:
	entry->in_service = 0;
	mutex_unlock(&entry->write_buf_mutex);
	*pret = ret;
	if (drain_again)
		dci_drain_data(0);

	return exit_stat;
}
906
907#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Size the bridge-side memory pools (MDM/MDM2/QSC data, DCI and USB
 * structures) and allocate the shared HDLC encode buffer used when
 * forwarding apps data to remote processors. Returns 0 or -ENOMEM.
 */
static int diag_remote_init(void)
{
	diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
			poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
			poolsize_qsc_usb);
	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
	if (!driver->hdlc_encode_buf)
		return -ENOMEM;
	driver->hdlc_encode_buf_len = 0;
	return 0;
}
929
/* Free the shared HDLC encode buffer allocated by diag_remote_init(). */
static void diag_remote_exit(void)
{
	kfree(driver->hdlc_encode_buf);
}
934
935static int diag_send_raw_data_remote(int proc, void *buf, int len,
936 uint8_t hdlc_flag)
937{
938 int err = 0;
939 int max_len = 0;
940 uint8_t retry_count = 0;
941 uint8_t max_retries = 3;
942 uint16_t payload = 0;
943 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
944 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
945 int bridge_index = proc - 1;
946 struct diag_md_session_t *session_info = NULL;
947 uint8_t hdlc_disabled = 0;
948
949 if (!buf)
950 return -EINVAL;
951
952 if (len <= 0) {
953 pr_err("diag: In %s, invalid len: %d", __func__, len);
954 return -EBADMSG;
955 }
956
957 if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
958 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
959 bridge_index);
960 return -EINVAL;
961 }
962
963 do {
964 if (driver->hdlc_encode_buf_len == 0)
965 break;
966 usleep_range(10000, 10100);
967 retry_count++;
968 } while (retry_count < max_retries);
969
970 if (driver->hdlc_encode_buf_len != 0)
971 return -EAGAIN;
972 session_info = diag_md_session_get_peripheral(APPS_DATA);
973 if (session_info)
974 hdlc_disabled = session_info->hdlc_disabled;
975 else
976 hdlc_disabled = driver->hdlc_disabled;
977 if (hdlc_disabled) {
978 payload = *(uint16_t *)(buf + 2);
Gopikrishna Mogasati810223f2017-04-20 16:25:20 +0530979 if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
980 pr_err("diag: Dropping packet, payload size is %d\n",
981 payload);
982 return -EBADMSG;
983 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700984 driver->hdlc_encode_buf_len = payload;
985 /*
986 * Adding 4 bytes for start (1 byte), version (1 byte) and
987 * payload (2 bytes)
988 */
989 memcpy(driver->hdlc_encode_buf, buf + 4, payload);
990 goto send_data;
991 }
992
993 if (hdlc_flag) {
994 if (len > DIAG_MAX_HDLC_BUF_SIZE) {
995 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
996 len);
997 return -EBADMSG;
998 }
999 driver->hdlc_encode_buf_len = len;
1000 memcpy(driver->hdlc_encode_buf, buf, len);
1001 goto send_data;
1002 }
1003
1004 /*
1005 * The worst case length will be twice as the incoming packet length.
1006 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
1007 */
1008 max_len = (2 * len) + 3;
1009 if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
1010 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
1011 max_len);
1012 return -EBADMSG;
1013 }
1014
1015 /* Perform HDLC encoding on incoming data */
1016 send.state = DIAG_STATE_START;
1017 send.pkt = (void *)(buf);
1018 send.last = (void *)(buf + len - 1);
1019 send.terminate = 1;
1020
1021 enc.dest = driver->hdlc_encode_buf;
1022 enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
1023 diag_hdlc_encode(&send, &enc);
1024 driver->hdlc_encode_buf_len = (int)(enc.dest -
1025 (void *)driver->hdlc_encode_buf);
1026
1027send_data:
1028 err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
1029 driver->hdlc_encode_buf_len);
1030 if (err) {
1031 pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
1032 proc, err);
1033 driver->hdlc_encode_buf_len = 0;
1034 }
1035
1036 return err;
1037}
1038
1039static int diag_process_userspace_remote(int proc, void *buf, int len)
1040{
1041 int bridge_index = proc - 1;
1042
1043 if (!buf || len < 0) {
1044 pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
1045 __func__, buf, len);
1046 return -EINVAL;
1047 }
1048
1049 if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
1050 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
1051 bridge_index);
1052 return -EINVAL;
1053 }
1054
1055 driver->user_space_data_busy = 1;
1056 return diagfwd_bridge_write(bridge_index, buf, len);
1057}
1058#else
/* Stubs used when CONFIG_DIAGFWD_BRIDGE_CODE is disabled. */
static int diag_remote_init(void)
{
	return 0;
}

static void diag_remote_exit(void)
{
}

int diagfwd_bridge_init(void)
{
	return 0;
}

void diagfwd_bridge_exit(void)
{
}

/* No bridge support: report zero remote devices. */
uint16_t diag_get_remote_device_mask(void)
{
	return 0;
}

/* Raw remote sends are invalid without bridge support. */
static int diag_send_raw_data_remote(int proc, void *buf, int len,
				     uint8_t hdlc_flag)
{
	return -EINVAL;
}

static int diag_process_userspace_remote(int proc, void *buf, int len)
{
	return 0;
}
1092#endif
1093
/*
 * mask_request_validate - allow-list filter for incoming mask/command
 * requests.
 *
 * Inspects the leading bytes of a request and returns 1 only when the
 * command / subsystem / sub-command combination is one of the explicitly
 * allowed operations below; returns 0 for everything else.
 *
 * NOTE(review): ss_cmd is read via a uint16_t cast of mask_buf + 2; this
 * assumes mask_buf holds at least 4 valid bytes and tolerates the
 * (possibly unaligned) 16-bit load — confirm at the call sites.
 */
static int mask_request_validate(unsigned char mask_buf[])
{
	uint8_t packet_id;
	uint8_t subsys_id;
	uint16_t ss_cmd;

	packet_id = mask_buf[0];

	if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
		subsys_id = mask_buf[1];
		ss_cmd = *(uint16_t *)(mask_buf + 2);
		switch (subsys_id) {
		case DIAG_SS_DIAG:
			/* Only the file-read sub-commands are allowed. */
			if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
				(ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
				(ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
				(ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
				(ss_cmd == DIAG_SS_FILE_READ_APPS))
				return 1;
			break;
		default:
			return 0;
		}
	} else if (packet_id == 0x4B) {
		subsys_id = mask_buf[1];
		ss_cmd = *(uint16_t *)(mask_buf + 2);
		/* Packets with SSID which are allowed */
		switch (subsys_id) {
		case 0x04: /* DIAG_SUBSYS_WCDMA */
			if ((ss_cmd == 0) || (ss_cmd == 0xF))
				return 1;
			break;
		case 0x08: /* DIAG_SUBSYS_GSM */
			if ((ss_cmd == 0) || (ss_cmd == 0x1))
				return 1;
			break;
		case 0x09: /* DIAG_SUBSYS_UMTS */
		case 0x0F: /* DIAG_SUBSYS_CM */
			if (ss_cmd == 0)
				return 1;
			break;
		case 0x0C: /* DIAG_SUBSYS_OS */
			if ((ss_cmd == 2) || (ss_cmd == 0x100))
				return 1; /* MPU and APU */
			break;
		case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
			if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
				return 1;
			else if (ss_cmd == 0x218) /* HDLC Disabled Command*/
				return 0;
			else if (ss_cmd == DIAG_GET_TIME_API)
				return 1;
			else if (ss_cmd == DIAG_SET_TIME_API)
				return 1;
			else if (ss_cmd == DIAG_SWITCH_COMMAND)
				return 1;
			else if (ss_cmd == DIAG_BUFFERING_MODE)
				return 1;
			break;
		case 0x13: /* DIAG_SUBSYS_FS */
			if ((ss_cmd == 0) || (ss_cmd == 0x1))
				return 1;
			break;
		default:
			return 0;
		}
	} else {
		/* Simple (non-subsystem) commands that are always allowed. */
		switch (packet_id) {
		case 0x00: /* Version Number */
		case 0x0C: /* CDMA status packet */
		case 0x1C: /* Diag Version */
		case 0x1D: /* Time Stamp */
		case 0x60: /* Event Report Control */
		case 0x63: /* Status snapshot */
		case 0x73: /* Logging Configuration */
		case 0x7C: /* Extended build ID */
		case 0x7D: /* Extended Message configuration */
		case 0x81: /* Event get mask */
		case 0x82: /* Set the event mask */
			return 1;
		default:
			return 0;
		}
	}
	return 0;
}
1180
1181static void diag_md_session_init(void)
1182{
1183 int i;
1184
1185 mutex_init(&driver->md_session_lock);
1186 driver->md_session_mask = 0;
1187 driver->md_session_mode = DIAG_MD_NONE;
1188 for (i = 0; i < NUM_MD_SESSIONS; i++)
1189 driver->md_session_map[i] = NULL;
1190}
1191
1192static void diag_md_session_exit(void)
1193{
1194 int i;
1195 struct diag_md_session_t *session_info = NULL;
1196
1197 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1198 if (driver->md_session_map[i]) {
1199 session_info = driver->md_session_map[i];
1200 diag_log_mask_free(session_info->log_mask);
1201 kfree(session_info->log_mask);
1202 session_info->log_mask = NULL;
1203 diag_msg_mask_free(session_info->msg_mask);
1204 kfree(session_info->msg_mask);
1205 session_info->msg_mask = NULL;
1206 diag_event_mask_free(session_info->event_mask);
1207 kfree(session_info->event_mask);
1208 session_info->event_mask = NULL;
1209 kfree(session_info);
1210 session_info = NULL;
1211 driver->md_session_map[i] = NULL;
1212 }
1213 }
1214 mutex_destroy(&driver->md_session_lock);
1215 driver->md_session_mask = 0;
1216 driver->md_session_mode = DIAG_MD_NONE;
1217}
1218
1219int diag_md_session_create(int mode, int peripheral_mask, int proc)
1220{
1221 int i;
1222 int err = 0;
1223 struct diag_md_session_t *new_session = NULL;
1224
1225 /*
1226 * If a session is running with a peripheral mask and a new session
1227 * request comes in with same peripheral mask value then return
1228 * invalid param
1229 */
1230 if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
1231 (driver->md_session_mask & peripheral_mask) != 0)
1232 return -EINVAL;
1233
1234 mutex_lock(&driver->md_session_lock);
1235 new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
1236 if (!new_session) {
1237 mutex_unlock(&driver->md_session_lock);
1238 return -ENOMEM;
1239 }
1240
1241 new_session->peripheral_mask = 0;
1242 new_session->pid = current->tgid;
1243 new_session->task = current;
1244
1245 new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
1246 GFP_KERNEL);
1247 if (!new_session->log_mask) {
1248 err = -ENOMEM;
1249 goto fail_peripheral;
1250 }
1251 new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
1252 GFP_KERNEL);
1253 if (!new_session->event_mask) {
1254 err = -ENOMEM;
1255 goto fail_peripheral;
1256 }
1257 new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
1258 GFP_KERNEL);
1259 if (!new_session->msg_mask) {
1260 err = -ENOMEM;
1261 goto fail_peripheral;
1262 }
1263
1264 err = diag_log_mask_copy(new_session->log_mask, &log_mask);
1265 if (err) {
1266 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1267 "return value of log copy. err %d\n", err);
1268 goto fail_peripheral;
1269 }
1270 err = diag_event_mask_copy(new_session->event_mask, &event_mask);
1271 if (err) {
1272 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1273 "return value of event copy. err %d\n", err);
1274 goto fail_peripheral;
1275 }
1276 err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
1277 if (err) {
1278 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1279 "return value of msg copy. err %d\n", err);
1280 goto fail_peripheral;
1281 }
1282 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1283 if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
1284 continue;
1285 if (driver->md_session_map[i] != NULL) {
1286 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1287 "another instance present for %d\n", i);
1288 err = -EEXIST;
1289 goto fail_peripheral;
1290 }
1291 new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
1292 driver->md_session_map[i] = new_session;
1293 driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
1294 }
1295 setup_timer(&new_session->hdlc_reset_timer,
1296 diag_md_hdlc_reset_timer_func,
1297 new_session->pid);
1298
1299 driver->md_session_mode = DIAG_MD_PERIPHERAL;
1300 mutex_unlock(&driver->md_session_lock);
1301 DIAG_LOG(DIAG_DEBUG_USERSPACE,
1302 "created session in peripheral mode\n");
1303 return 0;
1304
1305fail_peripheral:
1306 diag_log_mask_free(new_session->log_mask);
1307 kfree(new_session->log_mask);
1308 new_session->log_mask = NULL;
1309 diag_event_mask_free(new_session->event_mask);
1310 kfree(new_session->event_mask);
1311 new_session->event_mask = NULL;
1312 diag_msg_mask_free(new_session->msg_mask);
1313 kfree(new_session->msg_mask);
1314 new_session->msg_mask = NULL;
1315 kfree(new_session);
1316 new_session = NULL;
1317 mutex_unlock(&driver->md_session_lock);
1318 return err;
1319}
1320
1321static void diag_md_session_close(struct diag_md_session_t *session_info)
1322{
1323 int i;
1324 uint8_t found = 0;
1325
1326 if (!session_info)
1327 return;
1328
1329 mutex_lock(&driver->md_session_lock);
1330 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1331 if (driver->md_session_map[i] != session_info)
1332 continue;
1333 driver->md_session_map[i] = NULL;
1334 driver->md_session_mask &= ~session_info->peripheral_mask;
1335 }
1336 diag_log_mask_free(session_info->log_mask);
1337 kfree(session_info->log_mask);
1338 session_info->log_mask = NULL;
1339 diag_msg_mask_free(session_info->msg_mask);
1340 kfree(session_info->msg_mask);
1341 session_info->msg_mask = NULL;
1342 diag_event_mask_free(session_info->event_mask);
1343 kfree(session_info->event_mask);
1344 session_info->event_mask = NULL;
1345 del_timer(&session_info->hdlc_reset_timer);
1346
1347 for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
1348 if (driver->md_session_map[i] != NULL)
1349 found = 1;
1350 }
1351
1352 driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
1353 kfree(session_info);
1354 session_info = NULL;
1355 mutex_unlock(&driver->md_session_lock);
1356 DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
1357}
1358
1359struct diag_md_session_t *diag_md_session_get_pid(int pid)
1360{
1361 int i;
1362
1363 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1364 if (driver->md_session_map[i] &&
1365 driver->md_session_map[i]->pid == pid)
1366 return driver->md_session_map[i];
1367 }
1368 return NULL;
1369}
1370
1371struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
1372{
1373 if (peripheral >= NUM_MD_SESSIONS)
1374 return NULL;
1375 return driver->md_session_map[peripheral];
1376}
1377
1378static int diag_md_peripheral_switch(struct diag_md_session_t *session_info,
1379 int peripheral_mask, int req_mode) {
1380 int i, bit = 0;
1381
1382 if (!session_info)
1383 return -EINVAL;
1384 if (req_mode != DIAG_USB_MODE || req_mode != DIAG_MEMORY_DEVICE_MODE)
1385 return -EINVAL;
1386
1387 /*
1388 * check that md_session_map for i == session_info,
1389 * if not then race condition occurred and bail
1390 */
1391 mutex_lock(&driver->md_session_lock);
1392 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1393 bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
1394 if (!bit)
1395 continue;
1396 if (req_mode == DIAG_USB_MODE) {
1397 if (driver->md_session_map[i] != session_info) {
1398 mutex_unlock(&driver->md_session_lock);
1399 return -EINVAL;
1400 }
1401 driver->md_session_map[i] = NULL;
1402 driver->md_session_mask &= ~bit;
1403 session_info->peripheral_mask &= ~bit;
1404
1405 } else {
1406 if (driver->md_session_map[i] != NULL) {
1407 mutex_unlock(&driver->md_session_lock);
1408 return -EINVAL;
1409 }
1410 driver->md_session_map[i] = session_info;
1411 driver->md_session_mask |= bit;
1412 session_info->peripheral_mask |= bit;
1413
1414 }
1415 }
1416
1417 driver->md_session_mode = DIAG_MD_PERIPHERAL;
1418 mutex_unlock(&driver->md_session_lock);
1419 DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
1420 peripheral_mask, req_mode);
1421}
1422
/*
 * diag_md_session_check - decide whether a logging-mode switch request
 * may proceed and adjust session ownership accordingly.
 *
 * @curr_mode:   current logging mode (USB / memory-device / multi).
 * @req_mode:    requested mode (must be USB or memory-device).
 * @param:       requested peripherals and mode from userspace.
 * @change_mode: out-param, set to 1 iff the caller should go ahead and
 *               perform the mux-level mode switch.
 *
 * Returns 0 (possibly with *change_mode left 0 meaning "nothing to do"),
 * or a negative errno when the request conflicts with another session.
 */
static int diag_md_session_check(int curr_mode, int req_mode,
				 const struct diag_logging_mode_param_t *param,
				 uint8_t *change_mode)
{
	int i, bit = 0, err = 0;
	int change_mask = 0;
	struct diag_md_session_t *session_info = NULL;

	if (!param || !change_mode)
		return -EIO;

	*change_mode = 0;

	switch (curr_mode) {
	case DIAG_USB_MODE:
	case DIAG_MEMORY_DEVICE_MODE:
	case DIAG_MULTI_MODE:
		break;
	default:
		return -EINVAL;
	}

	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
		return -EINVAL;

	if (req_mode == DIAG_USB_MODE) {
		if (curr_mode == DIAG_USB_MODE)
			return 0;
		/* No sessions at all: a plain global switch is enough. */
		if (driver->md_session_mode == DIAG_MD_NONE
		    && driver->md_session_mask == 0 && driver->logging_mask) {
			*change_mode = 1;
			return 0;
		}

		/*
		 * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
		 * Check if requested peripherals are already in usb mode
		 */
		for (i = 0; i < NUM_MD_SESSIONS; i++) {
			bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
			if (!bit)
				continue;
			if (bit & driver->logging_mask)
				change_mask |= bit;
		}
		if (!change_mask)
			return 0;

		/*
		 * Change is needed. Check if this md_session has set all the
		 * requested peripherals. If another md session set a requested
		 * peripheral then we cannot switch that peripheral to USB.
		 * If this session owns all the requested peripherals, then
		 * call function to switch the modes/masks for the md_session
		 */
		session_info = diag_md_session_get_pid(current->tgid);
		if (!session_info) {
			*change_mode = 1;
			return 0;
		}
		if ((change_mask & session_info->peripheral_mask)
							!= change_mask) {
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
				 "Another MD Session owns a requested peripheral\n");
			return -EINVAL;
		}
		*change_mode = 1;

		/* If all peripherals are being set to USB Mode, call close */
		if (~change_mask & session_info->peripheral_mask) {
			/* Session keeps some peripherals: partial switch. */
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else
			diag_md_session_close(session_info);

		return err;

	} else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
		/*
		 * Get bit mask that represents what peripherals already have
		 * been set. Check that requested peripherals already set are
		 * owned by this md session
		 */
		change_mask = driver->md_session_mask & param->peripheral_mask;
		session_info = diag_md_session_get_pid(current->tgid);

		if (session_info) {
			if ((session_info->peripheral_mask & change_mask)
							!= change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
					 "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			/*
			 * NOTE(review): DIAG_USB_MODE is passed here even
			 * though the request is for memory-device mode —
			 * confirm this is intentional (it releases the
			 * already-owned peripherals before re-claiming).
			 */
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else {
			if (change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
					 "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			err = diag_md_session_create(DIAG_MD_PERIPHERAL,
				param->peripheral_mask, DIAG_LOCAL_PROC);
		}
		*change_mode = 1;
		return err;
	}
	return -EINVAL;
}
1532
1533static uint32_t diag_translate_mask(uint32_t peripheral_mask)
1534{
1535 uint32_t ret = 0;
1536
1537 if (peripheral_mask & DIAG_CON_APSS)
1538 ret |= (1 << APPS_DATA);
1539 if (peripheral_mask & DIAG_CON_MPSS)
1540 ret |= (1 << PERIPHERAL_MODEM);
1541 if (peripheral_mask & DIAG_CON_LPASS)
1542 ret |= (1 << PERIPHERAL_LPASS);
1543 if (peripheral_mask & DIAG_CON_WCNSS)
1544 ret |= (1 << PERIPHERAL_WCNSS);
1545 if (peripheral_mask & DIAG_CON_SENSORS)
1546 ret |= (1 << PERIPHERAL_SENSORS);
1547 if (peripheral_mask & DIAG_CON_WDSP)
1548 ret |= (1 << PERIPHERAL_WDSP);
Sreelakshmi Gownipalli588a31d2016-11-02 13:33:43 -07001549 if (peripheral_mask & DIAG_CON_CDSP)
1550 ret |= (1 << PERIPHERAL_CDSP);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001551
1552 return ret;
1553}
1554
/*
 * diag_switch_logging - handle a userspace request to switch the logging
 * mode (USB vs memory-device) for a set of peripherals.
 *
 * Translates the userspace peripheral mask, validates the requested mode,
 * consults diag_md_session_check() for session-ownership conflicts, and
 * if a switch is actually needed performs it via diag_mux_switch_logging()
 * and updates the real-time voting accordingly.
 *
 * Returns 0 on success (including "nothing to change"), negative errno
 * otherwise. Called with driver->diagchar_mutex held by the ioctl path.
 */
static int diag_switch_logging(struct diag_logging_mode_param_t *param)
{
	int new_mode;
	int curr_mode;
	int err = 0;
	uint8_t do_switch = 1;
	uint32_t peripheral_mask = 0;

	if (!param)
		return -EINVAL;

	if (!param->peripheral_mask) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "asking for mode switch with no peripheral mask set\n");
		return -EINVAL;
	}

	/* Convert DIAG_CON_* bits into internal peripheral bits. */
	peripheral_mask = diag_translate_mask(param->peripheral_mask);
	param->peripheral_mask = peripheral_mask;

	switch (param->req_mode) {
	case CALLBACK_MODE:
	case UART_MODE:
	case SOCKET_MODE:
	case MEMORY_DEVICE_MODE:
		/* All non-USB transports are driven as memory-device mode. */
		new_mode = DIAG_MEMORY_DEVICE_MODE;
		break;
	case USB_MODE:
		new_mode = DIAG_USB_MODE;
		break;
	default:
		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
		       __func__, param->req_mode);
		return -EINVAL;
	}

	curr_mode = driver->logging_mode;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "request to switch logging from %d mask:%0x to %d mask:%0x\n",
		 curr_mode, driver->md_session_mask, new_mode, peripheral_mask);

	err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "err from diag_md_session_check, err: %d\n", err);
		return err;
	}

	if (do_switch == 0) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "not switching modes c: %d n: %d\n",
			 curr_mode, new_mode);
		return 0;
	}

	diag_ws_reset(DIAG_WS_MUX);
	err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
	if (err) {
		pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
		       __func__, curr_mode, new_mode, err);
		driver->logging_mode = curr_mode;
		goto fail;
	}
	driver->logging_mode = new_mode;
	driver->logging_mask = peripheral_mask;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);

	/* Update to take peripheral_mask */
	if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
		/* Leaving MD mode: force real-time for all processors. */
		diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
					   MODE_REALTIME, ALL_PROC);
	} else {
		diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
				      ALL_PROC);
	}

	if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
	      curr_mode == DIAG_USB_MODE)) {
		queue_work(driver->diag_real_time_wq,
			   &driver->diag_real_time_work);
	}

	return 0;
fail:
	return err;
}
1642
1643static int diag_ioctl_dci_reg(unsigned long ioarg)
1644{
1645 int result = -EINVAL;
1646 struct diag_dci_reg_tbl_t dci_reg_params;
1647
1648 if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
1649 sizeof(struct diag_dci_reg_tbl_t)))
1650 return -EFAULT;
1651
1652 result = diag_dci_register_client(&dci_reg_params);
1653
1654 return result;
1655}
1656
1657static int diag_ioctl_dci_health_stats(unsigned long ioarg)
1658{
1659 int result = -EINVAL;
1660 struct diag_dci_health_stats_proc stats;
1661
1662 if (copy_from_user(&stats, (void __user *)ioarg,
1663 sizeof(struct diag_dci_health_stats_proc)))
1664 return -EFAULT;
1665
1666 result = diag_dci_copy_health_stats(&stats);
1667 if (result == DIAG_DCI_NO_ERROR) {
1668 if (copy_to_user((void __user *)ioarg, &stats,
1669 sizeof(struct diag_dci_health_stats_proc)))
1670 return -EFAULT;
1671 }
1672
1673 return result;
1674}
1675
1676static int diag_ioctl_dci_log_status(unsigned long ioarg)
1677{
1678 struct diag_log_event_stats le_stats;
1679 struct diag_dci_client_tbl *dci_client = NULL;
1680
1681 if (copy_from_user(&le_stats, (void __user *)ioarg,
1682 sizeof(struct diag_log_event_stats)))
1683 return -EFAULT;
1684
1685 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1686 if (!dci_client)
1687 return DIAG_DCI_NOT_SUPPORTED;
1688 le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
1689 if (copy_to_user((void __user *)ioarg, &le_stats,
1690 sizeof(struct diag_log_event_stats)))
1691 return -EFAULT;
1692
1693 return DIAG_DCI_NO_ERROR;
1694}
1695
1696static int diag_ioctl_dci_event_status(unsigned long ioarg)
1697{
1698 struct diag_log_event_stats le_stats;
1699 struct diag_dci_client_tbl *dci_client = NULL;
1700
1701 if (copy_from_user(&le_stats, (void __user *)ioarg,
1702 sizeof(struct diag_log_event_stats)))
1703 return -EFAULT;
1704
1705 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1706 if (!dci_client)
1707 return DIAG_DCI_NOT_SUPPORTED;
1708
1709 le_stats.is_set = diag_dci_query_event_mask(dci_client, le_stats.code);
1710 if (copy_to_user((void __user *)ioarg, &le_stats,
1711 sizeof(struct diag_log_event_stats)))
1712 return -EFAULT;
1713
1714 return DIAG_DCI_NO_ERROR;
1715}
1716
1717static int diag_ioctl_lsm_deinit(void)
1718{
1719 int i;
1720
Mohit Aggarwal9f694302017-07-06 10:16:52 +05301721 mutex_lock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001722 for (i = 0; i < driver->num_clients; i++)
1723 if (driver->client_map[i].pid == current->tgid)
1724 break;
1725
Mohit Aggarwal9f694302017-07-06 10:16:52 +05301726 if (i == driver->num_clients) {
1727 mutex_unlock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001728 return -EINVAL;
Mohit Aggarwal9f694302017-07-06 10:16:52 +05301729 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001730
1731 driver->data_ready[i] |= DEINIT_TYPE;
Mohit Aggarwal9f694302017-07-06 10:16:52 +05301732 mutex_unlock(&driver->diagchar_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001733 wake_up_interruptible(&driver->wait_q);
1734
1735 return 1;
1736}
1737
1738static int diag_ioctl_vote_real_time(unsigned long ioarg)
1739{
1740 int real_time = 0;
1741 int temp_proc = ALL_PROC;
1742 struct real_time_vote_t vote;
1743 struct diag_dci_client_tbl *dci_client = NULL;
1744
1745 if (copy_from_user(&vote, (void __user *)ioarg,
1746 sizeof(struct real_time_vote_t)))
1747 return -EFAULT;
1748
1749 if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
1750 vote.real_time_vote > MODE_UNKNOWN ||
1751 vote.client_id < 0) {
1752 pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
1753 __func__, vote.proc, vote.real_time_vote,
1754 vote.client_id);
1755 return -EINVAL;
1756 }
1757
1758 driver->real_time_update_busy++;
1759 if (vote.proc == DIAG_PROC_DCI) {
1760 dci_client = diag_dci_get_client_entry(vote.client_id);
1761 if (!dci_client) {
1762 driver->real_time_update_busy--;
1763 return DIAG_DCI_NOT_SUPPORTED;
1764 }
1765 diag_dci_set_real_time(dci_client, vote.real_time_vote);
1766 real_time = diag_dci_get_cumulative_real_time(
1767 dci_client->client_info.token);
1768 diag_update_real_time_vote(vote.proc, real_time,
1769 dci_client->client_info.token);
1770 } else {
1771 real_time = vote.real_time_vote;
1772 temp_proc = vote.client_id;
1773 diag_update_real_time_vote(vote.proc, real_time,
1774 temp_proc);
1775 }
1776 queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
1777 return 0;
1778}
1779
1780static int diag_ioctl_get_real_time(unsigned long ioarg)
1781{
1782 int i;
1783 int retry_count = 0;
1784 int timer = 0;
1785 struct real_time_query_t rt_query;
1786
1787 if (copy_from_user(&rt_query, (void __user *)ioarg,
1788 sizeof(struct real_time_query_t)))
1789 return -EFAULT;
1790 while (retry_count < 3) {
1791 if (driver->real_time_update_busy > 0) {
1792 retry_count++;
1793 /*
1794 * The value 10000 was chosen empirically as an
1795 * optimum value in order to give the work in
1796 * diag_real_time_wq to complete processing.
1797 */
1798 for (timer = 0; timer < 5; timer++)
1799 usleep_range(10000, 10100);
1800 } else {
1801 break;
1802 }
1803 }
1804
1805 if (driver->real_time_update_busy > 0)
1806 return -EAGAIN;
1807
1808 if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
1809 pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
1810 __func__);
1811 return -EINVAL;
1812 }
1813 rt_query.real_time = driver->real_time_mode[rt_query.proc];
1814 /*
1815 * For the local processor, if any of the peripherals is in buffering
1816 * mode, overwrite the value of real time with UNKNOWN_MODE
1817 */
1818 if (rt_query.proc == DIAG_LOCAL_PROC) {
1819 for (i = 0; i < NUM_PERIPHERALS; i++) {
1820 if (!driver->feature[i].peripheral_buffering)
1821 continue;
1822 switch (driver->buffering_mode[i].mode) {
1823 case DIAG_BUFFERING_MODE_CIRCULAR:
1824 case DIAG_BUFFERING_MODE_THRESHOLD:
1825 rt_query.real_time = MODE_UNKNOWN;
1826 break;
1827 }
1828 }
1829 }
1830
1831 if (copy_to_user((void __user *)ioarg, &rt_query,
1832 sizeof(struct real_time_query_t)))
1833 return -EFAULT;
1834
1835 return 0;
1836}
1837
1838static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
1839{
1840 struct diag_buffering_mode_t params;
1841
1842 if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
1843 return -EFAULT;
1844
1845 if (params.peripheral >= NUM_PERIPHERALS)
1846 return -EINVAL;
1847
1848 mutex_lock(&driver->mode_lock);
1849 driver->buffering_flag[params.peripheral] = 1;
1850 mutex_unlock(&driver->mode_lock);
1851
1852 return diag_send_peripheral_buffering_mode(&params);
1853}
1854
1855static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
1856{
1857 uint8_t peripheral;
1858
1859 if (copy_from_user(&peripheral, (void __user *)ioarg, sizeof(uint8_t)))
1860 return -EFAULT;
1861
1862 if (peripheral >= NUM_PERIPHERALS) {
1863 pr_err("diag: In %s, invalid peripheral %d\n", __func__,
1864 peripheral);
1865 return -EINVAL;
1866 }
1867
1868 if (!driver->feature[peripheral].peripheral_buffering) {
1869 pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
1870 __func__, peripheral);
1871 return -EIO;
1872 }
1873
1874 return diag_send_peripheral_drain_immediate(peripheral);
1875}
1876
1877static int diag_ioctl_dci_support(unsigned long ioarg)
1878{
1879 struct diag_dci_peripherals_t dci_support;
1880 int result = -EINVAL;
1881
1882 if (copy_from_user(&dci_support, (void __user *)ioarg,
1883 sizeof(struct diag_dci_peripherals_t)))
1884 return -EFAULT;
1885
1886 result = diag_dci_get_support_list(&dci_support);
1887 if (result == DIAG_DCI_NO_ERROR)
1888 if (copy_to_user((void __user *)ioarg, &dci_support,
1889 sizeof(struct diag_dci_peripherals_t)))
1890 return -EFAULT;
1891
1892 return result;
1893}
1894
1895static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
1896{
1897 uint8_t hdlc_support;
1898 struct diag_md_session_t *session_info = NULL;
1899
1900 session_info = diag_md_session_get_pid(current->tgid);
1901 if (copy_from_user(&hdlc_support, (void __user *)ioarg,
1902 sizeof(uint8_t)))
1903 return -EFAULT;
1904 mutex_lock(&driver->hdlc_disable_mutex);
1905 if (session_info) {
1906 mutex_lock(&driver->md_session_lock);
1907 session_info->hdlc_disabled = hdlc_support;
1908 mutex_unlock(&driver->md_session_lock);
1909 } else
1910 driver->hdlc_disabled = hdlc_support;
1911 mutex_unlock(&driver->hdlc_disable_mutex);
1912 diag_update_md_clients(HDLC_SUPPORT_TYPE);
1913
1914 return 0;
1915}
1916
1917static int diag_ioctl_register_callback(unsigned long ioarg)
1918{
1919 int err = 0;
1920 struct diag_callback_reg_t reg;
1921
1922 if (copy_from_user(&reg, (void __user *)ioarg,
1923 sizeof(struct diag_callback_reg_t))) {
1924 return -EFAULT;
1925 }
1926
1927 if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
1928 pr_err("diag: In %s, invalid proc %d for callback registration\n",
1929 __func__, reg.proc);
1930 return -EINVAL;
1931 }
1932
1933 if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
1934 return -EIO;
1935
1936 return err;
1937}
1938
1939static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
1940{
1941 int i;
1942 int err = 0;
1943 uint32_t count = 0;
1944 struct diag_cmd_reg_entry_t *entries = NULL;
1945 const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
1946
1947
1948 if (!reg_tbl) {
1949 pr_err("diag: In %s, invalid registration table\n", __func__);
1950 return -EINVAL;
1951 }
1952
1953 count = reg_tbl->count;
1954 if ((UINT_MAX / entry_len) < count) {
1955 pr_warn("diag: In %s, possbile integer overflow.\n", __func__);
1956 return -EFAULT;
1957 }
1958
1959 entries = kzalloc(count * entry_len, GFP_KERNEL);
1960 if (!entries)
1961 return -ENOMEM;
1962
1963
1964 err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
1965 if (err) {
1966 pr_err("diag: In %s, error copying data from userspace, err: %d\n",
1967 __func__, err);
1968 kfree(entries);
1969 return -EFAULT;
1970 }
1971
1972 for (i = 0; i < count; i++) {
1973 err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
1974 if (err) {
1975 pr_err("diag: In %s, unable to register command, err: %d\n",
1976 __func__, err);
1977 break;
1978 }
1979 }
1980
1981 kfree(entries);
1982 return err;
1983}
1984
1985static int diag_ioctl_cmd_reg(unsigned long ioarg)
1986{
1987 struct diag_cmd_reg_tbl_t reg_tbl;
1988
1989 if (copy_from_user(&reg_tbl, (void __user *)ioarg,
1990 sizeof(struct diag_cmd_reg_tbl_t))) {
1991 return -EFAULT;
1992 }
1993
1994 return diag_cmd_register_tbl(&reg_tbl);
1995}
1996
/* Drop every command registration owned by the calling process. */
static int diag_ioctl_cmd_dereg(void)
{
	diag_cmd_remove_reg_by_pid(current->tgid);
	return 0;
}
2002
2003#ifdef CONFIG_COMPAT
2004/*
2005 * @sync_obj_name: name of the synchronization object associated with this proc
2006 * @count: number of entries in the bind
2007 * @params: the actual packet registrations
2008 */
2009struct diag_cmd_reg_tbl_compat_t {
2010 char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
2011 uint32_t count;
2012 compat_uptr_t entries;
2013};
2014
2015static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
2016{
2017 struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
2018 struct diag_cmd_reg_tbl_t reg_tbl;
2019
2020 if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
2021 sizeof(struct diag_cmd_reg_tbl_compat_t))) {
2022 return -EFAULT;
2023 }
2024
2025 strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
2026 MAX_SYNC_OBJ_NAME_SIZE);
2027 reg_tbl.count = reg_tbl_compat.count;
2028 reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
2029 (uintptr_t)reg_tbl_compat.entries;
2030
2031 return diag_cmd_register_tbl(&reg_tbl);
2032}
2033
/*
 * diagchar_compat_ioctl - ioctl entry point for 32-bit userspace on a
 * 64-bit kernel. Mirrors diagchar_ioctl(); the only compat-specific
 * command is DIAG_IOCTL_COMMAND_REG, whose table carries a compat_uptr_t.
 * Most handlers copy their own argument structs; DCI commands serialize
 * on dci_mutex and logging switches on diagchar_mutex.
 */
long diagchar_compat_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int result = -EINVAL;
	int client_id = 0;
	uint16_t delayed_rsp_id = 0;
	uint16_t remote_dev;
	struct diag_dci_client_tbl *dci_client = NULL;
	struct diag_logging_mode_param_t mode_param;

	switch (iocmd) {
	case DIAG_IOCTL_COMMAND_REG:
		/* Compat-specific: table header holds a compat_uptr_t. */
		result = diag_ioctl_cmd_reg_compat(ioarg);
		break;
	case DIAG_IOCTL_COMMAND_DEREG:
		result = diag_ioctl_cmd_dereg();
		break;
	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
		delayed_rsp_id = diag_get_next_delayed_rsp_id();
		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
				 sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 0;
		break;
	case DIAG_IOCTL_DCI_REG:
		result = diag_ioctl_dci_reg(ioarg);
		break;
	case DIAG_IOCTL_DCI_DEINIT:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
				   sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		dci_client = diag_dci_get_client_entry(client_id);
		if (!dci_client) {
			mutex_unlock(&driver->dci_mutex);
			return DIAG_DCI_NOT_SUPPORTED;
		}
		result = diag_dci_deinit_client(dci_client);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_SUPPORT:
		result = diag_ioctl_dci_support(ioarg);
		break;
	case DIAG_IOCTL_DCI_HEALTH_STATS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_health_stats(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_LOG_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_log_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_EVENT_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_event_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_LOGS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
				   sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_log_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user(&client_id, (void __user *)ioarg,
				   sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_event_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_LSM_DEINIT:
		result = diag_ioctl_lsm_deinit();
		break;
	case DIAG_IOCTL_SWITCH_LOGGING:
		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
				   sizeof(mode_param)))
			return -EFAULT;
		mutex_lock(&driver->diagchar_mutex);
		result = diag_switch_logging(&mode_param);
		mutex_unlock(&driver->diagchar_mutex);
		break;
	case DIAG_IOCTL_REMOTE_DEV:
		remote_dev = diag_get_remote_device_mask();
		if (copy_to_user((void __user *)ioarg, &remote_dev,
				 sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 1;
		break;
	case DIAG_IOCTL_VOTE_REAL_TIME:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_vote_real_time(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_GET_REAL_TIME:
		result = diag_ioctl_get_real_time(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
		result = diag_ioctl_set_buffering_mode(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
		result = diag_ioctl_peripheral_drain_immediate(ioarg);
		break;
	case DIAG_IOCTL_REGISTER_CALLBACK:
		result = diag_ioctl_register_callback(ioarg);
		break;
	case DIAG_IOCTL_HDLC_TOGGLE:
		result = diag_ioctl_hdlc_toggle(ioarg);
		break;
	}
	return result;
}
2157#endif
2158
2159long diagchar_ioctl(struct file *filp,
2160 unsigned int iocmd, unsigned long ioarg)
2161{
2162 int result = -EINVAL;
2163 int client_id = 0;
2164 uint16_t delayed_rsp_id;
2165 uint16_t remote_dev;
2166 struct diag_dci_client_tbl *dci_client = NULL;
2167 struct diag_logging_mode_param_t mode_param;
2168
2169 switch (iocmd) {
2170 case DIAG_IOCTL_COMMAND_REG:
2171 result = diag_ioctl_cmd_reg(ioarg);
2172 break;
2173 case DIAG_IOCTL_COMMAND_DEREG:
2174 result = diag_ioctl_cmd_dereg();
2175 break;
2176 case DIAG_IOCTL_GET_DELAYED_RSP_ID:
2177 delayed_rsp_id = diag_get_next_delayed_rsp_id();
2178 if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
2179 sizeof(uint16_t)))
2180 result = -EFAULT;
2181 else
2182 result = 0;
2183 break;
2184 case DIAG_IOCTL_DCI_REG:
2185 result = diag_ioctl_dci_reg(ioarg);
2186 break;
2187 case DIAG_IOCTL_DCI_DEINIT:
2188 mutex_lock(&driver->dci_mutex);
2189 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2190 sizeof(int))) {
2191 mutex_unlock(&driver->dci_mutex);
2192 return -EFAULT;
2193 }
2194 dci_client = diag_dci_get_client_entry(client_id);
2195 if (!dci_client) {
2196 mutex_unlock(&driver->dci_mutex);
2197 return DIAG_DCI_NOT_SUPPORTED;
2198 }
2199 result = diag_dci_deinit_client(dci_client);
2200 mutex_unlock(&driver->dci_mutex);
2201 break;
2202 case DIAG_IOCTL_DCI_SUPPORT:
2203 result = diag_ioctl_dci_support(ioarg);
2204 break;
2205 case DIAG_IOCTL_DCI_HEALTH_STATS:
2206 mutex_lock(&driver->dci_mutex);
2207 result = diag_ioctl_dci_health_stats(ioarg);
2208 mutex_unlock(&driver->dci_mutex);
2209 break;
2210 case DIAG_IOCTL_DCI_LOG_STATUS:
2211 mutex_lock(&driver->dci_mutex);
2212 result = diag_ioctl_dci_log_status(ioarg);
2213 mutex_unlock(&driver->dci_mutex);
2214 break;
2215 case DIAG_IOCTL_DCI_EVENT_STATUS:
2216 result = diag_ioctl_dci_event_status(ioarg);
2217 break;
2218 case DIAG_IOCTL_DCI_CLEAR_LOGS:
2219 mutex_lock(&driver->dci_mutex);
2220 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2221 sizeof(int))) {
2222 mutex_unlock(&driver->dci_mutex);
2223 return -EFAULT;
2224 }
2225 result = diag_dci_clear_log_mask(client_id);
2226 mutex_unlock(&driver->dci_mutex);
2227 break;
2228 case DIAG_IOCTL_DCI_CLEAR_EVENTS:
2229 mutex_lock(&driver->dci_mutex);
2230 if (copy_from_user(&client_id, (void __user *)ioarg,
2231 sizeof(int))) {
2232 mutex_unlock(&driver->dci_mutex);
2233 return -EFAULT;
2234 }
2235 result = diag_dci_clear_event_mask(client_id);
2236 mutex_unlock(&driver->dci_mutex);
2237 break;
2238 case DIAG_IOCTL_LSM_DEINIT:
2239 result = diag_ioctl_lsm_deinit();
2240 break;
2241 case DIAG_IOCTL_SWITCH_LOGGING:
2242 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2243 sizeof(mode_param)))
2244 return -EFAULT;
2245 mutex_lock(&driver->diagchar_mutex);
2246 result = diag_switch_logging(&mode_param);
2247 mutex_unlock(&driver->diagchar_mutex);
2248 break;
2249 case DIAG_IOCTL_REMOTE_DEV:
2250 remote_dev = diag_get_remote_device_mask();
2251 if (copy_to_user((void __user *)ioarg, &remote_dev,
2252 sizeof(uint16_t)))
2253 result = -EFAULT;
2254 else
2255 result = 1;
2256 break;
2257 case DIAG_IOCTL_VOTE_REAL_TIME:
2258 mutex_lock(&driver->dci_mutex);
2259 result = diag_ioctl_vote_real_time(ioarg);
2260 mutex_unlock(&driver->dci_mutex);
2261 break;
2262 case DIAG_IOCTL_GET_REAL_TIME:
2263 result = diag_ioctl_get_real_time(ioarg);
2264 break;
2265 case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
2266 result = diag_ioctl_set_buffering_mode(ioarg);
2267 break;
2268 case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
2269 result = diag_ioctl_peripheral_drain_immediate(ioarg);
2270 break;
2271 case DIAG_IOCTL_REGISTER_CALLBACK:
2272 result = diag_ioctl_register_callback(ioarg);
2273 break;
2274 case DIAG_IOCTL_HDLC_TOGGLE:
2275 result = diag_ioctl_hdlc_toggle(ioarg);
2276 break;
2277 }
2278 return result;
2279}
2280
/*
 * diag_process_apps_data_hdlc - HDLC-encode an apps packet and aggregate it
 *
 * @buf: kernel buffer holding the raw packet
 * @len: length of @buf in bytes; must be > 0
 * @pkt_type: diag data type; DATA_TYPE_RESPONSE forces an immediate flush
 *
 * Encodes @buf into the shared hdlc_data aggregation buffer, flushing the
 * buffer over the mux whenever the worst-case encoding would not fit.
 * Returns PKT_ALLOC on success, PKT_DROP when no pool buffer is available,
 * -EIO on invalid input or mux write failure, -EBADMSG when the packet can
 * never fit even in an empty buffer.
 *
 * NOTE(review): hdlc_data is shared state; the visible caller
 * (diag_user_process_apps_data) holds apps_data_mutex and
 * hdlc_disable_mutex around this call — confirm for any new call site.
 */
static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
				       int pkt_type)
{
	int err = 0;
	int ret = PKT_DROP;
	struct diag_apps_data_t *data = &hdlc_data;
	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
	/*
	 * The maximum encoded size of the buffer can be atmost twice the length
	 * of the packet. Add three bytes for the footer - 16 bit CRC (2 bytes) +
	 * delimiter (1 byte).
	 */
	const uint32_t max_encoded_size = ((2 * len) + 3);

	if (!buf || len <= 0) {
		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
		       __func__, buf, len);
		return -EIO;
	}

	/* A packet whose worst-case encoding exceeds the buffer can never be sent. */
	if (max_encoded_size > DIAG_MAX_HDLC_BUF_SIZE) {
		pr_err_ratelimited("diag: In %s, encoded data is larger %d than the buffer size %d\n",
		       __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
		return -EBADMSG;
	}

	send.state = DIAG_STATE_START;
	send.pkt = buf;
	send.last = (void *)(buf + len - 1);
	send.terminate = 1;

	/* Lazily allocate the aggregation buffer on first use. */
	if (!data->buf)
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
	if (!data->buf) {
		ret = PKT_DROP;
		goto fail_ret;
	}

	/*
	 * Worst-case encoding would not fit in the remaining space: flush
	 * what has been aggregated so far and continue in a fresh buffer.
	 */
	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}
	}

	enc.dest = data->buf + data->len;
	enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
	diag_hdlc_encode(&send, &enc);

	/*
	 * This is to check if after HDLC encoding, we are still within
	 * the limits of aggregation buffer. If not, we write out the
	 * current buffer and start aggregation in a newly allocated
	 * buffer.
	 */
	if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
					       DIAG_MAX_HDLC_BUF_SIZE)) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}

		/* Re-encode the packet into the fresh, empty buffer. */
		enc.dest = data->buf + data->len;
		enc.dest_last = (void *)(data->buf + data->len +
					 max_encoded_size);
		diag_hdlc_encode(&send, &enc);
	}

	/* Record the new aggregated length, clamped to the buffer capacity. */
	data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
		     DIAG_MAX_HDLC_BUF_SIZE) ?
		    ((uintptr_t)enc.dest - (uintptr_t)data->buf) :
		    DIAG_MAX_HDLC_BUF_SIZE;

	/* Responses are flushed immediately rather than aggregated. */
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
	}

	return PKT_ALLOC;

fail_free_buf:
	/* Return the aggregation buffer to the pool on unrecoverable errors. */
	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
	data->buf = NULL;
	data->len = 0;

fail_ret:
	return ret;
}
2400
/*
 * diag_process_apps_data_non_hdlc - frame an apps packet without HDLC
 *
 * @buf: kernel buffer holding the raw packet
 * @len: length of @buf in bytes; must be > 0
 * @pkt_type: diag data type; DATA_TYPE_RESPONSE forces an immediate flush
 *
 * Prepends a diag_pkt_frame_t header and appends the CONTROL_CHAR trailer,
 * aggregating into the shared non_hdlc_data buffer and flushing over the
 * mux when the framed packet would not fit. Returns PKT_ALLOC on success,
 * PKT_DROP when no pool buffer is available, -EIO on invalid input or mux
 * write failure.
 *
 * NOTE(review): non_hdlc_data is shared state; the visible caller holds
 * apps_data_mutex and hdlc_disable_mutex — confirm for any new call site.
 */
static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
					   int pkt_type)
{
	int err = 0;
	int ret = PKT_DROP;
	struct diag_pkt_frame_t header;
	struct diag_apps_data_t *data = &non_hdlc_data;
	/*
	 * The maximum packet size, when the data is non hdlc encoded is equal
	 * to the size of the packet frame header and the length. Add 1 for the
	 * delimiter 0x7E at the end.
	 */
	const uint32_t max_pkt_size = sizeof(header) + len + 1;

	if (!buf || len <= 0) {
		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
		       __func__, buf, len);
		return -EIO;
	}

	/* Lazily allocate the aggregation buffer on first use. */
	if (!data->buf) {
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}
	}

	/*
	 * Framed packet would not fit in the remaining space: flush what has
	 * been aggregated so far and continue in a fresh buffer.
	 */
	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}
	}

	/* Frame layout: header | payload | CONTROL_CHAR trailer. */
	header.start = CONTROL_CHAR;
	header.version = 1;
	header.length = len;
	memcpy(data->buf + data->len, &header, sizeof(header));
	data->len += sizeof(header);
	memcpy(data->buf + data->len, buf, len);
	data->len += len;
	*(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
	data->len += sizeof(uint8_t);
	/* Responses are flushed immediately rather than aggregated. */
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
	}

	return PKT_ALLOC;

fail_free_buf:
	/* Return the aggregation buffer to the pool on unrecoverable errors. */
	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
	data->buf = NULL;
	data->len = 0;

fail_ret:
	return ret;
}
2479
2480static int diag_user_process_dci_data(const char __user *buf, int len)
2481{
2482 int err = 0;
2483 const int mempool = POOL_TYPE_USER;
2484 unsigned char *user_space_data = NULL;
2485
2486 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2487 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2488 __func__, buf, len);
2489 return -EBADMSG;
2490 }
2491
2492 user_space_data = diagmem_alloc(driver, len, mempool);
2493 if (!user_space_data)
2494 return -ENOMEM;
2495
2496 err = copy_from_user(user_space_data, buf, len);
2497 if (err) {
2498 pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
2499 __func__, err);
2500 err = DIAG_DCI_SEND_DATA_FAIL;
2501 goto fail;
2502 }
2503
2504 err = diag_process_dci_transaction(user_space_data, len);
2505fail:
2506 diagmem_free(driver, user_space_data, mempool);
2507 user_space_data = NULL;
2508 return err;
2509}
2510
2511static int diag_user_process_dci_apps_data(const char __user *buf, int len,
2512 int pkt_type)
2513{
2514 int err = 0;
2515 const int mempool = POOL_TYPE_COPY;
2516 unsigned char *user_space_data = NULL;
2517
2518 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2519 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2520 __func__, buf, len);
2521 return -EBADMSG;
2522 }
2523
2524 pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
2525 if (!pkt_type) {
2526 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
2527 __func__, pkt_type);
2528 return -EBADMSG;
2529 }
2530
2531 user_space_data = diagmem_alloc(driver, len, mempool);
2532 if (!user_space_data)
2533 return -ENOMEM;
2534
2535 err = copy_from_user(user_space_data, buf, len);
2536 if (err) {
2537 pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
2538 __func__, err);
2539 goto fail;
2540 }
2541
2542 diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
2543fail:
2544 diagmem_free(driver, user_space_data, mempool);
2545 user_space_data = NULL;
2546 return err;
2547}
2548
2549static int diag_user_process_raw_data(const char __user *buf, int len)
2550{
2551 int err = 0;
2552 int ret = 0;
2553 int token_offset = 0;
2554 int remote_proc = 0;
2555 const int mempool = POOL_TYPE_COPY;
2556 unsigned char *user_space_data = NULL;
2557 struct diag_md_session_t *info = NULL;
2558
2559 if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
2560 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2561 __func__, buf, len);
2562 return -EBADMSG;
2563 }
2564
2565 user_space_data = diagmem_alloc(driver, len, mempool);
2566 if (!user_space_data)
2567 return -ENOMEM;
2568
2569 err = copy_from_user(user_space_data, buf, len);
2570 if (err) {
2571 pr_err("diag: copy failed for user space data\n");
2572 goto fail;
2573 }
2574
2575 /* Check for proc_type */
2576 remote_proc = diag_get_remote(*(int *)user_space_data);
2577 if (remote_proc) {
2578 token_offset = sizeof(int);
2579 if (len <= MIN_SIZ_ALLOW) {
2580 pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
2581 __func__, len);
2582 diagmem_free(driver, user_space_data, mempool);
2583 user_space_data = NULL;
2584 return -EBADMSG;
2585 }
2586 len -= sizeof(int);
2587 }
2588 if (driver->mask_check) {
2589 if (!mask_request_validate(user_space_data +
2590 token_offset)) {
2591 pr_alert("diag: mask request Invalid\n");
2592 diagmem_free(driver, user_space_data, mempool);
2593 user_space_data = NULL;
2594 return -EFAULT;
2595 }
2596 }
2597 if (remote_proc) {
2598 ret = diag_send_raw_data_remote(remote_proc,
2599 (void *)(user_space_data + token_offset),
2600 len, USER_SPACE_RAW_DATA);
2601 if (ret) {
2602 pr_err("diag: Error sending data to remote proc %d, err: %d\n",
2603 remote_proc, ret);
2604 }
2605 } else {
2606 wait_event_interruptible(driver->wait_q,
2607 (driver->in_busy_pktdata == 0));
2608 info = diag_md_session_get_pid(current->tgid);
2609 ret = diag_process_apps_pkt(user_space_data, len, info);
2610 if (ret == 1)
2611 diag_send_error_rsp((void *)(user_space_data), len);
2612 }
2613fail:
2614 diagmem_free(driver, user_space_data, mempool);
2615 user_space_data = NULL;
2616 return ret;
2617}
2618
2619static int diag_user_process_userspace_data(const char __user *buf, int len)
2620{
2621 int err = 0;
2622 int max_retries = 3;
2623 int retry_count = 0;
2624 int remote_proc = 0;
2625 int token_offset = 0;
2626 struct diag_md_session_t *session_info = NULL;
2627 uint8_t hdlc_disabled;
2628
2629 if (!buf || len <= 0 || len > USER_SPACE_DATA) {
2630 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2631 __func__, buf, len);
2632 return -EBADMSG;
2633 }
2634
2635 do {
2636 if (!driver->user_space_data_busy)
2637 break;
2638 retry_count++;
2639 usleep_range(10000, 10100);
2640 } while (retry_count < max_retries);
2641
2642 if (driver->user_space_data_busy)
2643 return -EAGAIN;
2644
2645 err = copy_from_user(driver->user_space_data_buf, buf, len);
2646 if (err) {
2647 pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
2648 __func__, err);
2649 return -EIO;
2650 }
2651
2652 /* Check for proc_type */
2653 remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
2654 if (remote_proc) {
2655 if (len <= MIN_SIZ_ALLOW) {
2656 pr_err("diag: Integer underflow in %s, payload size: %d",
2657 __func__, len);
2658 return -EBADMSG;
2659 }
2660 token_offset = sizeof(int);
2661 len -= sizeof(int);
2662 }
2663
2664 /* Check masks for On-Device logging */
2665 if (driver->mask_check) {
2666 if (!mask_request_validate(driver->user_space_data_buf +
2667 token_offset)) {
2668 pr_alert("diag: mask request Invalid\n");
2669 return -EFAULT;
2670 }
2671 }
2672
2673 /* send masks to local processor now */
2674 if (!remote_proc) {
2675 session_info = diag_md_session_get_pid(current->tgid);
2676 if (!session_info) {
2677 pr_err("diag:In %s request came from invalid md session pid:%d",
2678 __func__, current->tgid);
2679 return -EINVAL;
2680 }
2681 if (session_info)
2682 hdlc_disabled = session_info->hdlc_disabled;
2683 else
2684 hdlc_disabled = driver->hdlc_disabled;
2685 if (!hdlc_disabled)
2686 diag_process_hdlc_pkt((void *)
2687 (driver->user_space_data_buf),
2688 len, session_info);
2689 else
2690 diag_process_non_hdlc_pkt((char *)
2691 (driver->user_space_data_buf),
2692 len, session_info);
2693 return 0;
2694 }
2695
2696 err = diag_process_userspace_remote(remote_proc,
2697 driver->user_space_data_buf +
2698 token_offset, len);
2699 if (err) {
2700 driver->user_space_data_busy = 0;
2701 pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
2702 remote_proc, err);
2703 }
2704
2705 return err;
2706}
2707
/*
 * diag_user_process_apps_data - process apps-generated diag traffic
 *
 * @buf: user-space buffer holding the packet
 * @len: length of @buf; must be <= DIAG_MAX_RSP_SIZE
 * @pkt_type: one of DATA_TYPE_EVENT/F3/LOG/RESPONSE/DELAYED_RESPONSE
 *
 * Copies the packet into a pool buffer, optionally diverts it to STM, and
 * otherwise hands it to the HDLC or non-HDLC aggregation path depending on
 * the session's HDLC setting. Returns 0 on success or a negative errno;
 * PKT_DROP/PKT_ALLOC results from the encoders are folded into the stats.
 */
static int diag_user_process_apps_data(const char __user *buf, int len,
				       int pkt_type)
{
	int ret = 0;
	int stm_size = 0;
	const int mempool = POOL_TYPE_COPY;
	unsigned char *user_space_data = NULL;
	struct diag_md_session_t *session_info = NULL;
	uint8_t hdlc_disabled;

	if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
				   __func__, buf, len);
		return -EBADMSG;
	}

	/* Only these packet types may flow through the apps data path. */
	switch (pkt_type) {
	case DATA_TYPE_EVENT:
	case DATA_TYPE_F3:
	case DATA_TYPE_LOG:
	case DATA_TYPE_RESPONSE:
	case DATA_TYPE_DELAYED_RESPONSE:
		break;
	default:
		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
				   __func__, pkt_type);
		return -EBADMSG;
	}

	user_space_data = diagmem_alloc(driver, len, mempool);
	if (!user_space_data) {
		diag_record_stats(pkt_type, PKT_DROP);
		return -ENOMEM;
	}

	ret = copy_from_user(user_space_data, buf, len);
	if (ret) {
		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
			 __func__, ret);
		diagmem_free(driver, user_space_data, mempool);
		user_space_data = NULL;
		diag_record_stats(pkt_type, PKT_DROP);
		return -EBADMSG;
	}

	/*
	 * When STM tracing is enabled for apps data, events/F3s/logs are
	 * diverted to the STM channel instead of the regular mux path.
	 */
	if (driver->stm_state[APPS_DATA] &&
	    (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
		stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
					  len);
		if (stm_size == 0) {
			pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
				 __func__);
		}
		diagmem_free(driver, user_space_data, mempool);
		user_space_data = NULL;

		return 0;
	}

	/*
	 * Lock order: apps_data_mutex before hdlc_disable_mutex — both
	 * protect the shared aggregation state used by the encoders below.
	 */
	mutex_lock(&apps_data_mutex);
	mutex_lock(&driver->hdlc_disable_mutex);
	session_info = diag_md_session_get_peripheral(APPS_DATA);
	if (session_info)
		hdlc_disabled = session_info->hdlc_disabled;
	else
		hdlc_disabled = driver->hdlc_disabled;
	if (hdlc_disabled)
		ret = diag_process_apps_data_non_hdlc(user_space_data, len,
						      pkt_type);
	else
		ret = diag_process_apps_data_hdlc(user_space_data, len,
						  pkt_type);
	mutex_unlock(&driver->hdlc_disable_mutex);
	mutex_unlock(&apps_data_mutex);

	diagmem_free(driver, user_space_data, mempool);
	user_space_data = NULL;

	/* Kick the drain timer so aggregated data does not sit forever. */
	check_drain_timer();

	if (ret == PKT_DROP)
		diag_record_stats(pkt_type, PKT_DROP);
	else if (ret == PKT_ALLOC)
		diag_record_stats(pkt_type, PKT_ALLOC);
	else
		return ret;

	return 0;
}
2797
/*
 * diagchar_read - read handler for the diag char device
 *
 * @file: client file pointer (used to remove the client on DEINIT)
 * @buf: user-space destination buffer
 * @count: size of @buf
 * @ppos: unused
 *
 * Blocks until data_ready[] has a bit set for the calling client, then
 * serves exactly one category of pending data per call, in fixed priority
 * order (userspace/md data, HDLC support, deinit, masks, packets, DCI).
 * The first int written to @buf is always the data type.
 *
 * NOTE: COPY_USER_SPACE_OR_ERR advances/sets `ret` and jumps are driven by
 * its -EFAULT result; statement order in each branch is significant.
 */
static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct diag_dci_client_tbl *entry;
	struct list_head *start, *temp;
	int index = -1, i = 0, ret = 0;
	int data_type;
	int copy_dci_data = 0;
	int exit_stat = 0;
	int write_len = 0;
	struct diag_md_session_t *session_info = NULL;

	/* Locate the calling client's slot in the client map. */
	for (i = 0; i < driver->num_clients; i++)
		if (driver->client_map[i].pid == current->tgid)
			index = i;

	if (index == -1) {
		pr_err("diag: Client PID not found in table");
		return -EINVAL;
	}
	if (!buf) {
		pr_err("diag: bad address from user side\n");
		return -EFAULT;
	}
	/* Sleep until some data category is marked ready for this client. */
	wait_event_interruptible(driver->wait_q, driver->data_ready[index]);

	mutex_lock(&driver->diagchar_mutex);

	/* Memory-device (on-device logging) data takes priority. */
	if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
	    (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
	     driver->logging_mode == DIAG_MULTI_MODE)) {
		pr_debug("diag: process woken up\n");
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;
		/* place holder for number of data field */
		ret += sizeof(int);
		session_info = diag_md_session_get_pid(current->tgid);
		exit_stat = diag_md_copy_to_user(buf, &ret, count,
						 session_info);
		goto exit;
	} else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
		/* In case, the thread wakes up and the logging mode is not
		 * memory device any more, the condition needs to be cleared.
		 */
		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
	}

	/* Notify the client of an HDLC encoding toggle. */
	if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
		data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
		driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;

		session_info = diag_md_session_get_pid(current->tgid);
		if (session_info) {
			COPY_USER_SPACE_OR_ERR(buf+4,
					session_info->hdlc_disabled,
					sizeof(uint8_t));
			if (ret == -EFAULT)
				goto exit;
		}
		goto exit;
	}

	/* Client teardown: drop the mutex and remove the client entry. */
	if (driver->data_ready[index] & DEINIT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DEINIT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= DEINIT_TYPE;
		mutex_unlock(&driver->diagchar_mutex);
		diag_remove_client_entry(file);
		return ret;
	}

	/* Deliver updated message masks. */
	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;
		write_len = diag_copy_to_user_msg_mask(buf + ret, count,
						       session_info);
		if (write_len > 0)
			ret += write_len;
		driver->data_ready[index] ^= MSG_MASKS_TYPE;
		goto exit;
	}

	/* Deliver updated event masks (session-specific when available). */
	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		if (session_info && session_info->event_mask &&
		    session_info->event_mask->ptr) {
			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
					*(session_info->event_mask->ptr),
					session_info->event_mask->mask_len);
			if (ret == -EFAULT)
				goto exit;
		} else {
			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
						*(event_mask.ptr),
						event_mask.mask_len);
			if (ret == -EFAULT)
				goto exit;
		}
		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
		goto exit;
	}

	/* Deliver updated log masks. */
	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;

		write_len = diag_copy_to_user_log_mask(buf + ret, count,
						       session_info);
		if (write_len > 0)
			ret += write_len;
		driver->data_ready[index] ^= LOG_MASKS_TYPE;
		goto exit;
	}

	/* Deliver an apps command request buffer. */
	if (driver->data_ready[index] & PKT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & PKT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(data_type));
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf + sizeof(data_type),
					*(driver->apps_req_buf),
					driver->apps_req_buf_len);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= PKT_TYPE;
		/* Unblock writers waiting in diag_user_process_raw_data(). */
		driver->in_busy_pktdata = 0;
		goto exit;
	}

	/* Deliver a DCI command packet. */
	if (driver->data_ready[index] & DCI_PKT_TYPE) {
		/* Copy the type of data being passed */
		data_type = driver->data_ready[index] & DCI_PKT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, *(driver->dci_pkt_buf),
					driver->dci_pkt_length);
		if (ret == -EFAULT)
			goto exit;

		driver->data_ready[index] ^= DCI_PKT_TYPE;
		driver->in_busy_dcipktdata = 0;
		goto exit;
	}

	/* Deliver the composite DCI event mask plus client count. */
	if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
					event_mask_composite), DCI_EVENT_MASK_SIZE);
		if (ret == -EFAULT)
			goto exit;

		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
		goto exit;
	}

	/* Deliver the composite DCI log mask plus client count. */
	if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
					log_mask_composite), DCI_LOG_MASK_SIZE);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
		goto exit;
	}

exit:
	/* DCI data is handled outside diagchar_mutex, under dci_mutex. */
	mutex_unlock(&driver->diagchar_mutex);
	if (driver->data_ready[index] & DCI_DATA_TYPE) {
		mutex_lock(&driver->dci_mutex);
		/* Copy the type of data being passed */
		data_type = driver->data_ready[index] & DCI_DATA_TYPE;
		list_for_each_safe(start, temp, &driver->dci_client_list) {
			entry = list_entry(start, struct diag_dci_client_tbl,
					   track);
			if (entry->client->tgid != current->tgid)
				continue;
			if (!entry->in_service)
				continue;
			if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
			ret += sizeof(int);
			if (copy_to_user(buf + ret, &entry->client_info.token,
					 sizeof(int))) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
			ret += sizeof(int);
			copy_dci_data = 1;
			exit_stat = diag_copy_dci(buf, count, entry, &ret);
			mutex_lock(&driver->diagchar_mutex);
			driver->data_ready[index] ^= DCI_DATA_TYPE;
			mutex_unlock(&driver->diagchar_mutex);
			if (exit_stat == 1) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
		}
		mutex_unlock(&driver->dci_mutex);
		goto end;
	}
end:
	/*
	 * Flush any read that is currently pending on DCI data and
	 * command channnels. This will ensure that the next read is not
	 * missed.
	 */
	if (copy_dci_data) {
		diag_ws_on_copy_complete(DIAG_WS_DCI);
		flush_workqueue(driver->diag_dci_wq);
	}
	return ret;
}
3058
3059static ssize_t diagchar_write(struct file *file, const char __user *buf,
3060 size_t count, loff_t *ppos)
3061{
3062 int err = 0;
3063 int pkt_type = 0;
3064 int payload_len = 0;
3065 const char __user *payload_buf = NULL;
3066
3067 /*
3068 * The data coming from the user sapce should at least have the
3069 * packet type heeader.
3070 */
3071 if (count < sizeof(int)) {
3072 pr_err("diag: In %s, client is sending short data, len: %d\n",
3073 __func__, (int)count);
3074 return -EBADMSG;
3075 }
3076
3077 err = copy_from_user((&pkt_type), buf, sizeof(int));
3078 if (err) {
3079 pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
3080 __func__, err);
3081 return -EIO;
3082 }
3083
3084 if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
3085 if (!((pkt_type == DCI_DATA_TYPE) ||
3086 (pkt_type == DCI_PKT_TYPE) ||
3087 (pkt_type & DATA_TYPE_DCI_LOG) ||
3088 (pkt_type & DATA_TYPE_DCI_EVENT))) {
3089 pr_debug("diag: In %s, Dropping non DCI packet type\n",
3090 __func__);
3091 return -EIO;
3092 }
3093 }
3094
3095 payload_buf = buf + sizeof(int);
3096 payload_len = count - sizeof(int);
3097
3098 if (pkt_type == DCI_PKT_TYPE)
3099 return diag_user_process_dci_apps_data(payload_buf,
3100 payload_len,
3101 pkt_type);
3102 else if (pkt_type == DCI_DATA_TYPE)
3103 return diag_user_process_dci_data(payload_buf, payload_len);
3104 else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
3105 return diag_user_process_raw_data(payload_buf,
3106 payload_len);
3107 else if (pkt_type == USER_SPACE_DATA_TYPE)
3108 return diag_user_process_userspace_data(payload_buf,
3109 payload_len);
3110 if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
3111 err = diag_user_process_dci_apps_data(payload_buf, payload_len,
3112 pkt_type);
3113 if (pkt_type & DATA_TYPE_DCI_LOG)
3114 pkt_type ^= DATA_TYPE_DCI_LOG;
3115 if (pkt_type & DATA_TYPE_DCI_EVENT)
3116 pkt_type ^= DATA_TYPE_DCI_EVENT;
3117 /*
3118 * Check if the log or event is selected even on the regular
3119 * stream. If USB is not connected and we are not in memory
3120 * device mode, we should not process these logs/events.
3121 */
3122 if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
3123 !driver->usb_connected)
3124 return err;
3125 }
3126
3127 switch (pkt_type) {
3128 case DATA_TYPE_EVENT:
3129 case DATA_TYPE_F3:
3130 case DATA_TYPE_LOG:
3131 case DATA_TYPE_DELAYED_RESPONSE:
3132 case DATA_TYPE_RESPONSE:
3133 return diag_user_process_apps_data(payload_buf, payload_len,
3134 pkt_type);
3135 default:
3136 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
3137 __func__, pkt_type);
3138 return -EINVAL;
3139 }
3140
3141 return err;
3142}
3143
3144void diag_ws_init(void)
3145{
3146 driver->dci_ws.ref_count = 0;
3147 driver->dci_ws.copy_count = 0;
3148 spin_lock_init(&driver->dci_ws.lock);
3149
3150 driver->md_ws.ref_count = 0;
3151 driver->md_ws.copy_count = 0;
3152 spin_lock_init(&driver->md_ws.lock);
3153}
3154
3155static void diag_stats_init(void)
3156{
3157 if (!driver)
3158 return;
3159
3160 driver->msg_stats.alloc_count = 0;
3161 driver->msg_stats.drop_count = 0;
3162
3163 driver->log_stats.alloc_count = 0;
3164 driver->log_stats.drop_count = 0;
3165
3166 driver->event_stats.alloc_count = 0;
3167 driver->event_stats.drop_count = 0;
3168}
3169
/*
 * diag_ws_on_notify - keep the device awake when incoming data is signaled.
 * Only pins the wakeup source; reference counting is done later in
 * diag_ws_on_read()/diag_ws_release().
 */
void diag_ws_on_notify(void)
{
	/*
	 * Do not deal with reference count here as there can be spurious
	 * interrupts.
	 */
	pm_stay_awake(driver->diag_dev);
}
3178
3179void diag_ws_on_read(int type, int pkt_len)
3180{
3181 unsigned long flags;
3182 struct diag_ws_ref_t *ws_ref = NULL;
3183
3184 switch (type) {
3185 case DIAG_WS_DCI:
3186 ws_ref = &driver->dci_ws;
3187 break;
3188 case DIAG_WS_MUX:
3189 ws_ref = &driver->md_ws;
3190 break;
3191 default:
3192 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3193 __func__, type);
3194 return;
3195 }
3196
3197 spin_lock_irqsave(&ws_ref->lock, flags);
3198 if (pkt_len > 0) {
3199 ws_ref->ref_count++;
3200 } else {
3201 if (ws_ref->ref_count < 1) {
3202 ws_ref->ref_count = 0;
3203 ws_ref->copy_count = 0;
3204 }
3205 diag_ws_release();
3206 }
3207 spin_unlock_irqrestore(&ws_ref->lock, flags);
3208}
3209
3210
3211void diag_ws_on_copy(int type)
3212{
3213 unsigned long flags;
3214 struct diag_ws_ref_t *ws_ref = NULL;
3215
3216 switch (type) {
3217 case DIAG_WS_DCI:
3218 ws_ref = &driver->dci_ws;
3219 break;
3220 case DIAG_WS_MUX:
3221 ws_ref = &driver->md_ws;
3222 break;
3223 default:
3224 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3225 __func__, type);
3226 return;
3227 }
3228
3229 spin_lock_irqsave(&ws_ref->lock, flags);
3230 ws_ref->copy_count++;
3231 spin_unlock_irqrestore(&ws_ref->lock, flags);
3232}
3233
3234void diag_ws_on_copy_fail(int type)
3235{
3236 unsigned long flags;
3237 struct diag_ws_ref_t *ws_ref = NULL;
3238
3239 switch (type) {
3240 case DIAG_WS_DCI:
3241 ws_ref = &driver->dci_ws;
3242 break;
3243 case DIAG_WS_MUX:
3244 ws_ref = &driver->md_ws;
3245 break;
3246 default:
3247 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3248 __func__, type);
3249 return;
3250 }
3251
3252 spin_lock_irqsave(&ws_ref->lock, flags);
3253 ws_ref->ref_count--;
3254 spin_unlock_irqrestore(&ws_ref->lock, flags);
3255
3256 diag_ws_release();
3257}
3258
3259void diag_ws_on_copy_complete(int type)
3260{
3261 unsigned long flags;
3262 struct diag_ws_ref_t *ws_ref = NULL;
3263
3264 switch (type) {
3265 case DIAG_WS_DCI:
3266 ws_ref = &driver->dci_ws;
3267 break;
3268 case DIAG_WS_MUX:
3269 ws_ref = &driver->md_ws;
3270 break;
3271 default:
3272 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3273 __func__, type);
3274 return;
3275 }
3276
3277 spin_lock_irqsave(&ws_ref->lock, flags);
3278 ws_ref->ref_count -= ws_ref->copy_count;
3279 if (ws_ref->ref_count < 1)
3280 ws_ref->ref_count = 0;
3281 ws_ref->copy_count = 0;
3282 spin_unlock_irqrestore(&ws_ref->lock, flags);
3283
3284 diag_ws_release();
3285}
3286
3287void diag_ws_reset(int type)
3288{
3289 unsigned long flags;
3290 struct diag_ws_ref_t *ws_ref = NULL;
3291
3292 switch (type) {
3293 case DIAG_WS_DCI:
3294 ws_ref = &driver->dci_ws;
3295 break;
3296 case DIAG_WS_MUX:
3297 ws_ref = &driver->md_ws;
3298 break;
3299 default:
3300 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3301 __func__, type);
3302 return;
3303 }
3304
3305 spin_lock_irqsave(&ws_ref->lock, flags);
3306 ws_ref->ref_count = 0;
3307 ws_ref->copy_count = 0;
3308 spin_unlock_irqrestore(&ws_ref->lock, flags);
3309
3310 diag_ws_release();
3311}
3312
/*
 * Allow the diag device to suspend once neither the DCI nor the
 * memory-device path holds an outstanding read reference.
 *
 * NOTE(review): both ref_counts are read here without taking the
 * corresponding ws spinlocks; some callers invoke this while holding
 * one of them (e.g. diag_ws_on_read). Confirm this unlocked check is
 * intentional before relying on it elsewhere.
 */
void diag_ws_release(void)
{
	if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
		pm_relax(driver->diag_dev);
}
3318
#ifdef DIAG_DEBUG
/*
 * Create the IPC logging context used for diag debug traces and set
 * the default debug mask (peripheral, DCI and bridge traffic).
 * Failure to create the context is logged but not fatal.
 */
static void diag_debug_init(void)
{
	diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
	if (!diag_ipc_log)
		pr_err("diag: Failed to create IPC logging context\n");
	/*
	 * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
	 * to be logged to IPC
	 */
	diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
		DIAG_DEBUG_BRIDGE;
}
#else
/* Stub: IPC debug logging compiled out when DIAG_DEBUG is unset. */
static void diag_debug_init(void)
{

}
#endif
3338
3339static int diag_real_time_info_init(void)
3340{
3341 int i;
3342
3343 if (!driver)
3344 return -EIO;
3345 for (i = 0; i < DIAG_NUM_PROC; i++) {
3346 driver->real_time_mode[i] = 1;
3347 driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
3348 driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
3349 }
3350 driver->real_time_update_busy = 0;
3351 driver->proc_active_mask = 0;
3352 driver->diag_real_time_wq = create_singlethread_workqueue(
3353 "diag_real_time_wq");
3354 if (!driver->diag_real_time_wq)
3355 return -ENOMEM;
3356 INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
3357 mutex_init(&driver->real_time_mutex);
3358 return 0;
3359}
3360
/* File operations for the diag character device node. */
static const struct file_operations diagcharfops = {
	.owner = THIS_MODULE,
	.read = diagchar_read,
	.write = diagchar_write,
#ifdef CONFIG_COMPAT
	.compat_ioctl = diagchar_compat_ioctl,
#endif
	.unlocked_ioctl = diagchar_ioctl,
	.open = diagchar_open,
	.release = diagchar_close
};
3372
3373static int diagchar_setup_cdev(dev_t devno)
3374{
3375
3376 int err;
3377
3378 cdev_init(driver->cdev, &diagcharfops);
3379
3380 driver->cdev->owner = THIS_MODULE;
3381 driver->cdev->ops = &diagcharfops;
3382
3383 err = cdev_add(driver->cdev, devno, 1);
3384
3385 if (err) {
3386 pr_info("diagchar cdev registration failed !\n");
3387 return err;
3388 }
3389
3390 driver->diagchar_class = class_create(THIS_MODULE, "diag");
3391
3392 if (IS_ERR(driver->diagchar_class)) {
3393 pr_err("Error creating diagchar class.\n");
3394 return PTR_ERR(driver->diagchar_class);
3395 }
3396
3397 driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
3398 (void *)driver, "diag");
3399
3400 if (!driver->diag_dev)
3401 return -EIO;
3402
3403 driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
3404 return 0;
3405
3406}
3407
3408static int diagchar_cleanup(void)
3409{
3410 if (driver) {
3411 if (driver->cdev) {
3412 /* TODO - Check if device exists before deleting */
3413 device_destroy(driver->diagchar_class,
3414 MKDEV(driver->major,
3415 driver->minor_start));
3416 cdev_del(driver->cdev);
3417 }
3418 if (!IS_ERR(driver->diagchar_class))
3419 class_destroy(driver->diagchar_class);
3420 kfree(driver);
3421 }
3422 return 0;
3423}
3424
3425static int __init diagchar_init(void)
3426{
3427 dev_t dev;
Manoj Prabhu B98325462017-01-10 20:19:28 +05303428 int ret, i;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003429
3430 pr_debug("diagfwd initializing ..\n");
3431 ret = 0;
3432 driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
3433 if (!driver)
3434 return -ENOMEM;
3435 kmemleak_not_leak(driver);
3436
3437 timer_in_progress = 0;
3438 driver->delayed_rsp_id = 0;
3439 driver->hdlc_disabled = 0;
3440 driver->dci_state = DIAG_DCI_NO_ERROR;
3441 setup_timer(&drain_timer, drain_timer_func, 1234);
3442 driver->supports_sockets = 1;
3443 driver->time_sync_enabled = 0;
3444 driver->uses_time_api = 0;
3445 driver->poolsize = poolsize;
3446 driver->poolsize_hdlc = poolsize_hdlc;
3447 driver->poolsize_dci = poolsize_dci;
3448 driver->poolsize_user = poolsize_user;
3449 /*
3450 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
3451 * The number of buffers encompasses Diag data generated on
3452 * the Apss processor + 1 for the responses generated exclusively on
3453 * the Apps processor + data from data channels (4 channels per
3454 * peripheral) + data from command channels (2)
3455 */
3456 diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
3457 poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
3458 driver->num_clients = max_clients;
3459 driver->logging_mode = DIAG_USB_MODE;
3460 driver->mask_check = 0;
3461 driver->in_busy_pktdata = 0;
3462 driver->in_busy_dcipktdata = 0;
3463 driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
3464 hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3465 hdlc_data.len = 0;
3466 non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3467 non_hdlc_data.len = 0;
3468 mutex_init(&driver->hdlc_disable_mutex);
3469 mutex_init(&driver->diagchar_mutex);
3470 mutex_init(&driver->diag_maskclear_mutex);
Manoj Prabhu B2a428272016-12-22 15:22:03 +05303471 mutex_init(&driver->diag_notifier_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003472 mutex_init(&driver->diag_file_mutex);
3473 mutex_init(&driver->delayed_rsp_mutex);
3474 mutex_init(&apps_data_mutex);
Gopikrishna Mogasati9a44d8d2017-05-05 16:04:35 +05303475 mutex_init(&driver->msg_mask_lock);
Hardik Arya62dce9f2017-06-15 10:39:34 +05303476 mutex_init(&driver->hdlc_recovery_mutex);
Manoj Prabhu B98325462017-01-10 20:19:28 +05303477 for (i = 0; i < NUM_PERIPHERALS; i++)
3478 mutex_init(&driver->diagfwd_channel_mutex[i]);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003479 init_waitqueue_head(&driver->wait_q);
3480 INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
3481 INIT_WORK(&(driver->update_user_clients),
3482 diag_update_user_client_work_fn);
3483 INIT_WORK(&(driver->update_md_clients),
3484 diag_update_md_client_work_fn);
3485 diag_ws_init();
3486 diag_stats_init();
3487 diag_debug_init();
3488 diag_md_session_init();
3489
3490 driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
3491 driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
3492 if (!driver->incoming_pkt.data) {
3493 ret = -ENOMEM;
3494 goto fail;
3495 }
3496 kmemleak_not_leak(driver->incoming_pkt.data);
3497 driver->incoming_pkt.processing = 0;
3498 driver->incoming_pkt.read_len = 0;
3499 driver->incoming_pkt.remaining = 0;
3500 driver->incoming_pkt.total_len = 0;
3501
3502 ret = diag_real_time_info_init();
3503 if (ret)
3504 goto fail;
3505 ret = diag_debugfs_init();
3506 if (ret)
3507 goto fail;
3508 ret = diag_masks_init();
3509 if (ret)
3510 goto fail;
3511 ret = diag_remote_init();
3512 if (ret)
3513 goto fail;
3514 ret = diag_mux_init();
3515 if (ret)
3516 goto fail;
3517 ret = diagfwd_init();
3518 if (ret)
3519 goto fail;
3520 ret = diagfwd_cntl_init();
3521 if (ret)
3522 goto fail;
3523 driver->dci_state = diag_dci_init();
3524 ret = diagfwd_peripheral_init();
3525 if (ret)
3526 goto fail;
3527 diagfwd_cntl_channel_init();
3528 if (driver->dci_state == DIAG_DCI_NO_ERROR)
3529 diag_dci_channel_init();
3530 pr_debug("diagchar initializing ..\n");
3531 driver->num = 1;
3532 driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
3533 strlcpy(driver->name, "diag", 4);
3534 /* Get major number from kernel and initialize */
3535 ret = alloc_chrdev_region(&dev, driver->minor_start,
3536 driver->num, driver->name);
3537 if (!ret) {
3538 driver->major = MAJOR(dev);
3539 driver->minor_start = MINOR(dev);
3540 } else {
3541 pr_err("diag: Major number not allocated\n");
3542 goto fail;
3543 }
3544 driver->cdev = cdev_alloc();
3545 ret = diagchar_setup_cdev(dev);
3546 if (ret)
3547 goto fail;
Sreelakshmi Gownipalli8d477d32017-02-08 19:49:06 -08003548 mutex_init(&driver->diag_id_mutex);
3549 INIT_LIST_HEAD(&driver->diag_id_list);
3550 diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS");
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003551 pr_debug("diagchar initialized now");
3552 ret = diagfwd_bridge_init();
3553 if (ret)
3554 diagfwd_bridge_exit();
3555 return 0;
3556
3557fail:
3558 pr_err("diagchar is not initialized, ret: %d\n", ret);
3559 diag_debugfs_cleanup();
3560 diagchar_cleanup();
3561 diag_mux_exit();
3562 diagfwd_peripheral_exit();
3563 diagfwd_bridge_exit();
3564 diagfwd_exit();
3565 diagfwd_cntl_exit();
3566 diag_dci_exit();
3567 diag_masks_exit();
3568 diag_remote_exit();
3569 return ret;
3570
3571}
3572
/*
 * Module exit: tear down diag subsystems and unregister the character
 * device. The sequence mirrors the init order (mempool/mux/forwarding
 * layers first, char device and driver context last via
 * diagchar_cleanup()); the ordering looks deliberate — do not reorder
 * without checking cross-subsystem dependencies.
 */
static void diagchar_exit(void)
{
	pr_info("diagchar exiting...\n");
	diag_mempool_exit();
	diag_mux_exit();
	diagfwd_peripheral_exit();
	diagfwd_exit();
	diagfwd_cntl_exit();
	diag_dci_exit();
	diag_masks_exit();
	diag_md_session_exit();
	diag_remote_exit();
	diag_debugfs_cleanup();
	diagchar_cleanup();
	pr_info("done diagchar exit\n");
}
3589
/* Module entry/exit registration. */
module_init(diagchar_init);
module_exit(diagchar_exit);