blob: 5b507dfe4c5574e4791592f53854b360386ae9b0 [file] [log] [blame]
Sreelakshmi Gownipalli8d477d32017-02-08 19:49:06 -08001/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/init.h>
15#include <linux/module.h>
16#include <linux/cdev.h>
17#include <linux/fs.h>
18#include <linux/device.h>
19#include <linux/delay.h>
20#include <linux/uaccess.h>
21#include <linux/diagchar.h>
22#include <linux/sched.h>
23#include <linux/ratelimit.h>
24#include <linux/timer.h>
25#ifdef CONFIG_DIAG_OVER_USB
26#include <linux/usb/usbdiag.h>
27#endif
28#include <asm/current.h>
29#include "diagchar_hdlc.h"
30#include "diagmem.h"
31#include "diagchar.h"
32#include "diagfwd.h"
33#include "diagfwd_cntl.h"
34#include "diag_dci.h"
35#include "diag_debugfs.h"
36#include "diag_masks.h"
37#include "diagfwd_bridge.h"
38#include "diag_usb.h"
39#include "diag_memorydevice.h"
40#include "diag_mux.h"
41#include "diag_ipc_logging.h"
42#include "diagfwd_peripheral.h"
43
44#include <linux/coresight-stm.h>
45#include <linux/kernel.h>
46#ifdef CONFIG_COMPAT
47#include <linux/compat.h>
48#endif
49
50MODULE_DESCRIPTION("Diag Char Driver");
51MODULE_LICENSE("GPL v2");
52
53#define MIN_SIZ_ALLOW 4
54#define INIT 1
55#define EXIT -1
56struct diagchar_dev *driver;
57struct diagchar_priv {
58 int pid;
59};
60
61#define USER_SPACE_RAW_DATA 0
62#define USER_SPACE_HDLC_DATA 1
63
64/* Memory pool variables */
65/* Used for copying any incoming packet from user space clients. */
66static unsigned int poolsize = 12;
67module_param(poolsize, uint, 0000);
68
69/*
70 * Used for HDLC encoding packets coming from the user
71 * space.
72 */
73static unsigned int poolsize_hdlc = 10;
74module_param(poolsize_hdlc, uint, 0000);
75
76/*
77 * This is used for incoming DCI requests from the user space clients.
78 * Don't expose itemsize as it is internal.
79 */
80static unsigned int poolsize_user = 8;
81module_param(poolsize_user, uint, 0000);
82
83/*
84 * USB structures allocated for writing Diag data generated on the Apps to USB.
85 * Don't expose itemsize as it is constant.
86 */
87static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
88static unsigned int poolsize_usb_apps = 10;
89module_param(poolsize_usb_apps, uint, 0000);
90
91/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
92static unsigned int poolsize_dci = 10;
93module_param(poolsize_dci, uint, 0000);
94
95#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
96/* Used for reading data from the remote device. */
97static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
98static unsigned int poolsize_mdm = 18;
99module_param(itemsize_mdm, uint, 0000);
100module_param(poolsize_mdm, uint, 0000);
101
102/*
103 * Used for reading DCI data from the remote device.
104 * Don't expose poolsize for DCI data. There is only one read buffer
105 */
106static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
107static unsigned int poolsize_mdm_dci = 1;
108module_param(itemsize_mdm_dci, uint, 0000);
109
110/*
111 * Used for USB structues associated with a remote device.
112 * Don't expose the itemsize since it is constant.
113 */
114static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
115static unsigned int poolsize_mdm_usb = 18;
116module_param(poolsize_mdm_usb, uint, 0000);
117
118/*
119 * Used for writing read DCI data to remote peripherals. Don't
120 * expose poolsize for DCI data. There is only one read
121 * buffer. Add 6 bytes for DCI header information: Start (1),
122 * Version (1), Length (2), Tag (2)
123 */
124static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
125static unsigned int poolsize_mdm_dci_write = 1;
126module_param(itemsize_mdm_dci_write, uint, 0000);
127
128/*
129 * Used for USB structures associated with a remote SMUX
130 * device Don't expose the itemsize since it is constant
131 */
132static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
133static unsigned int poolsize_qsc_usb = 8;
134module_param(poolsize_qsc_usb, uint, 0000);
135#endif
136
137/* This is the max number of user-space clients supported at initialization*/
138static unsigned int max_clients = 15;
139static unsigned int threshold_client_limit = 50;
140module_param(max_clients, uint, 0000);
141
142/* Timer variables */
143static struct timer_list drain_timer;
144static int timer_in_progress;
145
Manoj Prabhu B95427a22016-11-04 11:58:11 +0530146/*
147 * Diag Mask clear variable
148 * Used for clearing masks upon
149 * USB disconnection and stopping ODL
150 */
151static int diag_mask_clear_param = 1;
152module_param(diag_mask_clear_param, int, 0644);
153
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700154struct diag_apps_data_t {
155 void *buf;
156 uint32_t len;
157 int ctxt;
158};
159
160static struct diag_apps_data_t hdlc_data;
161static struct diag_apps_data_t non_hdlc_data;
162static struct mutex apps_data_mutex;
163
164#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
165
166#ifdef DIAG_DEBUG
167uint16_t diag_debug_mask;
168void *diag_ipc_log;
169#endif
170
171static void diag_md_session_close(struct diag_md_session_t *session_info);
172
173/*
174 * Returns the next delayed rsp id. If wrapping is enabled,
175 * wraps the delayed rsp id to DIAGPKT_MAX_DELAYED_RSP.
176 */
177static uint16_t diag_get_next_delayed_rsp_id(void)
178{
179 uint16_t rsp_id = 0;
180
181 mutex_lock(&driver->delayed_rsp_mutex);
182 rsp_id = driver->delayed_rsp_id;
183 if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
184 rsp_id++;
185 else {
186 if (wrap_enabled) {
187 rsp_id = 1;
188 wrap_count++;
189 } else
190 rsp_id = DIAGPKT_MAX_DELAYED_RSP;
191 }
192 driver->delayed_rsp_id = rsp_id;
193 mutex_unlock(&driver->delayed_rsp_mutex);
194
195 return rsp_id;
196}
197
198static int diag_switch_logging(struct diag_logging_mode_param_t *param);
199
/*
 * Copy 'length' bytes of 'data' into the user buffer 'buf' and advance
 * the caller's running byte count 'ret' (the caller must have 'ret' and
 * the user-buffer size 'count' in scope).  On failure — user buffer too
 * small or copy_to_user() fault — set ret to -EFAULT and break out of
 * the do/while WITHOUT advancing it, so callers can reliably test
 * (ret == -EFAULT) afterwards.  The previous version fell through to
 * "ret += length" after setting -EFAULT, clobbering the error value and
 * defeating every such check (e.g. in diag_copy_dci()).
 */
#define COPY_USER_SPACE_OR_ERR(buf, data, length)	\
do {							\
	if ((count < ret+length) || (copy_to_user(buf,	\
			(void *)&data, length))) {	\
		ret = -EFAULT;				\
		break;					\
	}						\
	ret += length;					\
} while (0)
208
/*
 * Drain-timer callback.  Timer (softirq) context cannot sleep, so the
 * actual flush is deferred to diag_drain_work on the driver workqueue.
 */
static void drain_timer_func(unsigned long data)
{
	queue_work(driver->diag_wq, &(driver->diag_drain_work));
}
213
214static void diag_drain_apps_data(struct diag_apps_data_t *data)
215{
216 int err = 0;
217
218 if (!data || !data->buf)
219 return;
220
221 err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
222 data->ctxt);
223 if (err)
224 diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
225
226 data->buf = NULL;
227 data->len = 0;
228}
229
/* Workqueue handler: notify userspace clients that HDLC support changed. */
void diag_update_user_client_work_fn(struct work_struct *work)
{
	diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
}
234
/* Workqueue handler: notify memory-device clients that HDLC support changed. */
static void diag_update_md_client_work_fn(struct work_struct *work)
{
	diag_update_md_clients(HDLC_SUPPORT_TYPE);
}
239
240void diag_drain_work_fn(struct work_struct *work)
241{
242 struct diag_md_session_t *session_info = NULL;
243 uint8_t hdlc_disabled = 0;
244
245 timer_in_progress = 0;
246 mutex_lock(&apps_data_mutex);
247 session_info = diag_md_session_get_peripheral(APPS_DATA);
248 if (session_info)
249 hdlc_disabled = session_info->hdlc_disabled;
250 else
251 hdlc_disabled = driver->hdlc_disabled;
252
253 if (!hdlc_disabled)
254 diag_drain_apps_data(&hdlc_data);
255 else
256 diag_drain_apps_data(&non_hdlc_data);
257 mutex_unlock(&apps_data_mutex);
258}
259
260void check_drain_timer(void)
261{
262 int ret = 0;
263
264 if (!timer_in_progress) {
265 timer_in_progress = 1;
266 ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
267 }
268}
269
/*
 * Bind the calling process to slot 'i' of driver->client_map and attach
 * a small per-open private record carrying its tgid.
 *
 * NOTE(review): if the kmalloc fails, file->private_data is left NULL;
 * consumers (e.g. diag_remove_client_entry) must check for that —
 * verify all callers of private_data do.
 */
void diag_add_client(int i, struct file *file)
{
	struct diagchar_priv *diagpriv_data;

	driver->client_map[i].pid = current->tgid;
	diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
							GFP_KERNEL);
	if (diagpriv_data)
		diagpriv_data->pid = current->tgid;
	file->private_data = diagpriv_data;
	/* strlcpy NUL-terminates within 20; the explicit terminator is belt-and-braces. */
	strlcpy(driver->client_map[i].name, current->comm, 20);
	driver->client_map[i].name[19] = '\0';
}
283
284static void diag_mempool_init(void)
285{
286 uint32_t itemsize = DIAG_MAX_REQ_SIZE;
287 uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
288 uint32_t itemsize_dci = IN_BUF_SIZE;
289 uint32_t itemsize_user = DCI_REQ_BUF_SIZE;
290
291 itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
292 CALLBACK_HDR_SIZE);
293 diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
294 diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
295 diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
296 diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);
297
298 diagmem_init(driver, POOL_TYPE_COPY);
299 diagmem_init(driver, POOL_TYPE_HDLC);
300 diagmem_init(driver, POOL_TYPE_USER);
301 diagmem_init(driver, POOL_TYPE_DCI);
302}
303
/* Tear down the apps-side memory pools; pairs with diag_mempool_init(). */
static void diag_mempool_exit(void)
{
	diagmem_exit(driver, POOL_TYPE_COPY);
	diagmem_exit(driver, POOL_TYPE_HDLC);
	diagmem_exit(driver, POOL_TYPE_USER);
	diagmem_exit(driver, POOL_TYPE_DCI);
}
311
312static int diagchar_open(struct inode *inode, struct file *file)
313{
314 int i = 0;
315 void *temp;
316
317 if (driver) {
318 mutex_lock(&driver->diagchar_mutex);
319
320 for (i = 0; i < driver->num_clients; i++)
321 if (driver->client_map[i].pid == 0)
322 break;
323
324 if (i < driver->num_clients) {
325 diag_add_client(i, file);
326 } else {
327 if (i < threshold_client_limit) {
328 driver->num_clients++;
329 temp = krealloc(driver->client_map
330 , (driver->num_clients) * sizeof(struct
331 diag_client_map), GFP_KERNEL);
332 if (!temp)
333 goto fail;
334 else
335 driver->client_map = temp;
336 temp = krealloc(driver->data_ready
337 , (driver->num_clients) * sizeof(int),
338 GFP_KERNEL);
339 if (!temp)
340 goto fail;
341 else
342 driver->data_ready = temp;
343 diag_add_client(i, file);
344 } else {
345 mutex_unlock(&driver->diagchar_mutex);
346 pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
347 pr_err_ratelimited("diag: Cannot open handle %s %d",
348 current->comm, current->tgid);
349 for (i = 0; i < driver->num_clients; i++)
350 pr_debug("%d) %s PID=%d", i, driver->
351 client_map[i].name,
352 driver->client_map[i].pid);
353 return -ENOMEM;
354 }
355 }
356 driver->data_ready[i] = 0x0;
357 driver->data_ready[i] |= MSG_MASKS_TYPE;
358 driver->data_ready[i] |= EVENT_MASKS_TYPE;
359 driver->data_ready[i] |= LOG_MASKS_TYPE;
360 driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
361 driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
362
363 if (driver->ref_count == 0)
364 diag_mempool_init();
365 driver->ref_count++;
366 mutex_unlock(&driver->diagchar_mutex);
367 return 0;
368 }
369 return -ENOMEM;
370
371fail:
372 mutex_unlock(&driver->diagchar_mutex);
373 driver->num_clients--;
374 pr_err_ratelimited("diag: Insufficient memory for new client");
375 return -ENOMEM;
376}
377
378static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
379{
380 uint32_t ret = 0;
381
382 if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
383 ret |= DIAG_CON_APSS;
384 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
385 ret |= DIAG_CON_MPSS;
386 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
387 ret |= DIAG_CON_LPASS;
388 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
389 ret |= DIAG_CON_WCNSS;
390 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
391 ret |= DIAG_CON_SENSORS;
392 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
393 ret |= DIAG_CON_WDSP;
Sreelakshmi Gownipalli588a31d2016-11-02 13:33:43 -0700394 if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
395 ret |= DIAG_CON_CDSP;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700396
397 return ret;
398}
/*
 * Accessor for the diag_mask_clear_param module parameter; non-zero
 * means masks are cleared on USB disconnection / ODL stop.
 */
int diag_mask_param(void)
{
	return diag_mask_clear_param;
}
/*
 * Send the three "disable" mask commands (log mask 0x73, msg mask
 * 0x7D/0x05, event mask 0x60) through the apps mask handler for the
 * given session.  A NULL info means the clear was triggered by USB
 * disconnection rather than ODL exit.  The return values of
 * diag_process_apps_masks() are intentionally ignored: this is a
 * best-effort cleanup path.
 */
void diag_clear_masks(struct diag_md_session_t *info)
{
	int ret;
	char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
	char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
	char cmd_disable_event_mask[] = { 0x60, 0};

	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
		"diag: %s: masks clear request upon %s\n", __func__,
		((info) ? "ODL exit" : "USB Disconnection"));

	ret = diag_process_apps_masks(cmd_disable_log_mask,
			sizeof(cmd_disable_log_mask), info);
	ret = diag_process_apps_masks(cmd_disable_msg_mask,
			sizeof(cmd_disable_msg_mask), info);
	ret = diag_process_apps_masks(cmd_disable_event_mask,
			sizeof(cmd_disable_event_mask), info);
	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
		"diag:%s: masks cleared successfully\n", __func__);
}
423
/*
 * Tear down the memory-device logging session owned by 'pid':
 * optionally clear the masks, close the session and its peripherals,
 * then switch the session's peripherals back to USB logging.  No-op if
 * the pid owns no session.
 *
 * Locking: diagchar_mutex is taken twice — once around the session
 * close and once around diag_switch_logging() — with the unlocked
 * diag_mux_close_peripheral() loop in between.  NOTE(review): the
 * session_peripheral_mask snapshot is used after the first unlock;
 * confirm no concurrent session change can invalidate it.
 */
static void diag_close_logging_process(const int pid)
{
	int i;
	int session_peripheral_mask;
	struct diag_md_session_t *session_info = NULL;
	struct diag_logging_mode_param_t params;

	session_info = diag_md_session_get_pid(pid);
	if (!session_info)
		return;

	/* Module parameter gates whether masks are cleared on ODL exit. */
	if (diag_mask_clear_param)
		diag_clear_masks(session_info);

	mutex_lock(&driver->diag_maskclear_mutex);
	driver->mask_clear = 1;
	mutex_unlock(&driver->diag_maskclear_mutex);

	mutex_lock(&driver->diagchar_mutex);
	session_peripheral_mask = session_info->peripheral_mask;
	diag_md_session_close(session_info);
	mutex_unlock(&driver->diagchar_mutex);
	for (i = 0; i < NUM_MD_SESSIONS; i++)
		if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);

	/* Hand the session's peripherals back to USB-mode logging. */
	params.req_mode = USB_MODE;
	params.mode_param = 0;
	params.peripheral_mask =
		diag_translate_kernel_to_user_mask(session_peripheral_mask);
	mutex_lock(&driver->diagchar_mutex);
	diag_switch_logging(&params);
	mutex_unlock(&driver->diagchar_mutex);
}
458
/*
 * Undo everything diagchar_open()/diag_add_client() set up for the
 * calling process: DCI registrations, its logging session, its command
 * registrations, its client_map slot and the per-open private data.
 * The last client out also tears down the memory pools.
 *
 * Returns 0 on success; -ENOMEM if the driver is absent; -ENOENT /
 * -EINVAL for a bad file pointer or missing private data.
 */
static int diag_remove_client_entry(struct file *file)
{
	int i = -1;
	struct diagchar_priv *diagpriv_data = NULL;
	struct diag_dci_client_tbl *dci_entry = NULL;

	if (!driver)
		return -ENOMEM;

	mutex_lock(&driver->diag_file_mutex);
	if (!file) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -ENOENT;
	}
	if (!(file->private_data)) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
		mutex_unlock(&driver->diag_file_mutex);
		return -EINVAL;
	}

	diagpriv_data = file->private_data;

	/*
	 * clean up any DCI registrations, if this is a DCI client
	 * This will specially help in case of ungraceful exit of any DCI client
	 * This call will remove any pending registrations of such client
	 */
	mutex_lock(&driver->dci_mutex);
	dci_entry = dci_lookup_client_entry_pid(current->tgid);
	if (dci_entry)
		diag_dci_deinit_client(dci_entry);
	mutex_unlock(&driver->dci_mutex);

	diag_close_logging_process(current->tgid);

	/* Delete the pkt response table entry for the exiting process */
	diag_cmd_remove_reg_by_pid(current->tgid);

	mutex_lock(&driver->diagchar_mutex);
	driver->ref_count--;
	/* Last client out: release the shared memory pools. */
	if (driver->ref_count == 0)
		diag_mempool_exit();

	/* Free this open's slot; matched by the pid stored at open time. */
	for (i = 0; i < driver->num_clients; i++) {
		if (diagpriv_data && diagpriv_data->pid ==
						driver->client_map[i].pid) {
			driver->client_map[i].pid = 0;
			kfree(diagpriv_data);
			diagpriv_data = NULL;
			file->private_data = 0;
			break;
		}
	}
	mutex_unlock(&driver->diagchar_mutex);
	mutex_unlock(&driver->diag_file_mutex);
	return 0;
}
517static int diagchar_close(struct inode *inode, struct file *file)
518{
519 int ret;
520
521 DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
522 current->comm);
523 ret = diag_remove_client_entry(file);
524 mutex_lock(&driver->diag_maskclear_mutex);
525 driver->mask_clear = 0;
526 mutex_unlock(&driver->diag_maskclear_mutex);
527 return ret;
528}
529
530void diag_record_stats(int type, int flag)
531{
532 struct diag_pkt_stats_t *pkt_stats = NULL;
533
534 switch (type) {
535 case DATA_TYPE_EVENT:
536 pkt_stats = &driver->event_stats;
537 break;
538 case DATA_TYPE_F3:
539 pkt_stats = &driver->msg_stats;
540 break;
541 case DATA_TYPE_LOG:
542 pkt_stats = &driver->log_stats;
543 break;
544 case DATA_TYPE_RESPONSE:
545 if (flag != PKT_DROP)
546 return;
547 pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
548 __func__);
549 return;
550 case DATA_TYPE_DELAYED_RESPONSE:
551 /* No counters to increase for Delayed responses */
552 return;
553 default:
554 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
555 __func__, type);
556 return;
557 }
558
559 switch (flag) {
560 case PKT_ALLOC:
561 atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
562 break;
563 case PKT_DROP:
564 atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
565 break;
566 case PKT_RESET:
567 atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
568 atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
569 break;
570 default:
571 pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
572 __func__, flag);
573 return;
574 }
575}
576
577void diag_get_timestamp(char *time_str)
578{
579 struct timeval t;
580 struct tm broken_tm;
581
582 do_gettimeofday(&t);
583 if (!time_str)
584 return;
585 time_to_tm(t.tv_sec, 0, &broken_tm);
586 scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
587 broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
588}
589
590int diag_get_remote(int remote_info)
591{
592 int val = (remote_info < 0) ? -remote_info : remote_info;
593 int remote_val;
594
595 switch (val) {
596 case MDM:
597 case MDM2:
598 case QSC:
599 remote_val = -remote_info;
600 break;
601 default:
602 remote_val = 0;
603 break;
604 }
605
606 return remote_val;
607}
608
609int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
610{
611 int polling = DIAG_CMD_NOT_POLLING;
612
613 if (!entry)
614 return -EIO;
615
616 if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
617 if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
618 entry->cmd_code_hi >= DIAG_CMD_STATUS &&
619 entry->cmd_code_lo <= DIAG_CMD_STATUS)
620 polling = DIAG_CMD_POLLING;
621 else if (entry->subsys_id == DIAG_SS_WCDMA &&
622 entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
623 entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
624 polling = DIAG_CMD_POLLING;
625 else if (entry->subsys_id == DIAG_SS_GSM &&
626 entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
627 entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
628 polling = DIAG_CMD_POLLING;
629 else if (entry->subsys_id == DIAG_SS_PARAMS &&
630 entry->cmd_code_hi >= DIAG_DIAG_POLL &&
631 entry->cmd_code_lo <= DIAG_DIAG_POLL)
632 polling = DIAG_CMD_POLLING;
633 else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
634 entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
635 entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
636 polling = DIAG_CMD_POLLING;
637 }
638
639 return polling;
640}
641
642static void diag_cmd_invalidate_polling(int change_flag)
643{
644 int polling = DIAG_CMD_NOT_POLLING;
645 struct list_head *start;
646 struct list_head *temp;
647 struct diag_cmd_reg_t *item = NULL;
648
649 if (change_flag == DIAG_CMD_ADD) {
650 if (driver->polling_reg_flag)
651 return;
652 }
653
654 driver->polling_reg_flag = 0;
655 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
656 item = list_entry(start, struct diag_cmd_reg_t, link);
657 polling = diag_cmd_chk_polling(&item->entry);
658 if (polling == DIAG_CMD_POLLING) {
659 driver->polling_reg_flag = 1;
660 break;
661 }
662 }
663}
664
665int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
666 int pid)
667{
668 struct diag_cmd_reg_t *new_item = NULL;
669
670 if (!new_entry) {
671 pr_err("diag: In %s, invalid new entry\n", __func__);
672 return -EINVAL;
673 }
674
675 if (proc > APPS_DATA) {
676 pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
677 return -EINVAL;
678 }
679
680 if (proc != APPS_DATA)
681 pid = INVALID_PID;
682
683 new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
684 if (!new_item)
685 return -ENOMEM;
686 kmemleak_not_leak(new_item);
687
688 new_item->pid = pid;
689 new_item->proc = proc;
690 memcpy(&new_item->entry, new_entry,
691 sizeof(struct diag_cmd_reg_entry_t));
692 INIT_LIST_HEAD(&new_item->link);
693
694 mutex_lock(&driver->cmd_reg_mutex);
695 list_add_tail(&new_item->link, &driver->cmd_reg_list);
696 driver->cmd_reg_count++;
697 diag_cmd_invalidate_polling(DIAG_CMD_ADD);
698 mutex_unlock(&driver->cmd_reg_mutex);
699
700 return 0;
701}
702
703struct diag_cmd_reg_entry_t *diag_cmd_search(
704 struct diag_cmd_reg_entry_t *entry, int proc)
705{
706 struct list_head *start;
707 struct list_head *temp;
708 struct diag_cmd_reg_t *item = NULL;
709 struct diag_cmd_reg_entry_t *temp_entry = NULL;
710
711 if (!entry) {
712 pr_err("diag: In %s, invalid entry\n", __func__);
713 return NULL;
714 }
715
716 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
717 item = list_entry(start, struct diag_cmd_reg_t, link);
Manoj Prabhu Bd9b3b622017-01-17 10:15:53 +0530718 if (&item->entry == NULL) {
Gopikrishna Mogasati9b332372016-11-10 20:03:46 +0530719 pr_err("diag: In %s, unable to search command\n",
720 __func__);
721 return NULL;
722 }
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -0700723 temp_entry = &item->entry;
724 if (temp_entry->cmd_code == entry->cmd_code &&
725 temp_entry->subsys_id == entry->subsys_id &&
726 temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
727 temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
728 (proc == item->proc || proc == ALL_PROC)) {
729 return &item->entry;
730 } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
731 entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
732 if (temp_entry->subsys_id == entry->subsys_id &&
733 temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
734 temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
735 (proc == item->proc || proc == ALL_PROC)) {
736 return &item->entry;
737 }
738 } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
739 temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
740 if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
741 (temp_entry->cmd_code_lo <= entry->cmd_code) &&
742 (proc == item->proc || proc == ALL_PROC)) {
743 if (entry->cmd_code == MODE_CMD) {
744 if (entry->subsys_id == RESET_ID &&
745 item->proc != APPS_DATA) {
746 continue;
747 }
748 if (entry->subsys_id != RESET_ID &&
749 item->proc == APPS_DATA) {
750 continue;
751 }
752 }
753 return &item->entry;
754 }
755 }
756 }
757
758 return NULL;
759}
760
761void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
762{
763 struct diag_cmd_reg_t *item = NULL;
764 struct diag_cmd_reg_entry_t *temp_entry;
765
766 if (!entry) {
767 pr_err("diag: In %s, invalid entry\n", __func__);
768 return;
769 }
770
771 mutex_lock(&driver->cmd_reg_mutex);
772 temp_entry = diag_cmd_search(entry, proc);
773 if (temp_entry) {
774 item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
775 if (!item) {
776 mutex_unlock(&driver->cmd_reg_mutex);
777 return;
778 }
779 list_del(&item->link);
780 kfree(item);
781 driver->cmd_reg_count--;
782 }
783 diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
784 mutex_unlock(&driver->cmd_reg_mutex);
785}
786
787void diag_cmd_remove_reg_by_pid(int pid)
788{
789 struct list_head *start;
790 struct list_head *temp;
791 struct diag_cmd_reg_t *item = NULL;
792
793 mutex_lock(&driver->cmd_reg_mutex);
794 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
795 item = list_entry(start, struct diag_cmd_reg_t, link);
796 if (item->pid == pid) {
797 list_del(&item->link);
798 kfree(item);
799 driver->cmd_reg_count--;
800 }
801 }
802 mutex_unlock(&driver->cmd_reg_mutex);
803}
804
805void diag_cmd_remove_reg_by_proc(int proc)
806{
807 struct list_head *start;
808 struct list_head *temp;
809 struct diag_cmd_reg_t *item = NULL;
810
811 mutex_lock(&driver->cmd_reg_mutex);
812 list_for_each_safe(start, temp, &driver->cmd_reg_list) {
813 item = list_entry(start, struct diag_cmd_reg_t, link);
814 if (item->proc == proc) {
815 list_del(&item->link);
816 kfree(item);
817 driver->cmd_reg_count--;
818 }
819 }
820 diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
821 mutex_unlock(&driver->cmd_reg_mutex);
822}
823
/*
 * Drain the DCI client's queued write buffers into the user buffer
 * 'buf' of size 'count'.  '*pret' is the running offset into the user
 * buffer and is advanced past every payload copied; the total payload
 * length is additionally written at buf+8.  Returns 0 on success, 1 on
 * bad arguments, -EINVAL if the offset already exceeds 'count'.  If
 * more queued data remains than fits, dci_drain_data(0) is kicked so
 * the rest is delivered on a later read.
 */
static int diag_copy_dci(char __user *buf, size_t count,
			struct diag_dci_client_tbl *entry, int *pret)
{
	int total_data_len = 0;
	int ret = 0;
	int exit_stat = 1;
	uint8_t drain_again = 0;
	struct diag_dci_buffer_t *buf_entry, *temp;

	if (!buf || !entry || !pret)
		return exit_stat;

	ret = *pret;

	/* Reserve room for the total-length word copied at the end. */
	ret += sizeof(int);
	if (ret >= count) {
		pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
			__func__, ret, count);
		return -EINVAL;
	}

	mutex_lock(&entry->write_buf_mutex);
	list_for_each_safe(buf_entry, temp, &entry->list_write_buf,
								buf_track) {

		/* Stop when the next payload no longer fits; drain later. */
		if ((ret + buf_entry->data_len) > count) {
			drain_again = 1;
			break;
		}

		list_del(&buf_entry->buf_track);
		mutex_lock(&buf_entry->data_mutex);
		if ((buf_entry->data_len > 0) &&
			(buf_entry->in_busy) &&
			(buf_entry->data)) {
			/* On a copy fault the buffer is dropped, not retried. */
			if (copy_to_user(buf+ret, (void *)buf_entry->data,
					buf_entry->data_len))
				goto drop;
			ret += buf_entry->data_len;
			total_data_len += buf_entry->data_len;
			diag_ws_on_copy(DIAG_WS_DCI);
drop:
			buf_entry->in_busy = 0;
			buf_entry->data_len = 0;
			buf_entry->in_list = 0;
			if (buf_entry->buf_type == DCI_BUF_CMD) {
				/* Command buffers are reused, not freed. */
				mutex_unlock(&buf_entry->data_mutex);
				continue;
			} else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
				/* Secondary buffers return data to the DCI
				 * pool and free the tracking struct itself.
				 */
				diagmem_free(driver, buf_entry->data,
					POOL_TYPE_DCI);
				buf_entry->data = NULL;
				mutex_unlock(&buf_entry->data_mutex);
				kfree(buf_entry);
				continue;
			}

		}
		mutex_unlock(&buf_entry->data_mutex);
	}

	if (total_data_len > 0) {
		/* Copy the total data length */
		COPY_USER_SPACE_OR_ERR(buf+8, total_data_len, 4);
		if (ret == -EFAULT)
			goto exit;
		/* The length word was pre-reserved above; undo the macro's
		 * extra advance.
		 */
		ret -= 4;
	} else {
		pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
			__func__, total_data_len);
	}

	exit_stat = 0;
exit:
	entry->in_service = 0;
	mutex_unlock(&entry->write_buf_mutex);
	*pret = ret;
	if (drain_again)
		dci_drain_data(0);

	return exit_stat;
}
906
907#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Size the remote-device (MDM/MDM2/QSC) memory pools and allocate the
 * shared HDLC encode buffer used by diag_send_raw_data_remote().
 * Returns 0 on success or -ENOMEM if the encode buffer allocation
 * fails.  Pairs with diag_remote_exit().
 */
static int diag_remote_init(void)
{
	diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
	diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
			poolsize_mdm_dci);
	diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
	diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
			poolsize_mdm_dci_write);
	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
			poolsize_qsc_usb);
	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
	if (!driver->hdlc_encode_buf)
		return -ENOMEM;
	driver->hdlc_encode_buf_len = 0;
	return 0;
}
929
930static void diag_remote_exit(void)
931{
932 kfree(driver->hdlc_encode_buf);
933}
934
935static int diag_send_raw_data_remote(int proc, void *buf, int len,
936 uint8_t hdlc_flag)
937{
938 int err = 0;
939 int max_len = 0;
940 uint8_t retry_count = 0;
941 uint8_t max_retries = 3;
942 uint16_t payload = 0;
943 struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
944 struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
945 int bridge_index = proc - 1;
946 struct diag_md_session_t *session_info = NULL;
947 uint8_t hdlc_disabled = 0;
948
949 if (!buf)
950 return -EINVAL;
951
952 if (len <= 0) {
953 pr_err("diag: In %s, invalid len: %d", __func__, len);
954 return -EBADMSG;
955 }
956
957 if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
958 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
959 bridge_index);
960 return -EINVAL;
961 }
962
963 do {
964 if (driver->hdlc_encode_buf_len == 0)
965 break;
966 usleep_range(10000, 10100);
967 retry_count++;
968 } while (retry_count < max_retries);
969
970 if (driver->hdlc_encode_buf_len != 0)
971 return -EAGAIN;
972 session_info = diag_md_session_get_peripheral(APPS_DATA);
973 if (session_info)
974 hdlc_disabled = session_info->hdlc_disabled;
975 else
976 hdlc_disabled = driver->hdlc_disabled;
977 if (hdlc_disabled) {
978 payload = *(uint16_t *)(buf + 2);
979 driver->hdlc_encode_buf_len = payload;
980 /*
981 * Adding 4 bytes for start (1 byte), version (1 byte) and
982 * payload (2 bytes)
983 */
984 memcpy(driver->hdlc_encode_buf, buf + 4, payload);
985 goto send_data;
986 }
987
988 if (hdlc_flag) {
989 if (len > DIAG_MAX_HDLC_BUF_SIZE) {
990 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
991 len);
992 return -EBADMSG;
993 }
994 driver->hdlc_encode_buf_len = len;
995 memcpy(driver->hdlc_encode_buf, buf, len);
996 goto send_data;
997 }
998
999 /*
1000 * The worst case length will be twice as the incoming packet length.
1001 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
1002 */
1003 max_len = (2 * len) + 3;
1004 if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
1005 pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
1006 max_len);
1007 return -EBADMSG;
1008 }
1009
1010 /* Perform HDLC encoding on incoming data */
1011 send.state = DIAG_STATE_START;
1012 send.pkt = (void *)(buf);
1013 send.last = (void *)(buf + len - 1);
1014 send.terminate = 1;
1015
1016 enc.dest = driver->hdlc_encode_buf;
1017 enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
1018 diag_hdlc_encode(&send, &enc);
1019 driver->hdlc_encode_buf_len = (int)(enc.dest -
1020 (void *)driver->hdlc_encode_buf);
1021
1022send_data:
1023 err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
1024 driver->hdlc_encode_buf_len);
1025 if (err) {
1026 pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
1027 proc, err);
1028 driver->hdlc_encode_buf_len = 0;
1029 }
1030
1031 return err;
1032}
1033
1034static int diag_process_userspace_remote(int proc, void *buf, int len)
1035{
1036 int bridge_index = proc - 1;
1037
1038 if (!buf || len < 0) {
1039 pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
1040 __func__, buf, len);
1041 return -EINVAL;
1042 }
1043
1044 if (bridge_index < 0 || bridge_index > NUM_REMOTE_DEV) {
1045 pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
1046 bridge_index);
1047 return -EINVAL;
1048 }
1049
1050 driver->user_space_data_busy = 1;
1051 return diagfwd_bridge_write(bridge_index, buf, len);
1052}
1053#else
1054static int diag_remote_init(void)
1055{
1056 return 0;
1057}
1058
1059static void diag_remote_exit(void)
1060{
1061}
1062
1063int diagfwd_bridge_init(void)
1064{
1065 return 0;
1066}
1067
1068void diagfwd_bridge_exit(void)
1069{
1070}
1071
1072uint16_t diag_get_remote_device_mask(void)
1073{
1074 return 0;
1075}
1076
/* Stub: sending to a remote proc is invalid when bridge support is off. */
static int diag_send_raw_data_remote(int proc, void *buf, int len,
				     uint8_t hdlc_flag)
{
	return -EINVAL;
}
1082
/* Stub: silently succeed (drop) when bridge support is compiled out. */
static int diag_process_userspace_remote(int proc, void *buf, int len)
{
	return 0;
}
1087#endif
1088
1089static int mask_request_validate(unsigned char mask_buf[])
1090{
1091 uint8_t packet_id;
1092 uint8_t subsys_id;
1093 uint16_t ss_cmd;
1094
1095 packet_id = mask_buf[0];
1096
1097 if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
1098 subsys_id = mask_buf[1];
1099 ss_cmd = *(uint16_t *)(mask_buf + 2);
1100 switch (subsys_id) {
1101 case DIAG_SS_DIAG:
1102 if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
1103 (ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
1104 (ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
1105 (ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
1106 (ss_cmd == DIAG_SS_FILE_READ_APPS))
1107 return 1;
1108 break;
1109 default:
1110 return 0;
1111 }
1112 } else if (packet_id == 0x4B) {
1113 subsys_id = mask_buf[1];
1114 ss_cmd = *(uint16_t *)(mask_buf + 2);
1115 /* Packets with SSID which are allowed */
1116 switch (subsys_id) {
1117 case 0x04: /* DIAG_SUBSYS_WCDMA */
1118 if ((ss_cmd == 0) || (ss_cmd == 0xF))
1119 return 1;
1120 break;
1121 case 0x08: /* DIAG_SUBSYS_GSM */
1122 if ((ss_cmd == 0) || (ss_cmd == 0x1))
1123 return 1;
1124 break;
1125 case 0x09: /* DIAG_SUBSYS_UMTS */
1126 case 0x0F: /* DIAG_SUBSYS_CM */
1127 if (ss_cmd == 0)
1128 return 1;
1129 break;
1130 case 0x0C: /* DIAG_SUBSYS_OS */
1131 if ((ss_cmd == 2) || (ss_cmd == 0x100))
1132 return 1; /* MPU and APU */
1133 break;
1134 case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
1135 if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
1136 return 1;
1137 else if (ss_cmd == 0x218) /* HDLC Disabled Command*/
1138 return 0;
1139 else if (ss_cmd == DIAG_GET_TIME_API)
1140 return 1;
1141 else if (ss_cmd == DIAG_SET_TIME_API)
1142 return 1;
1143 else if (ss_cmd == DIAG_SWITCH_COMMAND)
1144 return 1;
1145 else if (ss_cmd == DIAG_BUFFERING_MODE)
1146 return 1;
1147 break;
1148 case 0x13: /* DIAG_SUBSYS_FS */
1149 if ((ss_cmd == 0) || (ss_cmd == 0x1))
1150 return 1;
1151 break;
1152 default:
1153 return 0;
1154 }
1155 } else {
1156 switch (packet_id) {
1157 case 0x00: /* Version Number */
1158 case 0x0C: /* CDMA status packet */
1159 case 0x1C: /* Diag Version */
1160 case 0x1D: /* Time Stamp */
1161 case 0x60: /* Event Report Control */
1162 case 0x63: /* Status snapshot */
1163 case 0x73: /* Logging Configuration */
1164 case 0x7C: /* Extended build ID */
1165 case 0x7D: /* Extended Message configuration */
1166 case 0x81: /* Event get mask */
1167 case 0x82: /* Set the event mask */
1168 return 1;
1169 default:
1170 return 0;
1171 }
1172 }
1173 return 0;
1174}
1175
1176static void diag_md_session_init(void)
1177{
1178 int i;
1179
1180 mutex_init(&driver->md_session_lock);
1181 driver->md_session_mask = 0;
1182 driver->md_session_mode = DIAG_MD_NONE;
1183 for (i = 0; i < NUM_MD_SESSIONS; i++)
1184 driver->md_session_map[i] = NULL;
1185}
1186
1187static void diag_md_session_exit(void)
1188{
1189 int i;
1190 struct diag_md_session_t *session_info = NULL;
1191
1192 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1193 if (driver->md_session_map[i]) {
1194 session_info = driver->md_session_map[i];
1195 diag_log_mask_free(session_info->log_mask);
1196 kfree(session_info->log_mask);
1197 session_info->log_mask = NULL;
1198 diag_msg_mask_free(session_info->msg_mask);
1199 kfree(session_info->msg_mask);
1200 session_info->msg_mask = NULL;
1201 diag_event_mask_free(session_info->event_mask);
1202 kfree(session_info->event_mask);
1203 session_info->event_mask = NULL;
1204 kfree(session_info);
1205 session_info = NULL;
1206 driver->md_session_map[i] = NULL;
1207 }
1208 }
1209 mutex_destroy(&driver->md_session_lock);
1210 driver->md_session_mask = 0;
1211 driver->md_session_mode = DIAG_MD_NONE;
1212}
1213
/*
 * Create a memory-device logging session owned by the calling process.
 *
 * @mode: requested session mode (currently unused inside this function;
 *        the session is always created in DIAG_MD_PERIPHERAL mode)
 * @peripheral_mask: bitmask of peripherals this session claims
 * @proc: processor id (currently unused inside this function)
 *
 * Copies the global log/msg/event masks into the session so the client
 * gets a private mask set. Returns 0 on success, -EINVAL if any
 * requested peripheral is already owned, -ENOMEM / mask-copy error
 * otherwise. All allocations are unwound on the fail_peripheral path.
 */
int diag_md_session_create(int mode, int peripheral_mask, int proc)
{
	int i;
	int err = 0;
	struct diag_md_session_t *new_session = NULL;

	/*
	 * If a session is running with a peripheral mask and a new session
	 * request comes in with same peripheral mask value then return
	 * invalid param
	 */
	/* NOTE(review): this pre-check reads md_session_mode/mask before
	 * taking md_session_lock — racy against a concurrent create;
	 * the per-index check below (under the lock) is authoritative.
	 */
	if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
	    (driver->md_session_mask & peripheral_mask) != 0)
		return -EINVAL;

	mutex_lock(&driver->md_session_lock);
	new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
	if (!new_session) {
		mutex_unlock(&driver->md_session_lock);
		return -ENOMEM;
	}

	new_session->peripheral_mask = 0;
	new_session->pid = current->tgid;
	new_session->task = current;

	/* Private copies of the three mask sets for this session */
	new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
					GFP_KERNEL);
	if (!new_session->log_mask) {
		err = -ENOMEM;
		goto fail_peripheral;
	}
	new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
					  GFP_KERNEL);
	if (!new_session->event_mask) {
		err = -ENOMEM;
		goto fail_peripheral;
	}
	new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
					GFP_KERNEL);
	if (!new_session->msg_mask) {
		err = -ENOMEM;
		goto fail_peripheral;
	}

	err = diag_log_mask_copy(new_session->log_mask, &log_mask);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "return value of log copy. err %d\n", err);
		goto fail_peripheral;
	}
	err = diag_event_mask_copy(new_session->event_mask, &event_mask);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "return value of event copy. err %d\n", err);
		goto fail_peripheral;
	}
	err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "return value of msg copy. err %d\n", err);
		goto fail_peripheral;
	}
	/* Claim each requested peripheral; the same session object is
	 * stored at every claimed index of md_session_map.
	 */
	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
			continue;
		if (driver->md_session_map[i] != NULL) {
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
				 "another instance present for %d\n", i);
			err = -EEXIST;
			goto fail_peripheral;
		}
		new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
		driver->md_session_map[i] = new_session;
		driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
	}
	/* Timer data is the owner pid (int cast to unsigned long) */
	setup_timer(&new_session->hdlc_reset_timer,
		    diag_md_hdlc_reset_timer_func,
		    new_session->pid);

	driver->md_session_mode = DIAG_MD_PERIPHERAL;
	mutex_unlock(&driver->md_session_lock);
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "created session in peripheral mode\n");
	return 0;

fail_peripheral:
	/* Unwind: mask frees are safe on partially-initialized masks */
	diag_log_mask_free(new_session->log_mask);
	kfree(new_session->log_mask);
	new_session->log_mask = NULL;
	diag_event_mask_free(new_session->event_mask);
	kfree(new_session->event_mask);
	new_session->event_mask = NULL;
	diag_msg_mask_free(new_session->msg_mask);
	kfree(new_session->msg_mask);
	new_session->msg_mask = NULL;
	kfree(new_session);
	new_session = NULL;
	mutex_unlock(&driver->md_session_lock);
	return err;
}
1315
/*
 * Destroy one memory-device session: unmap it from every peripheral
 * index, free its private mask copies, stop its HDLC reset timer and
 * free the session object. Recomputes md_session_mode afterwards
 * (DIAG_MD_NONE when no sessions remain).
 *
 * Takes md_session_lock internally — caller must NOT hold it.
 */
static void diag_md_session_close(struct diag_md_session_t *session_info)
{
	int i;
	uint8_t found = 0;

	if (!session_info)
		return;

	mutex_lock(&driver->md_session_lock);
	/* Remove every map entry that points at this session */
	for (i = 0; i < NUM_MD_SESSIONS; i++) {
		if (driver->md_session_map[i] != session_info)
			continue;
		driver->md_session_map[i] = NULL;
		driver->md_session_mask &= ~session_info->peripheral_mask;
	}
	diag_log_mask_free(session_info->log_mask);
	kfree(session_info->log_mask);
	session_info->log_mask = NULL;
	diag_msg_mask_free(session_info->msg_mask);
	kfree(session_info->msg_mask);
	session_info->msg_mask = NULL;
	diag_event_mask_free(session_info->event_mask);
	kfree(session_info->event_mask);
	session_info->event_mask = NULL;
	del_timer(&session_info->hdlc_reset_timer);

	/* Any session still mapped? Then stay in peripheral mode. */
	for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
		if (driver->md_session_map[i] != NULL)
			found = 1;
	}

	driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
	kfree(session_info);
	session_info = NULL;
	mutex_unlock(&driver->md_session_lock);
	DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
}
1353
1354struct diag_md_session_t *diag_md_session_get_pid(int pid)
1355{
1356 int i;
1357
1358 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1359 if (driver->md_session_map[i] &&
1360 driver->md_session_map[i]->pid == pid)
1361 return driver->md_session_map[i];
1362 }
1363 return NULL;
1364}
1365
1366struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
1367{
1368 if (peripheral >= NUM_MD_SESSIONS)
1369 return NULL;
1370 return driver->md_session_map[peripheral];
1371}
1372
1373static int diag_md_peripheral_switch(struct diag_md_session_t *session_info,
1374 int peripheral_mask, int req_mode) {
1375 int i, bit = 0;
1376
1377 if (!session_info)
1378 return -EINVAL;
1379 if (req_mode != DIAG_USB_MODE || req_mode != DIAG_MEMORY_DEVICE_MODE)
1380 return -EINVAL;
1381
1382 /*
1383 * check that md_session_map for i == session_info,
1384 * if not then race condition occurred and bail
1385 */
1386 mutex_lock(&driver->md_session_lock);
1387 for (i = 0; i < NUM_MD_SESSIONS; i++) {
1388 bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
1389 if (!bit)
1390 continue;
1391 if (req_mode == DIAG_USB_MODE) {
1392 if (driver->md_session_map[i] != session_info) {
1393 mutex_unlock(&driver->md_session_lock);
1394 return -EINVAL;
1395 }
1396 driver->md_session_map[i] = NULL;
1397 driver->md_session_mask &= ~bit;
1398 session_info->peripheral_mask &= ~bit;
1399
1400 } else {
1401 if (driver->md_session_map[i] != NULL) {
1402 mutex_unlock(&driver->md_session_lock);
1403 return -EINVAL;
1404 }
1405 driver->md_session_map[i] = session_info;
1406 driver->md_session_mask |= bit;
1407 session_info->peripheral_mask |= bit;
1408
1409 }
1410 }
1411
1412 driver->md_session_mode = DIAG_MD_PERIPHERAL;
1413 mutex_unlock(&driver->md_session_lock);
1414 DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
1415 peripheral_mask, req_mode);
1416}
1417
/*
 * Decide whether a logging-mode switch requested by the current process
 * is allowed, and perform the session bookkeeping it requires.
 *
 * @curr_mode: current driver logging mode (USB/MD/MULTI)
 * @req_mode:  requested mode (only DIAG_USB_MODE or
 *             DIAG_MEMORY_DEVICE_MODE are accepted)
 * @param:     request parameters; peripheral_mask already translated
 * @change_mode: out-param — set to 1 when the caller should go ahead
 *             and switch the mux, 0 when no switch is needed
 *
 * Returns 0 on success, -EINVAL when another session owns a requested
 * peripheral (or args are invalid), -EIO on NULL pointers, or an error
 * from session create/switch.
 */
static int diag_md_session_check(int curr_mode, int req_mode,
				 const struct diag_logging_mode_param_t *param,
				 uint8_t *change_mode)
{
	int i, bit = 0, err = 0;
	int change_mask = 0;
	struct diag_md_session_t *session_info = NULL;

	if (!param || !change_mode)
		return -EIO;

	*change_mode = 0;

	switch (curr_mode) {
	case DIAG_USB_MODE:
	case DIAG_MEMORY_DEVICE_MODE:
	case DIAG_MULTI_MODE:
		break;
	default:
		return -EINVAL;
	}

	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
		return -EINVAL;

	if (req_mode == DIAG_USB_MODE) {
		if (curr_mode == DIAG_USB_MODE)
			return 0;
		/* No sessions at all: a plain mux switch suffices */
		if (driver->md_session_mode == DIAG_MD_NONE
		    && driver->md_session_mask == 0 && driver->logging_mask) {
			*change_mode = 1;
			return 0;
		}

		/*
		 * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
		 * Check if requested peripherals are already in usb mode
		 */
		for (i = 0; i < NUM_MD_SESSIONS; i++) {
			bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
			if (!bit)
				continue;
			if (bit & driver->logging_mask)
				change_mask |= bit;
		}
		if (!change_mask)
			return 0;

		/*
		 * Change is needed. Check if this md_session has set all the
		 * requested peripherals. If another md session set a requested
		 * peripheral then we cannot switch that peripheral to USB.
		 * If this session owns all the requested peripherals, then
		 * call function to switch the modes/masks for the md_session
		 */
		session_info = diag_md_session_get_pid(current->tgid);
		if (!session_info) {
			*change_mode = 1;
			return 0;
		}
		if ((change_mask & session_info->peripheral_mask)
		    != change_mask) {
			DIAG_LOG(DIAG_DEBUG_USERSPACE,
				 "Another MD Session owns a requested peripheral\n");
			return -EINVAL;
		}
		*change_mode = 1;

		/* If all peripherals are being set to USB Mode, call close */
		if (~change_mask & session_info->peripheral_mask) {
			/* Partial release: keep session, drop some bits */
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else
			diag_md_session_close(session_info);

		return err;

	} else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
		/*
		 * Get bit mask that represents what peripherals already have
		 * been set. Check that requested peripherals already set are
		 * owned by this md session
		 */
		change_mask = driver->md_session_mask & param->peripheral_mask;
		session_info = diag_md_session_get_pid(current->tgid);

		if (session_info) {
			if ((session_info->peripheral_mask & change_mask)
			    != change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
					 "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			/*
			 * NOTE(review): this passes DIAG_USB_MODE while
			 * handling a switch INTO memory-device mode — looks
			 * suspicious (releases the already-owned bits rather
			 * than claiming new ones); confirm intended behavior.
			 */
			err = diag_md_peripheral_switch(session_info,
					change_mask, DIAG_USB_MODE);
		} else {
			if (change_mask) {
				DIAG_LOG(DIAG_DEBUG_USERSPACE,
					 "Another MD Session owns a requested peripheral\n");
				return -EINVAL;
			}
			err = diag_md_session_create(DIAG_MD_PERIPHERAL,
				param->peripheral_mask, DIAG_LOCAL_PROC);
		}
		*change_mode = 1;
		return err;
	}
	return -EINVAL;
}
1527
1528static uint32_t diag_translate_mask(uint32_t peripheral_mask)
1529{
1530 uint32_t ret = 0;
1531
1532 if (peripheral_mask & DIAG_CON_APSS)
1533 ret |= (1 << APPS_DATA);
1534 if (peripheral_mask & DIAG_CON_MPSS)
1535 ret |= (1 << PERIPHERAL_MODEM);
1536 if (peripheral_mask & DIAG_CON_LPASS)
1537 ret |= (1 << PERIPHERAL_LPASS);
1538 if (peripheral_mask & DIAG_CON_WCNSS)
1539 ret |= (1 << PERIPHERAL_WCNSS);
1540 if (peripheral_mask & DIAG_CON_SENSORS)
1541 ret |= (1 << PERIPHERAL_SENSORS);
1542 if (peripheral_mask & DIAG_CON_WDSP)
1543 ret |= (1 << PERIPHERAL_WDSP);
Sreelakshmi Gownipalli588a31d2016-11-02 13:33:43 -07001544 if (peripheral_mask & DIAG_CON_CDSP)
1545 ret |= (1 << PERIPHERAL_CDSP);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07001546
1547 return ret;
1548}
1549
/*
 * Handle a user-space request to switch the logging mode (USB vs
 * memory-device; callback/UART/socket map onto memory-device).
 *
 * Validates the request, lets diag_md_session_check() arbitrate session
 * ownership, then switches the mux and updates real-time voting.
 * Returns 0 on success or when no switch is needed, negative errno on
 * failure. NOTE: param->peripheral_mask is rewritten in place to the
 * translated internal mask.
 */
static int diag_switch_logging(struct diag_logging_mode_param_t *param)
{
	int new_mode;
	int curr_mode;
	int err = 0;
	uint8_t do_switch = 1;
	uint32_t peripheral_mask = 0;

	if (!param)
		return -EINVAL;

	if (!param->peripheral_mask) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "asking for mode switch with no peripheral mask set\n");
		return -EINVAL;
	}

	/* Translate user-space DIAG_CON_* bits to internal peripheral bits */
	peripheral_mask = diag_translate_mask(param->peripheral_mask);
	param->peripheral_mask = peripheral_mask;

	switch (param->req_mode) {
	case CALLBACK_MODE:
	case UART_MODE:
	case SOCKET_MODE:
	case MEMORY_DEVICE_MODE:
		new_mode = DIAG_MEMORY_DEVICE_MODE;
		break;
	case USB_MODE:
		new_mode = DIAG_USB_MODE;
		break;
	default:
		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
		       __func__, param->req_mode);
		return -EINVAL;
	}

	curr_mode = driver->logging_mode;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "request to switch logging from %d mask:%0x to %d mask:%0x\n",
		 curr_mode, driver->md_session_mask, new_mode, peripheral_mask);

	/* Session arbitration may decide no actual switch is required */
	err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
	if (err) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "err from diag_md_session_check, err: %d\n", err);
		return err;
	}

	if (do_switch == 0) {
		DIAG_LOG(DIAG_DEBUG_USERSPACE,
			 "not switching modes c: %d n: %d\n",
			 curr_mode, new_mode);
		return 0;
	}

	diag_ws_reset(DIAG_WS_MUX);
	err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
	if (err) {
		pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
		       __func__, curr_mode, new_mode, err);
		driver->logging_mode = curr_mode;
		goto fail;
	}
	driver->logging_mode = new_mode;
	driver->logging_mask = peripheral_mask;
	DIAG_LOG(DIAG_DEBUG_USERSPACE,
		 "Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);

	/* Update to take peripheral_mask */
	if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
		diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
					   MODE_REALTIME, ALL_PROC);
	} else {
		diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
				      ALL_PROC);
	}

	if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
	      curr_mode == DIAG_USB_MODE)) {
		queue_work(driver->diag_real_time_wq,
			   &driver->diag_real_time_work);
	}

	return 0;
fail:
	return err;
}
1637
1638static int diag_ioctl_dci_reg(unsigned long ioarg)
1639{
1640 int result = -EINVAL;
1641 struct diag_dci_reg_tbl_t dci_reg_params;
1642
1643 if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
1644 sizeof(struct diag_dci_reg_tbl_t)))
1645 return -EFAULT;
1646
1647 result = diag_dci_register_client(&dci_reg_params);
1648
1649 return result;
1650}
1651
1652static int diag_ioctl_dci_health_stats(unsigned long ioarg)
1653{
1654 int result = -EINVAL;
1655 struct diag_dci_health_stats_proc stats;
1656
1657 if (copy_from_user(&stats, (void __user *)ioarg,
1658 sizeof(struct diag_dci_health_stats_proc)))
1659 return -EFAULT;
1660
1661 result = diag_dci_copy_health_stats(&stats);
1662 if (result == DIAG_DCI_NO_ERROR) {
1663 if (copy_to_user((void __user *)ioarg, &stats,
1664 sizeof(struct diag_dci_health_stats_proc)))
1665 return -EFAULT;
1666 }
1667
1668 return result;
1669}
1670
1671static int diag_ioctl_dci_log_status(unsigned long ioarg)
1672{
1673 struct diag_log_event_stats le_stats;
1674 struct diag_dci_client_tbl *dci_client = NULL;
1675
1676 if (copy_from_user(&le_stats, (void __user *)ioarg,
1677 sizeof(struct diag_log_event_stats)))
1678 return -EFAULT;
1679
1680 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1681 if (!dci_client)
1682 return DIAG_DCI_NOT_SUPPORTED;
1683 le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
1684 if (copy_to_user((void __user *)ioarg, &le_stats,
1685 sizeof(struct diag_log_event_stats)))
1686 return -EFAULT;
1687
1688 return DIAG_DCI_NO_ERROR;
1689}
1690
1691static int diag_ioctl_dci_event_status(unsigned long ioarg)
1692{
1693 struct diag_log_event_stats le_stats;
1694 struct diag_dci_client_tbl *dci_client = NULL;
1695
1696 if (copy_from_user(&le_stats, (void __user *)ioarg,
1697 sizeof(struct diag_log_event_stats)))
1698 return -EFAULT;
1699
1700 dci_client = diag_dci_get_client_entry(le_stats.client_id);
1701 if (!dci_client)
1702 return DIAG_DCI_NOT_SUPPORTED;
1703
1704 le_stats.is_set = diag_dci_query_event_mask(dci_client, le_stats.code);
1705 if (copy_to_user((void __user *)ioarg, &le_stats,
1706 sizeof(struct diag_log_event_stats)))
1707 return -EFAULT;
1708
1709 return DIAG_DCI_NO_ERROR;
1710}
1711
/*
 * IOCTL helper: mark the calling client for de-initialization.
 *
 * Finds the caller in client_map under diagchar_mutex, flags
 * DEINIT_TYPE in its data_ready bits and wakes readers so the client's
 * blocked read returns. Returns 1 on success, -EINVAL if the caller is
 * not a registered client.
 */
static int diag_ioctl_lsm_deinit(void)
{
	int i;

	/* Lock guards both the client_map scan and the data_ready update */
	mutex_lock(&driver->diagchar_mutex);
	for (i = 0; i < driver->num_clients; i++)
		if (driver->client_map[i].pid == current->tgid)
			break;

	if (i == driver->num_clients) {
		mutex_unlock(&driver->diagchar_mutex);
		return -EINVAL;
	}

	driver->data_ready[i] |= DEINIT_TYPE;
	mutex_unlock(&driver->diagchar_mutex);
	wake_up_interruptible(&driver->wait_q);

	return 1;
}
1732
/*
 * IOCTL helper: record a real-time vs buffered-mode vote from user
 * space, either on behalf of a DCI client (vote.proc == DIAG_PROC_DCI)
 * or for a memory-device proc, then kick the real-time worker.
 *
 * real_time_update_busy is incremented here; the queued
 * diag_real_time_work is expected to decrement it (see the retry loop
 * in diag_ioctl_get_real_time).
 */
static int diag_ioctl_vote_real_time(unsigned long ioarg)
{
	int real_time = 0;
	int temp_proc = ALL_PROC;
	struct real_time_vote_t vote;
	struct diag_dci_client_tbl *dci_client = NULL;

	if (copy_from_user(&vote, (void __user *)ioarg,
			   sizeof(struct real_time_vote_t)))
		return -EFAULT;

	if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
	    vote.real_time_vote > MODE_UNKNOWN ||
	    vote.client_id < 0) {
		pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
		       __func__, vote.proc, vote.real_time_vote,
		       vote.client_id);
		return -EINVAL;
	}

	driver->real_time_update_busy++;
	if (vote.proc == DIAG_PROC_DCI) {
		/* DCI votes are aggregated across all clients of the token */
		dci_client = diag_dci_get_client_entry(vote.client_id);
		if (!dci_client) {
			driver->real_time_update_busy--;
			return DIAG_DCI_NOT_SUPPORTED;
		}
		diag_dci_set_real_time(dci_client, vote.real_time_vote);
		real_time = diag_dci_get_cumulative_real_time(
					dci_client->client_info.token);
		diag_update_real_time_vote(vote.proc, real_time,
					   dci_client->client_info.token);
	} else {
		/* Memory-device path: client_id doubles as the proc id */
		real_time = vote.real_time_vote;
		temp_proc = vote.client_id;
		diag_update_real_time_vote(vote.proc, real_time,
					   temp_proc);
	}
	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
	return 0;
}
1774
/*
 * IOCTL helper: report the current real-time mode for a processor.
 *
 * Waits (bounded: 3 retries x 5 x ~10ms sleeps) for any in-flight
 * real-time vote processing to finish; returns -EAGAIN if it is still
 * busy. For the local proc, a peripheral in a buffering mode forces the
 * reported mode to MODE_UNKNOWN.
 */
static int diag_ioctl_get_real_time(unsigned long ioarg)
{
	int i;
	int retry_count = 0;
	int timer = 0;
	struct real_time_query_t rt_query;

	if (copy_from_user(&rt_query, (void __user *)ioarg,
			   sizeof(struct real_time_query_t)))
		return -EFAULT;
	while (retry_count < 3) {
		if (driver->real_time_update_busy > 0) {
			retry_count++;
			/*
			 * The value 10000 was chosen empirically as an
			 * optimum value in order to give the work in
			 * diag_real_time_wq to complete processing.
			 */
			for (timer = 0; timer < 5; timer++)
				usleep_range(10000, 10100);
		} else {
			break;
		}
	}

	if (driver->real_time_update_busy > 0)
		return -EAGAIN;

	/* Validate proc only after the busy-wait settles */
	if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
		pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
		       __func__);
		return -EINVAL;
	}
	rt_query.real_time = driver->real_time_mode[rt_query.proc];
	/*
	 * For the local processor, if any of the peripherals is in buffering
	 * mode, overwrite the value of real time with UNKNOWN_MODE
	 */
	if (rt_query.proc == DIAG_LOCAL_PROC) {
		for (i = 0; i < NUM_PERIPHERALS; i++) {
			if (!driver->feature[i].peripheral_buffering)
				continue;
			switch (driver->buffering_mode[i].mode) {
			case DIAG_BUFFERING_MODE_CIRCULAR:
			case DIAG_BUFFERING_MODE_THRESHOLD:
				rt_query.real_time = MODE_UNKNOWN;
				break;
			}
		}
	}

	if (copy_to_user((void __user *)ioarg, &rt_query,
			 sizeof(struct real_time_query_t)))
		return -EFAULT;

	return 0;
}
1832
1833static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
1834{
1835 struct diag_buffering_mode_t params;
1836
1837 if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
1838 return -EFAULT;
1839
1840 if (params.peripheral >= NUM_PERIPHERALS)
1841 return -EINVAL;
1842
1843 mutex_lock(&driver->mode_lock);
1844 driver->buffering_flag[params.peripheral] = 1;
1845 mutex_unlock(&driver->mode_lock);
1846
1847 return diag_send_peripheral_buffering_mode(&params);
1848}
1849
1850static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
1851{
1852 uint8_t peripheral;
1853
1854 if (copy_from_user(&peripheral, (void __user *)ioarg, sizeof(uint8_t)))
1855 return -EFAULT;
1856
1857 if (peripheral >= NUM_PERIPHERALS) {
1858 pr_err("diag: In %s, invalid peripheral %d\n", __func__,
1859 peripheral);
1860 return -EINVAL;
1861 }
1862
1863 if (!driver->feature[peripheral].peripheral_buffering) {
1864 pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
1865 __func__, peripheral);
1866 return -EIO;
1867 }
1868
1869 return diag_send_peripheral_drain_immediate(peripheral);
1870}
1871
1872static int diag_ioctl_dci_support(unsigned long ioarg)
1873{
1874 struct diag_dci_peripherals_t dci_support;
1875 int result = -EINVAL;
1876
1877 if (copy_from_user(&dci_support, (void __user *)ioarg,
1878 sizeof(struct diag_dci_peripherals_t)))
1879 return -EFAULT;
1880
1881 result = diag_dci_get_support_list(&dci_support);
1882 if (result == DIAG_DCI_NO_ERROR)
1883 if (copy_to_user((void __user *)ioarg, &dci_support,
1884 sizeof(struct diag_dci_peripherals_t)))
1885 return -EFAULT;
1886
1887 return result;
1888}
1889
/*
 * IOCTL helper: enable/disable HDLC encoding for the caller. If the
 * caller owns a memory-device session the setting is per-session,
 * otherwise it is applied driver-wide. Notifies md clients afterwards.
 */
static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
{
	uint8_t hdlc_support;
	struct diag_md_session_t *session_info = NULL;

	/*
	 * NOTE(review): the session pointer is looked up before
	 * md_session_lock is taken — a concurrent session close could
	 * free it in between; confirm callers serialize this path.
	 */
	session_info = diag_md_session_get_pid(current->tgid);
	if (copy_from_user(&hdlc_support, (void __user *)ioarg,
			   sizeof(uint8_t)))
		return -EFAULT;
	mutex_lock(&driver->hdlc_disable_mutex);
	if (session_info) {
		mutex_lock(&driver->md_session_lock);
		session_info->hdlc_disabled = hdlc_support;
		mutex_unlock(&driver->md_session_lock);
	} else
		driver->hdlc_disabled = hdlc_support;
	mutex_unlock(&driver->hdlc_disable_mutex);
	diag_update_md_clients(HDLC_SUPPORT_TYPE);

	return 0;
}
1911
1912static int diag_ioctl_register_callback(unsigned long ioarg)
1913{
1914 int err = 0;
1915 struct diag_callback_reg_t reg;
1916
1917 if (copy_from_user(&reg, (void __user *)ioarg,
1918 sizeof(struct diag_callback_reg_t))) {
1919 return -EFAULT;
1920 }
1921
1922 if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
1923 pr_err("diag: In %s, invalid proc %d for callback registration\n",
1924 __func__, reg.proc);
1925 return -EINVAL;
1926 }
1927
1928 if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
1929 return -EIO;
1930
1931 return err;
1932}
1933
1934static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
1935{
1936 int i;
1937 int err = 0;
1938 uint32_t count = 0;
1939 struct diag_cmd_reg_entry_t *entries = NULL;
1940 const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
1941
1942
1943 if (!reg_tbl) {
1944 pr_err("diag: In %s, invalid registration table\n", __func__);
1945 return -EINVAL;
1946 }
1947
1948 count = reg_tbl->count;
1949 if ((UINT_MAX / entry_len) < count) {
1950 pr_warn("diag: In %s, possbile integer overflow.\n", __func__);
1951 return -EFAULT;
1952 }
1953
1954 entries = kzalloc(count * entry_len, GFP_KERNEL);
1955 if (!entries)
1956 return -ENOMEM;
1957
1958
1959 err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
1960 if (err) {
1961 pr_err("diag: In %s, error copying data from userspace, err: %d\n",
1962 __func__, err);
1963 kfree(entries);
1964 return -EFAULT;
1965 }
1966
1967 for (i = 0; i < count; i++) {
1968 err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
1969 if (err) {
1970 pr_err("diag: In %s, unable to register command, err: %d\n",
1971 __func__, err);
1972 break;
1973 }
1974 }
1975
1976 kfree(entries);
1977 return err;
1978}
1979
1980static int diag_ioctl_cmd_reg(unsigned long ioarg)
1981{
1982 struct diag_cmd_reg_tbl_t reg_tbl;
1983
1984 if (copy_from_user(&reg_tbl, (void __user *)ioarg,
1985 sizeof(struct diag_cmd_reg_tbl_t))) {
1986 return -EFAULT;
1987 }
1988
1989 return diag_cmd_register_tbl(&reg_tbl);
1990}
1991
/* Remove every command registration owned by the calling process. */
static int diag_ioctl_cmd_dereg(void)
{
	diag_cmd_remove_reg_by_pid(current->tgid);
	return 0;
}
1997
1998#ifdef CONFIG_COMPAT
1999/*
2000 * @sync_obj_name: name of the synchronization object associated with this proc
2001 * @count: number of entries in the bind
2002 * @params: the actual packet registrations
2003 */
struct diag_cmd_reg_tbl_compat_t {
	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];	/* sync object name */
	uint32_t count;			/* number of entries at 'entries' */
	compat_uptr_t entries;		/* 32-bit user ptr to registrations */
};
2009
2010static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
2011{
2012 struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
2013 struct diag_cmd_reg_tbl_t reg_tbl;
2014
2015 if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
2016 sizeof(struct diag_cmd_reg_tbl_compat_t))) {
2017 return -EFAULT;
2018 }
2019
2020 strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
2021 MAX_SYNC_OBJ_NAME_SIZE);
2022 reg_tbl.count = reg_tbl_compat.count;
2023 reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
2024 (uintptr_t)reg_tbl_compat.entries;
2025
2026 return diag_cmd_register_tbl(&reg_tbl);
2027}
2028
/*
 * 32-bit-compat ioctl entry point for the diag char device.
 *
 * Dispatches each DIAG_IOCTL_* command to its helper; DCI commands are
 * serialized under dci_mutex and logging switches under
 * diagchar_mutex. Unknown commands fall through and return -EINVAL
 * (the initial value of result — there is no default case). Only
 * DIAG_IOCTL_COMMAND_REG needs a compat-specific struct translation.
 */
long diagchar_compat_ioctl(struct file *filp,
			   unsigned int iocmd, unsigned long ioarg)
{
	int result = -EINVAL;
	int client_id = 0;
	uint16_t delayed_rsp_id = 0;
	uint16_t remote_dev;
	struct diag_dci_client_tbl *dci_client = NULL;
	struct diag_logging_mode_param_t mode_param;

	switch (iocmd) {
	case DIAG_IOCTL_COMMAND_REG:
		/* compat variant: 32-bit entries pointer must be widened */
		result = diag_ioctl_cmd_reg_compat(ioarg);
		break;
	case DIAG_IOCTL_COMMAND_DEREG:
		result = diag_ioctl_cmd_dereg();
		break;
	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
		delayed_rsp_id = diag_get_next_delayed_rsp_id();
		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
				 sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 0;
		break;
	case DIAG_IOCTL_DCI_REG:
		result = diag_ioctl_dci_reg(ioarg);
		break;
	case DIAG_IOCTL_DCI_DEINIT:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
				   sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		dci_client = diag_dci_get_client_entry(client_id);
		if (!dci_client) {
			mutex_unlock(&driver->dci_mutex);
			return DIAG_DCI_NOT_SUPPORTED;
		}
		result = diag_dci_deinit_client(dci_client);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_SUPPORT:
		result = diag_ioctl_dci_support(ioarg);
		break;
	case DIAG_IOCTL_DCI_HEALTH_STATS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_health_stats(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_LOG_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_log_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_EVENT_STATUS:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_dci_event_status(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_LOGS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
				   sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_log_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
		mutex_lock(&driver->dci_mutex);
		if (copy_from_user(&client_id, (void __user *)ioarg,
				   sizeof(int))) {
			mutex_unlock(&driver->dci_mutex);
			return -EFAULT;
		}
		result = diag_dci_clear_event_mask(client_id);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_LSM_DEINIT:
		result = diag_ioctl_lsm_deinit();
		break;
	case DIAG_IOCTL_SWITCH_LOGGING:
		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
				   sizeof(mode_param)))
			return -EFAULT;
		mutex_lock(&driver->diagchar_mutex);
		result = diag_switch_logging(&mode_param);
		mutex_unlock(&driver->diagchar_mutex);
		break;
	case DIAG_IOCTL_REMOTE_DEV:
		remote_dev = diag_get_remote_device_mask();
		if (copy_to_user((void __user *)ioarg, &remote_dev,
				 sizeof(uint16_t)))
			result = -EFAULT;
		else
			result = 1;
		break;
	case DIAG_IOCTL_VOTE_REAL_TIME:
		mutex_lock(&driver->dci_mutex);
		result = diag_ioctl_vote_real_time(ioarg);
		mutex_unlock(&driver->dci_mutex);
		break;
	case DIAG_IOCTL_GET_REAL_TIME:
		result = diag_ioctl_get_real_time(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
		result = diag_ioctl_set_buffering_mode(ioarg);
		break;
	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
		result = diag_ioctl_peripheral_drain_immediate(ioarg);
		break;
	case DIAG_IOCTL_REGISTER_CALLBACK:
		result = diag_ioctl_register_callback(ioarg);
		break;
	case DIAG_IOCTL_HDLC_TOGGLE:
		result = diag_ioctl_hdlc_toggle(ioarg);
		break;
	}
	return result;
}
2152#endif
2153
2154long diagchar_ioctl(struct file *filp,
2155 unsigned int iocmd, unsigned long ioarg)
2156{
2157 int result = -EINVAL;
2158 int client_id = 0;
2159 uint16_t delayed_rsp_id;
2160 uint16_t remote_dev;
2161 struct diag_dci_client_tbl *dci_client = NULL;
2162 struct diag_logging_mode_param_t mode_param;
2163
2164 switch (iocmd) {
2165 case DIAG_IOCTL_COMMAND_REG:
2166 result = diag_ioctl_cmd_reg(ioarg);
2167 break;
2168 case DIAG_IOCTL_COMMAND_DEREG:
2169 result = diag_ioctl_cmd_dereg();
2170 break;
2171 case DIAG_IOCTL_GET_DELAYED_RSP_ID:
2172 delayed_rsp_id = diag_get_next_delayed_rsp_id();
2173 if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
2174 sizeof(uint16_t)))
2175 result = -EFAULT;
2176 else
2177 result = 0;
2178 break;
2179 case DIAG_IOCTL_DCI_REG:
2180 result = diag_ioctl_dci_reg(ioarg);
2181 break;
2182 case DIAG_IOCTL_DCI_DEINIT:
2183 mutex_lock(&driver->dci_mutex);
2184 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2185 sizeof(int))) {
2186 mutex_unlock(&driver->dci_mutex);
2187 return -EFAULT;
2188 }
2189 dci_client = diag_dci_get_client_entry(client_id);
2190 if (!dci_client) {
2191 mutex_unlock(&driver->dci_mutex);
2192 return DIAG_DCI_NOT_SUPPORTED;
2193 }
2194 result = diag_dci_deinit_client(dci_client);
2195 mutex_unlock(&driver->dci_mutex);
2196 break;
2197 case DIAG_IOCTL_DCI_SUPPORT:
2198 result = diag_ioctl_dci_support(ioarg);
2199 break;
2200 case DIAG_IOCTL_DCI_HEALTH_STATS:
2201 mutex_lock(&driver->dci_mutex);
2202 result = diag_ioctl_dci_health_stats(ioarg);
2203 mutex_unlock(&driver->dci_mutex);
2204 break;
2205 case DIAG_IOCTL_DCI_LOG_STATUS:
2206 mutex_lock(&driver->dci_mutex);
2207 result = diag_ioctl_dci_log_status(ioarg);
2208 mutex_unlock(&driver->dci_mutex);
2209 break;
2210 case DIAG_IOCTL_DCI_EVENT_STATUS:
2211 result = diag_ioctl_dci_event_status(ioarg);
2212 break;
2213 case DIAG_IOCTL_DCI_CLEAR_LOGS:
2214 mutex_lock(&driver->dci_mutex);
2215 if (copy_from_user((void *)&client_id, (void __user *)ioarg,
2216 sizeof(int))) {
2217 mutex_unlock(&driver->dci_mutex);
2218 return -EFAULT;
2219 }
2220 result = diag_dci_clear_log_mask(client_id);
2221 mutex_unlock(&driver->dci_mutex);
2222 break;
2223 case DIAG_IOCTL_DCI_CLEAR_EVENTS:
2224 mutex_lock(&driver->dci_mutex);
2225 if (copy_from_user(&client_id, (void __user *)ioarg,
2226 sizeof(int))) {
2227 mutex_unlock(&driver->dci_mutex);
2228 return -EFAULT;
2229 }
2230 result = diag_dci_clear_event_mask(client_id);
2231 mutex_unlock(&driver->dci_mutex);
2232 break;
2233 case DIAG_IOCTL_LSM_DEINIT:
2234 result = diag_ioctl_lsm_deinit();
2235 break;
2236 case DIAG_IOCTL_SWITCH_LOGGING:
2237 if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
2238 sizeof(mode_param)))
2239 return -EFAULT;
2240 mutex_lock(&driver->diagchar_mutex);
2241 result = diag_switch_logging(&mode_param);
2242 mutex_unlock(&driver->diagchar_mutex);
2243 break;
2244 case DIAG_IOCTL_REMOTE_DEV:
2245 remote_dev = diag_get_remote_device_mask();
2246 if (copy_to_user((void __user *)ioarg, &remote_dev,
2247 sizeof(uint16_t)))
2248 result = -EFAULT;
2249 else
2250 result = 1;
2251 break;
2252 case DIAG_IOCTL_VOTE_REAL_TIME:
2253 mutex_lock(&driver->dci_mutex);
2254 result = diag_ioctl_vote_real_time(ioarg);
2255 mutex_unlock(&driver->dci_mutex);
2256 break;
2257 case DIAG_IOCTL_GET_REAL_TIME:
2258 result = diag_ioctl_get_real_time(ioarg);
2259 break;
2260 case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
2261 result = diag_ioctl_set_buffering_mode(ioarg);
2262 break;
2263 case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
2264 result = diag_ioctl_peripheral_drain_immediate(ioarg);
2265 break;
2266 case DIAG_IOCTL_REGISTER_CALLBACK:
2267 result = diag_ioctl_register_callback(ioarg);
2268 break;
2269 case DIAG_IOCTL_HDLC_TOGGLE:
2270 result = diag_ioctl_hdlc_toggle(ioarg);
2271 break;
2272 }
2273 return result;
2274}
2275
/*
 * diag_process_apps_data_hdlc - HDLC-encode an apps packet and aggregate it
 * into the shared hdlc_data buffer before handing it to the mux layer.
 *
 * @buf: source packet (already copied out of user space by the caller)
 * @len: length of @buf in bytes
 * @pkt_type: data type; DATA_TYPE_RESPONSE forces an immediate flush
 *
 * Returns PKT_ALLOC when the packet was encoded into the aggregation
 * buffer, PKT_DROP when no pool buffer could be obtained, -EIO on invalid
 * arguments or mux write failure, -EBADMSG when the worst-case encoded
 * size exceeds DIAG_MAX_HDLC_BUF_SIZE.
 *
 * NOTE(review): the caller (diag_user_process_apps_data) holds
 * apps_data_mutex and hdlc_disable_mutex around this call; hdlc_data is
 * shared state, so confirm those locks are held before calling from a new
 * path.
 */
static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
				       int pkt_type)
{
	int err = 0;
	int ret = PKT_DROP;
	struct diag_apps_data_t *data = &hdlc_data;
	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
	/*
	 * The maximum encoded size of the buffer can be atmost twice the length
	 * of the packet. Add three bytes for footer - 16 bit CRC (2 bytes) +
	 * delimiter (1 byte).
	 */
	const uint32_t max_encoded_size = ((2 * len) + 3);

	if (!buf || len <= 0) {
		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
		       __func__, buf, len);
		return -EIO;
	}

	if (max_encoded_size > DIAG_MAX_HDLC_BUF_SIZE) {
		pr_err_ratelimited("diag: In %s, encoded data is larger %d than the buffer size %d\n",
		       __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
		return -EBADMSG;
	}

	send.state = DIAG_STATE_START;
	send.pkt = buf;
	send.last = (void *)(buf + len - 1);
	send.terminate = 1;

	/* Lazily allocate the aggregation buffer on first use. */
	if (!data->buf)
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
	if (!data->buf) {
		ret = PKT_DROP;
		goto fail_ret;
	}

	/*
	 * Not enough room for the worst-case encoding: flush the current
	 * aggregation buffer to the mux and start over with a fresh one.
	 * Ownership of data->buf passes to diag_mux_write on success.
	 */
	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}
	}

	enc.dest = data->buf + data->len;
	enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
	diag_hdlc_encode(&send, &enc);

	/*
	 * This is to check if after HDLC encoding, we are still within
	 * the limits of aggregation buffer. If not, we write out the
	 * current buffer and start aggregation in a newly allocated
	 * buffer.
	 */
	if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
					       DIAG_MAX_HDLC_BUF_SIZE)) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}

		/* Re-encode the same packet into the fresh buffer. */
		enc.dest = data->buf + data->len;
		enc.dest_last = (void *)(data->buf + data->len +
					 max_encoded_size);
		diag_hdlc_encode(&send, &enc);
	}

	/* Clamp the new aggregate length to the buffer capacity. */
	data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
		     DIAG_MAX_HDLC_BUF_SIZE) ?
		     ((uintptr_t)enc.dest - (uintptr_t)data->buf) :
		     DIAG_MAX_HDLC_BUF_SIZE;

	/* Responses must not linger in the aggregation buffer: flush now. */
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
	}

	return PKT_ALLOC;

fail_free_buf:
	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
	data->buf = NULL;
	data->len = 0;

fail_ret:
	return ret;
}
2395
/*
 * diag_process_apps_data_non_hdlc - frame an apps packet with the non-HDLC
 * header and aggregate it into the shared non_hdlc_data buffer before
 * handing it to the mux layer.
 *
 * @buf: source packet (already copied out of user space by the caller)
 * @len: length of @buf in bytes
 * @pkt_type: data type; DATA_TYPE_RESPONSE forces an immediate flush
 *
 * Returns PKT_ALLOC when the packet was framed into the aggregation
 * buffer, PKT_DROP when no pool buffer could be obtained, -EIO on invalid
 * arguments or mux write failure.
 *
 * NOTE(review): same locking expectation as the HDLC variant — caller is
 * assumed to hold apps_data_mutex/hdlc_disable_mutex.
 */
static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
					   int pkt_type)
{
	int err = 0;
	int ret = PKT_DROP;
	struct diag_pkt_frame_t header;
	struct diag_apps_data_t *data = &non_hdlc_data;
	/*
	 * The maximum packet size, when the data is non hdlc encoded is equal
	 * to the size of the packet frame header and the length. Add 1 for the
	 * delimiter 0x7E at the end.
	 */
	const uint32_t max_pkt_size = sizeof(header) + len + 1;

	if (!buf || len <= 0) {
		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
		       __func__, buf, len);
		return -EIO;
	}

	/* Lazily allocate the aggregation buffer on first use. */
	if (!data->buf) {
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}
	}

	/*
	 * Not enough room for this frame: flush the current aggregation
	 * buffer to the mux (which takes ownership on success) and start
	 * over with a fresh one.
	 */
	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
					APF_DIAG_PADDING,
					  POOL_TYPE_HDLC);
		if (!data->buf) {
			ret = PKT_DROP;
			goto fail_ret;
		}
	}

	/* Frame layout: header (start/version/length), payload, 0x7E. */
	header.start = CONTROL_CHAR;
	header.version = 1;
	header.length = len;
	memcpy(data->buf + data->len, &header, sizeof(header));
	data->len += sizeof(header);
	memcpy(data->buf + data->len, buf, len);
	data->len += len;
	*(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
	data->len += sizeof(uint8_t);
	/* Responses must not linger in the aggregation buffer: flush now. */
	if (pkt_type == DATA_TYPE_RESPONSE) {
		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
				     data->ctxt);
		if (err) {
			ret = -EIO;
			goto fail_free_buf;
		}
		data->buf = NULL;
		data->len = 0;
	}

	return PKT_ALLOC;

fail_free_buf:
	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
	data->buf = NULL;
	data->len = 0;

fail_ret:
	return ret;
}
2474
2475static int diag_user_process_dci_data(const char __user *buf, int len)
2476{
2477 int err = 0;
2478 const int mempool = POOL_TYPE_USER;
2479 unsigned char *user_space_data = NULL;
2480
2481 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2482 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2483 __func__, buf, len);
2484 return -EBADMSG;
2485 }
2486
2487 user_space_data = diagmem_alloc(driver, len, mempool);
2488 if (!user_space_data)
2489 return -ENOMEM;
2490
2491 err = copy_from_user(user_space_data, buf, len);
2492 if (err) {
2493 pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
2494 __func__, err);
2495 err = DIAG_DCI_SEND_DATA_FAIL;
2496 goto fail;
2497 }
2498
2499 err = diag_process_dci_transaction(user_space_data, len);
2500fail:
2501 diagmem_free(driver, user_space_data, mempool);
2502 user_space_data = NULL;
2503 return err;
2504}
2505
2506static int diag_user_process_dci_apps_data(const char __user *buf, int len,
2507 int pkt_type)
2508{
2509 int err = 0;
2510 const int mempool = POOL_TYPE_COPY;
2511 unsigned char *user_space_data = NULL;
2512
2513 if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
2514 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2515 __func__, buf, len);
2516 return -EBADMSG;
2517 }
2518
2519 pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
2520 if (!pkt_type) {
2521 pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
2522 __func__, pkt_type);
2523 return -EBADMSG;
2524 }
2525
2526 user_space_data = diagmem_alloc(driver, len, mempool);
2527 if (!user_space_data)
2528 return -ENOMEM;
2529
2530 err = copy_from_user(user_space_data, buf, len);
2531 if (err) {
2532 pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
2533 __func__, err);
2534 goto fail;
2535 }
2536
2537 diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
2538fail:
2539 diagmem_free(driver, user_space_data, mempool);
2540 user_space_data = NULL;
2541 return err;
2542}
2543
2544static int diag_user_process_raw_data(const char __user *buf, int len)
2545{
2546 int err = 0;
2547 int ret = 0;
2548 int token_offset = 0;
2549 int remote_proc = 0;
2550 const int mempool = POOL_TYPE_COPY;
2551 unsigned char *user_space_data = NULL;
2552 struct diag_md_session_t *info = NULL;
2553
2554 if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
2555 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2556 __func__, buf, len);
2557 return -EBADMSG;
2558 }
2559
2560 user_space_data = diagmem_alloc(driver, len, mempool);
2561 if (!user_space_data)
2562 return -ENOMEM;
2563
2564 err = copy_from_user(user_space_data, buf, len);
2565 if (err) {
2566 pr_err("diag: copy failed for user space data\n");
2567 goto fail;
2568 }
2569
2570 /* Check for proc_type */
2571 remote_proc = diag_get_remote(*(int *)user_space_data);
2572 if (remote_proc) {
2573 token_offset = sizeof(int);
2574 if (len <= MIN_SIZ_ALLOW) {
2575 pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
2576 __func__, len);
2577 diagmem_free(driver, user_space_data, mempool);
2578 user_space_data = NULL;
2579 return -EBADMSG;
2580 }
2581 len -= sizeof(int);
2582 }
2583 if (driver->mask_check) {
2584 if (!mask_request_validate(user_space_data +
2585 token_offset)) {
2586 pr_alert("diag: mask request Invalid\n");
2587 diagmem_free(driver, user_space_data, mempool);
2588 user_space_data = NULL;
2589 return -EFAULT;
2590 }
2591 }
2592 if (remote_proc) {
2593 ret = diag_send_raw_data_remote(remote_proc,
2594 (void *)(user_space_data + token_offset),
2595 len, USER_SPACE_RAW_DATA);
2596 if (ret) {
2597 pr_err("diag: Error sending data to remote proc %d, err: %d\n",
2598 remote_proc, ret);
2599 }
2600 } else {
2601 wait_event_interruptible(driver->wait_q,
2602 (driver->in_busy_pktdata == 0));
2603 info = diag_md_session_get_pid(current->tgid);
2604 ret = diag_process_apps_pkt(user_space_data, len, info);
2605 if (ret == 1)
2606 diag_send_error_rsp((void *)(user_space_data), len);
2607 }
2608fail:
2609 diagmem_free(driver, user_space_data, mempool);
2610 user_space_data = NULL;
2611 return ret;
2612}
2613
2614static int diag_user_process_userspace_data(const char __user *buf, int len)
2615{
2616 int err = 0;
2617 int max_retries = 3;
2618 int retry_count = 0;
2619 int remote_proc = 0;
2620 int token_offset = 0;
2621 struct diag_md_session_t *session_info = NULL;
2622 uint8_t hdlc_disabled;
2623
2624 if (!buf || len <= 0 || len > USER_SPACE_DATA) {
2625 pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
2626 __func__, buf, len);
2627 return -EBADMSG;
2628 }
2629
2630 do {
2631 if (!driver->user_space_data_busy)
2632 break;
2633 retry_count++;
2634 usleep_range(10000, 10100);
2635 } while (retry_count < max_retries);
2636
2637 if (driver->user_space_data_busy)
2638 return -EAGAIN;
2639
2640 err = copy_from_user(driver->user_space_data_buf, buf, len);
2641 if (err) {
2642 pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
2643 __func__, err);
2644 return -EIO;
2645 }
2646
2647 /* Check for proc_type */
2648 remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
2649 if (remote_proc) {
2650 if (len <= MIN_SIZ_ALLOW) {
2651 pr_err("diag: Integer underflow in %s, payload size: %d",
2652 __func__, len);
2653 return -EBADMSG;
2654 }
2655 token_offset = sizeof(int);
2656 len -= sizeof(int);
2657 }
2658
2659 /* Check masks for On-Device logging */
2660 if (driver->mask_check) {
2661 if (!mask_request_validate(driver->user_space_data_buf +
2662 token_offset)) {
2663 pr_alert("diag: mask request Invalid\n");
2664 return -EFAULT;
2665 }
2666 }
2667
2668 /* send masks to local processor now */
2669 if (!remote_proc) {
2670 session_info = diag_md_session_get_pid(current->tgid);
2671 if (!session_info) {
2672 pr_err("diag:In %s request came from invalid md session pid:%d",
2673 __func__, current->tgid);
2674 return -EINVAL;
2675 }
2676 if (session_info)
2677 hdlc_disabled = session_info->hdlc_disabled;
2678 else
2679 hdlc_disabled = driver->hdlc_disabled;
2680 if (!hdlc_disabled)
2681 diag_process_hdlc_pkt((void *)
2682 (driver->user_space_data_buf),
2683 len, session_info);
2684 else
2685 diag_process_non_hdlc_pkt((char *)
2686 (driver->user_space_data_buf),
2687 len, session_info);
2688 return 0;
2689 }
2690
2691 err = diag_process_userspace_remote(remote_proc,
2692 driver->user_space_data_buf +
2693 token_offset, len);
2694 if (err) {
2695 driver->user_space_data_busy = 0;
2696 pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
2697 remote_proc, err);
2698 }
2699
2700 return err;
2701}
2702
/*
 * diag_user_process_apps_data - handle log/event/F3/response data written
 * by an apps client.
 *
 * Copies the payload into a POOL_TYPE_COPY pool item. If STM logging is
 * active for APPS_DATA and the type is in the event..log range, the data
 * is diverted to STM and dropped from the normal path. Otherwise it is
 * encoded (HDLC or non-HDLC, per the APPS_DATA md session) and aggregated
 * for the mux under apps_data_mutex -> hdlc_disable_mutex (this lock
 * order matters; hdlc_data/non_hdlc_data are shared).
 *
 * Returns 0 on success (including STM diversion and PKT_DROP, which is
 * recorded in stats), -EBADMSG for bad arguments or a failed user copy,
 * -ENOMEM when the pool is exhausted, or a negative error from encoding.
 */
static int diag_user_process_apps_data(const char __user *buf, int len,
				       int pkt_type)
{
	int ret = 0;
	int stm_size = 0;
	const int mempool = POOL_TYPE_COPY;
	unsigned char *user_space_data = NULL;
	struct diag_md_session_t *session_info = NULL;
	uint8_t hdlc_disabled;

	if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
				   __func__, buf, len);
		return -EBADMSG;
	}

	/* Only the streaming data types are accepted on this path. */
	switch (pkt_type) {
	case DATA_TYPE_EVENT:
	case DATA_TYPE_F3:
	case DATA_TYPE_LOG:
	case DATA_TYPE_RESPONSE:
	case DATA_TYPE_DELAYED_RESPONSE:
		break;
	default:
		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
				   __func__, pkt_type);
		return -EBADMSG;
	}

	user_space_data = diagmem_alloc(driver, len, mempool);
	if (!user_space_data) {
		diag_record_stats(pkt_type, PKT_DROP);
		return -ENOMEM;
	}

	ret = copy_from_user(user_space_data, buf, len);
	if (ret) {
		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
			 __func__, ret);
		diagmem_free(driver, user_space_data, mempool);
		user_space_data = NULL;
		diag_record_stats(pkt_type, PKT_DROP);
		return -EBADMSG;
	}

	/* STM diversion: event/F3/log data goes to the STM trace instead. */
	if (driver->stm_state[APPS_DATA] &&
	    (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
		stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
					  len);
		if (stm_size == 0) {
			pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
				 __func__);
		}
		diagmem_free(driver, user_space_data, mempool);
		user_space_data = NULL;

		return 0;
	}

	/* Lock order apps_data_mutex -> hdlc_disable_mutex must be kept. */
	mutex_lock(&apps_data_mutex);
	mutex_lock(&driver->hdlc_disable_mutex);
	session_info = diag_md_session_get_peripheral(APPS_DATA);
	if (session_info)
		hdlc_disabled = session_info->hdlc_disabled;
	else
		hdlc_disabled = driver->hdlc_disabled;
	if (hdlc_disabled)
		ret = diag_process_apps_data_non_hdlc(user_space_data, len,
						      pkt_type);
	else
		ret = diag_process_apps_data_hdlc(user_space_data, len,
						  pkt_type);
	mutex_unlock(&driver->hdlc_disable_mutex);
	mutex_unlock(&apps_data_mutex);

	diagmem_free(driver, user_space_data, mempool);
	user_space_data = NULL;

	/* Arm the periodic drain so aggregated data does not sit forever. */
	check_drain_timer();

	if (ret == PKT_DROP)
		diag_record_stats(pkt_type, PKT_DROP);
	else if (ret == PKT_ALLOC)
		diag_record_stats(pkt_type, PKT_ALLOC);
	else
		return ret;

	return 0;
}
2792
/*
 * diagchar_read - read entry point for the diag char device.
 *
 * Blocks until some data_ready bit is set for the calling client, then
 * drains exactly one category of pending data into the user buffer, in
 * priority order: memory-device stream, HDLC-support notification,
 * de-init, msg/event/log masks, apps packets, DCI packets/masks, and
 * finally (outside diagchar_mutex) per-client DCI data. The first int
 * written to @buf is always the data_type tag; payload follows.
 *
 * Returns the number of bytes written to @buf, or a negative errno.
 */
static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct diag_dci_client_tbl *entry;
	struct list_head *start, *temp;
	int index = -1, i = 0, ret = 0;
	int data_type;
	int copy_dci_data = 0;
	int exit_stat = 0;
	int write_len = 0;
	struct diag_md_session_t *session_info = NULL;

	/* Find this process's slot in the client table. */
	for (i = 0; i < driver->num_clients; i++)
		if (driver->client_map[i].pid == current->tgid)
			index = i;

	if (index == -1) {
		pr_err("diag: Client PID not found in table");
		return -EINVAL;
	}
	if (!buf) {
		pr_err("diag: bad address from user side\n");
		return -EFAULT;
	}
	wait_event_interruptible(driver->wait_q, driver->data_ready[index]);

	mutex_lock(&driver->diagchar_mutex);

	if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
	    (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
	     driver->logging_mode == DIAG_MULTI_MODE)) {
		pr_debug("diag: process woken up\n");
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;
		/* place holder for number of data field */
		ret += sizeof(int);
		session_info = diag_md_session_get_pid(current->tgid);
		exit_stat = diag_md_copy_to_user(buf, &ret, count,
						 session_info);
		goto exit;
	} else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
		/* In case, the thread wakes up and the logging mode is not
		 * memory device any more, the condition needs to be cleared.
		 */
		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
	}

	if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
		data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
		driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;

		/* Follow the tag with the session's hdlc_disabled flag. */
		session_info = diag_md_session_get_pid(current->tgid);
		if (session_info) {
			COPY_USER_SPACE_OR_ERR(buf+4,
					session_info->hdlc_disabled,
					sizeof(uint8_t));
			if (ret == -EFAULT)
				goto exit;
		}
		goto exit;
	}

	if (driver->data_ready[index] & DEINIT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DEINIT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= DEINIT_TYPE;
		/* Drop the mutex before tearing down this client's entry. */
		mutex_unlock(&driver->diagchar_mutex);
		diag_remove_client_entry(file);
		return ret;
	}

	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;
		write_len = diag_copy_to_user_msg_mask(buf + ret, count,
						       session_info);
		if (write_len > 0)
			ret += write_len;
		driver->data_ready[index] ^= MSG_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		/* Prefer the session's event mask; fall back to global. */
		if (session_info && session_info->event_mask &&
		    session_info->event_mask->ptr) {
			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
					*(session_info->event_mask->ptr),
					session_info->event_mask->mask_len);
			if (ret == -EFAULT)
				goto exit;
		} else {
			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
						*(event_mask.ptr),
						event_mask.mask_len);
			if (ret == -EFAULT)
				goto exit;
		}
		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
		session_info = diag_md_session_get_peripheral(APPS_DATA);
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
		if (ret == -EFAULT)
			goto exit;

		write_len = diag_copy_to_user_log_mask(buf + ret, count,
						       session_info);
		if (write_len > 0)
			ret += write_len;
		driver->data_ready[index] ^= LOG_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & PKT_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & PKT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(data_type));
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf + sizeof(data_type),
					*(driver->apps_req_buf),
					driver->apps_req_buf_len);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= PKT_TYPE;
		/* Response delivered: unblock the next request writer. */
		driver->in_busy_pktdata = 0;
		goto exit;
	}

	if (driver->data_ready[index] & DCI_PKT_TYPE) {
		/* Copy the type of data being passed */
		data_type = driver->data_ready[index] & DCI_PKT_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, *(driver->dci_pkt_buf),
					driver->dci_pkt_length);
		if (ret == -EFAULT)
			goto exit;

		driver->data_ready[index] ^= DCI_PKT_TYPE;
		driver->in_busy_dcipktdata = 0;
		goto exit;
	}

	if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
				event_mask_composite), DCI_EVENT_MASK_SIZE);
		if (ret == -EFAULT)
			goto exit;

		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
		goto exit;
	}

	if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
		/*Copy the type of data being passed*/
		data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
		if (ret == -EFAULT)
			goto exit;

		COPY_USER_SPACE_OR_ERR(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
				log_mask_composite), DCI_LOG_MASK_SIZE);
		if (ret == -EFAULT)
			goto exit;
		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
		goto exit;
	}

exit:
	/* DCI per-client data is drained under dci_mutex, not diagchar_mutex. */
	mutex_unlock(&driver->diagchar_mutex);
	if (driver->data_ready[index] & DCI_DATA_TYPE) {
		mutex_lock(&driver->dci_mutex);
		/* Copy the type of data being passed */
		data_type = driver->data_ready[index] & DCI_DATA_TYPE;
		list_for_each_safe(start, temp, &driver->dci_client_list) {
			entry = list_entry(start, struct diag_dci_client_tbl,
					   track);
			if (entry->client->tgid != current->tgid)
				continue;
			if (!entry->in_service)
				continue;
			if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
			ret += sizeof(int);
			if (copy_to_user(buf + ret, &entry->client_info.token,
					 sizeof(int))) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
			ret += sizeof(int);
			copy_dci_data = 1;
			exit_stat = diag_copy_dci(buf, count, entry, &ret);
			mutex_lock(&driver->diagchar_mutex);
			driver->data_ready[index] ^= DCI_DATA_TYPE;
			mutex_unlock(&driver->diagchar_mutex);
			if (exit_stat == 1) {
				mutex_unlock(&driver->dci_mutex);
				goto end;
			}
		}
		mutex_unlock(&driver->dci_mutex);
		goto end;
	}
end:
	/*
	 * Flush any read that is currently pending on DCI data and
	 * command channels. This will ensure that the next read is not
	 * missed.
	 */
	if (copy_dci_data) {
		diag_ws_on_copy_complete(DIAG_WS_DCI);
		flush_workqueue(driver->diag_dci_wq);
	}
	return ret;
}
3053
/*
 * diagchar_write - write entry point for the diag char device.
 *
 * The first int of the user buffer is the pkt_type tag; the rest is the
 * payload. Dispatches to the matching diag_user_process_* handler. When
 * USB is the logging sink but disconnected, only DCI traffic is accepted.
 *
 * Returns the handler's result (0/positive on success) or a negative
 * errno for short/invalid writes.
 */
static ssize_t diagchar_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int err = 0;
	int pkt_type = 0;
	int payload_len = 0;
	const char __user *payload_buf = NULL;

	/*
	 * The data coming from the user space should at least have the
	 * packet type header.
	 */
	if (count < sizeof(int)) {
		pr_err("diag: In %s, client is sending short data, len: %d\n",
		       __func__, (int)count);
		return -EBADMSG;
	}

	err = copy_from_user((&pkt_type), buf, sizeof(int));
	if (err) {
		pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
				   __func__, err);
		return -EIO;
	}

	/* USB sink gone: drop everything except DCI traffic. */
	if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
		if (!((pkt_type == DCI_DATA_TYPE) ||
		    (pkt_type == DCI_PKT_TYPE) ||
		    (pkt_type & DATA_TYPE_DCI_LOG) ||
		    (pkt_type & DATA_TYPE_DCI_EVENT))) {
			pr_debug("diag: In %s, Dropping non DCI packet type\n",
				 __func__);
			return -EIO;
		}
	}

	payload_buf = buf + sizeof(int);
	payload_len = count - sizeof(int);

	/* Exact-match pkt_type dispatch first. */
	if (pkt_type == DCI_PKT_TYPE)
		return diag_user_process_dci_apps_data(payload_buf,
						       payload_len,
						       pkt_type);
	else if (pkt_type == DCI_DATA_TYPE)
		return diag_user_process_dci_data(payload_buf, payload_len);
	else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
		return diag_user_process_raw_data(payload_buf,
							payload_len);
	else if (pkt_type == USER_SPACE_DATA_TYPE)
		return diag_user_process_userspace_data(payload_buf,
							payload_len);
	/*
	 * DCI log/event bits may be combined with a regular stream type;
	 * handle the DCI part, then clear those bits and fall through to
	 * the regular-stream switch below if anything remains.
	 */
	if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
		err = diag_user_process_dci_apps_data(payload_buf, payload_len,
						      pkt_type);
		if (pkt_type & DATA_TYPE_DCI_LOG)
			pkt_type ^= DATA_TYPE_DCI_LOG;
		if (pkt_type & DATA_TYPE_DCI_EVENT)
			pkt_type ^= DATA_TYPE_DCI_EVENT;
		/*
		 * Check if the log or event is selected even on the regular
		 * stream. If USB is not connected and we are not in memory
		 * device mode, we should not process these logs/events.
		 */
		if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
		    !driver->usb_connected)
			return err;
	}

	switch (pkt_type) {
	case DATA_TYPE_EVENT:
	case DATA_TYPE_F3:
	case DATA_TYPE_LOG:
	case DATA_TYPE_DELAYED_RESPONSE:
	case DATA_TYPE_RESPONSE:
		return diag_user_process_apps_data(payload_buf, payload_len,
						   pkt_type);
	default:
		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
				   __func__, pkt_type);
		return -EINVAL;
	}

	return err;
}
3138
3139void diag_ws_init(void)
3140{
3141 driver->dci_ws.ref_count = 0;
3142 driver->dci_ws.copy_count = 0;
3143 spin_lock_init(&driver->dci_ws.lock);
3144
3145 driver->md_ws.ref_count = 0;
3146 driver->md_ws.copy_count = 0;
3147 spin_lock_init(&driver->md_ws.lock);
3148}
3149
3150static void diag_stats_init(void)
3151{
3152 if (!driver)
3153 return;
3154
3155 driver->msg_stats.alloc_count = 0;
3156 driver->msg_stats.drop_count = 0;
3157
3158 driver->log_stats.alloc_count = 0;
3159 driver->log_stats.drop_count = 0;
3160
3161 driver->event_stats.alloc_count = 0;
3162 driver->event_stats.drop_count = 0;
3163}
3164
3165void diag_ws_on_notify(void)
3166{
3167 /*
3168 * Do not deal with reference count here as there can be spurious
3169 * interrupts.
3170 */
3171 pm_stay_awake(driver->diag_dev);
3172}
3173
3174void diag_ws_on_read(int type, int pkt_len)
3175{
3176 unsigned long flags;
3177 struct diag_ws_ref_t *ws_ref = NULL;
3178
3179 switch (type) {
3180 case DIAG_WS_DCI:
3181 ws_ref = &driver->dci_ws;
3182 break;
3183 case DIAG_WS_MUX:
3184 ws_ref = &driver->md_ws;
3185 break;
3186 default:
3187 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3188 __func__, type);
3189 return;
3190 }
3191
3192 spin_lock_irqsave(&ws_ref->lock, flags);
3193 if (pkt_len > 0) {
3194 ws_ref->ref_count++;
3195 } else {
3196 if (ws_ref->ref_count < 1) {
3197 ws_ref->ref_count = 0;
3198 ws_ref->copy_count = 0;
3199 }
3200 diag_ws_release();
3201 }
3202 spin_unlock_irqrestore(&ws_ref->lock, flags);
3203}
3204
3205
3206void diag_ws_on_copy(int type)
3207{
3208 unsigned long flags;
3209 struct diag_ws_ref_t *ws_ref = NULL;
3210
3211 switch (type) {
3212 case DIAG_WS_DCI:
3213 ws_ref = &driver->dci_ws;
3214 break;
3215 case DIAG_WS_MUX:
3216 ws_ref = &driver->md_ws;
3217 break;
3218 default:
3219 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3220 __func__, type);
3221 return;
3222 }
3223
3224 spin_lock_irqsave(&ws_ref->lock, flags);
3225 ws_ref->copy_count++;
3226 spin_unlock_irqrestore(&ws_ref->lock, flags);
3227}
3228
3229void diag_ws_on_copy_fail(int type)
3230{
3231 unsigned long flags;
3232 struct diag_ws_ref_t *ws_ref = NULL;
3233
3234 switch (type) {
3235 case DIAG_WS_DCI:
3236 ws_ref = &driver->dci_ws;
3237 break;
3238 case DIAG_WS_MUX:
3239 ws_ref = &driver->md_ws;
3240 break;
3241 default:
3242 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3243 __func__, type);
3244 return;
3245 }
3246
3247 spin_lock_irqsave(&ws_ref->lock, flags);
3248 ws_ref->ref_count--;
3249 spin_unlock_irqrestore(&ws_ref->lock, flags);
3250
3251 diag_ws_release();
3252}
3253
3254void diag_ws_on_copy_complete(int type)
3255{
3256 unsigned long flags;
3257 struct diag_ws_ref_t *ws_ref = NULL;
3258
3259 switch (type) {
3260 case DIAG_WS_DCI:
3261 ws_ref = &driver->dci_ws;
3262 break;
3263 case DIAG_WS_MUX:
3264 ws_ref = &driver->md_ws;
3265 break;
3266 default:
3267 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3268 __func__, type);
3269 return;
3270 }
3271
3272 spin_lock_irqsave(&ws_ref->lock, flags);
3273 ws_ref->ref_count -= ws_ref->copy_count;
3274 if (ws_ref->ref_count < 1)
3275 ws_ref->ref_count = 0;
3276 ws_ref->copy_count = 0;
3277 spin_unlock_irqrestore(&ws_ref->lock, flags);
3278
3279 diag_ws_release();
3280}
3281
3282void diag_ws_reset(int type)
3283{
3284 unsigned long flags;
3285 struct diag_ws_ref_t *ws_ref = NULL;
3286
3287 switch (type) {
3288 case DIAG_WS_DCI:
3289 ws_ref = &driver->dci_ws;
3290 break;
3291 case DIAG_WS_MUX:
3292 ws_ref = &driver->md_ws;
3293 break;
3294 default:
3295 pr_err_ratelimited("diag: In %s, invalid type: %d\n",
3296 __func__, type);
3297 return;
3298 }
3299
3300 spin_lock_irqsave(&ws_ref->lock, flags);
3301 ws_ref->ref_count = 0;
3302 ws_ref->copy_count = 0;
3303 spin_unlock_irqrestore(&ws_ref->lock, flags);
3304
3305 diag_ws_release();
3306}
3307
3308void diag_ws_release(void)
3309{
3310 if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
3311 pm_relax(driver->diag_dev);
3312}
3313
#ifdef DIAG_DEBUG
/*
 * Create the IPC logging context used by the DIAG_LOG/DIAG_DBUG macros
 * and enable the default set of debug categories. Compiled in only when
 * DIAG_DEBUG is defined; otherwise the no-op stub below is used.
 */
static void diag_debug_init(void)
{
	diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
	if (!diag_ipc_log)
		pr_err("diag: Failed to create IPC logging context\n");
	/*
	 * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
	 * to be logged to IPC
	 */
	diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
				DIAG_DEBUG_BRIDGE;
}
#else
/* No-op stub when DIAG_DEBUG logging is compiled out. */
static void diag_debug_init(void)
{

}
#endif
3333
3334static int diag_real_time_info_init(void)
3335{
3336 int i;
3337
3338 if (!driver)
3339 return -EIO;
3340 for (i = 0; i < DIAG_NUM_PROC; i++) {
3341 driver->real_time_mode[i] = 1;
3342 driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
3343 driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
3344 }
3345 driver->real_time_update_busy = 0;
3346 driver->proc_active_mask = 0;
3347 driver->diag_real_time_wq = create_singlethread_workqueue(
3348 "diag_real_time_wq");
3349 if (!driver->diag_real_time_wq)
3350 return -ENOMEM;
3351 INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
3352 mutex_init(&driver->real_time_mutex);
3353 return 0;
3354}
3355
/* File operations for the /dev/diag character device. */
static const struct file_operations diagcharfops = {
	.owner = THIS_MODULE,
	.read = diagchar_read,
	.write = diagchar_write,
#ifdef CONFIG_COMPAT
	/* 32-bit userspace on a 64-bit kernel goes through the compat path. */
	.compat_ioctl = diagchar_compat_ioctl,
#endif
	.unlocked_ioctl = diagchar_ioctl,
	.open = diagchar_open,
	.release = diagchar_close
};
3367
3368static int diagchar_setup_cdev(dev_t devno)
3369{
3370
3371 int err;
3372
3373 cdev_init(driver->cdev, &diagcharfops);
3374
3375 driver->cdev->owner = THIS_MODULE;
3376 driver->cdev->ops = &diagcharfops;
3377
3378 err = cdev_add(driver->cdev, devno, 1);
3379
3380 if (err) {
3381 pr_info("diagchar cdev registration failed !\n");
3382 return err;
3383 }
3384
3385 driver->diagchar_class = class_create(THIS_MODULE, "diag");
3386
3387 if (IS_ERR(driver->diagchar_class)) {
3388 pr_err("Error creating diagchar class.\n");
3389 return PTR_ERR(driver->diagchar_class);
3390 }
3391
3392 driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
3393 (void *)driver, "diag");
3394
3395 if (!driver->diag_dev)
3396 return -EIO;
3397
3398 driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
3399 return 0;
3400
3401}
3402
3403static int diagchar_cleanup(void)
3404{
3405 if (driver) {
3406 if (driver->cdev) {
3407 /* TODO - Check if device exists before deleting */
3408 device_destroy(driver->diagchar_class,
3409 MKDEV(driver->major,
3410 driver->minor_start));
3411 cdev_del(driver->cdev);
3412 }
3413 if (!IS_ERR(driver->diagchar_class))
3414 class_destroy(driver->diagchar_class);
3415 kfree(driver);
3416 }
3417 return 0;
3418}
3419
3420static int __init diagchar_init(void)
3421{
3422 dev_t dev;
Manoj Prabhu B98325462017-01-10 20:19:28 +05303423 int ret, i;
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003424
3425 pr_debug("diagfwd initializing ..\n");
3426 ret = 0;
3427 driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
3428 if (!driver)
3429 return -ENOMEM;
3430 kmemleak_not_leak(driver);
3431
3432 timer_in_progress = 0;
3433 driver->delayed_rsp_id = 0;
3434 driver->hdlc_disabled = 0;
3435 driver->dci_state = DIAG_DCI_NO_ERROR;
3436 setup_timer(&drain_timer, drain_timer_func, 1234);
3437 driver->supports_sockets = 1;
3438 driver->time_sync_enabled = 0;
3439 driver->uses_time_api = 0;
3440 driver->poolsize = poolsize;
3441 driver->poolsize_hdlc = poolsize_hdlc;
3442 driver->poolsize_dci = poolsize_dci;
3443 driver->poolsize_user = poolsize_user;
3444 /*
3445 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
3446 * The number of buffers encompasses Diag data generated on
3447 * the Apss processor + 1 for the responses generated exclusively on
3448 * the Apps processor + data from data channels (4 channels per
3449 * peripheral) + data from command channels (2)
3450 */
3451 diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
3452 poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
3453 driver->num_clients = max_clients;
3454 driver->logging_mode = DIAG_USB_MODE;
3455 driver->mask_check = 0;
3456 driver->in_busy_pktdata = 0;
3457 driver->in_busy_dcipktdata = 0;
3458 driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
3459 hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3460 hdlc_data.len = 0;
3461 non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
3462 non_hdlc_data.len = 0;
3463 mutex_init(&driver->hdlc_disable_mutex);
3464 mutex_init(&driver->diagchar_mutex);
3465 mutex_init(&driver->diag_maskclear_mutex);
Manoj Prabhu B2a428272016-12-22 15:22:03 +05303466 mutex_init(&driver->diag_notifier_mutex);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003467 mutex_init(&driver->diag_file_mutex);
3468 mutex_init(&driver->delayed_rsp_mutex);
3469 mutex_init(&apps_data_mutex);
Gopikrishna Mogasati9a44d8d2017-05-05 16:04:35 +05303470 mutex_init(&driver->msg_mask_lock);
Manoj Prabhu B98325462017-01-10 20:19:28 +05303471 for (i = 0; i < NUM_PERIPHERALS; i++)
3472 mutex_init(&driver->diagfwd_channel_mutex[i]);
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003473 init_waitqueue_head(&driver->wait_q);
3474 INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
3475 INIT_WORK(&(driver->update_user_clients),
3476 diag_update_user_client_work_fn);
3477 INIT_WORK(&(driver->update_md_clients),
3478 diag_update_md_client_work_fn);
3479 diag_ws_init();
3480 diag_stats_init();
3481 diag_debug_init();
3482 diag_md_session_init();
3483
3484 driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
3485 driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
3486 if (!driver->incoming_pkt.data) {
3487 ret = -ENOMEM;
3488 goto fail;
3489 }
3490 kmemleak_not_leak(driver->incoming_pkt.data);
3491 driver->incoming_pkt.processing = 0;
3492 driver->incoming_pkt.read_len = 0;
3493 driver->incoming_pkt.remaining = 0;
3494 driver->incoming_pkt.total_len = 0;
3495
3496 ret = diag_real_time_info_init();
3497 if (ret)
3498 goto fail;
3499 ret = diag_debugfs_init();
3500 if (ret)
3501 goto fail;
3502 ret = diag_masks_init();
3503 if (ret)
3504 goto fail;
3505 ret = diag_remote_init();
3506 if (ret)
3507 goto fail;
3508 ret = diag_mux_init();
3509 if (ret)
3510 goto fail;
3511 ret = diagfwd_init();
3512 if (ret)
3513 goto fail;
3514 ret = diagfwd_cntl_init();
3515 if (ret)
3516 goto fail;
3517 driver->dci_state = diag_dci_init();
3518 ret = diagfwd_peripheral_init();
3519 if (ret)
3520 goto fail;
3521 diagfwd_cntl_channel_init();
3522 if (driver->dci_state == DIAG_DCI_NO_ERROR)
3523 diag_dci_channel_init();
3524 pr_debug("diagchar initializing ..\n");
3525 driver->num = 1;
3526 driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
3527 strlcpy(driver->name, "diag", 4);
3528 /* Get major number from kernel and initialize */
3529 ret = alloc_chrdev_region(&dev, driver->minor_start,
3530 driver->num, driver->name);
3531 if (!ret) {
3532 driver->major = MAJOR(dev);
3533 driver->minor_start = MINOR(dev);
3534 } else {
3535 pr_err("diag: Major number not allocated\n");
3536 goto fail;
3537 }
3538 driver->cdev = cdev_alloc();
3539 ret = diagchar_setup_cdev(dev);
3540 if (ret)
3541 goto fail;
Sreelakshmi Gownipalli8d477d32017-02-08 19:49:06 -08003542 mutex_init(&driver->diag_id_mutex);
3543 INIT_LIST_HEAD(&driver->diag_id_list);
3544 diag_add_diag_id_to_list(DIAG_ID_APPS, "APPS");
Sreelakshmi Gownipallicb8893d2016-10-19 16:02:34 -07003545 pr_debug("diagchar initialized now");
3546 ret = diagfwd_bridge_init();
3547 if (ret)
3548 diagfwd_bridge_exit();
3549 return 0;
3550
3551fail:
3552 pr_err("diagchar is not initialized, ret: %d\n", ret);
3553 diag_debugfs_cleanup();
3554 diagchar_cleanup();
3555 diag_mux_exit();
3556 diagfwd_peripheral_exit();
3557 diagfwd_bridge_exit();
3558 diagfwd_exit();
3559 diagfwd_cntl_exit();
3560 diag_dci_exit();
3561 diag_masks_exit();
3562 diag_remote_exit();
3563 return ret;
3564
3565}
3566
/*
 * Module exit: tear down the diag subsystems in reverse dependency order
 * and free the driver state (diagchar_cleanup() last, as the other exit
 * paths may still reference the global driver struct).
 */
static void diagchar_exit(void)
{
	pr_info("diagchar exiting...\n");
	diag_mempool_exit();
	diag_mux_exit();
	diagfwd_peripheral_exit();
	diagfwd_exit();
	diagfwd_cntl_exit();
	diag_dci_exit();
	diag_masks_exit();
	diag_md_session_exit();
	diag_remote_exit();
	diag_debugfs_cleanup();
	diagchar_cleanup();
	pr_info("done diagchar exit\n");
}
3583
/* Register module entry and exit points. */
module_init(diagchar_init);
module_exit(diagchar_exit);