diag: Add snapshot of Diag
This snapshot is taken as of msm-4.4 commit <e2787f510d> Merge
"ARM: dts: msm: change UFS/SDHC2 power supply for msmcobalt
interposer QRD"
Remove references to SMD and fix code style warnings and errors.
CRs-Fixed: 1092890
Change-Id: Id5903c391446c8f908a4efc0651df08e3c302d1a
Signed-off-by: Sreelakshmi Gownipalli <sgownipa@codeaurora.org>
Signed-off-by: Chris Lew <clew@codeaurora.org>
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index dcc0973..438c907 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -48,6 +48,8 @@
source "drivers/tty/serial/Kconfig"
+source "drivers/char/diag/Kconfig"
+
config TTY_PRINTK
tristate "TTY driver to output user messages via printk"
depends on EXPERT && TTY
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 6e6c244..60653fd 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -58,5 +58,6 @@
js-rtc-y = rtc.o
obj-$(CONFIG_TILE_SROM) += tile-srom.o
+obj-$(CONFIG_DIAG_CHAR) += diag/
obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
new file mode 100644
index 0000000..e309241
--- /dev/null
+++ b/drivers/char/diag/Kconfig
@@ -0,0 +1,37 @@
+menu "Diag Support"
+
+config DIAG_CHAR
+ tristate "DIAG CHAR Interface Core"
+ default m
+ depends on USB_CONFIGFS_F_DIAG || USB_FUNCTION_DIAG || USB_QCOM_MAEMO
+ depends on ARCH_QCOM
+ depends on POWER_RESET_QCOM
+ select CRC_CCITT
+ help
+ Char driver interface for SoC diagnostic information. The DIAG char
+ driver provides diag forwarding to user space and SoC peripherals.
+ This enables diagchar for the maemo or android USB gadget, depending
+ on the config selected.
+
+config DIAG_OVER_USB
+ bool "Enable DIAG traffic to go over USB"
+ depends on DIAG_CHAR
+ depends on ARCH_QCOM
+ default y
+ help
+ Diag over USB enables sending DIAG traffic over a USB transport. When
+ the USB endpoints become available, the DIAG driver will enable Diag
+ traffic over USB. This allows host-side tools to parse and display
+ Diag traffic from the USB endpoint.
+
+config DIAGFWD_BRIDGE_CODE
+ bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
+ depends on DIAG_CHAR
+ depends on USB_QCOM_DIAG_BRIDGE || MSM_MHI
+ default y
+ help
+ SMUX/HSIC Transport Layer for DIAG Router. When the MHI/SMUX endpoints
+ become available, this bridge driver enables DIAG traffic over MHI
+ and SMUX.
+
+endmenu
diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile
new file mode 100644
index 0000000..b61aae8
--- /dev/null
+++ b/drivers/char/diag/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_DIAG_CHAR) := diagchar.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_smux.o
+obj-$(CONFIG_MSM_MHI) += diagfwd_mhi.o
+diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_glink.o \
+ diagfwd_peripheral.o diagfwd_socket.o diag_mux.o \
+ diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o \
+ diag_dci.o diag_masks.o diag_debugfs.o
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
new file mode 100644
index 0000000..ea684fc
--- /dev/null
+++ b/drivers/char/diag/diag_dci.c
@@ -0,0 +1,3169 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/reboot.h>
+#include <asm/current.h>
+#include <soc/qcom/restart.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+static struct timer_list dci_drain_timer;
+static int dci_timer_in_progress;
+static struct work_struct dci_data_drain_work;
+
+struct diag_dci_partial_pkt_t partial_pkt;
+
+unsigned int dci_max_reg = 100;
+unsigned int dci_max_clients = 10;
+struct mutex dci_log_mask_mutex;
+struct mutex dci_event_mask_mutex;
+
+/*
+ * DCI_HANDSHAKE_RETRY_TIME: Time to wait (in microseconds) before checking the
+ * connection status again.
+ *
+ * DCI_HANDSHAKE_WAIT_TIME: Timeout (in milliseconds) to check for dci
+ * connection status
+ */
+#define DCI_HANDSHAKE_RETRY_TIME 500000
+#define DCI_HANDSHAKE_WAIT_TIME 200
+
+spinlock_t ws_lock;
+unsigned long ws_lock_flags;
+
+struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC] = {
+ {
+ .ctx = 0,
+ .send_log_mask = diag_send_dci_log_mask,
+ .send_event_mask = diag_send_dci_event_mask,
+ .peripheral_status = 0,
+ .mempool = 0,
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .ctx = DIAGFWD_MDM_DCI,
+ .send_log_mask = diag_send_dci_log_mask_remote,
+ .send_event_mask = diag_send_dci_event_mask_remote,
+ .peripheral_status = 0,
+ .mempool = POOL_TYPE_MDM_DCI_WRITE,
+ }
+#endif
+};
+
+struct dci_channel_status_t dci_channel_status[NUM_DCI_PROC] = {
+ {
+ .id = 0,
+ .open = 0,
+ .retry_count = 0
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .id = DIAGFWD_MDM_DCI,
+ .open = 0,
+ .retry_count = 0
+ }
+#endif
+};
+
+/* Number of milliseconds anticipated to process the DCI data */
+#define DCI_WAKEUP_TIMEOUT 1
+
+#define DCI_CAN_ADD_BUF_TO_LIST(buf) \
+ (buf && buf->data && !buf->in_busy && buf->data_len > 0)
+
+#ifdef CONFIG_DEBUG_FS
+struct diag_dci_data_info *dci_traffic;
+struct mutex dci_stat_mutex;
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+ uint8_t peripheral, uint8_t proc)
+{
+ static int curr_dci_data;
+ static unsigned long iteration;
+ struct diag_dci_data_info *temp_data = dci_traffic;
+
+ if (!temp_data)
+ return;
+ mutex_lock(&dci_stat_mutex);
+ if (curr_dci_data == DIAG_DCI_DEBUG_CNT)
+ curr_dci_data = 0;
+ temp_data += curr_dci_data;
+ temp_data->iteration = iteration + 1;
+ temp_data->data_size = read_bytes;
+ temp_data->peripheral = peripheral;
+ temp_data->ch_type = ch_type;
+ temp_data->proc = proc;
+ diag_get_timestamp(temp_data->time_stamp);
+ curr_dci_data++;
+ iteration++;
+ mutex_unlock(&dci_stat_mutex);
+}
+#else
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+ uint8_t peripheral, uint8_t proc) { }
+#endif
+static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
+{
+ unsigned char *temp = mask;
+ uint8_t i;
+
+ if (!mask)
+ return;
+
+ /* create hard coded table for log mask with 16 categories */
+ for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+ *temp = i;
+ temp++;
+ *temp = dirty ? 1 : 0;
+ temp++;
+ memset(temp, 0, DCI_MAX_ITEMS_PER_LOG_CODE);
+ temp += DCI_MAX_ITEMS_PER_LOG_CODE;
+ }
+}
+
+static void create_dci_event_mask_tbl(unsigned char *tbl_buf)
+{
+ if (tbl_buf)
+ memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
+}
+
+void dci_drain_data(unsigned long data)
+{
+ queue_work(driver->diag_dci_wq, &dci_data_drain_work);
+}
+
+static void dci_check_drain_timer(void)
+{
+ if (!dci_timer_in_progress) {
+ dci_timer_in_progress = 1;
+ mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(200));
+ }
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void dci_handshake_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ int max_retries = 5;
+
+ struct dci_channel_status_t *status = container_of(work,
+ struct dci_channel_status_t,
+ handshake_work);
+
+ if (status->open) {
+ pr_debug("diag: In %s, remote dci channel is open, index: %d\n",
+ __func__, status->id);
+ return;
+ }
+
+ if (status->retry_count == max_retries) {
+ status->retry_count = 0;
+ pr_info("diag: dci channel connection handshake timed out, id: %d\n",
+ status->id);
+ err = diagfwd_bridge_close(TOKEN_TO_BRIDGE(status->id));
+ if (err) {
+ pr_err("diag: In %s, unable to close dci channel id: %d, err: %d\n",
+ __func__, status->id, err);
+ }
+ return;
+ }
+ status->retry_count++;
+ /*
+ * Sleep for some time before checking the connection status again.
+ * The delay should be long enough to cover the round trip of a small
+ * packet to the remote processor.
+ */
+ usleep_range(DCI_HANDSHAKE_RETRY_TIME, DCI_HANDSHAKE_RETRY_TIME + 100);
+ mod_timer(&status->wait_time,
+ jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+}
+
+static void dci_chk_handshake(unsigned long data)
+{
+ int index = (int)data;
+
+ if (index < 0 || index >= NUM_DCI_PROC)
+ return;
+
+ queue_work(driver->diag_dci_wq,
+ &dci_channel_status[index].handshake_work);
+}
+#endif
+
+static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
+{
+ if (!buffer || buffer->data)
+ return -EINVAL;
+
+ switch (type) {
+ case DCI_BUF_PRIMARY:
+ buffer->capacity = IN_BUF_SIZE;
+ buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
+ if (!buffer->data)
+ return -ENOMEM;
+ break;
+ case DCI_BUF_SECONDARY:
+ buffer->data = NULL;
+ buffer->capacity = IN_BUF_SIZE;
+ break;
+ case DCI_BUF_CMD:
+ buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
+ buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
+ if (!buffer->data)
+ return -ENOMEM;
+ break;
+ default:
+ pr_err("diag: In %s, unknown type %d", __func__, type);
+ return -EINVAL;
+ }
+
+ buffer->data_len = 0;
+ buffer->in_busy = 0;
+ buffer->buf_type = type;
+ mutex_init(&buffer->data_mutex);
+
+ return 0;
+}
+
+static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
+{
+ if (!buf)
+ return -EINVAL;
+
+ /* Return 1 if the buffer is not busy and can hold new data */
+ if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
+ return 1;
+
+ return 0;
+}
+
+static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
+ struct diag_dci_buffer_t *buf)
+{
+ if (!buf || !client || !buf->data)
+ return;
+
+ if (buf->in_list || buf->data_len == 0)
+ return;
+
+ mutex_lock(&client->write_buf_mutex);
+ list_add_tail(&buf->buf_track, &client->list_write_buf);
+ /*
+ * In the case of DCI, there can be multiple packets in one read. To
+ * calculate the wakeup source reference count, we must account for each
+ * packet in a single read.
+ */
+ diag_ws_on_read(DIAG_WS_DCI, buf->data_len);
+ mutex_lock(&buf->data_mutex);
+ buf->in_busy = 1;
+ buf->in_list = 1;
+ mutex_unlock(&buf->data_mutex);
+ mutex_unlock(&client->write_buf_mutex);
+}
+
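+/*
+ * Pick a buffer to hold new DCI data for a client: reuse the current
+ * buffer if it still has room, fall back to the primary buffer, and
+ * otherwise allocate a secondary buffer backed by the DCI mempool.
+ */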
+static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
+ int data_source, int len)
+{
+ struct diag_dci_buffer_t *buf_primary = NULL;
+ struct diag_dci_buffer_t *buf_temp = NULL;
+ struct diag_dci_buffer_t *curr = NULL;
+
+ if (!client)
+ return -EINVAL;
+ if (len < 0 || len > IN_BUF_SIZE)
+ return -EINVAL;
+
+ curr = client->buffers[data_source].buf_curr;
+ buf_primary = client->buffers[data_source].buf_primary;
+
+ if (curr && diag_dci_check_buffer(curr, len) == 1)
+ return 0;
+
+ dci_add_buffer_to_list(client, curr);
+ client->buffers[data_source].buf_curr = NULL;
+
+ if (diag_dci_check_buffer(buf_primary, len) == 1) {
+ client->buffers[data_source].buf_curr = buf_primary;
+ return 0;
+ }
+
+ buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
+ if (!buf_temp)
+ return -EIO;
+
+ if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
+ buf_temp->data = diagmem_alloc(driver, IN_BUF_SIZE,
+ POOL_TYPE_DCI);
+ if (!buf_temp->data) {
+ kfree(buf_temp);
+ buf_temp = NULL;
+ return -ENOMEM;
+ }
+ client->buffers[data_source].buf_curr = buf_temp;
+ return 0;
+ }
+
+ kfree(buf_temp);
+ buf_temp = NULL;
+ return -EIO;
+}
+
+void diag_dci_wakeup_clients(void)
+{
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+
+ /*
+ * Don't wake up the client when there is no pending buffer to
+ * write or when it is writing to user space
+ */
+ if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+ mutex_lock(&entry->write_buf_mutex);
+ entry->in_service = 1;
+ mutex_unlock(&entry->write_buf_mutex);
+ diag_update_sleeping_process(entry->client->tgid,
+ DCI_DATA_TYPE);
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+}
+
+void dci_data_drain_work_fn(struct work_struct *work)
+{
+ int i;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ struct diag_dci_buffer_t *buf_temp = NULL;
+
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ for (i = 0; i < entry->num_buffers; i++) {
+ proc_buf = &entry->buffers[i];
+
+ mutex_lock(&proc_buf->buf_mutex);
+ buf_temp = proc_buf->buf_primary;
+ if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+ dci_add_buffer_to_list(entry, buf_temp);
+
+ buf_temp = proc_buf->buf_cmd;
+ if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+ dci_add_buffer_to_list(entry, buf_temp);
+
+ buf_temp = proc_buf->buf_curr;
+ if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
+ dci_add_buffer_to_list(entry, buf_temp);
+ proc_buf->buf_curr = NULL;
+ }
+ mutex_unlock(&proc_buf->buf_mutex);
+ }
+ if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+ mutex_lock(&entry->write_buf_mutex);
+ entry->in_service = 1;
+ mutex_unlock(&entry->write_buf_mutex);
+ diag_update_sleeping_process(entry->client->tgid,
+ DCI_DATA_TYPE);
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+ dci_timer_in_progress = 0;
+}
+
+static int diag_process_single_dci_pkt(unsigned char *buf, int len,
+ int data_source, int token)
+{
+ uint8_t cmd_code = 0;
+
+ if (!buf || len < 0) {
+ pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+ __func__, buf, len);
+ return -EIO;
+ }
+
+ cmd_code = *(uint8_t *)buf;
+
+ switch (cmd_code) {
+ case LOG_CMD_CODE:
+ extract_dci_log(buf, len, data_source, token, NULL);
+ break;
+ case EVENT_CMD_CODE:
+ extract_dci_events(buf, len, data_source, token, NULL);
+ break;
+ case EXT_HDR_CMD_CODE:
+ extract_dci_ext_pkt(buf, len, data_source, token);
+ break;
+ case DCI_PKT_RSP_CODE:
+ case DCI_DELAYED_RSP_CODE:
+ extract_dci_pkt_rsp(buf, len, data_source, token);
+ break;
+ case DCI_CONTROL_PKT_CODE:
+ extract_dci_ctrl_pkt(buf, len, token);
+ break;
+ default:
+ pr_err("diag: Unable to process single DCI packet, cmd_code: %d, data_source: %d",
+ cmd_code, data_source);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Process the data read from the apps userspace client */
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
+{
+ int err = 0;
+
+ if (!buf) {
+ pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
+ return;
+ }
+
+ if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
+ && data_type != DCI_PKT_TYPE) {
+ pr_err("diag: In %s, unsupported data_type: 0x%x\n",
+ __func__, (unsigned int)data_type);
+ return;
+ }
+
+ err = diag_process_single_dci_pkt(buf, recd_bytes, APPS_DATA,
+ DCI_LOCAL_PROC);
+ if (err)
+ return;
+
+ /* wake up all sleeping DCI clients which have some data */
+ diag_dci_wakeup_clients();
+ dci_check_drain_timer();
+}
+
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes)
+{
+ int read_bytes = 0, err = 0;
+ uint16_t dci_pkt_len;
+ struct diag_dci_header_t *header = NULL;
+ int header_len = sizeof(struct diag_dci_header_t);
+ int token = BRIDGE_TO_TOKEN(index);
+
+ if (!buf)
+ return;
+
+ diag_dci_record_traffic(recd_bytes, 0, 0, token);
+
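+ /*
+ * If a previous read left a partially received DCI packet, append
+ * the new bytes to it before parsing fresh packets from the buffer.
+ */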
+ if (!partial_pkt.processing)
+ goto start;
+
+ if (partial_pkt.remaining > recd_bytes) {
+ if ((partial_pkt.read_len + recd_bytes) >
+ (MAX_DCI_PACKET_SZ)) {
+ pr_err("diag: Invalid length %d, %d received in %s\n",
+ partial_pkt.read_len, recd_bytes, __func__);
+ goto end;
+ }
+ memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+ recd_bytes);
+ read_bytes += recd_bytes;
+ buf += read_bytes;
+ partial_pkt.read_len += recd_bytes;
+ partial_pkt.remaining -= recd_bytes;
+ } else {
+ if ((partial_pkt.read_len + partial_pkt.remaining) >
+ (MAX_DCI_PACKET_SZ)) {
+ pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+ partial_pkt.read_len,
+ partial_pkt.remaining, __func__);
+ goto end;
+ }
+ memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+ partial_pkt.remaining);
+ read_bytes += partial_pkt.remaining;
+ buf += read_bytes;
+ partial_pkt.read_len += partial_pkt.remaining;
+ partial_pkt.remaining = 0;
+ }
+
+ if (partial_pkt.remaining == 0) {
+ /*
+ * Process the DCI packet past its header: start (1 byte) +
+ * version (1 byte) + length (2 bytes)
+ */
+ diag_process_single_dci_pkt(partial_pkt.data + 4,
+ partial_pkt.read_len - header_len,
+ DCI_REMOTE_DATA, token);
+ partial_pkt.read_len = 0;
+ partial_pkt.total_len = 0;
+ partial_pkt.processing = 0;
+ goto start;
+ }
+ goto end;
+
+start:
+ while (read_bytes < recd_bytes) {
+ header = (struct diag_dci_header_t *)buf;
+ dci_pkt_len = header->length;
+
+ if (header->cmd_code != DCI_CONTROL_PKT_CODE &&
+ driver->num_dci_client == 0) {
+ read_bytes += header_len + dci_pkt_len;
+ buf += header_len + dci_pkt_len;
+ continue;
+ }
+
+ if (dci_pkt_len + header_len > MAX_DCI_PACKET_SZ) {
+ pr_err("diag: Invalid length in the dci packet field %d\n",
+ dci_pkt_len);
+ break;
+ }
+
+ if ((dci_pkt_len + header_len) > (recd_bytes - read_bytes)) {
+ partial_pkt.read_len = recd_bytes - read_bytes;
+ partial_pkt.total_len = dci_pkt_len + header_len;
+ partial_pkt.remaining = partial_pkt.total_len -
+ partial_pkt.read_len;
+ partial_pkt.processing = 1;
+ memcpy(partial_pkt.data, buf, partial_pkt.read_len);
+ break;
+ }
+ /*
+ * Process the DCI packet past its header: start (1 byte) +
+ * version (1 byte) + length (2 bytes)
+ */
+ err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+ DCI_REMOTE_DATA, DCI_MDM_PROC);
+ if (err)
+ break;
+ read_bytes += header_len + dci_pkt_len;
+ buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
+ }
+end:
+ if (err)
+ return;
+ /* wake up all sleeping DCI clients which have some data */
+ diag_dci_wakeup_clients();
+ dci_check_drain_timer();
+}
+
+/* Process the data read from the peripheral dci channels */
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+ int recd_bytes)
+{
+ int read_bytes = 0, err = 0;
+ uint16_t dci_pkt_len;
+ struct diag_dci_pkt_header_t *header = NULL;
+ uint8_t recv_pkt_cmd_code;
+
+ if (!buf || !p_info)
+ return;
+
+ /*
+ * Release wakeup source when there are no more clients to
+ * process DCI data
+ */
+ if (driver->num_dci_client == 0) {
+ diag_ws_reset(DIAG_WS_DCI);
+ return;
+ }
+
+ diag_dci_record_traffic(recd_bytes, p_info->type, p_info->peripheral,
+ DCI_LOCAL_PROC);
+ while (read_bytes < recd_bytes) {
+ header = (struct diag_dci_pkt_header_t *)buf;
+ recv_pkt_cmd_code = header->pkt_code;
+ dci_pkt_len = header->len;
+
+ /*
+ * Check that the current packet fits within the remaining bytes in
+ * the received buffer. This includes space for the Start byte (1),
+ * Version byte (1), length bytes (2) and End byte (1)
+ */
+ if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
+ pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
+ __func__, recd_bytes, dci_pkt_len);
+ diag_ws_release();
+ return;
+ }
+ /*
+ * Process the DCI packet past its header: start (1 byte) +
+ * version (1 byte) + length (2 bytes)
+ */
+ err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+ (int)p_info->peripheral,
+ DCI_LOCAL_PROC);
+ if (err) {
+ diag_ws_release();
+ break;
+ }
+ read_bytes += 5 + dci_pkt_len;
+ buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
+ }
+
+ if (err)
+ return;
+ /* wake up all sleeping DCI clients which have some data */
+ diag_dci_wakeup_clients();
+ dci_check_drain_timer();
+}
+
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+ uint16_t log_code)
+{
+ uint16_t item_num;
+ uint8_t equip_id, *log_mask_ptr, byte_mask;
+ int byte_index, offset;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return 0;
+ }
+
+ equip_id = LOG_GET_EQUIP_ID(log_code);
+ item_num = LOG_GET_ITEM_NUM(log_code);
+ byte_index = item_num/8 + 2;
+ byte_mask = 0x01 << (item_num % 8);
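+ /*
+ * Each equipment ID entry in the log mask has a 514 byte stride:
+ * equip ID (1 byte) + dirty flag (1 byte) + the per-item mask bytes
+ * (see create_dci_log_mask_tbl). byte_index starts at 2 to skip the
+ * two header bytes.
+ */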
+ offset = equip_id * 514;
+
+ if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
+ pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
+ __func__, offset, log_code, byte_index);
+ return 0;
+ }
+
+ log_mask_ptr = entry->dci_log_mask;
+ log_mask_ptr = log_mask_ptr + offset + byte_index;
+ return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+
+}
+
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+ uint16_t event_id)
+{
+ uint8_t *event_mask_ptr, byte_mask;
+ int byte_index, bit_index;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return 0;
+ }
+
+ byte_index = event_id/8;
+ bit_index = event_id % 8;
+ byte_mask = 0x1 << bit_index;
+
+ if (byte_index >= DCI_EVENT_MASK_SIZE) {
+ pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
+ __func__, event_id, byte_index);
+ return 0;
+ }
+
+ event_mask_ptr = entry->dci_event_mask;
+ event_mask_ptr = event_mask_ptr + byte_index;
+ return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+}
+
+static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
+{
+ if (!header)
+ return -ENOMEM;
+
+ switch (header->cmd_code) {
+ case 0x7d: /* Msg Mask Configuration */
+ case 0x73: /* Log Mask Configuration */
+ case 0x81: /* Event Mask Configuration */
+ case 0x82: /* Event Mask Change */
+ case 0x60: /* Event Mask Toggle */
+ return 1;
+ }
+
+ if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
+ switch (header->subsys_cmd_code) {
+ case 0x60: /* Extended Event Mask Config */
+ case 0x61: /* Extended Msg Mask Config */
+ case 0x62: /* Extended Log Mask Config */
+ case 0x20C: /* Set current Preset ID */
+ case 0x20D: /* Get current Preset ID */
+ case 0x218: /* HDLC Disabled Command */
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
+ int client_id)
+{
+ struct dci_pkt_req_entry_t *entry = NULL;
+
+ entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ driver->dci_tag++;
+ entry->client_id = client_id;
+ entry->uid = uid;
+ entry->tag = driver->dci_tag;
+ pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
+ entry->client_id, entry->uid, entry->tag);
+ list_add_tail(&entry->track, &driver->dci_req_list);
+
+ return entry;
+}
+
+static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
+{
+ struct list_head *start, *temp;
+ struct dci_pkt_req_entry_t *entry = NULL;
+
+ list_for_each_safe(start, temp, &driver->dci_req_list) {
+ entry = list_entry(start, struct dci_pkt_req_entry_t, track);
+ if (entry->tag == tag)
+ return entry;
+ }
+ return NULL;
+}
+
+static int diag_dci_remove_req_entry(unsigned char *buf, int len,
+ struct dci_pkt_req_entry_t *entry)
+{
+ uint16_t rsp_count = 0, delayed_rsp_id = 0;
+
+ if (!buf || len <= 0 || !entry) {
+ pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
+ __func__, buf, len, entry);
+ return -EIO;
+ }
+
+ /* It is an immediate response, delete it from the table */
+ if (*buf != 0x80) {
+ list_del(&entry->track);
+ kfree(entry);
+ return 1;
+ }
+
+ /* It is a delayed response. Check if the length is valid */
+ if (len < MIN_DELAYED_RSP_LEN) {
+ pr_err("diag: Invalid delayed rsp packet length %d\n", len);
+ return -EINVAL;
+ }
+
+ /*
+ * If the delayed response id field (uint16_t at byte 8) is 0 then
+ * there is only one response and we can remove the request entry.
+ */
+ delayed_rsp_id = *(uint16_t *)(buf + 8);
+ if (delayed_rsp_id == 0) {
+ list_del(&entry->track);
+ kfree(entry);
+ return 1;
+ }
+
+ /*
+ * Check the response count field (uint16_t at byte 10). The request
+ * entry can be deleted if this is the last response in the sequence,
+ * which is the case when the response count is 1 or the sign bit
+ * has been dropped.
+ */
+ rsp_count = *(uint16_t *)(buf + 10);
+ if (rsp_count > 0 && rsp_count < 0x1000) {
+ list_del(&entry->track);
+ kfree(entry);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
+{
+ struct diag_ctrl_dci_status *header = NULL;
+ unsigned char *temp = buf;
+ uint32_t read_len = 0;
+ uint8_t i;
+ int peripheral_mask, status;
+
+ if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
+ pr_err("diag: In %s, invalid buf %pK or length: %d\n",
+ __func__, buf, len);
+ return;
+ }
+
+ if (!VALID_DCI_TOKEN(token)) {
+ pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+ return;
+ }
+
+ header = (struct diag_ctrl_dci_status *)temp;
+ temp += sizeof(struct diag_ctrl_dci_status);
+ read_len += sizeof(struct diag_ctrl_dci_status);
+
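+ /*
+ * The status packet carries header->count entries, each consisting
+ * of a peripheral ID byte followed by a channel status byte.
+ */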
+ for (i = 0; i < header->count; i++) {
+ if (read_len > len) {
+ pr_err("diag: In %s, Invalid length len: %d\n",
+ __func__, len);
+ return;
+ }
+
+ switch (*(uint8_t *)temp) {
+ case PERIPHERAL_MODEM:
+ peripheral_mask = DIAG_CON_MPSS;
+ break;
+ case PERIPHERAL_LPASS:
+ peripheral_mask = DIAG_CON_LPASS;
+ break;
+ case PERIPHERAL_WCNSS:
+ peripheral_mask = DIAG_CON_WCNSS;
+ break;
+ case PERIPHERAL_SENSORS:
+ peripheral_mask = DIAG_CON_SENSORS;
+ break;
+ default:
+ pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
+ __func__, *(uint8_t *)temp);
+ return;
+ }
+ temp += sizeof(uint8_t);
+ read_len += sizeof(uint8_t);
+
+ status = (*(uint8_t *)temp) ? DIAG_STATUS_OPEN :
+ DIAG_STATUS_CLOSED;
+ temp += sizeof(uint8_t);
+ read_len += sizeof(uint8_t);
+ diag_dci_notify_client(peripheral_mask, status, token);
+ }
+}
+
+static void dci_process_ctrl_handshake_pkt(unsigned char *buf, int len,
+ int token)
+{
+ struct diag_ctrl_dci_handshake_pkt *header = NULL;
+ unsigned char *temp = buf;
+ int err = 0;
+
+ if (!buf || (len < sizeof(struct diag_ctrl_dci_handshake_pkt)))
+ return;
+
+ if (!VALID_DCI_TOKEN(token))
+ return;
+
+ header = (struct diag_ctrl_dci_handshake_pkt *)temp;
+ if (header->magic == DCI_MAGIC) {
+ dci_channel_status[token].open = 1;
+ err = dci_ops_tbl[token].send_log_mask(token);
+ if (err) {
+ pr_err("diag: In %s, unable to send log mask to token: %d, err: %d\n",
+ __func__, token, err);
+ }
+ err = dci_ops_tbl[token].send_event_mask(token);
+ if (err) {
+ pr_err("diag: In %s, unable to send event mask to token: %d, err: %d\n",
+ __func__, token, err);
+ }
+ }
+}
+
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token)
+{
+ unsigned char *temp = buf;
+ uint32_t ctrl_pkt_id;
+
+ diag_ws_on_read(DIAG_WS_DCI, len);
+ if (!buf) {
+ pr_err("diag: Invalid buffer in %s\n", __func__);
+ goto err;
+ }
+
+ if (len < (sizeof(uint8_t) + sizeof(uint32_t))) {
+ pr_err("diag: In %s, invalid length %d\n", __func__, len);
+ goto err;
+ }
+
+ /* Skip the Control packet command code */
+ temp += sizeof(uint8_t);
+ len -= sizeof(uint8_t);
+ ctrl_pkt_id = *(uint32_t *)temp;
+ switch (ctrl_pkt_id) {
+ case DIAG_CTRL_MSG_DCI_CONNECTION_STATUS:
+ dci_process_ctrl_status(temp, len, token);
+ break;
+ case DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT:
+ dci_process_ctrl_handshake_pkt(temp, len, token);
+ break;
+ default:
+ pr_debug("diag: In %s, unknown control pkt %d\n",
+ __func__, ctrl_pkt_id);
+ break;
+ }
+
+err:
+ /*
+ * DCI control packets are not consumed by the clients. Mimic client
+ * consumption by setting and clearing the wakeup source copy_count
+ * explicitly.
+ */
+ diag_ws_on_copy_fail(DIAG_WS_DCI);
+}
+
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+ int token)
+{
+ int tag;
+ struct diag_dci_client_tbl *entry = NULL;
+ void *temp_buf = NULL;
+ uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
+ uint32_t rsp_len = 0;
+ struct diag_dci_buffer_t *rsp_buf = NULL;
+ struct dci_pkt_req_entry_t *req_entry = NULL;
+ unsigned char *temp = buf;
+ int save_req_uid = 0;
+ struct diag_dci_pkt_rsp_header_t pkt_rsp_header;
+
+ if (!buf) {
+ pr_err("diag: Invalid pointer in %s\n", __func__);
+ return;
+ }
+ dci_cmd_code = *(uint8_t *)(temp);
+ if (dci_cmd_code == DCI_PKT_RSP_CODE) {
+ cmd_code_len = sizeof(uint8_t);
+ } else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
+ cmd_code_len = sizeof(uint32_t);
+ } else {
+ pr_err("diag: In %s, invalid command code %d\n", __func__,
+ dci_cmd_code);
+ return;
+ }
+ temp += cmd_code_len;
+ tag = *(int *)temp;
+ temp += sizeof(int);
+
+ /*
+ * The size of the response is the total length minus the length of
+ * the command code and the tag (int)
+ */
+ rsp_len = len - (cmd_code_len + sizeof(int));
+ if ((rsp_len == 0) || (rsp_len > (len - 5))) {
+ pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d",
+ __func__, len, rsp_len);
+ return;
+ }
+
+ mutex_lock(&driver->dci_mutex);
+ req_entry = diag_dci_get_request_entry(tag);
+ if (!req_entry) {
+ pr_err_ratelimited("diag: No matching client for DCI data\n");
+ mutex_unlock(&driver->dci_mutex);
+ return;
+ }
+
+ entry = diag_dci_get_client_entry(req_entry->client_id);
+ if (!entry) {
+ pr_err("diag: In %s, couldn't find client entry, id:%d\n",
+ __func__, req_entry->client_id);
+ mutex_unlock(&driver->dci_mutex);
+ return;
+ }
+
+ save_req_uid = req_entry->uid;
+ /* Remove the headers and send only the response to this function */
+ delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
+ if (delete_flag < 0) {
+ mutex_unlock(&driver->dci_mutex);
+ return;
+ }
+
+ mutex_lock(&entry->buffers[data_source].buf_mutex);
+ rsp_buf = entry->buffers[data_source].buf_cmd;
+
+ mutex_lock(&rsp_buf->data_mutex);
+ /*
+ * Check if we can fit the data in the rsp buffer. The total length of
+ * the rsp is the rsp length (rsp_len) + DCI_PKT_RSP_TYPE header (int)
+ * + field for length (int) + delete_flag (uint8_t)
+ */
+ if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
+ pr_alert("diag: create capacity for pkt rsp\n");
+ rsp_buf->capacity += 9 + rsp_len;
+ temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
+ GFP_KERNEL);
+ if (!temp_buf) {
+ pr_err("diag: DCI realloc failed\n");
+ mutex_unlock(&rsp_buf->data_mutex);
+ mutex_unlock(&entry->buffers[data_source].buf_mutex);
+ mutex_unlock(&driver->dci_mutex);
+ return;
+ }
+ rsp_buf->data = temp_buf;
+ }
+
+ /* Fill in packet response header information */
+ pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
+ /* Packet Length = Response Length + Length of uid field (int) */
+ pkt_rsp_header.length = rsp_len + sizeof(int);
+ pkt_rsp_header.delete_flag = delete_flag;
+ pkt_rsp_header.uid = save_req_uid;
+ memcpy(rsp_buf->data + rsp_buf->data_len, &pkt_rsp_header,
+ sizeof(struct diag_dci_pkt_rsp_header_t));
+ rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
+ memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
+ rsp_buf->data_len += rsp_len;
+ rsp_buf->data_source = data_source;
+
+ mutex_unlock(&rsp_buf->data_mutex);
+
+ /*
+ * Add directly to the list for writing responses to the
+ * userspace as these shouldn't be buffered and shouldn't wait
+ * for log and event buffers to be full
+ */
+ dci_add_buffer_to_list(entry, rsp_buf);
+ mutex_unlock(&entry->buffers[data_source].buf_mutex);
+ mutex_unlock(&driver->dci_mutex);
+}
+
+static void copy_ext_hdr(struct diag_dci_buffer_t *data_buffer, void *ext_hdr)
+{
+ if (!data_buffer) {
+ pr_err("diag: In %s, data buffer is NULL", __func__);
+ return;
+ }
+
+ *(int *)(data_buffer->data + data_buffer->data_len) =
+ DCI_EXT_HDR_TYPE;
+ data_buffer->data_len += sizeof(int);
+ memcpy(data_buffer->data + data_buffer->data_len, ext_hdr,
+ EXT_HDR_LEN);
+ data_buffer->data_len += EXT_HDR_LEN;
+}
+
+static void copy_dci_event(unsigned char *buf, int len,
+ struct diag_dci_client_tbl *client, int data_source,
+ void *ext_hdr)
+{
+ struct diag_dci_buffer_t *data_buffer = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ int err = 0, total_len = 0;
+
+ if (!buf || !client) {
+ pr_err("diag: Invalid pointers in %s", __func__);
+ return;
+ }
+
+ total_len = sizeof(int) + len;
+ if (ext_hdr)
+ total_len += sizeof(int) + EXT_HDR_LEN;
+
+ proc_buf = &client->buffers[data_source];
+ mutex_lock(&proc_buf->buf_mutex);
+ mutex_lock(&proc_buf->health_mutex);
+ err = diag_dci_get_buffer(client, data_source, total_len);
+ if (err) {
+ if (err == -ENOMEM)
+ proc_buf->health.dropped_events++;
+ else
+ pr_err("diag: In %s, invalid packet\n", __func__);
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+ return;
+ }
+
+ data_buffer = proc_buf->buf_curr;
+
+ proc_buf->health.received_events++;
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+
+ mutex_lock(&data_buffer->data_mutex);
+ if (ext_hdr)
+ copy_ext_hdr(data_buffer, ext_hdr);
+
+ *(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
+ data_buffer->data_len += sizeof(int);
+ memcpy(data_buffer->data + data_buffer->data_len, buf, len);
+ data_buffer->data_len += len;
+ data_buffer->data_source = data_source;
+ mutex_unlock(&data_buffer->data_mutex);
+
+}
+
+void extract_dci_events(unsigned char *buf, int len, int data_source,
+ int token, void *ext_hdr)
+{
+ uint16_t event_id, event_id_packet, length, temp_len;
+ uint8_t payload_len, payload_len_field;
+ uint8_t timestamp[8] = {0}, timestamp_len;
+ unsigned char event_data[MAX_EVENT_SIZE];
+ unsigned int total_event_len;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
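+ /*
+ * Each event_id_packet below is a 16-bit field: bits 0-11 carry the
+ * event ID, bits 13-14 the payload length (a value of 3 means an
+ * explicit length byte follows) and bit 15 flags a truncated 2-byte
+ * timestamp.
+ */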
+ length = *(uint16_t *)(buf + 1); /* total length of event series */
+ if (length == 0) {
+ pr_err("diag: Incoming dci event length is invalid\n");
+ return;
+ }
+ /*
+ * Move directly to the start of the event series. 1 byte for
+ * event code and 2 bytes for the length field.
+ * The length field gives the total length excluding the cmd_code and
+ * the length field itself, so event parsing should continue until
+ * the end of the series.
+ */
+ temp_len = 3;
+ while (temp_len < length) {
+ event_id_packet = *(uint16_t *)(buf + temp_len);
+ event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
+ if (event_id_packet & 0x8000) {
+ /* The packet carries only the two least significant
+ * bytes of the timestamp
+ */
+ timestamp_len = 2;
+ } else {
+ /* The packet has the full timestamp. The first event
+ * will always have full timestamp. Save it in the
+ * timestamp buffer and use it for subsequent events if
+ * necessary.
+ */
+ timestamp_len = 8;
+ memcpy(timestamp, buf + temp_len + 2, timestamp_len);
+ }
+ /* 13th and 14th bit represent the payload length */
+ if (((event_id_packet & 0x6000) >> 13) == 3) {
+ payload_len_field = 1;
+ payload_len = *(uint8_t *)
+ (buf + temp_len + 2 + timestamp_len);
+ if (payload_len < (MAX_EVENT_SIZE - 13)) {
+ /* copy the payload length and the payload */
+ memcpy(event_data + 12, buf + temp_len + 2 +
+ timestamp_len, 1);
+ memcpy(event_data + 13, buf + temp_len + 2 +
+ timestamp_len + 1, payload_len);
+ } else {
+ pr_err("diag: event > %d, payload_len = %d\n",
+ (MAX_EVENT_SIZE - 13), payload_len);
+ return;
+ }
+ } else {
+ payload_len_field = 0;
+ payload_len = (event_id_packet & 0x6000) >> 13;
+ /* copy the payload */
+ memcpy(event_data + 12, buf + temp_len + 2 +
+ timestamp_len, payload_len);
+ }
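+ /*
+ * event_data layout assembled for the client:
+ * bytes 0-1 : event length (10 + payload length fields)
+ * bytes 2-3 : event ID
+ * bytes 4-11 : full 8-byte timestamp
+ * bytes 12+ : optional payload length byte and the payload
+ */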
+
+ /* Before copying the data to userspace, check if we are still
+ * within the buffer limit. This is an error case, don't count
+ * it towards the health statistics.
+ *
+ * Here, the offset of 2 bytes (uint16_t) accounts for the
+ * event_id_packet length field
+ */
+ temp_len += sizeof(uint16_t) + timestamp_len +
+ payload_len_field + payload_len;
+ if (temp_len > len) {
+ pr_err("diag: Invalid length in %s, len: %d, read: %d",
+ __func__, len, temp_len);
+ return;
+ }
+
+ /* The 10 covers 2 bytes for the event ID plus a timestamp length
+ * hard-coded to 8, as each delivered event carries the full
+ * timestamp.
+ */
+ *(uint16_t *)(event_data) = 10 +
+ payload_len_field + payload_len;
+ *(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
+ memcpy(event_data + 4, timestamp, 8);
+ /* 2 bytes for the event length field which is added to
+ * the event data.
+ */
+ total_event_len = 2 + 10 + payload_len_field + payload_len;
+ /* parse through event mask tbl of each client and check mask */
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl,
+ track);
+ if (entry->client_info.token != token)
+ continue;
+ if (diag_dci_query_event_mask(entry, event_id)) {
+ /* copy to client buffer */
+ copy_dci_event(event_data, total_event_len,
+ entry, data_source, ext_hdr);
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+ }
+}
+
+static void copy_dci_log(unsigned char *buf, int len,
+ struct diag_dci_client_tbl *client, int data_source,
+ void *ext_hdr)
+{
+ uint16_t log_length = 0;
+ struct diag_dci_buffer_t *data_buffer = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ int err = 0, total_len = 0;
+
+ if (!buf || !client) {
+ pr_err("diag: Invalid pointers in %s", __func__);
+ return;
+ }
+
+ log_length = *(uint16_t *)(buf + 2);
+ if (log_length > USHRT_MAX - 4) {
+ pr_err("diag: Integer overflow in %s, log_len: %d",
+ __func__, log_length);
+ return;
+ }
+ total_len = sizeof(int) + log_length;
+ if (ext_hdr)
+ total_len += sizeof(int) + EXT_HDR_LEN;
+
+ /* Check that the log fits within len. The check includes the
+ * first 4 bytes for the log code (2) and the length bytes (2)
+ */
+ if ((log_length + sizeof(uint16_t) + 2) > len) {
+ pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
+ __func__, log_length, len);
+ return;
+ }
+
+ proc_buf = &client->buffers[data_source];
+ mutex_lock(&proc_buf->buf_mutex);
+ mutex_lock(&proc_buf->health_mutex);
+ err = diag_dci_get_buffer(client, data_source, total_len);
+ if (err) {
+ if (err == -ENOMEM)
+ proc_buf->health.dropped_logs++;
+ else
+ pr_err("diag: In %s, invalid packet\n", __func__);
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+ return;
+ }
+
+ data_buffer = proc_buf->buf_curr;
+ proc_buf->health.received_logs++;
+ mutex_unlock(&proc_buf->health_mutex);
+ mutex_unlock(&proc_buf->buf_mutex);
+
+ mutex_lock(&data_buffer->data_mutex);
+ if (!data_buffer->data) {
+ mutex_unlock(&data_buffer->data_mutex);
+ return;
+ }
+ if (ext_hdr)
+ copy_ext_hdr(data_buffer, ext_hdr);
+
+ *(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
+ data_buffer->data_len += sizeof(int);
+ memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
+ log_length);
+ data_buffer->data_len += log_length;
+ data_buffer->data_source = data_source;
+ mutex_unlock(&data_buffer->data_mutex);
+}
+
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
+ void *ext_hdr)
+{
+ uint16_t log_code, read_bytes = 0;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ if (!buf) {
+ pr_err("diag: In %s buffer is NULL\n", __func__);
+ return;
+ }
+
+ /* The first six bytes of the incoming log packet contain the
+ * command code (2), the length of the packet (2) and the length
+ * of the log (2)
+ */
+ log_code = *(uint16_t *)(buf + 6);
+ read_bytes += sizeof(uint16_t) + 6;
+ if (read_bytes > len) {
+ pr_err("diag: Invalid length in %s, len: %d, read: %d",
+ __func__, len, read_bytes);
+ return;
+ }
+
+ /* parse through log mask table of each client and check mask */
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ if (diag_dci_query_log_mask(entry, log_code)) {
+ pr_debug("\t log code %x needed by client %d",
+ log_code, entry->client->tgid);
+ /* copy to client buffer */
+ copy_dci_log(buf, len, entry, data_source, ext_hdr);
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+}
+
+void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
+ int token)
+{
+ uint8_t version, pkt_cmd_code = 0;
+ unsigned char *pkt = NULL;
+
+ if (!buf) {
+ pr_err("diag: In %s buffer is NULL\n", __func__);
+ return;
+ }
+
+ version = *(uint8_t *)(buf + 1);
+ if (version < EXT_HDR_VERSION) {
+ pr_err("diag: %s, Extended header with invalid version: %d\n",
+ __func__, version);
+ return;
+ }
+
+ pkt = buf + EXT_HDR_LEN;
+ pkt_cmd_code = *(uint8_t *)pkt;
+ len -= EXT_HDR_LEN;
+ if (len < 0) {
+ pr_err("diag: %s, Invalid length len: %d\n", __func__, len);
+ return;
+ }
+
+ switch (pkt_cmd_code) {
+ case LOG_CMD_CODE:
+ extract_dci_log(pkt, len, data_source, token, buf);
+ break;
+ case EVENT_CMD_CODE:
+ extract_dci_events(pkt, len, data_source, token, buf);
+ break;
+ default:
+ pr_err("diag: %s unsupported cmd_code: %d, data_source: %d\n",
+ __func__, pkt_cmd_code, data_source);
+ return;
+ }
+}
+
+void diag_dci_channel_open_work(struct work_struct *work)
+{
+ int i, j;
+ char dirty_bits[16];
+ uint8_t *client_log_mask_ptr;
+ uint8_t *log_mask_ptr;
+ int ret;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ /* Update apps and peripheral(s) with the dci log and event masks */
+ memset(dirty_bits, 0, 16 * sizeof(uint8_t));
+
+ /*
+ * From each log entry used by each client, determine
+ * which log entries in the cumulative logs that need
+ * to be updated on the peripheral.
+ */
+ mutex_lock(&driver->dci_mutex);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != DCI_LOCAL_PROC)
+ continue;
+ client_log_mask_ptr = entry->dci_log_mask;
+ for (j = 0; j < 16; j++) {
+ if (*(client_log_mask_ptr+1))
+ dirty_bits[j] = 1;
+ client_log_mask_ptr += 514;
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+
+ mutex_lock(&dci_log_mask_mutex);
+ /* Update the appropriate dirty bits in the cumulative mask */
+ log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+ for (i = 0; i < 16; i++) {
+ if (dirty_bits[i])
+ *(log_mask_ptr+1) = dirty_bits[i];
+
+ log_mask_ptr += 514;
+ }
+ mutex_unlock(&dci_log_mask_mutex);
+
+ /* Send updated mask to userspace clients */
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ /* Send updated log mask to peripherals */
+ ret = dci_ops_tbl[DCI_LOCAL_PROC].send_log_mask(DCI_LOCAL_PROC);
+
+ /* Send updated event mask to userspace clients */
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ /* Send updated event mask to peripheral */
+ ret = dci_ops_tbl[DCI_LOCAL_PROC].send_event_mask(DCI_LOCAL_PROC);
+}
+
+void diag_dci_notify_client(int peripheral_mask, int data, int proc)
+{
+ int stat;
+ struct siginfo info;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ memset(&info, 0, sizeof(struct siginfo));
+ info.si_code = SI_QUEUE;
+ info.si_int = (peripheral_mask | data);
+ if (data == DIAG_STATUS_OPEN)
+ dci_ops_tbl[proc].peripheral_status |= peripheral_mask;
+ else
+ dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
+
+ /* Notify the DCI process that the peripheral DCI Channel is up */
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != proc)
+ continue;
+ if (entry->client_info.notification_list & peripheral_mask) {
+ info.si_signo = entry->client_info.signal_type;
+ if (entry->client &&
+ entry->tgid == entry->client->tgid) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "entry tgid = %d, dci client tgid = %d\n",
+ entry->tgid, entry->client->tgid);
+ stat = send_sig_info(
+ entry->client_info.signal_type,
+ &info, entry->client);
+ if (stat)
+ pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
+ info.si_int, stat);
+ } else
+ pr_err("diag: client data is corrupted, signal data: 0x%x\n",
+ info.si_int);
+ }
+ }
+}
+
+static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
+ unsigned char *buf, int len, int tag)
+{
+ int i, status = DIAG_DCI_NO_ERROR;
+ uint32_t write_len = 0;
+ struct diag_dci_pkt_header_t header;
+
+ if (!entry)
+ return -EIO;
+
+ if (len < 1 || len > DIAG_MAX_REQ_SIZE) {
+ pr_err("diag: dci: In %s, invalid length %d, max_length: %d\n",
+ __func__, len, DIAG_MAX_REQ_SIZE);
+ return -EIO;
+ }
+
+ if ((len + sizeof(header) + sizeof(uint8_t)) > DCI_BUF_SIZE) {
+ pr_err("diag: dci: In %s, invalid length %d for apps_dci_buf, max_length: %d\n",
+ __func__, len, DIAG_MAX_REQ_SIZE);
+ return -EIO;
+ }
+
+ mutex_lock(&driver->dci_mutex);
+ /* prepare DCI packet */
+ header.start = CONTROL_CHAR;
+ header.version = 1;
+ header.len = len + sizeof(int) + sizeof(uint8_t);
+ header.pkt_code = DCI_PKT_RSP_CODE;
+ header.tag = tag;
+ memcpy(driver->apps_dci_buf, &header, sizeof(header));
+ write_len += sizeof(header);
+ memcpy(driver->apps_dci_buf + write_len, buf, len);
+ write_len += len;
+ *(uint8_t *)(driver->apps_dci_buf + write_len) = CONTROL_CHAR;
+ write_len += sizeof(uint8_t);
+
+ /* This command is registered locally on the Apps */
+ if (entry->proc == APPS_DATA) {
+ diag_update_pkt_buffer(driver->apps_dci_buf, write_len,
+ DCI_PKT_TYPE);
+ diag_update_sleeping_process(entry->pid, DCI_PKT_TYPE);
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NO_ERROR;
+ }
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ if (entry->proc == i) {
+ status = 1;
+ break;
+ }
+
+ if (status) {
+ status = diag_dci_write_proc(entry->proc,
+ DIAG_DATA_TYPE,
+ driver->apps_dci_buf,
+ write_len);
+ } else {
+ pr_err("diag: Cannot send packet to peripheral %d",
+ entry->proc);
+ status = DIAG_DCI_SEND_DATA_FAIL;
+ }
+ mutex_unlock(&driver->dci_mutex);
+ return status;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+unsigned char *dci_get_buffer_from_bridge(int token)
+{
+ uint8_t retries = 0, max_retries = 3;
+ unsigned char *buf = NULL;
+
+ do {
+ buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
+ dci_ops_tbl[token].mempool);
+ if (!buf) {
+ usleep_range(5000, 5100);
+ retries++;
+ } else
+ break;
+ } while (retries < max_retries);
+
+ return buf;
+}
+
+int diag_dci_write_bridge(int token, unsigned char *buf, int len)
+{
+ return diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, len);
+}
+
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
+{
+ int token = BRIDGE_TO_TOKEN(index);
+
+ if (!VALID_DCI_TOKEN(token)) {
+ pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
+ return -EINVAL;
+ }
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+ int token)
+{
+ unsigned char *buf = NULL;
+ struct diag_dci_header_t dci_header;
+ int dci_header_size = sizeof(struct diag_dci_header_t);
+ int ret = DIAG_DCI_NO_ERROR;
+ uint32_t write_len = 0;
+
+ if (!data)
+ return -EIO;
+
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ /*
+ * The Length of the DCI packet = length of the command + tag (int) +
+ * the command code size (uint8_t)
+ */
+ dci_header.length = len + sizeof(int) + sizeof(uint8_t);
+ dci_header.cmd_code = DCI_PKT_RSP_CODE;
+
+ memcpy(buf + write_len, &dci_header, dci_header_size);
+ write_len += dci_header_size;
+ *(int *)(buf + write_len) = tag;
+ write_len += sizeof(int);
+ memcpy(buf + write_len, data, len);
+ write_len += len;
+ *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+ write_len += sizeof(uint8_t);
+
+ ret = diag_dci_write_bridge(token, buf, write_len);
+ if (ret) {
+ pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
+ token, ret);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ } else {
+ ret = DIAG_DCI_NO_ERROR;
+ }
+
+ return ret;
+}
+#else
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+ int token)
+{
+ return DIAG_DCI_NO_ERROR;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_dci_send_handshake_pkt(int index)
+{
+ int err = 0;
+ int token = BRIDGE_TO_TOKEN(index);
+ int write_len = 0;
+ struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
+ unsigned char *buf = NULL;
+ struct diag_dci_header_t dci_header;
+
+ if (!VALID_DCI_TOKEN(token)) {
+ pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+ return -EINVAL;
+ }
+
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ /* Include the cmd code (uint8_t) in the length */
+ dci_header.length = sizeof(ctrl_pkt) + sizeof(uint8_t);
+ dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+ memcpy(buf, &dci_header, sizeof(dci_header));
+ write_len += sizeof(dci_header);
+
+ ctrl_pkt.ctrl_pkt_id = DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT;
+ /*
+ * The control packet data length accounts for the version (uint32_t)
+ * of the packet and the magic number (uint32_t).
+ */
+ ctrl_pkt.ctrl_pkt_data_len = 2 * sizeof(uint32_t);
+ ctrl_pkt.version = 1;
+ ctrl_pkt.magic = DCI_MAGIC;
+ memcpy(buf + write_len, &ctrl_pkt, sizeof(ctrl_pkt));
+ write_len += sizeof(ctrl_pkt);
+
+ *(uint8_t *)(buf + write_len) = CONTROL_CHAR;
+ write_len += sizeof(uint8_t);
+
+ err = diag_dci_write_bridge(token, buf, write_len);
+ if (err) {
+ pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
+ token, err);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ return err;
+ }
+
+ mod_timer(&(dci_channel_status[token].wait_time),
+ jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+
+ return 0;
+}
+#else
+int diag_dci_send_handshake_pkt(int index)
+{
+ return 0;
+}
+#endif
+
+static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
+ unsigned char *req_buf, int req_len,
+ int tag)
+{
+ uint8_t cmd_code, subsys_id, i, goto_download = 0;
+ uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
+ uint16_t ss_cmd_code;
+ uint32_t write_len = 0;
+ unsigned char *dest_buf = driver->apps_dci_buf;
+ unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
+ struct diag_dci_pkt_header_t dci_header;
+
+ if (!pkt_header || !req_buf || req_len <= 0 || tag < 0)
+ return -EIO;
+
+ cmd_code = pkt_header->cmd_code;
+ subsys_id = pkt_header->subsys_id;
+ ss_cmd_code = pkt_header->subsys_cmd_code;
+
+ if (cmd_code == DIAG_CMD_DOWNLOAD) {
+ *payload_ptr = DIAG_CMD_DOWNLOAD;
+ write_len = sizeof(uint8_t);
+ goto_download = 1;
+ goto fill_buffer;
+ } else if (cmd_code == DIAG_CMD_VERSION) {
+ if (chk_polling_response()) {
+ for (i = 0; i < 55; i++, write_len++, payload_ptr++)
+ *(payload_ptr) = 0;
+ goto fill_buffer;
+ }
+ } else if (cmd_code == DIAG_CMD_EXT_BUILD) {
+ if (chk_polling_response()) {
+ *payload_ptr = DIAG_CMD_EXT_BUILD;
+ write_len = sizeof(uint8_t);
+ payload_ptr += sizeof(uint8_t);
+ for (i = 0; i < 8; i++, write_len++, payload_ptr++)
+ *(payload_ptr) = 0;
+ *(int *)(payload_ptr) = chk_config_get_id();
+ write_len += sizeof(int);
+ goto fill_buffer;
+ }
+ } else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
+ write_len = diag_cmd_log_on_demand(req_buf, req_len,
+ payload_ptr,
+ APPS_BUF_SIZE - header_len);
+ goto fill_buffer;
+ } else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
+ return DIAG_DCI_TABLE_ERR;
+ }
+
+ if (subsys_id == DIAG_SS_DIAG) {
+ if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ *(uint32_t *)(payload_ptr + write_len) =
+ DIAG_MAX_REQ_SIZE;
+ write_len += sizeof(uint32_t);
+ } else if (ss_cmd_code == DIAG_DIAG_STM) {
+ write_len = diag_process_stm_cmd(req_buf, payload_ptr);
+ }
+ } else if (subsys_id == DIAG_SS_PARAMS) {
+ if (ss_cmd_code == DIAG_DIAG_POLL) {
+ if (chk_polling_response()) {
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ payload_ptr += write_len;
+ for (i = 0; i < 12; i++, write_len++) {
+ *(payload_ptr) = 0;
+ payload_ptr++;
+ }
+ }
+ } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ *(int *)(payload_ptr + write_len) = wrap_enabled;
+ write_len += sizeof(int);
+ } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
+ wrap_enabled = true;
+ memcpy(payload_ptr, pkt_header,
+ sizeof(struct diag_pkt_header_t));
+ write_len = sizeof(struct diag_pkt_header_t);
+ *(uint16_t *)(payload_ptr + write_len) = wrap_count;
+ write_len += sizeof(uint16_t);
+ } else if (ss_cmd_code == DIAG_EXT_MOBILE_ID) {
+ write_len = diag_cmd_get_mobile_id(req_buf, req_len,
+ payload_ptr,
+ APPS_BUF_SIZE - header_len);
+ }
+ }
+
+fill_buffer:
+ if (write_len > 0) {
+ /* Check if we are within the range of the buffer*/
+ if (write_len + header_len > DIAG_MAX_REQ_SIZE) {
+ pr_err("diag: In %s, invalid length %d\n", __func__,
+ write_len + header_len);
+ return -ENOMEM;
+ }
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ /*
+ * Length of the rsp pkt = actual data len + pkt rsp code
+ * (uint8_t) + tag (int)
+ */
+ dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
+ dci_header.pkt_code = DCI_PKT_RSP_CODE;
+ dci_header.tag = tag;
+ driver->in_busy_dcipktdata = 1;
+ memcpy(dest_buf, &dci_header, header_len);
+ diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
+ dci_header.len);
+ driver->in_busy_dcipktdata = 0;
+
+ if (goto_download) {
+ /*
+ * Sleep for some time so that the response reaches the
+ * client. The value 5000 was chosen empirically as an optimum
+ * time for the response to reach the client.
+ */
+ usleep_range(5000, 5100);
+ /* call download API */
+ msm_set_restart_mode(RESTART_DLOAD);
+ pr_alert("diag: download mode set, Rebooting SoC..\n");
+ kernel_restart(NULL);
+ }
+ return DIAG_DCI_NO_ERROR;
+ }
+
+ return DIAG_DCI_TABLE_ERR;
+}
+
+static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
+{
+ int ret = DIAG_DCI_TABLE_ERR;
+ int common_cmd = 0;
+ struct diag_pkt_header_t *header = NULL;
+ unsigned char *temp = buf;
+ unsigned char *req_buf = NULL;
+ uint8_t retry_count = 0, max_retries = 3;
+ uint32_t read_len = 0, req_len = len;
+ struct dci_pkt_req_entry_t *req_entry = NULL;
+ struct diag_dci_client_tbl *dci_entry = NULL;
+ struct dci_pkt_req_t req_hdr;
+ struct diag_cmd_reg_t *reg_item;
+ struct diag_cmd_reg_entry_t reg_entry;
+ struct diag_cmd_reg_entry_t *temp_entry;
+
+ if (!buf)
+ return -EIO;
+
+ if (len <= sizeof(struct dci_pkt_req_t) || len > DCI_REQ_BUF_SIZE) {
+ pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
+ return -EIO;
+ }
+
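+ /*
+ * A DCI request begins with a dci_pkt_req_t header carrying the
+ * request uid and the client_id, followed by the diag command to be
+ * forwarded.
+ */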
+ req_hdr = *(struct dci_pkt_req_t *)temp;
+ temp += sizeof(struct dci_pkt_req_t);
+ read_len += sizeof(struct dci_pkt_req_t);
+ req_len -= sizeof(struct dci_pkt_req_t);
+ req_buf = temp; /* Start of the Request */
+ header = (struct diag_pkt_header_t *)temp;
+ temp += sizeof(struct diag_pkt_header_t);
+ read_len += sizeof(struct diag_pkt_header_t);
+ if (read_len >= DCI_REQ_BUF_SIZE) {
+ pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
+ read_len);
+ return -EIO;
+ }
+
+ mutex_lock(&driver->dci_mutex);
+ dci_entry = diag_dci_get_client_entry(req_hdr.client_id);
+ if (!dci_entry) {
+ pr_err("diag: Invalid client %d in %s\n",
+ req_hdr.client_id, __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NO_REG;
+ }
+
+ /* Check if the command is allowed on DCI */
+ if (diag_dci_filter_commands(header)) {
+ pr_debug("diag: command not supported %d %d %d",
+ header->cmd_code, header->subsys_id,
+ header->subsys_cmd_code);
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_SEND_DATA_FAIL;
+ }
+
+ common_cmd = diag_check_common_cmd(header);
+ if (common_cmd < 0) {
+ pr_debug("diag: error in checking common command, %d\n",
+ common_cmd);
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_SEND_DATA_FAIL;
+ }
+
+ /*
+ * Previous packet is yet to be consumed by the client. Wait
+ * till the buffer is free.
+ */
+ while (retry_count < max_retries) {
+ retry_count++;
+ if (driver->in_busy_dcipktdata)
+ usleep_range(10000, 10100);
+ else
+ break;
+ }
+ /* The buffer is still busy */
+ if (driver->in_busy_dcipktdata) {
+ pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -EAGAIN;
+ }
+
+ /* Register this new DCI packet */
+ req_entry = diag_register_dci_transaction(req_hdr.uid,
+ req_hdr.client_id);
+ if (!req_entry) {
+ pr_alert("diag: registering new DCI transaction failed\n");
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NO_REG;
+ }
+ mutex_unlock(&driver->dci_mutex);
+
+ /*
+ * If the client has registered for remote data, route the packet to the
+ * remote processor
+ */
+ if (dci_entry->client_info.token > 0) {
+ ret = diag_send_dci_pkt_remote(req_buf, req_len, req_entry->tag,
+ dci_entry->client_info.token);
+ return ret;
+ }
+
+ /* Check if it is a dedicated Apps command */
+ ret = diag_dci_process_apps_pkt(header, req_buf, req_len,
+ req_entry->tag);
+ if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
+ return ret;
+
+ reg_entry.cmd_code = header->cmd_code;
+ reg_entry.subsys_id = header->subsys_id;
+ reg_entry.cmd_code_hi = header->subsys_cmd_code;
+ reg_entry.cmd_code_lo = header->subsys_cmd_code;
+
+ temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
+ if (temp_entry) {
+ reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+ entry);
+ ret = diag_send_dci_pkt(reg_item, req_buf, req_len,
+ req_entry->tag);
+ } else {
+ DIAG_LOG(DIAG_DEBUG_DCI, "Command not found: %02x %02x %02x\n",
+ reg_entry.cmd_code, reg_entry.subsys_id,
+ reg_entry.cmd_code_hi);
+ }
+
+ return ret;
+}
+
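+/*
+ * Entry point for DCI transactions from userspace. The first int of the
+ * payload selects the transaction type: a positive value is a packet
+ * request/response, DCI_LOG_TYPE configures log masks and DCI_EVENT_TYPE
+ * configures event masks.
+ */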
+int diag_process_dci_transaction(unsigned char *buf, int len)
+{
+ unsigned char *temp = buf;
+ uint16_t log_code, item_num;
+ int ret = -1, found = 0, client_id = 0, client_token = 0;
+ int count, set_mask, num_codes, bit_index, event_id, offset = 0;
+ unsigned int byte_index, read_len = 0;
+ uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
+ uint8_t *event_mask_ptr;
+ struct diag_dci_client_tbl *dci_entry = NULL;
+
+ if (!temp) {
+ pr_err("diag: Invalid buffer in %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* This is Pkt request/response transaction */
+ if (*(int *)temp > 0) {
+ return diag_process_dci_pkt_rsp(buf, len);
+ } else if (*(int *)temp == DCI_LOG_TYPE) {
+ /* Minimum length of a log mask config is 12 + 2 bytes for
+ * at least one log code to be set or reset.
+ */
+ if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
+ pr_err("diag: dci: Invalid length in %s\n", __func__);
+ return -EIO;
+ }
+
+ /* Extract each log code and put in client table */
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ client_id = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ set_mask = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ num_codes = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+
+ /* Find client table entry */
+ mutex_lock(&driver->dci_mutex);
+ dci_entry = diag_dci_get_client_entry(client_id);
+ if (!dci_entry) {
+ pr_err("diag: In %s, invalid client\n", __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ client_token = dci_entry->client_info.token;
+
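+ /* Check for a positive number of log codes. Also, the number
+ * of log codes should fit in the buffer along with set_mask
+ * and num_codes which are 4 bytes each.
+ */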
+ if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+ pr_err("diag: dci: Invalid number of log codes %d\n",
+ num_codes);
+ mutex_unlock(&driver->dci_mutex);
+ return -EIO;
+ }
+
+ head_log_mask_ptr = dci_entry->dci_log_mask;
+ if (!head_log_mask_ptr) {
+ pr_err("diag: dci: Invalid Log mask pointer in %s\n",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -ENOMEM;
+ }
+ pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
+ count = 0; /* iterator for extracting log codes */
+
+ while (count < num_codes) {
+ if (read_len >= USER_SPACE_DATA) {
+ pr_err("diag: dci: Invalid length for log type in %s",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -EIO;
+ }
+ log_code = *(uint16_t *)temp;
+ equip_id = LOG_GET_EQUIP_ID(log_code);
+ item_num = LOG_GET_ITEM_NUM(log_code);
+ byte_index = item_num/8 + 2;
+ if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
+ pr_err("diag: dci: Log type, invalid byte index\n");
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ byte_mask = 0x01 << (item_num % 8);
+ /*
+ * Parse through log mask table and find
+ * relevant range
+ */
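+ /* Each equip id block is 514 bytes: 1 byte equip id,
+ * 1 dirty byte and 512 mask bytes.
+ */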
+ log_mask_ptr = head_log_mask_ptr;
+ found = 0;
+ offset = 0;
+ while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
+ if (*log_mask_ptr == equip_id) {
+ found = 1;
+ pr_debug("diag: find equip id = %x at %pK\n",
+ equip_id, log_mask_ptr);
+ break;
+ }
+ pr_debug("diag: did not find equip id = %x at %d\n",
+ equip_id, *log_mask_ptr);
+ log_mask_ptr += 514;
+ offset += 514;
+ }
+ if (!found) {
+ pr_err("diag: dci equip id not found\n");
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ *(log_mask_ptr+1) = 1; /* set the dirty byte */
+ log_mask_ptr = log_mask_ptr + byte_index;
+ if (set_mask)
+ *log_mask_ptr |= byte_mask;
+ else
+ *log_mask_ptr &= ~byte_mask;
+ /* add to cumulative mask */
+ update_dci_cumulative_log_mask(
+ offset, byte_index,
+ byte_mask, client_token);
+ temp += 2;
+ read_len += 2;
+ count++;
+ ret = DIAG_DCI_NO_ERROR;
+ }
+ /* send updated mask to userspace clients */
+ if (client_token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ /* send updated mask to peripherals */
+ ret = dci_ops_tbl[client_token].send_log_mask(client_token);
+ mutex_unlock(&driver->dci_mutex);
+ } else if (*(int *)temp == DCI_EVENT_TYPE) {
+ /* Minimum length of an event mask config is 12 + 4 bytes for
+ * at least one event id to be set or reset.
+ */
+ if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
+ pr_err("diag: dci: Invalid length in %s\n", __func__);
+ return -EIO;
+ }
+
+ /* Extract each event id and put in client table */
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ client_id = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ set_mask = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ num_codes = *(int *)temp;
+ temp += sizeof(int);
+ read_len += sizeof(int);
+
+ /* find client table entry */
+ mutex_lock(&driver->dci_mutex);
+ dci_entry = diag_dci_get_client_entry(client_id);
+ if (!dci_entry) {
+ pr_err("diag: In %s, invalid client\n", __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ client_token = dci_entry->client_info.token;
+
+ /* Check for positive number of event ids. Also, the number of
+ * event ids should fit in the buffer along with set_mask and
+ * num_codes which are 4 bytes each.
+ */
+ if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+ pr_err("diag: dci: Invalid number of event ids %d\n",
+ num_codes);
+ mutex_unlock(&driver->dci_mutex);
+ return -EIO;
+ }
+
+ event_mask_ptr = dci_entry->dci_event_mask;
+ if (!event_mask_ptr) {
+ pr_err("diag: dci: Invalid event mask pointer in %s\n",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -ENOMEM;
+ }
+ pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
+ count = 0; /* iterator for extracting event ids */
+ while (count < num_codes) {
+ if (read_len >= USER_SPACE_DATA) {
+ pr_err("diag: dci: Invalid length for event type in %s",
+ __func__);
+ mutex_unlock(&driver->dci_mutex);
+ return -EIO;
+ }
+ event_id = *(int *)temp;
+ byte_index = event_id/8;
+ if (byte_index >= DCI_EVENT_MASK_SIZE) {
+ pr_err("diag: dci: Event type, invalid byte index\n");
+ mutex_unlock(&driver->dci_mutex);
+ return ret;
+ }
+ bit_index = event_id % 8;
+ byte_mask = 0x1 << bit_index;
+ /*
+ * Parse through event mask table and set
+ * relevant byte & bit combination
+ */
+ if (set_mask)
+ *(event_mask_ptr + byte_index) |= byte_mask;
+ else
+ *(event_mask_ptr + byte_index) &= ~byte_mask;
+ /* add to cumulative mask */
+ update_dci_cumulative_event_mask(byte_index, byte_mask,
+ client_token);
+ temp += sizeof(int);
+ read_len += sizeof(int);
+ count++;
+ ret = DIAG_DCI_NO_ERROR;
+ }
+ /* send updated mask to userspace clients */
+ if (dci_entry->client_info.token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ /* send updated mask to peripherals */
+ ret = dci_ops_tbl[client_token].send_event_mask(client_token);
+ mutex_unlock(&driver->dci_mutex);
+ } else {
+ pr_alert("diag: Incorrect DCI transaction\n");
+ }
+ return ret;
+}
+
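+/* Look up a DCI client entry by its unique client id */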
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id)
+{
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.client_id == client_id)
+ return entry;
+ }
+ return NULL;
+}
+
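+/* Look up a DCI client entry by the thread group id of the owning task */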
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
+{
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client->tgid == tgid)
+ return entry;
+ }
+ return NULL;
+}
+
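+/*
+ * Recompute one byte of the composite event mask for the given token. A
+ * bit stays set in the composite mask as long as at least one client
+ * registered for that token has it set.
+ */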
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token)
+{
+ uint8_t *event_mask_ptr, *update_ptr = NULL;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ bool is_set = false;
+
+ mutex_lock(&dci_event_mask_mutex);
+ update_ptr = dci_ops_tbl[token].event_mask_composite;
+ if (!update_ptr) {
+ mutex_unlock(&dci_event_mask_mutex);
+ return;
+ }
+ update_ptr += offset;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ event_mask_ptr = entry->dci_event_mask;
+ event_mask_ptr += offset;
+ if ((*event_mask_ptr & byte_mask) == byte_mask) {
+ is_set = true;
+ /* break even if one client has the event mask set */
+ break;
+ }
+ }
+ if (is_set == false)
+ *update_ptr &= ~byte_mask;
+ else
+ *update_ptr |= byte_mask;
+ mutex_unlock(&dci_event_mask_mutex);
+}
+
+void diag_dci_invalidate_cumulative_event_mask(int token)
+{
+ int i = 0;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ uint8_t *event_mask_ptr, *update_ptr = NULL;
+
+ mutex_lock(&dci_event_mask_mutex);
+ update_ptr = dci_ops_tbl[token].event_mask_composite;
+ if (!update_ptr) {
+ mutex_unlock(&dci_event_mask_mutex);
+ return;
+ }
+
+ create_dci_event_mask_tbl(update_ptr);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ event_mask_ptr = entry->dci_event_mask;
+ for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
+ *(update_ptr+i) |= *(event_mask_ptr+i);
+ }
+ mutex_unlock(&dci_event_mask_mutex);
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
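+/*
+ * Send the composite event mask to a remote processor over the bridge:
+ * DCI header, event mask control header, the mask bytes and a
+ * CONTROL_CHAR end terminator.
+ */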
+int diag_send_dci_event_mask_remote(int token)
+{
+ unsigned char *buf = NULL;
+ struct diag_dci_header_t dci_header;
+ struct diag_ctrl_event_mask event_mask;
+ int dci_header_size = sizeof(struct diag_dci_header_t);
+ int event_header_size = sizeof(struct diag_ctrl_event_mask);
+ int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+ unsigned char *event_mask_ptr = NULL;
+ uint32_t write_len = 0;
+
+ mutex_lock(&dci_event_mask_mutex);
+ event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
+ if (!event_mask_ptr) {
+ mutex_unlock(&dci_event_mask_mutex);
+ return -EINVAL;
+ }
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ mutex_unlock(&dci_event_mask_mutex);
+ return -EAGAIN;
+ }
+
+ /* Frame the DCI header */
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ dci_header.length = event_header_size + DCI_EVENT_MASK_SIZE + 1;
+ dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+ event_mask.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+ event_mask.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+ event_mask.stream_id = DCI_MASK_STREAM;
+ event_mask.status = DIAG_CTRL_MASK_VALID;
+ event_mask.event_config = 0; /* event config */
+ event_mask.event_mask_size = DCI_EVENT_MASK_SIZE;
+ for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+ if (event_mask_ptr[i] != 0) {
+ event_mask.event_config = 1;
+ break;
+ }
+ }
+ memcpy(buf + write_len, &dci_header, dci_header_size);
+ write_len += dci_header_size;
+ memcpy(buf + write_len, &event_mask, event_header_size);
+ write_len += event_header_size;
+ memcpy(buf + write_len, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+ write_len += DCI_EVENT_MASK_SIZE;
+ *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+ write_len += sizeof(uint8_t);
+ err = diag_dci_write_bridge(token, buf, write_len);
+ if (err) {
+ pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
+ token, err);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ ret = err;
+ } else {
+ ret = DIAG_DCI_NO_ERROR;
+ }
+ mutex_unlock(&dci_event_mask_mutex);
+ return ret;
+}
+#endif
+
+int diag_send_dci_event_mask(int token)
+{
+ void *buf = event_mask.update_buf;
+ struct diag_ctrl_event_mask header;
+ int header_size = sizeof(struct diag_ctrl_event_mask);
+ int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
+ unsigned char *event_mask_ptr = NULL;
+
+ mutex_lock(&dci_event_mask_mutex);
+ event_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].event_mask_composite;
+ if (!event_mask_ptr) {
+ mutex_unlock(&dci_event_mask_mutex);
+ return -EINVAL;
+ }
+
+ mutex_lock(&event_mask.lock);
+ /* send event mask update */
+ header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+ header.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+ header.stream_id = DCI_MASK_STREAM;
+ header.status = DIAG_CTRL_MASK_VALID;
+ header.event_config = 0; /* event config */
+ header.event_mask_size = DCI_EVENT_MASK_SIZE;
+ for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+ if (event_mask_ptr[i] != 0) {
+ header.event_config = 1;
+ break;
+ }
+ }
+ memcpy(buf, &header, header_size);
+ memcpy(buf+header_size, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ /*
+ * Don't send to peripheral if its regular channel
+ * is down. It may also mean that the peripheral doesn't
+ * support DCI.
+ */
+ err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
+ header_size + DCI_EVENT_MASK_SIZE);
+ if (err != DIAG_DCI_NO_ERROR)
+ ret = DIAG_DCI_SEND_DATA_FAIL;
+ }
+
+ mutex_unlock(&event_mask.lock);
+ mutex_unlock(&dci_event_mask_mutex);
+
+ return ret;
+}
+
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+ uint8_t byte_mask, int token)
+{
+ uint8_t *log_mask_ptr, *update_ptr = NULL;
+ bool is_set = false;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ mutex_lock(&dci_log_mask_mutex);
+ update_ptr = dci_ops_tbl[token].log_mask_composite;
+ if (!update_ptr) {
+ mutex_unlock(&dci_log_mask_mutex);
+ return;
+ }
+
+ update_ptr += offset;
+ /* update the dirty bit */
+ *(update_ptr+1) = 1;
+ update_ptr = update_ptr + byte_index;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ log_mask_ptr = entry->dci_log_mask;
+ log_mask_ptr = log_mask_ptr + offset + byte_index;
+ if ((*log_mask_ptr & byte_mask) == byte_mask) {
+ is_set = true;
+ /* break even if one client has the log mask set */
+ break;
+ }
+ }
+
+ if (is_set == false)
+ *update_ptr &= ~byte_mask;
+ else
+ *update_ptr |= byte_mask;
+ mutex_unlock(&dci_log_mask_mutex);
+}
+
+void diag_dci_invalidate_cumulative_log_mask(int token)
+{
+ int i = 0;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+ uint8_t *log_mask_ptr, *update_ptr = NULL;
+
+ /* Clear the composite mask and redo all the masks */
+ mutex_lock(&dci_log_mask_mutex);
+ update_ptr = dci_ops_tbl[token].log_mask_composite;
+ if (!update_ptr) {
+ mutex_unlock(&dci_log_mask_mutex);
+ return;
+ }
+
+ create_dci_log_mask_tbl(update_ptr, DCI_LOG_MASK_DIRTY);
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->client_info.token != token)
+ continue;
+ log_mask_ptr = entry->dci_log_mask;
+ for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
+ *(update_ptr+i) |= *(log_mask_ptr+i);
+ }
+ mutex_unlock(&dci_log_mask_mutex);
+}
+
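+/*
+ * Fill a log mask control packet from one equip id block. src_ptr points
+ * to a 514 byte block (1 byte equip id, 1 dirty byte, 512 mask bytes);
+ * only the 512 mask bytes are copied after the control header.
+ */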
+static int dci_fill_log_mask(unsigned char *dest_ptr, unsigned char *src_ptr)
+{
+ struct diag_ctrl_log_mask header;
+ int header_len = sizeof(struct diag_ctrl_log_mask);
+
+ header.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+ header.num_items = DCI_MAX_ITEMS_PER_LOG_CODE;
+ header.data_len = 11 + DCI_MAX_ITEMS_PER_LOG_CODE;
+ header.stream_id = DCI_MASK_STREAM;
+ header.status = 3;
+ header.equip_id = *src_ptr;
+ header.log_mask_size = DCI_MAX_ITEMS_PER_LOG_CODE;
+ memcpy(dest_ptr, &header, header_len);
+ memcpy(dest_ptr + header_len, src_ptr + 2, DCI_MAX_ITEMS_PER_LOG_CODE);
+
+ return header_len + DCI_MAX_ITEMS_PER_LOG_CODE;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_log_mask_remote(int token)
+{
+ unsigned char *buf = NULL;
+ struct diag_dci_header_t dci_header;
+ int dci_header_size = sizeof(struct diag_dci_header_t);
+ int log_header_size = sizeof(struct diag_ctrl_log_mask);
+ uint8_t *log_mask_ptr = NULL;
+ int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+ int updated;
+ uint32_t write_len = 0;
+
+ mutex_lock(&dci_log_mask_mutex);
+ log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
+ if (!log_mask_ptr) {
+ mutex_unlock(&dci_log_mask_mutex);
+ return -EINVAL;
+ }
+
+ /* DCI header is common to all equipment IDs */
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ dci_header.length = log_header_size + DCI_MAX_ITEMS_PER_LOG_CODE + 1;
+ dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+ for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+ updated = 1;
+ write_len = 0;
+ if (!*(log_mask_ptr + 1)) {
+ log_mask_ptr += 514;
+ continue;
+ }
+
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ mutex_unlock(&dci_log_mask_mutex);
+ return -EAGAIN;
+ }
+
+ memcpy(buf + write_len, &dci_header, dci_header_size);
+ write_len += dci_header_size;
+ write_len += dci_fill_log_mask(buf + write_len, log_mask_ptr);
+ *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+ write_len += sizeof(uint8_t);
+ err = diag_dci_write_bridge(token, buf, write_len);
+ if (err) {
+ pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
+ i, token, err);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ updated = 0;
+ }
+ if (updated)
+ *(log_mask_ptr + 1) = 0; /* clear dirty byte */
+ log_mask_ptr += 514;
+ }
+ mutex_unlock(&dci_log_mask_mutex);
+ return ret;
+}
+#endif
+
+int diag_send_dci_log_mask(int token)
+{
+ void *buf = log_mask.update_buf;
+ int write_len = 0;
+ uint8_t *log_mask_ptr = NULL;
+ int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+ int updated;
+
+ mutex_lock(&dci_log_mask_mutex);
+ log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+ if (!log_mask_ptr) {
+ mutex_unlock(&dci_log_mask_mutex);
+ return -EINVAL;
+ }
+
+ mutex_lock(&log_mask.lock);
+ for (i = 0; i < 16; i++) {
+ updated = 1;
+ /* If the dirty bit is not set, skip the mask for this equip id */
+ if (!(*(log_mask_ptr + 1))) {
+ log_mask_ptr += 514;
+ continue;
+ }
+ write_len = dci_fill_log_mask(buf, log_mask_ptr);
+ for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
+ err = diag_dci_write_proc(j, DIAG_CNTL_TYPE, buf,
+ write_len);
+ if (err != DIAG_DCI_NO_ERROR) {
+ updated = 0;
+ ret = DIAG_DCI_SEND_DATA_FAIL;
+ }
+ }
+ if (updated)
+ *(log_mask_ptr+1) = 0; /* clear dirty byte */
+ log_mask_ptr += 514;
+ }
+ mutex_unlock(&log_mask.lock);
+ mutex_unlock(&dci_log_mask_mutex);
+ return ret;
+}
+
+static int diag_dci_init_local(void)
+{
+ struct dci_ops_tbl_t *temp = &dci_ops_tbl[DCI_LOCAL_PROC];
+
+ create_dci_log_mask_tbl(temp->log_mask_composite, DCI_LOG_MASK_CLEAN);
+ create_dci_event_mask_tbl(temp->event_mask_composite);
+ temp->peripheral_status |= DIAG_CON_APSS;
+
+ return 0;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static void diag_dci_init_handshake_remote(void)
+{
+ int i;
+ struct dci_channel_status_t *temp = NULL;
+
+ for (i = DCI_REMOTE_BASE; i < NUM_DCI_PROC; i++) {
+ temp = &dci_channel_status[i];
+ temp->id = i;
+ setup_timer(&temp->wait_time, dci_chk_handshake, i);
+ INIT_WORK(&temp->handshake_work, dci_handshake_work_fn);
+ }
+}
+
+static int diag_dci_init_remote(void)
+{
+ int i;
+ struct dci_ops_tbl_t *temp = NULL;
+
+ diagmem_init(driver, POOL_TYPE_MDM_DCI_WRITE);
+
+ for (i = DCI_REMOTE_BASE; i < DCI_REMOTE_LAST; i++) {
+ temp = &dci_ops_tbl[i];
+ create_dci_log_mask_tbl(temp->log_mask_composite,
+ DCI_LOG_MASK_CLEAN);
+ create_dci_event_mask_tbl(temp->event_mask_composite);
+ }
+
+ partial_pkt.data = kzalloc(MAX_DCI_PACKET_SZ, GFP_KERNEL);
+ if (!partial_pkt.data)
+ return -ENOMEM;
+
+ partial_pkt.total_len = 0;
+ partial_pkt.read_len = 0;
+ partial_pkt.remaining = 0;
+ partial_pkt.processing = 0;
+
+ diag_dci_init_handshake_remote();
+
+ return 0;
+}
+#else
+static int diag_dci_init_remote(void)
+{
+ return 0;
+}
+#endif
+
+static int diag_dci_init_ops_tbl(void)
+{
+ int err = 0;
+
+ err = diag_dci_init_local();
+ if (err)
+ goto err;
+ err = diag_dci_init_remote();
+ if (err)
+ goto err;
+
+ return 0;
+
+err:
+ return -ENOMEM;
+}
+
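+/*
+ * One time initialization of DCI state: mutexes, the ops table, the apps
+ * DCI buffer, the client and request lists, the workqueue and the drain
+ * timer.
+ */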
+int diag_dci_init(void)
+{
+ int ret = 0;
+
+ driver->dci_tag = 0;
+ driver->dci_client_id = 0;
+ driver->num_dci_client = 0;
+ mutex_init(&driver->dci_mutex);
+ mutex_init(&dci_log_mask_mutex);
+ mutex_init(&dci_event_mask_mutex);
+ spin_lock_init(&ws_lock);
+
+ ret = diag_dci_init_ops_tbl();
+ if (ret)
+ goto err;
+
+ if (driver->apps_dci_buf == NULL) {
+ driver->apps_dci_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
+ if (driver->apps_dci_buf == NULL)
+ goto err;
+ }
+ INIT_LIST_HEAD(&driver->dci_client_list);
+ INIT_LIST_HEAD(&driver->dci_req_list);
+
+ driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
+ if (!driver->diag_dci_wq)
+ goto err;
+
+ INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);
+
+ setup_timer(&dci_drain_timer, dci_drain_data, 0);
+ return DIAG_DCI_NO_ERROR;
+err:
+ pr_err("diag: Could not initialize diag DCI buffers");
+ kfree(driver->apps_dci_buf);
+
+ if (driver->diag_dci_wq)
+ destroy_workqueue(driver->diag_dci_wq);
+ kfree(partial_pkt.data);
+ mutex_destroy(&driver->dci_mutex);
+ mutex_destroy(&dci_log_mask_mutex);
+ mutex_destroy(&dci_event_mask_mutex);
+ return DIAG_DCI_NO_REG;
+}
+
+void diag_dci_channel_init(void)
+{
+ uint8_t peripheral;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ diagfwd_open(peripheral, TYPE_DCI);
+ diagfwd_open(peripheral, TYPE_DCI_CMD);
+ }
+}
+
+void diag_dci_exit(void)
+{
+ kfree(partial_pkt.data);
+ kfree(driver->apps_dci_buf);
+ mutex_destroy(&driver->dci_mutex);
+ mutex_destroy(&dci_log_mask_mutex);
+ mutex_destroy(&dci_event_mask_mutex);
+ destroy_workqueue(driver->diag_dci_wq);
+}
+
+int diag_dci_clear_log_mask(int client_id)
+{
+ int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ entry = diag_dci_get_client_entry(client_id);
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return DIAG_DCI_TABLE_ERR;
+ }
+ token = entry->client_info.token;
+
+ create_dci_log_mask_tbl(entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+ diag_dci_invalidate_cumulative_log_mask(token);
+
+ /*
+ * Send updated mask to userspace clients only if the client
+ * is registered on the local processor
+ */
+ if (token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ /* Send updated mask to peripherals */
+ err = dci_ops_tbl[token].send_log_mask(token);
+ return err;
+}
+
+int diag_dci_clear_event_mask(int client_id)
+{
+ int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ entry = diag_dci_get_client_entry(client_id);
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return DIAG_DCI_TABLE_ERR;
+ }
+ token = entry->client_info.token;
+
+ create_dci_event_mask_tbl(entry->dci_event_mask);
+ diag_dci_invalidate_cumulative_event_mask(token);
+
+ /*
+ * Send updated mask to userspace clients only if the client is
+ * registered on the local processor
+ */
+ if (token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ /* Send updated mask to peripherals */
+ err = dci_ops_tbl[token].send_event_mask(token);
+ return err;
+}
+
+uint8_t diag_dci_get_cumulative_real_time(int token)
+{
+ uint8_t real_time = MODE_NONREALTIME;
+ struct list_head *start, *temp;
+ struct diag_dci_client_tbl *entry = NULL;
+
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl, track);
+ if (entry->real_time == MODE_REALTIME &&
+ entry->client_info.token == token) {
+ real_time = MODE_REALTIME;
+ break;
+ }
+ }
+ return real_time;
+}
+
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry, uint8_t real_time)
+{
+ if (!entry) {
+ pr_err("diag: In %s, invalid client entry\n", __func__);
+ return 0;
+ }
+ entry->real_time = real_time;
+ return 1;
+}
+
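+/*
+ * Register a new DCI client: allocate its log and event masks and its
+ * per-peripheral buffers, add it to the client list and vote for real
+ * time mode. Returns the new client id on success, DIAG_DCI_NO_REG on
+ * failure.
+ */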
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
+{
+ int i, err = 0;
+ struct diag_dci_client_tbl *new_entry = NULL;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+
+ if (!reg_entry)
+ return DIAG_DCI_NO_REG;
+ if (!VALID_DCI_TOKEN(reg_entry->token)) {
+ pr_alert("diag: Invalid DCI client token, %d\n",
+ reg_entry->token);
+ return DIAG_DCI_NO_REG;
+ }
+
+ if (driver->dci_state == DIAG_DCI_NO_REG)
+ return DIAG_DCI_NO_REG;
+
+ if (driver->num_dci_client >= MAX_DCI_CLIENTS)
+ return DIAG_DCI_NO_REG;
+
+ new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
+ if (!new_entry)
+ return DIAG_DCI_NO_REG;
+
+ mutex_lock(&driver->dci_mutex);
+
+ new_entry->client = current;
+ new_entry->tgid = current->tgid;
+ new_entry->client_info.notification_list =
+ reg_entry->notification_list;
+ new_entry->client_info.signal_type =
+ reg_entry->signal_type;
+ new_entry->client_info.token = reg_entry->token;
+ switch (reg_entry->token) {
+ case DCI_LOCAL_PROC:
+ new_entry->num_buffers = NUM_DCI_PERIPHERALS;
+ break;
+ case DCI_MDM_PROC:
+ new_entry->num_buffers = 1;
+ break;
+ }
+ new_entry->real_time = MODE_REALTIME;
+ new_entry->in_service = 0;
+ INIT_LIST_HEAD(&new_entry->list_write_buf);
+ mutex_init(&new_entry->write_buf_mutex);
+ new_entry->dci_log_mask = kzalloc(DCI_LOG_MASK_SIZE, GFP_KERNEL);
+ if (!new_entry->dci_log_mask) {
+ pr_err("diag: Unable to create log mask for client, %d",
+ driver->dci_client_id);
+ goto fail_alloc;
+ }
+ create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+
+ new_entry->dci_event_mask = kzalloc(DCI_EVENT_MASK_SIZE, GFP_KERNEL);
+ if (!new_entry->dci_event_mask)
+ goto fail_alloc;
+ create_dci_event_mask_tbl(new_entry->dci_event_mask);
+
+ new_entry->buffers = kzalloc(new_entry->num_buffers *
+ sizeof(struct diag_dci_buf_peripheral_t),
+ GFP_KERNEL);
+ if (!new_entry->buffers) {
+ pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
+ __func__);
+ goto fail_alloc;
+ }
+
+ for (i = 0; i < new_entry->num_buffers; i++) {
+ proc_buf = &new_entry->buffers[i];
+ if (!proc_buf)
+ goto fail_alloc;
+
+ mutex_init(&proc_buf->health_mutex);
+ mutex_init(&proc_buf->buf_mutex);
+ proc_buf->health.dropped_events = 0;
+ proc_buf->health.dropped_logs = 0;
+ proc_buf->health.received_events = 0;
+ proc_buf->health.received_logs = 0;
+ proc_buf->buf_primary = kzalloc(
+ sizeof(struct diag_dci_buffer_t),
+ GFP_KERNEL);
+ if (!proc_buf->buf_primary)
+ goto fail_alloc;
+ proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
+ GFP_KERNEL);
+ if (!proc_buf->buf_cmd)
+ goto fail_alloc;
+ err = diag_dci_init_buffer(proc_buf->buf_primary,
+ DCI_BUF_PRIMARY);
+ if (err)
+ goto fail_alloc;
+ err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
+ if (err)
+ goto fail_alloc;
+ proc_buf->buf_curr = proc_buf->buf_primary;
+ }
+
+ list_add_tail(&new_entry->track, &driver->dci_client_list);
+ driver->dci_client_id++;
+ new_entry->client_info.client_id = driver->dci_client_id;
+ reg_entry->client_id = driver->dci_client_id;
+ driver->num_dci_client++;
+ if (driver->num_dci_client == 1)
+ diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP, reg_entry->token);
+ queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+ mutex_unlock(&driver->dci_mutex);
+
+ return driver->dci_client_id;
+
+fail_alloc:
+ if (new_entry) {
+ for (i = 0; i < new_entry->num_buffers; i++) {
+ proc_buf = &new_entry->buffers[i];
+ if (proc_buf) {
+ mutex_destroy(&proc_buf->health_mutex);
+ if (proc_buf->buf_primary) {
+ kfree(proc_buf->buf_primary->data);
+ mutex_destroy(
+ &proc_buf->buf_primary->data_mutex);
+ }
+ kfree(proc_buf->buf_primary);
+ if (proc_buf->buf_cmd) {
+ kfree(proc_buf->buf_cmd->data);
+ mutex_destroy(
+ &proc_buf->buf_cmd->data_mutex);
+ }
+ kfree(proc_buf->buf_cmd);
+ }
+ }
+ kfree(new_entry->dci_event_mask);
+ kfree(new_entry->dci_log_mask);
+ kfree(new_entry->buffers);
+ kfree(new_entry);
+ }
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NO_REG;
+}
+
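+/*
+ * Tear down a DCI client: remove it from the client list, clear its
+ * masks and resend them, drop its outstanding requests and pending
+ * write buffers, free the per-peripheral buffers and update the real
+ * time vote.
+ */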
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
+{
+ int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
+ struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+ struct diag_dci_buffer_t *buf_entry, *temp;
+ struct list_head *start, *req_temp;
+ struct dci_pkt_req_entry_t *req_entry = NULL;
+ int token = DCI_LOCAL_PROC;
+
+ if (!entry)
+ return DIAG_DCI_NOT_SUPPORTED;
+
+ token = entry->client_info.token;
+ /*
+ * Remove the entry from the list before freeing the buffers
+ * to ensure that we don't have any invalid access.
+ */
+ if (!list_empty(&entry->track))
+ list_del(&entry->track);
+ driver->num_dci_client--;
+ /*
+ * Clear the client's log and event masks, update the cumulative
+ * masks and send the masks to peripherals
+ */
+ kfree(entry->dci_log_mask);
+ diag_dci_invalidate_cumulative_log_mask(token);
+ if (token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+ ret = dci_ops_tbl[token].send_log_mask(token);
+ if (ret != DIAG_DCI_NO_ERROR)
+ return ret;
+ kfree(entry->dci_event_mask);
+ diag_dci_invalidate_cumulative_event_mask(token);
+ if (token == DCI_LOCAL_PROC)
+ diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+ ret = dci_ops_tbl[token].send_event_mask(token);
+ if (ret != DIAG_DCI_NO_ERROR)
+ return ret;
+
+ list_for_each_safe(start, req_temp, &driver->dci_req_list) {
+ req_entry = list_entry(start, struct dci_pkt_req_entry_t,
+ track);
+ if (req_entry->client_id == entry->client_info.client_id) {
+ if (!list_empty(&req_entry->track))
+ list_del(&req_entry->track);
+ kfree(req_entry);
+ }
+ }
+
+ /* Clean up any buffer that is pending write */
+ mutex_lock(&entry->write_buf_mutex);
+ list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+ buf_track) {
+ if (!list_empty(&buf_entry->buf_track))
+ list_del(&buf_entry->buf_track);
+ if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+ mutex_lock(&buf_entry->data_mutex);
+ diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+ buf_entry->data = NULL;
+ mutex_unlock(&buf_entry->data_mutex);
+ kfree(buf_entry);
+ } else if (buf_entry->buf_type == DCI_BUF_CMD) {
+ peripheral = buf_entry->data_source;
+ if (peripheral == APPS_DATA)
+ continue;
+ }
+ /*
+ * These are buffers that can't be written to the client which
+ * means that the copy cannot be completed. Make sure that we
+ * remove those references in DCI wakeup source.
+ */
+ diag_ws_on_copy_fail(DIAG_WS_DCI);
+ }
+ mutex_unlock(&entry->write_buf_mutex);
+
+ for (i = 0; i < entry->num_buffers; i++) {
+ proc_buf = &entry->buffers[i];
+ buf_entry = proc_buf->buf_curr;
+ mutex_lock(&proc_buf->buf_mutex);
+ /* Clean up secondary buffer from mempool that is active */
+ if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
+ mutex_lock(&buf_entry->data_mutex);
+ diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+ buf_entry->data = NULL;
+ mutex_unlock(&buf_entry->data_mutex);
+ mutex_destroy(&buf_entry->data_mutex);
+ kfree(buf_entry);
+ }
+
+ mutex_lock(&proc_buf->buf_primary->data_mutex);
+ kfree(proc_buf->buf_primary->data);
+ mutex_unlock(&proc_buf->buf_primary->data_mutex);
+
+ mutex_lock(&proc_buf->buf_cmd->data_mutex);
+ kfree(proc_buf->buf_cmd->data);
+ mutex_unlock(&proc_buf->buf_cmd->data_mutex);
+
+ mutex_destroy(&proc_buf->health_mutex);
+ mutex_destroy(&proc_buf->buf_primary->data_mutex);
+ mutex_destroy(&proc_buf->buf_cmd->data_mutex);
+
+ kfree(proc_buf->buf_primary);
+ kfree(proc_buf->buf_cmd);
+ mutex_unlock(&proc_buf->buf_mutex);
+ }
+ mutex_destroy(&entry->write_buf_mutex);
+
+ kfree(entry->buffers);
+ kfree(entry);
+
+ if (driver->num_dci_client == 0) {
+ diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
+ } else {
+ real_time = diag_dci_get_cumulative_real_time(token);
+ diag_update_real_time_vote(DIAG_PROC_DCI, real_time, token);
+ }
+ queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+
+ return DIAG_DCI_NO_ERROR;
+}
+
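+/*
+ * Write a DCI packet to a peripheral. Data packets go out on the DCI
+ * command channel, control packets on the control channel.
+ */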
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
+{
+ uint8_t dest_channel = TYPE_DATA;
+ int err = 0;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < 0 ||
+ !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
+ buf, peripheral, len,
+ driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
+ return -EINVAL;
+ }
+
+ if (pkt_type == DIAG_DATA_TYPE) {
+ dest_channel = TYPE_DCI_CMD;
+ } else if (pkt_type == DIAG_CNTL_TYPE) {
+ dest_channel = TYPE_CNTL;
+ } else {
+ pr_err("diag: Invalid DCI pkt type in %s", __func__);
+ return -EINVAL;
+ }
+
+ err = diagfwd_write(peripheral, dest_channel, buf, len);
+ if (err && err != -ENODEV) {
+ pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, dest_channel, len, err);
+ } else {
+ err = DIAG_DCI_NO_ERROR;
+ }
+
+ return err;
+}
+
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc)
+{
+ struct diag_dci_client_tbl *entry = NULL;
+ struct diag_dci_health_t *health = NULL;
+ struct diag_dci_health_stats *stats = NULL;
+ int i, proc;
+
+ if (!stats_proc)
+ return -EINVAL;
+
+ stats = &stats_proc->health;
+ proc = stats_proc->proc;
+ if (proc < ALL_PROC || proc > APPS_DATA)
+ return -EINVAL;
+
+ entry = diag_dci_get_client_entry(stats_proc->client_id);
+ if (!entry)
+ return DIAG_DCI_NOT_SUPPORTED;
+
+ /*
+ * If the client has registered for remote processor, the
+ * proc field doesn't have any effect as they have only one buffer.
+ */
+ if (entry->client_info.token)
+ proc = 0;
+
+ stats->stats.dropped_logs = 0;
+ stats->stats.dropped_events = 0;
+ stats->stats.received_logs = 0;
+ stats->stats.received_events = 0;
+
+ if (proc != ALL_PROC) {
+ health = &entry->buffers[proc].health;
+ stats->stats.dropped_logs = health->dropped_logs;
+ stats->stats.dropped_events = health->dropped_events;
+ stats->stats.received_logs = health->received_logs;
+ stats->stats.received_events = health->received_events;
+ if (stats->reset_status) {
+ mutex_lock(&entry->buffers[proc].health_mutex);
+ health->dropped_logs = 0;
+ health->dropped_events = 0;
+ health->received_logs = 0;
+ health->received_events = 0;
+ mutex_unlock(&entry->buffers[proc].health_mutex);
+ }
+ return DIAG_DCI_NO_ERROR;
+ }
+
+ for (i = 0; i < entry->num_buffers; i++) {
+ health = &entry->buffers[i].health;
+ stats->stats.dropped_logs += health->dropped_logs;
+ stats->stats.dropped_events += health->dropped_events;
+ stats->stats.received_logs += health->received_logs;
+ stats->stats.received_events += health->received_events;
+ if (stats->reset_status) {
+ mutex_lock(&entry->buffers[i].health_mutex);
+ health->dropped_logs = 0;
+ health->dropped_events = 0;
+ health->received_logs = 0;
+ health->received_events = 0;
+ mutex_unlock(&entry->buffers[i].health_mutex);
+ }
+ }
+ return DIAG_DCI_NO_ERROR;
+}
+
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list)
+{
+ if (!support_list)
+ return -ENOMEM;
+
+ if (!VALID_DCI_TOKEN(support_list->proc))
+ return -EIO;
+
+ support_list->list = dci_ops_tbl[support_list->proc].peripheral_status;
+ return DIAG_DCI_NO_ERROR;
+}
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
new file mode 100644
index 0000000..61eb3f5
--- /dev/null
+++ b/drivers/char/diag/diag_dci.h
@@ -0,0 +1,328 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_DCI_H
+#define DIAG_DCI_H
+
+#define MAX_DCI_CLIENTS 10
+#define DCI_PKT_RSP_CODE 0x93
+#define DCI_DELAYED_RSP_CODE 0x94
+#define DCI_CONTROL_PKT_CODE 0x9A
+#define EXT_HDR_CMD_CODE 0x98
+#define LOG_CMD_CODE 0x10
+#define EVENT_CMD_CODE 0x60
+#define DCI_PKT_RSP_TYPE 0
+#define DCI_LOG_TYPE -1
+#define DCI_EVENT_TYPE -2
+#define DCI_EXT_HDR_TYPE -3
+#define SET_LOG_MASK 1
+#define DISABLE_LOG_MASK 0
+#define MAX_EVENT_SIZE 512
+#define DCI_CLIENT_INDEX_INVALID -1
+#define DCI_LOG_CON_MIN_LEN 14
+#define DCI_EVENT_CON_MIN_LEN 16
+
+#define EXT_HDR_LEN 8
+#define EXT_HDR_VERSION 1
+
+#define DCI_BUF_PRIMARY 1
+#define DCI_BUF_SECONDARY 2
+#define DCI_BUF_CMD 3
+
+#ifdef CONFIG_DEBUG_FS
+#define DIAG_DCI_DEBUG_CNT 100
+#define DIAG_DCI_DEBUG_LEN 100
+#endif
+
+/* 16 log code categories, each has:
+ * 1 byte equip id + 1 dirty byte + 512 byte max log mask
+ */
+#define DCI_LOG_MASK_SIZE (16*514)
+#define DCI_EVENT_MASK_SIZE 512
+#define DCI_MASK_STREAM 2
+#define DCI_MAX_LOG_CODES 16
+#define DCI_MAX_ITEMS_PER_LOG_CODE 512
+
+#define DCI_LOG_MASK_CLEAN 0
+#define DCI_LOG_MASK_DIRTY 1
+
+#define MIN_DELAYED_RSP_LEN 12
+/*
+ * Maximum data size that peripherals send = 8.5K log +
+ * DCI header + footer (6 bytes)
+ */
+#define MAX_DCI_PACKET_SZ 8710
+
+extern unsigned int dci_max_reg;
+extern unsigned int dci_max_clients;
+
+#define DCI_LOCAL_PROC 0
+#define DCI_REMOTE_BASE 1
+#define DCI_MDM_PROC DCI_REMOTE_BASE
+#define DCI_REMOTE_LAST (DCI_REMOTE_BASE + 1)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DCI_PROC 1
+#else
+#define NUM_DCI_PROC DCI_REMOTE_LAST
+#endif
+
+#define DCI_REMOTE_DATA 0
+
+#define VALID_DCI_TOKEN(x) (((x) >= 0 && (x) < NUM_DCI_PROC) ? 1 : 0)
+#define BRIDGE_TO_TOKEN(x) ((x) - DIAGFWD_MDM_DCI + DCI_REMOTE_BASE)
+#define TOKEN_TO_BRIDGE(x) (dci_ops_tbl[(x)].ctx)
+
+#define DCI_MAGIC (0xAABB1122)
+
+struct dci_pkt_req_t {
+ int uid;
+ int client_id;
+} __packed;
+
+struct dci_stream_req_t {
+ int type;
+ int client_id;
+ int set_flag;
+ int count;
+} __packed;
+
+struct dci_pkt_req_entry_t {
+ int client_id;
+ int uid;
+ int tag;
+ struct list_head track;
+} __packed;
+
+struct diag_dci_reg_tbl_t {
+ int client_id;
+ uint16_t notification_list;
+ int signal_type;
+ int token;
+} __packed;
+
+struct diag_dci_health_t {
+ int dropped_logs;
+ int dropped_events;
+ int received_logs;
+ int received_events;
+};
+
+struct diag_dci_partial_pkt_t {
+ unsigned char *data;
+ uint32_t total_len;
+ uint32_t read_len;
+ uint32_t remaining;
+ uint8_t processing;
+} __packed;
+
+struct diag_dci_buffer_t {
+ unsigned char *data;
+ unsigned int data_len;
+ struct mutex data_mutex;
+ uint8_t in_busy;
+ uint8_t buf_type;
+ int data_source;
+ int capacity;
+ uint8_t in_list;
+ struct list_head buf_track;
+};
+
+struct diag_dci_buf_peripheral_t {
+ struct diag_dci_buffer_t *buf_curr;
+ struct diag_dci_buffer_t *buf_primary;
+ struct diag_dci_buffer_t *buf_cmd;
+ struct diag_dci_health_t health;
+ struct mutex health_mutex;
+ struct mutex buf_mutex;
+};
+
+struct diag_dci_client_tbl {
+ int tgid;
+ struct diag_dci_reg_tbl_t client_info;
+ struct task_struct *client;
+ unsigned char *dci_log_mask;
+ unsigned char *dci_event_mask;
+ uint8_t real_time;
+ struct list_head track;
+ struct diag_dci_buf_peripheral_t *buffers;
+ uint8_t num_buffers;
+ uint8_t in_service;
+ struct list_head list_write_buf;
+ struct mutex write_buf_mutex;
+};
+
+struct diag_dci_health_stats {
+ struct diag_dci_health_t stats;
+ int reset_status;
+};
+
+struct diag_dci_health_stats_proc {
+ int client_id;
+ struct diag_dci_health_stats health;
+ int proc;
+} __packed;
+
+struct diag_dci_peripherals_t {
+ int proc;
+ uint16_t list;
+} __packed;
+
+/* This is used for querying DCI Log or Event Mask */
+struct diag_log_event_stats {
+ int client_id;
+ uint16_t code;
+ int is_set;
+} __packed;
+
+struct diag_dci_pkt_rsp_header_t {
+ int type;
+ int length;
+ uint8_t delete_flag;
+ int uid;
+} __packed;
+
+struct diag_dci_pkt_header_t {
+ uint8_t start;
+ uint8_t version;
+ uint16_t len;
+ uint8_t pkt_code;
+ int tag;
+} __packed;
+
+struct diag_dci_header_t {
+ uint8_t start;
+ uint8_t version;
+ uint16_t length;
+ uint8_t cmd_code;
+} __packed;
+
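+/*
+ * Per-processor DCI operations: the composite log and event masks across
+ * all clients of that processor and the callbacks used to send them out.
+ */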
+struct dci_ops_tbl_t {
+ int ctx;
+ int mempool;
+ unsigned char log_mask_composite[DCI_LOG_MASK_SIZE];
+ unsigned char event_mask_composite[DCI_EVENT_MASK_SIZE];
+ int (*send_log_mask)(int token);
+ int (*send_event_mask)(int token);
+ uint16_t peripheral_status;
+} __packed;
+
+struct dci_channel_status_t {
+ int id;
+ int open;
+ int retry_count;
+ struct timer_list wait_time;
+ struct work_struct handshake_work;
+} __packed;
+
+extern struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC];
+
+enum {
+ DIAG_DCI_NO_ERROR = 1001, /* No error */
+ DIAG_DCI_NO_REG, /* Could not register */
+ DIAG_DCI_NO_MEM, /* Failed memory allocation */
+ DIAG_DCI_NOT_SUPPORTED, /* This particular client is not supported */
+ DIAG_DCI_HUGE_PACKET, /* Request/Response Packet too huge */
+ DIAG_DCI_SEND_DATA_FAIL,/* writing to kernel or peripheral fails */
+ DIAG_DCI_TABLE_ERR /* Error dealing with registration tables */
+};
+
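+/* Room for the larger of the two DCI headers plus one trailing byte */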
+#define DCI_HDR_SIZE \
+ ((sizeof(struct diag_dci_pkt_header_t) > \
+ sizeof(struct diag_dci_header_t)) ? \
+ (sizeof(struct diag_dci_pkt_header_t) + 1) : \
+ (sizeof(struct diag_dci_header_t) + 1))
+
+#define DCI_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_HDR_SIZE)
+
+#define DCI_REQ_HDR_SIZE \
+ ((sizeof(struct dci_pkt_req_t) > \
+ sizeof(struct dci_stream_req_t)) ? \
+ (sizeof(struct dci_pkt_req_t)) : \
+ (sizeof(struct dci_stream_req_t)))
+
+#define DCI_REQ_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_REQ_HDR_SIZE)
+
+#ifdef CONFIG_DEBUG_FS
+/* To collect debug information during each socket read */
+struct diag_dci_data_info {
+ unsigned long iteration;
+ int data_size;
+ char time_stamp[DIAG_TS_SIZE];
+ uint8_t peripheral;
+ uint8_t ch_type;
+ uint8_t proc;
+};
+
+extern struct diag_dci_data_info *dci_traffic;
+extern struct mutex dci_stat_mutex;
+#endif
+
+int diag_dci_init(void);
+void diag_dci_channel_init(void);
+void diag_dci_exit(void);
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry);
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry);
+void diag_dci_channel_open_work(struct work_struct *work);
+void diag_dci_notify_client(int peripheral_mask, int data, int proc);
+void diag_dci_wakeup_clients(void);
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes);
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+ int recd_bytes);
+int diag_process_dci_transaction(unsigned char *buf, int len);
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+ int token);
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token);
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id);
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid);
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes);
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list);
+/* DCI Log streaming functions */
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+ uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_log_mask(int token);
+int diag_send_dci_log_mask(int token);
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
+ void *ext_hdr);
+int diag_dci_clear_log_mask(int client_id);
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+ uint16_t log_code);
+/* DCI event streaming functions */
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_event_mask(int token);
+int diag_send_dci_event_mask(int token);
+void extract_dci_events(unsigned char *buf, int len, int data_source,
+ int token, void *ext_hdr);
+/* DCI extended header handling functions */
+void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
+ int token);
+int diag_dci_clear_event_mask(int client_id);
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+ uint16_t event_id);
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+ uint8_t peripheral, uint8_t proc);
+uint8_t diag_dci_get_cumulative_real_time(int token);
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry,
+ uint8_t real_time);
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc);
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len);
+void dci_drain_data(unsigned long data);
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_log_mask_remote(int token);
+int diag_send_dci_event_mask_remote(int token);
+unsigned char *dci_get_buffer_from_bridge(int token);
+int diag_dci_write_bridge(int token, unsigned char *buf, int len);
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len);
+int diag_dci_send_handshake_pkt(int index);
+#endif
+
+#endif
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
new file mode 100644
index 0000000..89fba64
--- /dev/null
+++ b/drivers/char/diag/diag_debugfs.c
@@ -0,0 +1,1074 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+#include "diagfwd_hsic.h"
+#include "diagfwd_smux.h"
+#endif
+#ifdef CONFIG_MSM_MHI
+#include "diagfwd_mhi.h"
+#endif
+#include "diagmem.h"
+#include "diag_dci.h"
+#include "diag_usb.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_socket.h"
+#include "diagfwd_glink.h"
+#include "diag_debugfs.h"
+#include "diag_ipc_logging.h"
+
+#define DEBUG_BUF_SIZE 4096
+static struct dentry *diag_dbgfs_dent;
+static int diag_dbgfs_table_index;
+static int diag_dbgfs_mempool_index;
+static int diag_dbgfs_usbinfo_index;
+static int diag_dbgfs_socketinfo_index;
+static int diag_dbgfs_glinkinfo_index;
+static int diag_dbgfs_hsicinfo_index;
+static int diag_dbgfs_mhiinfo_index;
+static int diag_dbgfs_bridgeinfo_index;
+static int diag_dbgfs_finished;
+static int diag_dbgfs_dci_data_index;
+static int diag_dbgfs_dci_finished;
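+/*
+ * The *_index statics remember how far the previous read on each debugfs
+ * file got so that successive reads continue from that position; the
+ * *_finished flags mark the end of a dump.
+ */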
+
+static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ int ret, i;
+ unsigned int buf_size;
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ buf_size = ksize(buf);
+ ret = scnprintf(buf, buf_size,
+ "CPU Tools ID: %d\n"
+ "Check Polling Response: %d\n"
+ "Polling Registered: %d\n"
+ "Uses Device Tree: %d\n"
+ "Apps Supports Separate CMDRSP: %d\n"
+ "Apps Supports HDLC Encoding: %d\n"
+ "Apps Supports Sockets: %d\n"
+ "Logging Mode: %d\n"
+ "RSP Buffer is Busy: %d\n"
+ "HDLC Disabled: %d\n"
+ "Time Sync Enabled: %d\n"
+ "MD session mode: %d\n"
+ "MD session mask: %d\n"
+ "Uses Time API: %d\n",
+ chk_config_get_id(),
+ chk_polling_response(),
+ driver->polling_reg_flag,
+ driver->use_device_tree,
+ driver->supports_separate_cmdrsp,
+ driver->supports_apps_hdlc_encoding,
+ driver->supports_sockets,
+ driver->logging_mode,
+ driver->rsp_buf_busy,
+ driver->hdlc_disabled,
+ driver->time_sync_enabled,
+ driver->md_session_mode,
+ driver->md_session_mask,
+ driver->uses_time_api);
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ ret += scnprintf(buf+ret, buf_size-ret,
+ "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c|\n",
+ PERIPHERAL_STRING(i),
+ driver->feature[i].feature_mask[0],
+ driver->feature[i].feature_mask[1],
+ driver->feature[i].rcvd_feature_mask ? 'F':'f',
+ driver->feature[i].separate_cmd_rsp ? 'C':'c',
+ driver->feature[i].encode_hdlc ? 'H':'h',
+ driver->feature[i].peripheral_buffering ? 'B':'b',
+ driver->feature[i].mask_centralization ? 'M':'m',
+ driver->feature[i].stm_support ? 'Q':'q',
+ driver->feature[i].sockets_enabled ? 'S':'s',
+ driver->feature[i].sent_feature_mask ? 'T':'t');
+ }
+
+#ifdef CONFIG_DIAG_OVER_USB
+ ret += scnprintf(buf+ret, buf_size-ret,
+ "USB Connected: %d\n",
+ driver->usb_connected);
+#endif
+
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ ret += scnprintf(buf+ret, buf_size-ret,
+ "Real Time Mode: %d: %d\n", i,
+ driver->real_time_mode[i]);
+ }
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_dcistats(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ unsigned int bytes_remaining, bytes_written = 0;
+ unsigned int bytes_in_buf = 0, i = 0;
+ struct diag_dci_data_info *temp_data = dci_traffic;
+ unsigned int buf_size;
+
+ buf_size = (count > DEBUG_BUF_SIZE) ? DEBUG_BUF_SIZE : count;
+
+ if (diag_dbgfs_dci_finished) {
+ diag_dbgfs_dci_finished = 0;
+ return 0;
+ }
+
+ buf = kcalloc(buf_size, sizeof(char), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf))
+ return -ENOMEM;
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+
+ if (diag_dbgfs_dci_data_index == 0) {
+ bytes_written =
+ scnprintf(buf, buf_size,
+ "number of clients: %d\n"
+ "dci proc active: %d\n"
+ "dci real time vote: %d\n",
+ driver->num_dci_client,
+ (driver->proc_active_mask & DIAG_PROC_DCI) ? 1 : 0,
+ (driver->proc_rt_vote_mask[DIAG_LOCAL_PROC] &
+ DIAG_PROC_DCI) ? 1 : 0);
+ bytes_in_buf += bytes_written;
+ bytes_remaining -= bytes_written;
+#ifdef CONFIG_DIAG_OVER_USB
+ bytes_written = scnprintf(buf+bytes_in_buf, bytes_remaining,
+ "usb_connected: %d\n",
+ driver->usb_connected);
+ bytes_in_buf += bytes_written;
+ bytes_remaining -= bytes_written;
+#endif
+ bytes_written = scnprintf(buf+bytes_in_buf,
+ bytes_remaining,
+ "dci power: active, relax: %lu, %lu\n",
+ driver->diag_dev->power.wakeup->active_count,
+ driver->diag_dev->power.wakeup->relax_count);
+ bytes_in_buf += bytes_written;
+ bytes_remaining -= bytes_written;
+
+ }
+ temp_data += diag_dbgfs_dci_data_index;
+ for (i = diag_dbgfs_dci_data_index; i < DIAG_DCI_DEBUG_CNT; i++) {
+ if (temp_data->iteration != 0) {
+ bytes_written = scnprintf(
+ buf + bytes_in_buf, bytes_remaining,
+ "i %-5ld\t"
+ "s %-5d\t"
+ "p %-5d\t"
+ "r %-5d\t"
+ "c %-5d\t"
+ "t %-15s\n",
+ temp_data->iteration,
+ temp_data->data_size,
+ temp_data->peripheral,
+ temp_data->proc,
+ temp_data->ch_type,
+ temp_data->time_stamp);
+ bytes_in_buf += bytes_written;
+ bytes_remaining -= bytes_written;
+ /* Check if there is room for another entry */
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ temp_data++;
+ }
+
+ diag_dbgfs_dci_data_index = (i >= DIAG_DCI_DEBUG_CNT) ? 0 : i + 1;
+ bytes_written = simple_read_from_buffer(ubuf, count, ppos, buf,
+ bytes_in_buf);
+ kfree(buf);
+ diag_dbgfs_dci_finished = 1;
+ return bytes_written;
+}
+
+static ssize_t diag_dbgfs_read_power(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ int ret;
+ unsigned int buf_size;
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf_size = ksize(buf);
+ ret = scnprintf(buf, buf_size,
+ "DCI reference count: %d\n"
+ "DCI copy count: %d\n"
+ "DCI Client Count: %d\n\n"
+ "Memory Device reference count: %d\n"
+ "Memory Device copy count: %d\n"
+ "Logging mode: %d\n\n"
+ "Wakeup source active count: %lu\n"
+ "Wakeup source relax count: %lu\n\n",
+ driver->dci_ws.ref_count,
+ driver->dci_ws.copy_count,
+ driver->num_dci_client,
+ driver->md_ws.ref_count,
+ driver->md_ws.copy_count,
+ driver->logging_mode,
+ driver->diag_dev->power.wakeup->active_count,
+ driver->diag_dev->power.wakeup->relax_count);
+
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_table(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf;
+ int ret = 0;
+ int i = 0;
+ int is_polling = 0;
+ unsigned int bytes_remaining;
+ unsigned int bytes_in_buffer = 0;
+ unsigned int bytes_written;
+ unsigned int buf_size;
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+
+ if (diag_dbgfs_table_index == driver->cmd_reg_count) {
+ diag_dbgfs_table_index = 0;
+ return 0;
+ }
+
+ buf_size = (count > DEBUG_BUF_SIZE) ? DEBUG_BUF_SIZE : count;
+
+ buf = kcalloc(buf_size, sizeof(char), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+
+ if (diag_dbgfs_table_index == 0) {
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "Client ids: Modem: %d, LPASS: %d, WCNSS: %d, SLPI: %d, APPS: %d\n",
+ PERIPHERAL_MODEM, PERIPHERAL_LPASS,
+ PERIPHERAL_WCNSS, PERIPHERAL_SENSORS,
+ APPS_DATA);
+ bytes_in_buffer += bytes_written;
+ bytes_remaining -= bytes_written;
+ }
+
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (i < diag_dbgfs_table_index) {
+ i++;
+ continue;
+ }
+
+ is_polling = diag_cmd_chk_polling(&item->entry);
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "i: %3d, cmd_code: %4x, subsys_id: %4x, cmd_code_lo: %4x, cmd_code_hi: %4x, proc: %d, process_id: %5d %s\n",
+ i++,
+ item->entry.cmd_code,
+ item->entry.subsys_id,
+ item->entry.cmd_code_lo,
+ item->entry.cmd_code_hi,
+ item->proc,
+ item->pid,
+ (is_polling == DIAG_CMD_POLLING) ?
+ "<-- Polling Cmd" : "");
+
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_table_index = i;
+
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_mempool(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_mempool_t *mempool = NULL;
+
+ if (diag_dbgfs_mempool_index >= NUM_MEMORY_POOLS) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_mempool_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "%-24s\t"
+ "%-10s\t"
+ "%-5s\t"
+ "%-5s\t"
+ "%-5s\n",
+ "POOL", "HANDLE", "COUNT", "SIZE", "ITEMSIZE");
+ bytes_in_buffer += bytes_written;
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ for (i = diag_dbgfs_mempool_index; i < NUM_MEMORY_POOLS; i++) {
+ mempool = &diag_mempools[i];
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "%-24s\t"
+ "%-10p\t"
+ "%-5d\t"
+ "%-5d\t"
+ "%-5d\n",
+ mempool->name,
+ mempool->pool,
+ mempool->count,
+ mempool->poolsize,
+ mempool->itemsize);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_mempool_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_usbinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_usb_info *usb_info = NULL;
+
+ if (diag_dbgfs_usbinfo_index >= NUM_DIAG_USB_DEV) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_usbinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = diag_dbgfs_usbinfo_index; i < NUM_DIAG_USB_DEV; i++) {
+ usb_info = &diag_usb[i];
+ if (!usb_info->enabled)
+ continue;
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "id: %d\n"
+ "name: %s\n"
+ "hdl: %pK\n"
+ "connected: %d\n"
+ "diag state: %d\n"
+ "enabled: %d\n"
+ "mempool: %s\n"
+ "read pending: %d\n"
+ "read count: %lu\n"
+ "write count: %lu\n"
+ "read work pending: %d\n"
+ "read done work pending: %d\n"
+ "connect work pending: %d\n"
+ "disconnect work pending: %d\n"
+ "max size supported: %d\n\n",
+ usb_info->id,
+ usb_info->name,
+ usb_info->hdl,
+ atomic_read(&usb_info->connected),
+ atomic_read(&usb_info->diag_state),
+ usb_info->enabled,
+ DIAG_MEMPOOL_GET_NAME(usb_info->mempool),
+ atomic_read(&usb_info->read_pending),
+ usb_info->read_cnt,
+ usb_info->write_cnt,
+ work_pending(&usb_info->read_work),
+ work_pending(&usb_info->read_done_work),
+ work_pending(&usb_info->connect_work),
+ work_pending(&usb_info->disconnect_work),
+ usb_info->max_size);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_usbinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_socketinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_socket_info *info = NULL;
+ struct diagfwd_info *fwd_ctxt = NULL;
+
+ if (diag_dbgfs_socketinfo_index >= NUM_PERIPHERALS) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_socketinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = 0; i < NUM_TYPES; i++) {
+ for (j = 0; j < NUM_PERIPHERALS; j++) {
+ switch (i) {
+ case TYPE_DATA:
+ info = &socket_data[j];
+ break;
+ case TYPE_CNTL:
+ info = &socket_cntl[j];
+ break;
+ case TYPE_DCI:
+ info = &socket_dci[j];
+ break;
+ case TYPE_CMD:
+ info = &socket_cmd[j];
+ break;
+ case TYPE_DCI_CMD:
+ info = &socket_dci_cmd[j];
+ break;
+ default:
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+ bytes_written = scnprintf(buf+bytes_in_buffer,
+ bytes_remaining,
+ "name\t\t:\t%s\n"
+ "hdl\t\t:\t%pK\n"
+ "inited\t\t:\t%d\n"
+ "opened\t\t:\t%d\n"
+ "diag_state\t:\t%d\n"
+ "buf_1 busy\t:\t%d\n"
+ "buf_2 busy\t:\t%d\n"
+ "flow ctrl count\t:\t%d\n"
+ "data_ready\t:\t%d\n"
+ "init pending\t:\t%d\n"
+ "read pending\t:\t%d\n"
+ "bytes read\t:\t%lu\n"
+ "bytes written\t:\t%lu\n"
+ "fwd inited\t:\t%d\n"
+ "fwd opened\t:\t%d\n"
+ "fwd ch_open\t:\t%d\n\n",
+ info->name,
+ info->hdl,
+ info->inited,
+ atomic_read(&info->opened),
+ atomic_read(&info->diag_state),
+ (fwd_ctxt && fwd_ctxt->buf_1) ?
+ atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+ (fwd_ctxt && fwd_ctxt->buf_2) ?
+ atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+ atomic_read(&info->flow_cnt),
+ info->data_ready,
+ work_pending(&info->init_work),
+ work_pending(&info->read_work),
+ (fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->inited : -1,
+ (fwd_ctxt) ?
+ atomic_read(&fwd_ctxt->opened) : -1,
+ (fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ }
+ diag_dbgfs_socketinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t diag_dbgfs_read_glinkinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_glink_info *info = NULL;
+ struct diagfwd_info *fwd_ctxt = NULL;
+
+ if (diag_dbgfs_glinkinfo_index >= NUM_PERIPHERALS) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_glinkinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = 0; i < NUM_TYPES; i++) {
+ for (j = 0; j < NUM_PERIPHERALS; j++) {
+ switch (i) {
+ case TYPE_DATA:
+ info = &glink_data[j];
+ break;
+ case TYPE_CNTL:
+ info = &glink_cntl[j];
+ break;
+ case TYPE_DCI:
+ info = &glink_dci[j];
+ break;
+ case TYPE_CMD:
+ info = &glink_cmd[j];
+ break;
+ case TYPE_DCI_CMD:
+ info = &glink_dci_cmd[j];
+ break;
+ default:
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+ bytes_written = scnprintf(buf+bytes_in_buffer,
+ bytes_remaining,
+ "name\t\t:\t%s\n"
+ "hdl\t\t:\t%pK\n"
+ "inited\t\t:\t%d\n"
+ "opened\t\t:\t%d\n"
+ "diag_state\t:\t%d\n"
+ "buf_1 busy\t:\t%d\n"
+ "buf_2 busy\t:\t%d\n"
+ "tx_intent_ready\t:\t%d\n"
+ "open pending\t:\t%d\n"
+ "close pending\t:\t%d\n"
+ "read pending\t:\t%d\n"
+ "bytes read\t:\t%lu\n"
+ "bytes written\t:\t%lu\n"
+ "fwd inited\t:\t%d\n"
+ "fwd opened\t:\t%d\n"
+ "fwd ch_open\t:\t%d\n\n",
+ info->name,
+ info->hdl,
+ info->inited,
+ atomic_read(&info->opened),
+ atomic_read(&info->diag_state),
+ (fwd_ctxt && fwd_ctxt->buf_1) ?
+ atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+ (fwd_ctxt && fwd_ctxt->buf_2) ?
+ atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+ atomic_read(&info->tx_intent_ready),
+ work_pending(&info->open_work),
+ work_pending(&info->close_work),
+ work_pending(&info->read_work),
+ (fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+ (fwd_ctxt) ? fwd_ctxt->inited : -1,
+ (fwd_ctxt) ?
+ atomic_read(&fwd_ctxt->opened) : -1,
+ (fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ }
+ diag_dbgfs_glinkinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
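+/*
+ * Accepts a decimal bitmask from userspace and stores it in
+ * diag_debug_mask, which gates the DIAG_LOG() tracing macro. The bit
+ * values are the DIAG_DEBUG_* flags in diag_ipc_logging.h. For
+ * example, assuming debugfs is mounted at /sys/kernel/debug:
+ *
+ *     echo 2 > /sys/kernel/debug/diag/debug
+ *
+ * would enable the DIAG_DEBUG_MUX (0x0002) traces.
+ */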
+static ssize_t diag_dbgfs_write_debug(struct file *fp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned char cmd[10];
+ const int size = sizeof(cmd);
+ long value = 0;
+ int len = 0;
+
+ if (count < 1)
+ return -EINVAL;
+
+ len = (count < (size - 1)) ? count : size - 1;
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+
+ cmd[len] = 0;
+ if (cmd[len-1] == '\n') {
+ cmd[len-1] = 0;
+ len--;
+ }
+
+ if (kstrtol(cmd, 10, &value))
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ diag_debug_mask = (uint16_t)value;
+ return count;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+static ssize_t diag_dbgfs_read_hsicinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_hsic_info *hsic_info = NULL;
+
+ if (diag_dbgfs_hsicinfo_index >= NUM_HSIC_DEV) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_hsicinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = diag_dbgfs_hsicinfo_index; i < NUM_HSIC_DEV; i++) {
+ hsic_info = &diag_hsic[i];
+ if (!hsic_info->enabled)
+ continue;
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "id: %d\n"
+ "name: %s\n"
+ "bridge index: %s\n"
+ "opened: %d\n"
+ "enabled: %d\n"
+ "suspended: %d\n"
+ "mempool: %s\n"
+ "read work pending: %d\n"
+ "open work pending: %d\n"
+ "close work pending: %d\n\n",
+ hsic_info->id,
+ hsic_info->name,
+ DIAG_BRIDGE_GET_NAME(hsic_info->dev_id),
+ hsic_info->opened,
+ hsic_info->enabled,
+ hsic_info->suspended,
+ DIAG_MEMPOOL_GET_NAME(hsic_info->mempool),
+ work_pending(&hsic_info->read_work),
+ work_pending(&hsic_info->open_work),
+ work_pending(&hsic_info->close_work));
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_hsicinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+const struct file_operations diag_dbgfs_hsicinfo_ops = {
+ .read = diag_dbgfs_read_hsicinfo,
+};
+#endif
+#ifdef CONFIG_MSM_MHI
+static ssize_t diag_dbgfs_read_mhiinfo(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diag_mhi_info *mhi_info = NULL;
+
+ if (diag_dbgfs_mhiinfo_index >= NUM_MHI_DEV) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_mhiinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = diag_dbgfs_mhiinfo_index; i < NUM_MHI_DEV; i++) {
+ mhi_info = &diag_mhi[i];
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "id: %d\n"
+ "name: %s\n"
+ "bridge index: %s\n"
+ "mempool: %s\n"
+ "read ch opened: %d\n"
+ "read ch hdl: %pK\n"
+ "write ch opened: %d\n"
+ "write ch hdl: %pK\n"
+ "read work pending: %d\n"
+ "read done work pending: %d\n"
+ "open work pending: %d\n"
+ "close work pending: %d\n\n",
+ mhi_info->id,
+ mhi_info->name,
+ DIAG_BRIDGE_GET_NAME(mhi_info->dev_id),
+ DIAG_MEMPOOL_GET_NAME(mhi_info->mempool),
+ atomic_read(&mhi_info->read_ch.opened),
+ mhi_info->read_ch.hdl,
+ atomic_read(&mhi_info->write_ch.opened),
+ mhi_info->write_ch.hdl,
+ work_pending(&mhi_info->read_work),
+ work_pending(&mhi_info->read_done_work),
+ work_pending(&mhi_info->open_work),
+ work_pending(&mhi_info->close_work));
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_mhiinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+const struct file_operations diag_dbgfs_mhiinfo_ops = {
+ .read = diag_dbgfs_read_mhiinfo,
+};
+
+#endif
+static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char *buf = NULL;
+ int ret = 0;
+ int i = 0;
+ unsigned int buf_size;
+ unsigned int bytes_remaining = 0;
+ unsigned int bytes_written = 0;
+ unsigned int bytes_in_buffer = 0;
+ struct diagfwd_bridge_info *info = NULL;
+
+ if (diag_dbgfs_bridgeinfo_index >= NUM_REMOTE_DEV) {
+ /* Done. Reset to prepare for future requests */
+ diag_dbgfs_bridgeinfo_index = 0;
+ return 0;
+ }
+
+ buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(buf)) {
+ pr_err("diag: %s, Error allocating memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ buf_size = ksize(buf);
+ bytes_remaining = buf_size;
+ for (i = diag_dbgfs_bridgeinfo_index; i < NUM_REMOTE_DEV; i++) {
+ info = &bridge_info[i];
+ if (!info->inited)
+ continue;
+ bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+ "id: %d\n"
+ "name: %s\n"
+ "type: %d\n"
+ "inited: %d\n"
+ "ctxt: %d\n"
+ "dev_ops: %pK\n"
+ "dci_read_buf: %pK\n"
+ "dci_read_ptr: %pK\n"
+ "dci_read_len: %d\n\n",
+ info->id,
+ info->name,
+ info->type,
+ info->inited,
+ info->ctxt,
+ info->dev_ops,
+ info->dci_read_buf,
+ info->dci_read_ptr,
+ info->dci_read_len);
+ bytes_in_buffer += bytes_written;
+
+ /* Check if there is room to add another table entry */
+ bytes_remaining = buf_size - bytes_in_buffer;
+
+ if (bytes_remaining < bytes_written)
+ break;
+ }
+ diag_dbgfs_bridgeinfo_index = i+1;
+ *ppos = 0;
+ ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+ kfree(buf);
+ return ret;
+}
+
+const struct file_operations diag_dbgfs_bridge_ops = {
+ .read = diag_dbgfs_read_bridge,
+};
+
+#endif
+
+const struct file_operations diag_dbgfs_status_ops = {
+ .read = diag_dbgfs_read_status,
+};
+
+const struct file_operations diag_dbgfs_socketinfo_ops = {
+ .read = diag_dbgfs_read_socketinfo,
+};
+
+const struct file_operations diag_dbgfs_glinkinfo_ops = {
+ .read = diag_dbgfs_read_glinkinfo,
+};
+
+const struct file_operations diag_dbgfs_table_ops = {
+ .read = diag_dbgfs_read_table,
+};
+
+const struct file_operations diag_dbgfs_mempool_ops = {
+ .read = diag_dbgfs_read_mempool,
+};
+
+const struct file_operations diag_dbgfs_usbinfo_ops = {
+ .read = diag_dbgfs_read_usbinfo,
+};
+
+const struct file_operations diag_dbgfs_dcistats_ops = {
+ .read = diag_dbgfs_read_dcistats,
+};
+
+const struct file_operations diag_dbgfs_power_ops = {
+ .read = diag_dbgfs_read_power,
+};
+
+const struct file_operations diag_dbgfs_debug_ops = {
+ .write = diag_dbgfs_write_debug
+};
+
+int diag_debugfs_init(void)
+{
+ struct dentry *entry = NULL;
+
+ diag_dbgfs_dent = debugfs_create_dir("diag", 0);
+ if (IS_ERR(diag_dbgfs_dent))
+ return -ENOMEM;
+
+ entry = debugfs_create_file("status", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_status_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("socketinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_socketinfo_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("glinkinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_glinkinfo_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("table", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_table_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("mempool", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_mempool_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("usbinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_usbinfo_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("dci_stats", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_dcistats_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("power", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_power_ops);
+ if (!entry)
+ goto err;
+
+ entry = debugfs_create_file("debug", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_debug_ops);
+ if (!entry)
+ goto err;
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ entry = debugfs_create_file("bridge", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_bridge_ops);
+ if (!entry)
+ goto err;
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+ entry = debugfs_create_file("hsicinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_hsicinfo_ops);
+ if (!entry)
+ goto err;
+#endif
+#ifdef CONFIG_MSM_MHI
+ entry = debugfs_create_file("mhiinfo", 0444, diag_dbgfs_dent, 0,
+ &diag_dbgfs_mhiinfo_ops);
+ if (!entry)
+ goto err;
+#endif
+#endif
+ diag_dbgfs_table_index = 0;
+ diag_dbgfs_mempool_index = 0;
+ diag_dbgfs_usbinfo_index = 0;
+ diag_dbgfs_socketinfo_index = 0;
+ diag_dbgfs_hsicinfo_index = 0;
+ diag_dbgfs_bridgeinfo_index = 0;
+ diag_dbgfs_mhiinfo_index = 0;
+ diag_dbgfs_finished = 0;
+ diag_dbgfs_dci_data_index = 0;
+ diag_dbgfs_dci_finished = 0;
+
+ /* DCI related structures */
+ dci_traffic = kcalloc(DIAG_DCI_DEBUG_CNT,
+ sizeof(struct diag_dci_data_info), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(dci_traffic))
+ pr_warn("diag: could not allocate memory for dci debug info\n");
+
+ mutex_init(&dci_stat_mutex);
+ return 0;
+err:
+ kfree(dci_traffic);
+ debugfs_remove_recursive(diag_dbgfs_dent);
+ return -ENOMEM;
+}
+
+void diag_debugfs_cleanup(void)
+{
+ debugfs_remove_recursive(diag_dbgfs_dent);
+ diag_dbgfs_dent = NULL;
+ kfree(dci_traffic);
+ mutex_destroy(&dci_stat_mutex);
+}
+#else
+int diag_debugfs_init(void) { return 0; }
+void diag_debugfs_cleanup(void) { }
+#endif
diff --git a/drivers/char/diag/diag_debugfs.h b/drivers/char/diag/diag_debugfs.h
new file mode 100644
index 0000000..e8db56e
--- /dev/null
+++ b/drivers/char/diag/diag_debugfs.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_DEBUGFS_H
+#define DIAG_DEBUGFS_H
+
+int diag_debugfs_init(void);
+void diag_debugfs_cleanup(void);
+
+#endif
diff --git a/drivers/char/diag/diag_ipc_logging.h b/drivers/char/diag/diag_ipc_logging.h
new file mode 100644
index 0000000..b9958a4
--- /dev/null
+++ b/drivers/char/diag/diag_ipc_logging.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGIPCLOG_H
+#define DIAGIPCLOG_H
+
+#include <linux/ipc_logging.h>
+
+#define DIAG_IPC_LOG_PAGES 50
+
+#define DIAG_DEBUG_USERSPACE 0x0001
+#define DIAG_DEBUG_MUX 0x0002
+#define DIAG_DEBUG_DCI 0x0004
+#define DIAG_DEBUG_PERIPHERALS 0x0008
+#define DIAG_DEBUG_MASKS 0x0010
+#define DIAG_DEBUG_POWER 0x0020
+#define DIAG_DEBUG_BRIDGE 0x0040
+
+#define DIAG_DEBUG
+
+#ifdef DIAG_DEBUG
+extern uint16_t diag_debug_mask;
+extern void *diag_ipc_log;
+
+#define DIAG_LOG(log_lvl, msg, ...) \
+ do { \
+ if (diag_ipc_log && (log_lvl & diag_debug_mask)) { \
+ ipc_log_string(diag_ipc_log, \
+ "[%s] " msg, __func__, ##__VA_ARGS__); \
+ } \
+ } while (0)
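+/*
+ * Illustrative usage: with DIAG_DEBUG_MUX set in diag_debug_mask (via
+ * the "debug" debugfs file), a call such as
+ *
+ *     DIAG_LOG(DIAG_DEBUG_MUX, "mux write of %d bytes", len);
+ *
+ * lands in the IPC log with the calling function's name prepended.
+ */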
+#else
+#define DIAG_LOG(log_lvl, msg, ...)
+#endif
+
+#endif
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
new file mode 100644
index 0000000..b831d9e
--- /dev/null
+++ b/drivers/char/diag/diag_masks.c
@@ -0,0 +1,2013 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+#define ALL_EQUIP_ID 100
+#define ALL_SSID -1
+
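+/* Set bit x in feature_bytes[]: byte index x / 8, bit position x % 8 */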
+#define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
+
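+/*
+ * Evaluates to true when masks for peripheral x should be updated in
+ * the current context: either no memory-device session ("info" at the
+ * expansion site) is active, or the session owns peripheral x.
+ */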
+#define diag_check_update(x) \
+ (!info || (info->peripheral_mask & MD_PERIPHERAL_MASK(x)))
+
+struct diag_mask_info msg_mask;
+struct diag_mask_info msg_bt_mask;
+struct diag_mask_info log_mask;
+struct diag_mask_info event_mask;
+
+static const struct diag_ssid_range_t msg_mask_tbl[] = {
+ { .ssid_first = MSG_SSID_0, .ssid_last = MSG_SSID_0_LAST },
+ { .ssid_first = MSG_SSID_1, .ssid_last = MSG_SSID_1_LAST },
+ { .ssid_first = MSG_SSID_2, .ssid_last = MSG_SSID_2_LAST },
+ { .ssid_first = MSG_SSID_3, .ssid_last = MSG_SSID_3_LAST },
+ { .ssid_first = MSG_SSID_4, .ssid_last = MSG_SSID_4_LAST },
+ { .ssid_first = MSG_SSID_5, .ssid_last = MSG_SSID_5_LAST },
+ { .ssid_first = MSG_SSID_6, .ssid_last = MSG_SSID_6_LAST },
+ { .ssid_first = MSG_SSID_7, .ssid_last = MSG_SSID_7_LAST },
+ { .ssid_first = MSG_SSID_8, .ssid_last = MSG_SSID_8_LAST },
+ { .ssid_first = MSG_SSID_9, .ssid_last = MSG_SSID_9_LAST },
+ { .ssid_first = MSG_SSID_10, .ssid_last = MSG_SSID_10_LAST },
+ { .ssid_first = MSG_SSID_11, .ssid_last = MSG_SSID_11_LAST },
+ { .ssid_first = MSG_SSID_12, .ssid_last = MSG_SSID_12_LAST },
+ { .ssid_first = MSG_SSID_13, .ssid_last = MSG_SSID_13_LAST },
+ { .ssid_first = MSG_SSID_14, .ssid_last = MSG_SSID_14_LAST },
+ { .ssid_first = MSG_SSID_15, .ssid_last = MSG_SSID_15_LAST },
+ { .ssid_first = MSG_SSID_16, .ssid_last = MSG_SSID_16_LAST },
+ { .ssid_first = MSG_SSID_17, .ssid_last = MSG_SSID_17_LAST },
+ { .ssid_first = MSG_SSID_18, .ssid_last = MSG_SSID_18_LAST },
+ { .ssid_first = MSG_SSID_19, .ssid_last = MSG_SSID_19_LAST },
+ { .ssid_first = MSG_SSID_20, .ssid_last = MSG_SSID_20_LAST },
+ { .ssid_first = MSG_SSID_21, .ssid_last = MSG_SSID_21_LAST },
+ { .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST },
+ { .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST },
+ { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST }
+};
+
+static int diag_apps_responds(void)
+{
+ /*
+ * Apps processor should respond to mask commands only if the
+ * Modem channel is up, the feature mask is received from Modem
+ * and if Modem supports Mask Centralization.
+ */
+ if (!chk_apps_only())
+ return 0;
+
+ if (driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
+ driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open &&
+ driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
+ if (driver->feature[PERIPHERAL_MODEM].mask_centralization)
+ return 1;
+ return 0;
+ }
+ return 1;
+}
+
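+/*
+ * Push the local log masks to a peripheral over its control channel:
+ * one diag_ctrl_log_mask header plus the raw mask bytes is written per
+ * equip ID (or once, with an empty payload, when all masks are force
+ * enabled/disabled). ALL_EQUIP_ID broadcasts every table entry.
+ */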
+static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
+{
+ int i;
+ int err = 0;
+ int send_once = 0;
+ int header_len = sizeof(struct diag_ctrl_log_mask);
+ uint8_t *buf = NULL;
+ uint8_t *temp = NULL;
+ uint32_t mask_size = 0;
+ struct diag_ctrl_log_mask ctrl_pkt;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_log_mask_t *mask = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (driver->md_session_mask != 0 &&
+ driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral))
+ mask_info = driver->md_session_map[peripheral]->log_mask;
+ else
+ mask_info = &log_mask;
+
+ if (!mask_info)
+ return;
+
+ mask = (struct diag_log_mask_t *)mask_info->ptr;
+ buf = mask_info->update_buf;
+
+ switch (mask_info->status) {
+ case DIAG_CTRL_MASK_ALL_DISABLED:
+ ctrl_pkt.equip_id = 0;
+ ctrl_pkt.num_items = 0;
+ ctrl_pkt.log_mask_size = 0;
+ send_once = 1;
+ break;
+ case DIAG_CTRL_MASK_ALL_ENABLED:
+ ctrl_pkt.equip_id = 0;
+ ctrl_pkt.num_items = 0;
+ ctrl_pkt.log_mask_size = 0;
+ send_once = 1;
+ break;
+ case DIAG_CTRL_MASK_VALID:
+ send_once = 0;
+ break;
+ default:
+ pr_debug("diag: In %s, invalid log_mask status\n", __func__);
+ return;
+ }
+
+ mutex_lock(&mask_info->lock);
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ if (equip_id != i && equip_id != ALL_EQUIP_ID)
+ continue;
+
+ mutex_lock(&mask->lock);
+ ctrl_pkt.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.status = mask_info->status;
+ if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+ mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+ ctrl_pkt.equip_id = i;
+ ctrl_pkt.num_items = mask->num_items_tools;
+ ctrl_pkt.log_mask_size = mask_size;
+ }
+ ctrl_pkt.data_len = LOG_MASK_CTRL_HEADER_LEN + mask_size;
+
+ if (header_len + mask_size > mask_info->update_buf_len) {
+ temp = krealloc(buf, header_len + mask_size,
+ GFP_KERNEL);
+ if (!temp) {
+ pr_err_ratelimited("diag: Unable to realloc log update buffer, new size: %d, equip_id: %d\n",
+ header_len + mask_size, equip_id);
+ mutex_unlock(&mask->lock);
+ break;
+ }
+ mask_info->update_buf = temp;
+ mask_info->update_buf_len = header_len + mask_size;
+ }
+
+ memcpy(buf, &ctrl_pkt, header_len);
+ if (mask_size > 0)
+ memcpy(buf + header_len, mask->ptr, mask_size);
+ mutex_unlock(&mask->lock);
+
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "sending ctrl pkt to %d, e %d num_items %d size %d\n",
+ peripheral, i, ctrl_pkt.num_items,
+ ctrl_pkt.log_mask_size);
+
+ err = diagfwd_write(peripheral, TYPE_CNTL,
+ buf, header_len + mask_size);
+ if (err && err != -ENODEV)
+ pr_err_ratelimited("diag: Unable to send log masks to peripheral %d, equip_id: %d, err: %d\n",
+ peripheral, i, err);
+ if (send_once || equip_id != ALL_EQUIP_ID)
+ break;
+ }
+ mutex_unlock(&mask_info->lock);
+}
+
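+/*
+ * Event masks are a bit array indexed by event ID;
+ * EVENT_COUNT_TO_BYTES() (a helper defined elsewhere in the driver)
+ * converts the highest event ID into the byte length that must be
+ * shipped to the peripheral after the diag_ctrl_event_mask header.
+ */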
+static void diag_send_event_mask_update(uint8_t peripheral)
+{
+ uint8_t *buf = NULL;
+ uint8_t *temp = NULL;
+ struct diag_ctrl_event_mask header;
+ struct diag_mask_info *mask_info = NULL;
+ int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+ int write_len = 0;
+ int err = 0;
+ int temp_len = 0;
+
+ if (num_bytes <= 0 || num_bytes > driver->event_mask_size) {
+ pr_debug("diag: In %s, invalid event mask length %d\n",
+ __func__, num_bytes);
+ return;
+ }
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (driver->md_session_mask != 0 &&
+ (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)))
+ mask_info = driver->md_session_map[peripheral]->event_mask;
+ else
+ mask_info = &event_mask;
+
+ if (!mask_info)
+ return;
+
+ buf = mask_info->update_buf;
+ mutex_lock(&mask_info->lock);
+ header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+ header.stream_id = 1;
+ header.status = mask_info->status;
+
+ switch (mask_info->status) {
+ case DIAG_CTRL_MASK_ALL_DISABLED:
+ header.event_config = 0;
+ header.event_mask_size = 0;
+ break;
+ case DIAG_CTRL_MASK_ALL_ENABLED:
+ header.event_config = 1;
+ header.event_mask_size = 0;
+ break;
+ case DIAG_CTRL_MASK_VALID:
+ header.event_config = 1;
+ header.event_mask_size = num_bytes;
+ if (num_bytes + sizeof(header) > mask_info->update_buf_len) {
+ temp_len = num_bytes + sizeof(header);
+ temp = krealloc(buf, temp_len, GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: Unable to realloc event mask update buffer\n");
+ goto err;
+ }
+ mask_info->update_buf = temp;
+ mask_info->update_buf_len = temp_len;
+ /* krealloc may have moved the allocation */
+ buf = temp;
+ }
+ memcpy(buf + sizeof(header), mask_info->ptr, num_bytes);
+ write_len += num_bytes;
+ break;
+ default:
+ pr_debug("diag: In %s, invalid status %d\n", __func__,
+ mask_info->status);
+ goto err;
+ }
+ header.data_len = EVENT_MASK_CTRL_HEADER_LEN + header.event_mask_size;
+ memcpy(buf, &header, sizeof(header));
+ write_len += sizeof(header);
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, buf, write_len);
+ if (err && err != -ENODEV)
+ pr_err_ratelimited("diag: Unable to send event masks to peripheral %d\n",
+ peripheral);
+err:
+ mutex_unlock(&mask_info->lock);
+}
+
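+/*
+ * Push the F3 (msg) masks for the SSID range [first, last] to a
+ * peripheral: each matching table entry is sent as a
+ * diag_ctrl_msg_mask header followed by one uint32_t mask per SSID.
+ * Passing ALL_SSID for both bounds walks the entire table.
+ */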
+static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
+{
+ int i;
+ int err = 0;
+ int header_len = sizeof(struct diag_ctrl_msg_mask);
+ int temp_len = 0;
+ uint8_t *buf = NULL;
+ uint8_t *temp = NULL;
+ uint32_t mask_size = 0;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_msg_mask_t *mask = NULL;
+ struct diag_ctrl_msg_mask header;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (driver->md_session_mask != 0 &&
+ (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)))
+ mask_info = driver->md_session_map[peripheral]->msg_mask;
+ else
+ mask_info = &msg_mask;
+
+ if (!mask_info)
+ return;
+
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ buf = mask_info->update_buf;
+ mutex_lock(&mask_info->lock);
+ switch (mask_info->status) {
+ case DIAG_CTRL_MASK_ALL_DISABLED:
+ mask_size = 0;
+ break;
+ case DIAG_CTRL_MASK_ALL_ENABLED:
+ mask_size = 1;
+ break;
+ case DIAG_CTRL_MASK_VALID:
+ break;
+ default:
+ pr_debug("diag: In %s, invalid status: %d\n", __func__,
+ mask_info->status);
+ goto err;
+ }
+
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ if (((first < mask->ssid_first) ||
+ (last > mask->ssid_last_tools)) && first != ALL_SSID) {
+ continue;
+ }
+
+ mutex_lock(&mask->lock);
+ if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+ mask_size =
+ mask->ssid_last_tools - mask->ssid_first + 1;
+ temp_len = mask_size * sizeof(uint32_t);
+ if (temp_len + header_len <= mask_info->update_buf_len)
+ goto proceed;
+ temp = krealloc(mask_info->update_buf, temp_len,
+ GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: In %s, unable to realloc msg_mask update buffer\n",
+ __func__);
+ mask_size = (mask_info->update_buf_len -
+ header_len) / sizeof(uint32_t);
+ } else {
+ mask_info->update_buf = temp;
+ mask_info->update_buf_len = temp_len;
+ /* krealloc may have moved the allocation */
+ buf = temp;
+ pr_debug("diag: In %s, successfully reallocated msg_mask update buffer to len: %d\n",
+ __func__, mask_info->update_buf_len);
+ }
+ } else if (mask_info->status == DIAG_CTRL_MASK_ALL_ENABLED) {
+ mask_size = 1;
+ }
+proceed:
+ header.cmd_type = DIAG_CTRL_MSG_F3_MASK;
+ header.status = mask_info->status;
+ header.stream_id = 1;
+ header.msg_mode = 0;
+ header.ssid_first = mask->ssid_first;
+ header.ssid_last = mask->ssid_last_tools;
+ header.msg_mask_size = mask_size;
+ mask_size *= sizeof(uint32_t);
+ header.data_len = MSG_MASK_CTRL_HEADER_LEN + mask_size;
+ memcpy(buf, &header, header_len);
+ if (mask_size > 0)
+ memcpy(buf + header_len, mask->ptr, mask_size);
+ mutex_unlock(&mask->lock);
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, buf,
+ header_len + mask_size);
+ if (err && err != -ENODEV)
+ pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d\n",
+ peripheral);
+
+ if (first != ALL_SSID)
+ break;
+ }
+err:
+ mutex_unlock(&mask_info->lock);
+}
+
+static void diag_send_time_sync_update(uint8_t peripheral)
+{
+ struct diag_ctrl_msg_time_sync time_sync_msg;
+ int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+ int err = 0;
+
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, Invalid peripheral, %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+ __func__, peripheral, driver->diagfwd_cntl[peripheral]);
+ return;
+ }
+
+ mutex_lock(&driver->diag_cntl_mutex);
+ time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+ time_sync_msg.ctrl_pkt_data_len = 5;
+ time_sync_msg.version = 1;
+ time_sync_msg.time_api = driver->uses_time_api;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg, msg_size);
+ if (err)
+ pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ msg_size, err);
+ mutex_unlock(&driver->diag_cntl_mutex);
+}
+
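+/*
+ * Advertise the apps processor's capabilities (separate cmd/rsp
+ * channels, HDLC encoding, mask centralization, etc.) to a peripheral.
+ * The peripheral answers with its own feature mask, which the driver
+ * records in driver->feature[] and consults in paths such as
+ * diag_apps_responds() above.
+ */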
+static void diag_send_feature_mask_update(uint8_t peripheral)
+{
+ void *buf = driver->buf_feature_mask_update;
+ int header_size = sizeof(struct diag_ctrl_feature_mask);
+ uint8_t feature_bytes[FEATURE_MASK_LEN] = {0, 0};
+ struct diag_ctrl_feature_mask feature_mask;
+ int total_len = 0;
+ int err = 0;
+
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, Invalid peripheral, %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+ __func__, peripheral, driver->diagfwd_cntl[peripheral]);
+ return;
+ }
+
+ mutex_lock(&driver->diag_cntl_mutex);
+ /* send feature mask update */
+ feature_mask.ctrl_pkt_id = DIAG_CTRL_MSG_FEATURE;
+ feature_mask.ctrl_pkt_data_len = sizeof(uint32_t) + FEATURE_MASK_LEN;
+ feature_mask.feature_mask_len = FEATURE_MASK_LEN;
+ memcpy(buf, &feature_mask, header_size);
+ DIAG_SET_FEATURE_MASK(F_DIAG_FEATURE_MASK_SUPPORT);
+ DIAG_SET_FEATURE_MASK(F_DIAG_LOG_ON_DEMAND_APPS);
+ DIAG_SET_FEATURE_MASK(F_DIAG_STM);
+ DIAG_SET_FEATURE_MASK(F_DIAG_DCI_EXTENDED_HEADER_SUPPORT);
+ if (driver->supports_separate_cmdrsp)
+ DIAG_SET_FEATURE_MASK(F_DIAG_REQ_RSP_SUPPORT);
+ if (driver->supports_apps_hdlc_encoding)
+ DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
+ DIAG_SET_FEATURE_MASK(F_DIAG_MASK_CENTRALIZATION);
+ if (driver->supports_sockets)
+ DIAG_SET_FEATURE_MASK(F_DIAG_SOCKETS_ENABLED);
+
+ memcpy(buf + header_size, &feature_bytes, FEATURE_MASK_LEN);
+ total_len = header_size + FEATURE_MASK_LEN;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, buf, total_len);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to write feature mask to peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ total_len, err);
+ mutex_unlock(&driver->diag_cntl_mutex);
+ return;
+ }
+ driver->feature[peripheral].sent_feature_mask = 1;
+ mutex_unlock(&driver->diag_cntl_mutex);
+}
+
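+/*
+ * Response layout for "get SSID range": a diag_msg_ssid_query_t header
+ * (count = number of ranges) followed by one diag_ssid_range_t pair
+ * {ssid_first, ssid_last} per msg mask table entry, as many as fit in
+ * the destination buffer.
+ */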
+static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int write_len = 0;
+ struct diag_msg_mask_t *mask_ptr = NULL;
+ struct diag_msg_ssid_query_t rsp;
+ struct diag_ssid_range_t ssid_range;
+ struct diag_mask_info *mask_info = NULL;
+
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ if (!diag_apps_responds())
+ return 0;
+
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
+ rsp.status = MSG_STATUS_SUCCESS;
+ rsp.padding = 0;
+ rsp.count = driver->msg_mask_tbl_count;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+
+ mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask_ptr++) {
+ if (write_len + sizeof(ssid_range) > dest_len) {
+ pr_err("diag: In %s, Truncating response due to size limitations of rsp buffer\n",
+ __func__);
+ break;
+ }
+ ssid_range.ssid_first = mask_ptr->ssid_first;
+ ssid_range.ssid_last = mask_ptr->ssid_last_tools;
+ memcpy(dest_buf + write_len, &ssid_range, sizeof(ssid_range));
+ write_len += sizeof(ssid_range);
+ }
+
+ return write_len;
+}
+
+static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i = 0;
+ int write_len = 0;
+ int num_entries = 0;
+ int copy_len = 0;
+ struct diag_msg_mask_t *build_mask = NULL;
+ struct diag_build_mask_req_t *req = NULL;
+ struct diag_msg_build_mask_t rsp;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ if (!diag_apps_responds())
+ return 0;
+
+ req = (struct diag_build_mask_req_t *)src_buf;
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_BUILD_MASK;
+ rsp.ssid_first = req->ssid_first;
+ rsp.ssid_last = req->ssid_last;
+ rsp.status = MSG_STATUS_FAIL;
+ rsp.padding = 0;
+
+ build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+ if (build_mask->ssid_first != req->ssid_first)
+ continue;
+ num_entries = req->ssid_last - req->ssid_first + 1;
+ if (num_entries > build_mask->range) {
+ pr_warn("diag: In %s, truncating ssid range for ssid_first: %d ssid_last %d\n",
+ __func__, req->ssid_first, req->ssid_last);
+ num_entries = build_mask->range;
+ req->ssid_last = req->ssid_first + build_mask->range;
+ }
+ copy_len = num_entries * sizeof(uint32_t);
+ if (copy_len + sizeof(rsp) > dest_len)
+ copy_len = dest_len - sizeof(rsp);
+ memcpy(dest_buf + sizeof(rsp), build_mask->ptr, copy_len);
+ write_len += copy_len;
+ rsp.ssid_last = build_mask->ssid_last;
+ rsp.status = MSG_STATUS_SUCCESS;
+ break;
+ }
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+
+ return write_len;
+}
+
+static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int write_len = 0;
+ uint32_t mask_size = 0;
+ struct diag_msg_mask_t *mask = NULL;
+ struct diag_build_mask_req_t *req = NULL;
+ struct diag_msg_build_mask_t rsp;
+ struct diag_mask_info *mask_info = NULL;
+
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ if (!diag_apps_responds())
+ return 0;
+
+ req = (struct diag_build_mask_req_t *)src_buf;
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK;
+ rsp.ssid_first = req->ssid_first;
+ rsp.ssid_last = req->ssid_last;
+ rsp.status = MSG_STATUS_FAIL;
+ rsp.padding = 0;
+
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ if ((req->ssid_first < mask->ssid_first) ||
+ (req->ssid_first > mask->ssid_last_tools)) {
+ continue;
+ }
+ mask_size = mask->range * sizeof(uint32_t);
+ /* Copy msg mask only till the end of the rsp buffer */
+ if (mask_size + sizeof(rsp) > dest_len)
+ mask_size = dest_len - sizeof(rsp);
+ memcpy(dest_buf + sizeof(rsp), mask->ptr, mask_size);
+ write_len += mask_size;
+ rsp.status = MSG_STATUS_SUCCESS;
+ break;
+ }
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+
+ return write_len;
+}
+
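+/*
+ * "Set msg mask" requests carry a diag_msg_build_mask_t header followed
+ * by one uint32_t run-time mask per SSID in [ssid_first, ssid_last].
+ * The handler copies that payload into the matching table entry
+ * (growing the entry with krealloc if the tools extended the range),
+ * echoes the request back as the response, and then forwards the
+ * update to every peripheral this session owns.
+ */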
+static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int write_len = 0;
+ int header_len = sizeof(struct diag_msg_build_mask_t);
+ int found = 0;
+ uint32_t mask_size = 0;
+ uint32_t offset = 0;
+ struct diag_msg_mask_t *mask = NULL;
+ struct diag_msg_build_mask_t *req = NULL;
+ struct diag_msg_build_mask_t rsp;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_msg_mask_t *mask_next = NULL;
+ uint32_t *temp = NULL;
+
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ req = (struct diag_msg_build_mask_t *)src_buf;
+
+ mutex_lock(&mask_info->lock);
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ if (i < (driver->msg_mask_tbl_count - 1)) {
+ mask_next = mask;
+ mask_next++;
+ } else
+ mask_next = NULL;
+
+ if ((req->ssid_first < mask->ssid_first) ||
+ (req->ssid_first > mask->ssid_first + MAX_SSID_PER_RANGE) ||
+ (mask_next && (req->ssid_first >= mask_next->ssid_first))) {
+ continue;
+ }
+ mask_next = NULL;
+ found = 1;
+ mutex_lock(&mask->lock);
+ mask_size = req->ssid_last - req->ssid_first + 1;
+ if (mask_size > MAX_SSID_PER_RANGE) {
+ pr_warn("diag: In %s, truncating ssid range, %d-%d to max allowed: %d\n",
+ __func__, mask->ssid_first, mask->ssid_last,
+ MAX_SSID_PER_RANGE);
+ mask_size = MAX_SSID_PER_RANGE;
+ mask->range_tools = MAX_SSID_PER_RANGE;
+ mask->ssid_last_tools =
+ mask->ssid_first + mask->range_tools;
+ }
+ if (req->ssid_last > mask->ssid_last_tools) {
+ pr_debug("diag: Msg SSID range mismatch\n");
+ if (mask_size != MAX_SSID_PER_RANGE)
+ mask->ssid_last_tools = req->ssid_last;
+ mask->range_tools =
+ mask->ssid_last_tools - mask->ssid_first + 1;
+ temp = krealloc(mask->ptr,
+ mask->range_tools * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!temp) {
+ pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
+ __func__, mask_size);
+ mutex_unlock(&mask->lock);
+ return -ENOMEM;
+ }
+ mask->ptr = temp;
+ }
+
+ offset = req->ssid_first - mask->ssid_first;
+ if (offset + mask_size > mask->range_tools) {
+ pr_err("diag: In %s, Not in msg mask range, mask_size: %d, offset: %d\n",
+ __func__, mask_size, offset);
+ mutex_unlock(&mask->lock);
+ break;
+ }
+ mask_size = mask_size * sizeof(uint32_t);
+ memcpy(mask->ptr + offset, src_buf + header_len, mask_size);
+ mutex_unlock(&mask->lock);
+ mask_info->status = DIAG_CTRL_MASK_VALID;
+ break;
+ }
+ mutex_unlock(&mask_info->lock);
+
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_SET_MSG_MASK;
+ rsp.ssid_first = req->ssid_first;
+ rsp.ssid_last = req->ssid_last;
+ rsp.status = found;
+ rsp.padding = 0;
+ memcpy(dest_buf, &rsp, header_len);
+ write_len += header_len;
+ if (!found)
+ goto end;
+ if (mask_size + write_len > dest_len)
+ mask_size = dest_len - write_len;
+ memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
+ write_len += mask_size;
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+ }
+end:
+ return write_len;
+}
+
+static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int write_len = 0;
+ int header_len = sizeof(struct diag_msg_config_rsp_t);
+ struct diag_msg_config_rsp_t rsp;
+ struct diag_msg_config_rsp_t *req = NULL;
+ struct diag_msg_mask_t *mask = NULL;
+ struct diag_mask_info *mask_info = NULL;
+
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ req = (struct diag_msg_config_rsp_t *)src_buf;
+
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ mutex_lock(&mask_info->lock);
+ mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
+ DIAG_CTRL_MASK_ALL_DISABLED;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ mutex_lock(&mask->lock);
+ memset(mask->ptr, req->rt_mask,
+ mask->range * sizeof(uint32_t));
+ mutex_unlock(&mask->lock);
+ }
+ mutex_unlock(&mask_info->lock);
+
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+ rsp.sub_cmd = DIAG_CMD_OP_SET_ALL_MSG_MASK;
+ rsp.status = MSG_STATUS_SUCCESS;
+ rsp.padding = 0;
+ rsp.rt_mask = req->rt_mask;
+ memcpy(dest_buf, &rsp, header_len);
+ write_len += header_len;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+ }
+
+ return write_len;
+}
+
+static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int write_len = 0;
+ uint32_t mask_size;
+ struct diag_event_mask_config_t rsp;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ if (!diag_apps_responds())
+ return 0;
+
+ mask_size = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+ if (mask_size + sizeof(rsp) > dest_len) {
+ pr_err("diag: In %s, invalid mask size: %d\n", __func__,
+ mask_size);
+ return -ENOMEM;
+ }
+
+ rsp.cmd_code = DIAG_CMD_GET_EVENT_MASK;
+ rsp.status = EVENT_STATUS_SUCCESS;
+ rsp.padding = 0;
+ rsp.num_bits = driver->last_event_id + 1;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+ memcpy(dest_buf + write_len, event_mask.ptr, mask_size);
+ write_len += mask_size;
+
+ return write_len;
+}
+
+static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int write_len = 0;
+ int mask_len = 0;
+ int header_len = sizeof(struct diag_event_mask_config_t);
+ struct diag_event_mask_config_t rsp;
+ struct diag_event_mask_config_t *req;
+ struct diag_mask_info *mask_info = NULL;
+
+ mask_info = (!info) ? &event_mask : info->event_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ req = (struct diag_event_mask_config_t *)src_buf;
+ mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
+ if (mask_len <= 0 || mask_len > event_mask.mask_len) {
+ pr_err("diag: In %s, invalid event mask len: %d\n", __func__,
+ mask_len);
+ return -EIO;
+ }
+
+ mutex_lock(&mask_info->lock);
+ memcpy(mask_info->ptr, src_buf + header_len, mask_len);
+ mask_info->status = DIAG_CTRL_MASK_VALID;
+ mutex_unlock(&mask_info->lock);
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ rsp.cmd_code = DIAG_CMD_SET_EVENT_MASK;
+ rsp.status = EVENT_STATUS_SUCCESS;
+ rsp.padding = 0;
+ rsp.num_bits = driver->last_event_id + 1;
+ memcpy(dest_buf, &rsp, header_len);
+ write_len += header_len;
+ memcpy(dest_buf + write_len, mask_info->ptr, mask_len);
+ write_len += mask_len;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ diag_send_event_mask_update(i);
+ }
+
+ return write_len;
+}
+
+static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int write_len = 0;
+ uint8_t toggle = 0;
+ struct diag_event_report_t header;
+ struct diag_mask_info *mask_info = NULL;
+
+ mask_info = (!info) ? &event_mask : info->event_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ toggle = *(src_buf + 1);
+ mutex_lock(&mask_info->lock);
+ if (toggle) {
+ mask_info->status = DIAG_CTRL_MASK_ALL_ENABLED;
+ memset(mask_info->ptr, 0xFF, mask_info->mask_len);
+ } else {
+ mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+ memset(mask_info->ptr, 0, mask_info->mask_len);
+ }
+ mutex_unlock(&mask_info->lock);
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ header.cmd_code = DIAG_CMD_EVENT_TOGGLE;
+ header.padding = 0;
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ diag_send_event_mask_update(i);
+ }
+ memcpy(dest_buf, &header, sizeof(header));
+ write_len += sizeof(header);
+
+ return write_len;
+}
+
+static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int status = LOG_STATUS_INVALID;
+ int write_len = 0;
+ int read_len = 0;
+ int req_header_len = sizeof(struct diag_log_config_req_t);
+ int rsp_header_len = sizeof(struct diag_log_config_rsp_t);
+ uint32_t mask_size = 0;
+ struct diag_log_mask_t *log_item = NULL;
+ struct diag_log_config_req_t *req;
+ struct diag_log_config_rsp_t rsp;
+ struct diag_mask_info *mask_info = NULL;
+
+ mask_info = (!info) ? &log_mask : info->log_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ if (!diag_apps_responds())
+ return 0;
+
+ req = (struct diag_log_config_req_t *)src_buf;
+ read_len += req_header_len;
+
+ rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+ rsp.padding[0] = 0;
+ rsp.padding[1] = 0;
+ rsp.padding[2] = 0;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_MASK;
+ /*
+ * Don't copy the response header now. Copy at the end after
+ * calculating the status field value
+ */
+ write_len += rsp_header_len;
+
+ log_item = (struct diag_log_mask_t *)mask_info->ptr;
+ for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
+ if (log_item->equip_id != req->equip_id)
+ continue;
+ mutex_lock(&log_item->lock);
+ mask_size = LOG_ITEMS_TO_SIZE(log_item->num_items_tools);
+ /*
+ * Make sure we have space to fill the response in the buffer.
+ * Destination buffer should at least be able to hold equip_id
+ * (uint32_t), num_items(uint32_t), mask (mask_size) and the
+ * response header.
+ */
+ if ((mask_size + (2 * sizeof(uint32_t)) + rsp_header_len) >
+ dest_len) {
+ pr_err("diag: In %s, invalid length: %d, max rsp_len: %d\n",
+ __func__, mask_size, dest_len);
+ status = LOG_STATUS_FAIL;
+ mutex_unlock(&log_item->lock);
+ break;
+ }
+ *(uint32_t *)(dest_buf + write_len) = log_item->equip_id;
+ write_len += sizeof(uint32_t);
+ *(uint32_t *)(dest_buf + write_len) = log_item->num_items_tools;
+ write_len += sizeof(uint32_t);
+ if (mask_size > 0) {
+ memcpy(dest_buf + write_len, log_item->ptr, mask_size);
+ write_len += mask_size;
+ }
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "sending log e %d num_items %d size %d\n",
+ log_item->equip_id, log_item->num_items_tools,
+ log_item->range_tools);
+ mutex_unlock(&log_item->lock);
+ status = LOG_STATUS_SUCCESS;
+ break;
+ }
+
+ rsp.status = status;
+ memcpy(dest_buf, &rsp, rsp_header_len);
+
+ return write_len;
+}
+
+static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int write_len = 0;
+ struct diag_log_config_rsp_t rsp;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_log_mask_t *mask = (struct diag_log_mask_t *)log_mask.ptr;
+
+ if (!diag_apps_responds())
+ return 0;
+
+ mask_info = (!info) ? &log_mask : info->log_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+ rsp.padding[0] = 0;
+ rsp.padding[1] = 0;
+ rsp.padding[2] = 0;
+ rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_RANGE;
+ rsp.status = LOG_STATUS_SUCCESS;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+
+ for (i = 0; i < MAX_EQUIP_ID && write_len < dest_len; i++, mask++) {
+ *(uint32_t *)(dest_buf + write_len) = mask->num_items_tools;
+ write_len += sizeof(uint32_t);
+ }
+
+ return write_len;
+}
+
+static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int write_len = 0;
+ int status = LOG_STATUS_SUCCESS;
+ int read_len = 0;
+ int payload_len = 0;
+ int req_header_len = sizeof(struct diag_log_config_req_t);
+ int rsp_header_len = sizeof(struct diag_log_config_set_rsp_t);
+ uint32_t mask_size = 0;
+ struct diag_log_config_req_t *req;
+ struct diag_log_config_set_rsp_t rsp;
+ struct diag_log_mask_t *mask = NULL;
+ unsigned char *temp_buf = NULL;
+ struct diag_mask_info *mask_info = NULL;
+
+ mask_info = (!info) ? &log_mask : info->log_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ req = (struct diag_log_config_req_t *)src_buf;
+ read_len += req_header_len;
+ mask = (struct diag_log_mask_t *)mask_info->ptr;
+
+ if (req->equip_id >= MAX_EQUIP_ID) {
+ pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
+ __func__, req->equip_id);
+ status = LOG_STATUS_INVALID;
+ }
+
+ if (req->num_items == 0) {
+ pr_err("diag: In %s, Invalid number of items in log mask request, equip_id: %d\n",
+ __func__, req->equip_id);
+ status = LOG_STATUS_INVALID;
+ }
+
+ mutex_lock(&mask_info->lock);
+ for (i = 0; i < MAX_EQUIP_ID && !status; i++, mask++) {
+ if (mask->equip_id != req->equip_id)
+ continue;
+ mutex_lock(&mask->lock);
+
+ DIAG_LOG(DIAG_DEBUG_MASKS, "e: %d current: %d %d new: %d %d",
+ mask->equip_id, mask->num_items_tools,
+ mask->range_tools, req->num_items,
+ LOG_ITEMS_TO_SIZE(req->num_items));
+ /*
+ * If the size of the log mask cannot fit into our
+ * buffer, trim till we have space left in the buffer.
+ * num_items should then reflect the items that we have
+ * in our buffer.
+ */
+ mask->num_items_tools = (req->num_items > MAX_ITEMS_ALLOWED) ?
+ MAX_ITEMS_ALLOWED : req->num_items;
+ mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+ memset(mask->ptr, 0, mask->range_tools);
+ if (mask_size > mask->range_tools) {
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "log range mismatch, e: %d old: %d new: %d\n",
+ req->equip_id, mask->range_tools,
+ LOG_ITEMS_TO_SIZE(mask->num_items_tools));
+ /* Change in the mask reported by tools */
+ temp_buf = krealloc(mask->ptr, mask_size, GFP_KERNEL);
+ if (!temp_buf) {
+ mask_info->status = DIAG_CTRL_MASK_INVALID;
+ mutex_unlock(&mask->lock);
+ break;
+ }
+ mask->ptr = temp_buf;
+ memset(mask->ptr, 0, mask_size);
+ mask->range_tools = mask_size;
+ }
+ req->num_items = mask->num_items_tools;
+ if (mask_size > 0)
+ memcpy(mask->ptr, src_buf + read_len, mask_size);
+ DIAG_LOG(DIAG_DEBUG_MASKS,
+ "copying log mask, e %d num %d range %d size %d\n",
+ req->equip_id, mask->num_items_tools,
+ mask->range_tools, mask_size);
+ mutex_unlock(&mask->lock);
+ mask_info->status = DIAG_CTRL_MASK_VALID;
+ break;
+ }
+ mutex_unlock(&mask_info->lock);
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ payload_len = LOG_ITEMS_TO_SIZE(req->num_items);
+ if ((payload_len + rsp_header_len > dest_len) || (payload_len == 0)) {
+ pr_err("diag: In %s, invalid length, payload_len: %d, header_len: %d, dest_len: %d\n",
+ __func__, payload_len, rsp_header_len, dest_len);
+ status = LOG_STATUS_FAIL;
+ }
+ rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+ rsp.padding[0] = 0;
+ rsp.padding[1] = 0;
+ rsp.padding[2] = 0;
+ rsp.sub_cmd = DIAG_CMD_OP_SET_LOG_MASK;
+ rsp.status = status;
+ rsp.equip_id = req->equip_id;
+ rsp.num_items = req->num_items;
+ memcpy(dest_buf, &rsp, rsp_header_len);
+ write_len += rsp_header_len;
+ if (status != LOG_STATUS_SUCCESS)
+ goto end;
+ memcpy(dest_buf + write_len, src_buf + read_len, payload_len);
+ write_len += payload_len;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ diag_send_log_mask_update(i, req->equip_id);
+ }
+end:
+ return write_len;
+}
+
+static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info)
+{
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_log_mask_t *mask = NULL;
+ struct diag_log_config_rsp_t header;
+ int write_len = 0;
+ int i;
+
+ mask_info = (!info) ? &log_mask : info->log_mask;
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+ !mask_info) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+ __func__, src_buf, src_len, dest_buf, dest_len,
+ mask_info);
+ return -EINVAL;
+ }
+
+ mask = (struct diag_log_mask_t *)mask_info->ptr;
+
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ mutex_lock(&mask->lock);
+ memset(mask->ptr, 0, mask->range);
+ mutex_unlock(&mask->lock);
+ }
+ mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+ if (diag_check_update(APPS_DATA))
+ diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+ /*
+ * Apps processor must send the response to this command. Frame the
+ * response.
+ */
+ header.cmd_code = DIAG_CMD_LOG_CONFIG;
+ header.padding[0] = 0;
+ header.padding[1] = 0;
+ header.padding[2] = 0;
+ header.sub_cmd = DIAG_CMD_OP_LOG_DISABLE;
+ header.status = LOG_STATUS_SUCCESS;
+ memcpy(dest_buf, &header, sizeof(struct diag_log_config_rsp_t));
+ write_len += sizeof(struct diag_log_config_rsp_t);
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!diag_check_update(i))
+ continue;
+ diag_send_log_mask_update(i, ALL_EQUIP_ID);
+ }
+
+ return write_len;
+}
+
+int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+ struct diag_ssid_range_t *range)
+{
+ if (!msg_mask || !range)
+ return -EIO;
+ if (range->ssid_last < range->ssid_first)
+ return -EINVAL;
+ msg_mask->ssid_first = range->ssid_first;
+ msg_mask->ssid_last = range->ssid_last;
+ msg_mask->ssid_last_tools = range->ssid_last;
+ msg_mask->range = msg_mask->ssid_last - msg_mask->ssid_first + 1;
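+	/*
+	 * Round the range up to MAX_SSID_PER_RANGE so the mask can
+	 * accommodate the largest SSID range a client may later set
+	 * without reallocating.
+	 */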
+ if (msg_mask->range < MAX_SSID_PER_RANGE)
+ msg_mask->range = MAX_SSID_PER_RANGE;
+ msg_mask->range_tools = msg_mask->range;
+ mutex_init(&msg_mask->lock);
+ if (msg_mask->range > 0) {
+ msg_mask->ptr = kcalloc(msg_mask->range, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!msg_mask->ptr)
+ return -ENOMEM;
+ kmemleak_not_leak(msg_mask->ptr);
+ }
+ return 0;
+}
+
+static int diag_create_msg_mask_table(void)
+{
+ int i;
+ int err = 0;
+ struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
+ struct diag_ssid_range_t range;
+
+ mutex_lock(&msg_mask.lock);
+ driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ range.ssid_first = msg_mask_tbl[i].ssid_first;
+ range.ssid_last = msg_mask_tbl[i].ssid_last;
+ err = diag_create_msg_mask_table_entry(mask, &range);
+ if (err)
+ break;
+ }
+ mutex_unlock(&msg_mask.lock);
+ return err;
+}
+
+static int diag_create_build_time_mask(void)
+{
+ int i;
+ int err = 0;
+ const uint32_t *tbl = NULL;
+ uint32_t tbl_size = 0;
+ struct diag_msg_mask_t *build_mask = NULL;
+ struct diag_ssid_range_t range;
+
+ mutex_lock(&msg_bt_mask.lock);
+ build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+ range.ssid_first = msg_mask_tbl[i].ssid_first;
+ range.ssid_last = msg_mask_tbl[i].ssid_last;
+ err = diag_create_msg_mask_table_entry(build_mask, &range);
+ if (err)
+ break;
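+		/*
+		 * Pick the build time mask table that matches the first
+		 * SSID of this range. tbl_size is in bytes while range is
+		 * in dwords, hence the sizeof(uint32_t) scaling below.
+		 */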
+ switch (build_mask->ssid_first) {
+ case MSG_SSID_0:
+ tbl = msg_bld_masks_0;
+ tbl_size = sizeof(msg_bld_masks_0);
+ break;
+ case MSG_SSID_1:
+ tbl = msg_bld_masks_1;
+ tbl_size = sizeof(msg_bld_masks_1);
+ break;
+ case MSG_SSID_2:
+ tbl = msg_bld_masks_2;
+ tbl_size = sizeof(msg_bld_masks_2);
+ break;
+ case MSG_SSID_3:
+ tbl = msg_bld_masks_3;
+ tbl_size = sizeof(msg_bld_masks_3);
+ break;
+ case MSG_SSID_4:
+ tbl = msg_bld_masks_4;
+ tbl_size = sizeof(msg_bld_masks_4);
+ break;
+ case MSG_SSID_5:
+ tbl = msg_bld_masks_5;
+ tbl_size = sizeof(msg_bld_masks_5);
+ break;
+ case MSG_SSID_6:
+ tbl = msg_bld_masks_6;
+ tbl_size = sizeof(msg_bld_masks_6);
+ break;
+ case MSG_SSID_7:
+ tbl = msg_bld_masks_7;
+ tbl_size = sizeof(msg_bld_masks_7);
+ break;
+ case MSG_SSID_8:
+ tbl = msg_bld_masks_8;
+ tbl_size = sizeof(msg_bld_masks_8);
+ break;
+ case MSG_SSID_9:
+ tbl = msg_bld_masks_9;
+ tbl_size = sizeof(msg_bld_masks_9);
+ break;
+ case MSG_SSID_10:
+ tbl = msg_bld_masks_10;
+ tbl_size = sizeof(msg_bld_masks_10);
+ break;
+ case MSG_SSID_11:
+ tbl = msg_bld_masks_11;
+ tbl_size = sizeof(msg_bld_masks_11);
+ break;
+ case MSG_SSID_12:
+ tbl = msg_bld_masks_12;
+ tbl_size = sizeof(msg_bld_masks_12);
+ break;
+ case MSG_SSID_13:
+ tbl = msg_bld_masks_13;
+ tbl_size = sizeof(msg_bld_masks_13);
+ break;
+ case MSG_SSID_14:
+ tbl = msg_bld_masks_14;
+ tbl_size = sizeof(msg_bld_masks_14);
+ break;
+ case MSG_SSID_15:
+ tbl = msg_bld_masks_15;
+ tbl_size = sizeof(msg_bld_masks_15);
+ break;
+ case MSG_SSID_16:
+ tbl = msg_bld_masks_16;
+ tbl_size = sizeof(msg_bld_masks_16);
+ break;
+ case MSG_SSID_17:
+ tbl = msg_bld_masks_17;
+ tbl_size = sizeof(msg_bld_masks_17);
+ break;
+ case MSG_SSID_18:
+ tbl = msg_bld_masks_18;
+ tbl_size = sizeof(msg_bld_masks_18);
+ break;
+ case MSG_SSID_19:
+ tbl = msg_bld_masks_19;
+ tbl_size = sizeof(msg_bld_masks_19);
+ break;
+ case MSG_SSID_20:
+ tbl = msg_bld_masks_20;
+ tbl_size = sizeof(msg_bld_masks_20);
+ break;
+ case MSG_SSID_21:
+ tbl = msg_bld_masks_21;
+ tbl_size = sizeof(msg_bld_masks_21);
+ break;
+ case MSG_SSID_22:
+ tbl = msg_bld_masks_22;
+ tbl_size = sizeof(msg_bld_masks_22);
+ break;
+ }
+ if (!tbl)
+ continue;
+ if (tbl_size > build_mask->range * sizeof(uint32_t)) {
+ pr_warn("diag: In %s, table %d has more ssid than max, ssid_first: %d, ssid_last: %d\n",
+ __func__, i, build_mask->ssid_first,
+ build_mask->ssid_last);
+ tbl_size = build_mask->range * sizeof(uint32_t);
+ }
+ memcpy(build_mask->ptr, tbl, tbl_size);
+ }
+ mutex_unlock(&msg_bt_mask.lock);
+
+ return err;
+}
+
+static int diag_create_log_mask_table(void)
+{
+ struct diag_log_mask_t *mask = NULL;
+ uint8_t i;
+ int err = 0;
+
+ mutex_lock(&log_mask.lock);
+ mask = (struct diag_log_mask_t *)(log_mask.ptr);
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ mask->equip_id = i;
+ mask->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]);
+ mask->num_items_tools = mask->num_items;
+ mutex_init(&mask->lock);
+ if (LOG_ITEMS_TO_SIZE(mask->num_items) > MAX_ITEMS_PER_EQUIP_ID)
+ mask->range = LOG_ITEMS_TO_SIZE(mask->num_items);
+ else
+ mask->range = MAX_ITEMS_PER_EQUIP_ID;
+ mask->range_tools = mask->range;
+ mask->ptr = kzalloc(mask->range, GFP_KERNEL);
+ if (!mask->ptr) {
+ err = -ENOMEM;
+ break;
+ }
+ kmemleak_not_leak(mask->ptr);
+ }
+ mutex_unlock(&log_mask.lock);
+ return err;
+}
+
+static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
+ int update_buf_len)
+{
+ if (!mask_info || mask_len < 0 || update_buf_len < 0)
+ return -EINVAL;
+
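+	/*
+	 * A zero mask_len or update_buf_len skips the corresponding
+	 * allocation; build time masks, for instance, pass an
+	 * update_buf_len of 0.
+	 */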
+ mask_info->status = DIAG_CTRL_MASK_INVALID;
+ mask_info->mask_len = mask_len;
+ mask_info->update_buf_len = update_buf_len;
+ if (mask_len > 0) {
+ mask_info->ptr = kzalloc(mask_len, GFP_KERNEL);
+ if (!mask_info->ptr)
+ return -ENOMEM;
+ kmemleak_not_leak(mask_info->ptr);
+ }
+ if (update_buf_len > 0) {
+ mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
+ if (!mask_info->update_buf) {
+ kfree(mask_info->ptr);
+ return -ENOMEM;
+ }
+ kmemleak_not_leak(mask_info->update_buf);
+ }
+ mutex_init(&mask_info->lock);
+ return 0;
+}
+
+static void __diag_mask_exit(struct diag_mask_info *mask_info)
+{
+ if (!mask_info)
+ return;
+
+ mutex_lock(&mask_info->lock);
+ kfree(mask_info->ptr);
+ mask_info->ptr = NULL;
+ kfree(mask_info->update_buf);
+ mask_info->update_buf = NULL;
+ mutex_unlock(&mask_info->lock);
+}
+
+int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+ int i;
+ int err = 0;
+ struct diag_log_mask_t *src_mask = NULL;
+ struct diag_log_mask_t *dest_mask = NULL;
+
+ if (!src)
+ return -EINVAL;
+
+ err = __diag_mask_init(dest, LOG_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+
+ mutex_lock(&dest->lock);
+ src_mask = (struct diag_log_mask_t *)(src->ptr);
+ dest_mask = (struct diag_log_mask_t *)(dest->ptr);
+
+ dest->mask_len = src->mask_len;
+ dest->status = src->status;
+
+ for (i = 0; i < MAX_EQUIP_ID; i++, src_mask++, dest_mask++) {
+ dest_mask->equip_id = src_mask->equip_id;
+ dest_mask->num_items = src_mask->num_items;
+ dest_mask->num_items_tools = src_mask->num_items_tools;
+ mutex_init(&dest_mask->lock);
+ dest_mask->range = src_mask->range;
+ dest_mask->range_tools = src_mask->range_tools;
+ dest_mask->ptr = kzalloc(dest_mask->range_tools, GFP_KERNEL);
+ if (!dest_mask->ptr) {
+ err = -ENOMEM;
+ break;
+ }
+ kmemleak_not_leak(dest_mask->ptr);
+ memcpy(dest_mask->ptr, src_mask->ptr, dest_mask->range_tools);
+ }
+ mutex_unlock(&dest->lock);
+
+ return err;
+}
+
+void diag_log_mask_free(struct diag_mask_info *mask_info)
+{
+ int i;
+ struct diag_log_mask_t *mask = NULL;
+
+ if (!mask_info)
+ return;
+
+ mutex_lock(&mask_info->lock);
+ mask = (struct diag_log_mask_t *)mask_info->ptr;
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ kfree(mask->ptr);
+ mask->ptr = NULL;
+ }
+ mutex_unlock(&mask_info->lock);
+
+ __diag_mask_exit(mask_info);
+}
+
+static int diag_msg_mask_init(void)
+{
+ int err = 0;
+ int i;
+
+ err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+ err = diag_create_msg_mask_table();
+ if (err) {
+ pr_err("diag: Unable to create msg masks, err: %d\n", err);
+ return err;
+ }
+ driver->msg_mask = &msg_mask;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ driver->max_ssid_count[i] = 0;
+
+ return 0;
+}
+
+int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+ int i;
+ int err = 0;
+ struct diag_msg_mask_t *src_mask = NULL;
+ struct diag_msg_mask_t *dest_mask = NULL;
+ struct diag_ssid_range_t range;
+
+ if (!src || !dest)
+ return -EINVAL;
+
+ err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+
+ mutex_lock(&dest->lock);
+ src_mask = (struct diag_msg_mask_t *)src->ptr;
+ dest_mask = (struct diag_msg_mask_t *)dest->ptr;
+
+ dest->mask_len = src->mask_len;
+ dest->status = src->status;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++) {
+ range.ssid_first = src_mask->ssid_first;
+ range.ssid_last = src_mask->ssid_last;
+ err = diag_create_msg_mask_table_entry(dest_mask, &range);
+ if (err)
+ break;
+ memcpy(dest_mask->ptr, src_mask->ptr,
+ dest_mask->range * sizeof(uint32_t));
+ src_mask++;
+ dest_mask++;
+ }
+ mutex_unlock(&dest->lock);
+
+ return err;
+}
+
+void diag_msg_mask_free(struct diag_mask_info *mask_info)
+{
+ int i;
+ struct diag_msg_mask_t *mask = NULL;
+
+ if (!mask_info)
+ return;
+
+ mutex_lock(&mask_info->lock);
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ kfree(mask->ptr);
+ mask->ptr = NULL;
+ }
+ mutex_unlock(&mask_info->lock);
+
+ __diag_mask_exit(mask_info);
+}
+
+static void diag_msg_mask_exit(void)
+{
+ int i;
+ struct diag_msg_mask_t *mask = NULL;
+
+ mask = (struct diag_msg_mask_t *)(msg_mask.ptr);
+ if (mask) {
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
+ kfree(mask->ptr);
+ kfree(msg_mask.ptr);
+ }
+
+ kfree(msg_mask.update_buf);
+}
+
+static int diag_build_time_mask_init(void)
+{
+ int err = 0;
+
+ /* There is no need for update buffer for Build Time masks */
+ err = __diag_mask_init(&msg_bt_mask, MSG_MASK_SIZE, 0);
+ if (err)
+ return err;
+ err = diag_create_build_time_mask();
+ if (err) {
+ pr_err("diag: Unable to create msg build time masks, err: %d\n",
+ err);
+ return err;
+ }
+ driver->build_time_mask = &msg_bt_mask;
+ return 0;
+}
+
+static void diag_build_time_mask_exit(void)
+{
+ int i;
+ struct diag_msg_mask_t *mask = NULL;
+
+ mask = (struct diag_msg_mask_t *)(msg_bt_mask.ptr);
+ if (mask) {
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
+ kfree(mask->ptr);
+		kfree(msg_bt_mask.ptr);
+ }
+}
+
+static int diag_log_mask_init(void)
+{
+ int err = 0;
+ int i;
+
+ err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+ err = diag_create_log_mask_table();
+ if (err)
+ return err;
+ driver->log_mask = &log_mask;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ driver->num_equip_id[i] = 0;
+
+ return 0;
+}
+
+static void diag_log_mask_exit(void)
+{
+ int i;
+ struct diag_log_mask_t *mask = NULL;
+
+ mask = (struct diag_log_mask_t *)(log_mask.ptr);
+ if (mask) {
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++)
+ kfree(mask->ptr);
+ kfree(log_mask.ptr);
+ }
+
+ kfree(log_mask.update_buf);
+}
+
+static int diag_event_mask_init(void)
+{
+ int err = 0;
+ int i;
+
+ err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+ driver->event_mask_size = EVENT_MASK_SIZE;
+ driver->last_event_id = APPS_EVENT_LAST_ID;
+ driver->event_mask = &event_mask;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ driver->num_event_id[i] = 0;
+
+ return 0;
+}
+
+int diag_event_mask_copy(struct diag_mask_info *dest,
+ struct diag_mask_info *src)
+{
+ int err = 0;
+
+ if (!src || !dest)
+ return -EINVAL;
+
+ err = __diag_mask_init(dest, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+ if (err)
+ return err;
+
+ mutex_lock(&dest->lock);
+ dest->mask_len = src->mask_len;
+ dest->status = src->status;
+ memcpy(dest->ptr, src->ptr, dest->mask_len);
+ mutex_unlock(&dest->lock);
+
+ return err;
+}
+
+void diag_event_mask_free(struct diag_mask_info *mask_info)
+{
+ if (!mask_info)
+ return;
+
+ __diag_mask_exit(mask_info);
+}
+
+static void diag_event_mask_exit(void)
+{
+ kfree(event_mask.ptr);
+ kfree(event_mask.update_buf);
+}
+
+int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int err = 0;
+ int len = 0;
+ int copy_len = 0;
+ int total_len = 0;
+ struct diag_msg_mask_userspace_t header;
+ struct diag_mask_info *mask_info = NULL;
+ struct diag_msg_mask_t *mask = NULL;
+ unsigned char *ptr = NULL;
+
+ if (!buf || count == 0)
+ return -EINVAL;
+
+ mask_info = (!info) ? &msg_mask : info->msg_mask;
+ if (!mask_info)
+ return -EIO;
+
+ mutex_lock(&driver->diag_maskclear_mutex);
+ if (driver->mask_clear) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:%s: count = %zu\n", __func__, count);
+ mutex_unlock(&driver->diag_maskclear_mutex);
+ return -EIO;
+ }
+ mutex_unlock(&driver->diag_maskclear_mutex);
+
+ mutex_lock(&mask_info->lock);
+ mask = (struct diag_msg_mask_t *)(mask_info->ptr);
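+	/*
+	 * Each entry is staged in update_buf as a
+	 * diag_msg_mask_userspace_t header followed by range_tools
+	 * dwords of mask data, then copied out to user space.
+	 */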
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+ ptr = mask_info->update_buf;
+ len = 0;
+ mutex_lock(&mask->lock);
+ header.ssid_first = mask->ssid_first;
+ header.ssid_last = mask->ssid_last_tools;
+ header.range = mask->range_tools;
+ memcpy(ptr, &header, sizeof(header));
+ len += sizeof(header);
+ copy_len = (sizeof(uint32_t) * mask->range_tools);
+ if ((len + copy_len) > mask_info->update_buf_len) {
+ pr_err("diag: In %s, no space to update msg mask, first: %d, last: %d\n",
+ __func__, mask->ssid_first,
+ mask->ssid_last_tools);
+ mutex_unlock(&mask->lock);
+ continue;
+ }
+ memcpy(ptr + len, mask->ptr, copy_len);
+ len += copy_len;
+ mutex_unlock(&mask->lock);
+ /* + sizeof(int) to account for data_type already in buf */
+ if (total_len + sizeof(int) + len > count) {
+ pr_err("diag: In %s, unable to send msg masks to user space, total_len: %d, count: %zu\n",
+ __func__, total_len, count);
+ err = -ENOMEM;
+ break;
+ }
+ err = copy_to_user(buf + total_len, (void *)ptr, len);
+ if (err) {
+ pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
+ __func__, err);
+ break;
+ }
+ total_len += len;
+ }
+ mutex_unlock(&mask_info->lock);
+
+ return err ? err : total_len;
+}
+
+int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int err = 0;
+ int len = 0;
+ int copy_len = 0;
+ int total_len = 0;
+ struct diag_log_mask_userspace_t header;
+ struct diag_log_mask_t *mask = NULL;
+ struct diag_mask_info *mask_info = NULL;
+ unsigned char *ptr = NULL;
+
+ if (!buf || count == 0)
+ return -EINVAL;
+
+ mask_info = (!info) ? &log_mask : info->log_mask;
+ if (!mask_info)
+ return -EIO;
+
+ mutex_lock(&mask_info->lock);
+ mask = (struct diag_log_mask_t *)(mask_info->ptr);
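+	/*
+	 * Same framing as the msg masks: a diag_log_mask_userspace_t
+	 * header followed by the mask bytes for that equipment ID.
+	 */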
+ for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+ ptr = mask_info->update_buf;
+ len = 0;
+ mutex_lock(&mask->lock);
+ header.equip_id = mask->equip_id;
+ header.num_items = mask->num_items_tools;
+ memcpy(ptr, &header, sizeof(header));
+ len += sizeof(header);
+ copy_len = LOG_ITEMS_TO_SIZE(header.num_items);
+ if ((len + copy_len) > mask_info->update_buf_len) {
+ pr_err("diag: In %s, no space to update log mask, equip_id: %d\n",
+ __func__, mask->equip_id);
+ mutex_unlock(&mask->lock);
+ continue;
+ }
+ memcpy(ptr + len, mask->ptr, copy_len);
+ len += copy_len;
+ mutex_unlock(&mask->lock);
+ /* + sizeof(int) to account for data_type already in buf */
+ if (total_len + sizeof(int) + len > count) {
+ pr_err("diag: In %s, unable to send log masks to user space, total_len: %d, count: %zu\n",
+ __func__, total_len, count);
+ err = -ENOMEM;
+ break;
+ }
+ err = copy_to_user(buf + total_len, (void *)ptr, len);
+ if (err) {
+ pr_err("diag: In %s Unable to send log masks to user space clients, err: %d\n",
+ __func__, err);
+ break;
+ }
+ total_len += len;
+ }
+ mutex_unlock(&mask_info->lock);
+
+ return err ? err : total_len;
+}
+
+void diag_send_updates_peripheral(uint8_t peripheral)
+{
+ diag_send_feature_mask_update(peripheral);
+ if (driver->time_sync_enabled)
+ diag_send_time_sync_update(peripheral);
+ diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
+ diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
+ diag_send_event_mask_update(peripheral);
+ diag_send_real_time_update(peripheral,
+ driver->real_time_mode[DIAG_LOCAL_PROC]);
+ diag_send_peripheral_buffering_mode(
+ &driver->buffering_mode[peripheral]);
+}
+
+int diag_process_apps_masks(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
+{
+ int size = 0;
+ int sub_cmd = 0;
+ int (*hdlr)(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len,
+ struct diag_md_session_t *info) = NULL;
+
+ if (!buf || len <= 0)
+ return -EINVAL;
+
+ if (*buf == DIAG_CMD_LOG_CONFIG) {
+ sub_cmd = *(int *)(buf + sizeof(int));
+ switch (sub_cmd) {
+ case DIAG_CMD_OP_LOG_DISABLE:
+ hdlr = diag_cmd_disable_log_mask;
+ break;
+ case DIAG_CMD_OP_GET_LOG_RANGE:
+ hdlr = diag_cmd_get_log_range;
+ break;
+ case DIAG_CMD_OP_SET_LOG_MASK:
+ hdlr = diag_cmd_set_log_mask;
+ break;
+ case DIAG_CMD_OP_GET_LOG_MASK:
+ hdlr = diag_cmd_get_log_mask;
+ break;
+ }
+ } else if (*buf == DIAG_CMD_MSG_CONFIG) {
+ sub_cmd = *(uint8_t *)(buf + sizeof(uint8_t));
+ switch (sub_cmd) {
+ case DIAG_CMD_OP_GET_SSID_RANGE:
+ hdlr = diag_cmd_get_ssid_range;
+ break;
+ case DIAG_CMD_OP_GET_BUILD_MASK:
+ hdlr = diag_cmd_get_build_mask;
+ break;
+ case DIAG_CMD_OP_GET_MSG_MASK:
+ hdlr = diag_cmd_get_msg_mask;
+ break;
+ case DIAG_CMD_OP_SET_MSG_MASK:
+ hdlr = diag_cmd_set_msg_mask;
+ break;
+ case DIAG_CMD_OP_SET_ALL_MSG_MASK:
+ hdlr = diag_cmd_set_all_msg_mask;
+ break;
+ }
+ } else if (*buf == DIAG_CMD_GET_EVENT_MASK) {
+ hdlr = diag_cmd_get_event_mask;
+ } else if (*buf == DIAG_CMD_SET_EVENT_MASK) {
+ hdlr = diag_cmd_update_event_mask;
+ } else if (*buf == DIAG_CMD_EVENT_TOGGLE) {
+ hdlr = diag_cmd_toggle_events;
+ }
+
+ if (hdlr)
+ size = hdlr(buf, len, driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE, info);
+
+ return (size > 0) ? size : 0;
+}
+
+int diag_masks_init(void)
+{
+ int err = 0;
+
+ err = diag_msg_mask_init();
+ if (err)
+ goto fail;
+
+ err = diag_build_time_mask_init();
+ if (err)
+ goto fail;
+
+ err = diag_log_mask_init();
+ if (err)
+ goto fail;
+
+ err = diag_event_mask_init();
+ if (err)
+ goto fail;
+
+	if (!driver->buf_feature_mask_update) {
+		driver->buf_feature_mask_update =
+			kzalloc(sizeof(struct diag_ctrl_feature_mask) +
+				FEATURE_MASK_LEN, GFP_KERNEL);
+		if (!driver->buf_feature_mask_update)
+ goto fail;
+ kmemleak_not_leak(driver->buf_feature_mask_update);
+ }
+
+ return 0;
+fail:
+ pr_err("diag: Could not initialize diag mask buffers\n");
+ diag_masks_exit();
+ return -ENOMEM;
+}
+
+void diag_masks_exit(void)
+{
+ diag_msg_mask_exit();
+ diag_build_time_mask_exit();
+ diag_log_mask_exit();
+ diag_event_mask_exit();
+ kfree(driver->buf_feature_mask_update);
+}
diff --git a/drivers/char/diag/diag_masks.h b/drivers/char/diag/diag_masks.h
new file mode 100644
index 0000000..1a52f94
--- /dev/null
+++ b/drivers/char/diag/diag_masks.h
@@ -0,0 +1,180 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MASKS_H
+#define DIAG_MASKS_H
+
+#include "diagfwd.h"
+
+struct diag_log_mask_t {
+ uint8_t equip_id;
+ uint32_t num_items;
+ uint32_t num_items_tools;
+ uint32_t range;
+ uint32_t range_tools;
+ struct mutex lock;
+ uint8_t *ptr;
+};
+
+struct diag_ssid_range_t {
+ uint16_t ssid_first;
+ uint16_t ssid_last;
+} __packed;
+
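+/*
+ * The _tools fields track the SSID range last negotiated with host
+ * side tools, which may differ from the build time ssid_last and
+ * range once the tools update the masks.
+ */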
+struct diag_msg_mask_t {
+ uint32_t ssid_first;
+ uint32_t ssid_last;
+ uint32_t ssid_last_tools;
+ uint32_t range;
+ uint32_t range_tools;
+ struct mutex lock;
+ uint32_t *ptr;
+};
+
+struct diag_log_config_req_t {
+ uint8_t cmd_code;
+ uint8_t padding[3];
+ uint32_t sub_cmd;
+ uint32_t equip_id;
+ uint32_t num_items;
+} __packed;
+
+struct diag_log_config_rsp_t {
+ uint8_t cmd_code;
+ uint8_t padding[3];
+ uint32_t sub_cmd;
+ uint32_t status;
+} __packed;
+
+struct diag_log_config_set_rsp_t {
+ uint8_t cmd_code;
+ uint8_t padding[3];
+ uint32_t sub_cmd;
+ uint32_t status;
+ uint32_t equip_id;
+ uint32_t num_items;
+} __packed;
+
+struct diag_log_on_demand_rsp_t {
+ uint8_t cmd_code;
+ uint16_t log_code;
+ uint8_t status;
+} __packed;
+
+struct diag_event_report_t {
+ uint8_t cmd_code;
+ uint16_t padding;
+} __packed;
+
+struct diag_event_mask_config_t {
+ uint8_t cmd_code;
+ uint8_t status;
+ uint16_t padding;
+ uint16_t num_bits;
+} __packed;
+
+struct diag_msg_config_rsp_t {
+ uint8_t cmd_code;
+ uint8_t sub_cmd;
+ uint8_t status;
+ uint8_t padding;
+ uint32_t rt_mask;
+} __packed;
+
+struct diag_msg_ssid_query_t {
+ uint8_t cmd_code;
+ uint8_t sub_cmd;
+ uint8_t status;
+ uint8_t padding;
+ uint32_t count;
+} __packed;
+
+struct diag_build_mask_req_t {
+ uint8_t cmd_code;
+ uint8_t sub_cmd;
+ uint16_t ssid_first;
+ uint16_t ssid_last;
+} __packed;
+
+struct diag_msg_build_mask_t {
+ uint8_t cmd_code;
+ uint8_t sub_cmd;
+ uint16_t ssid_first;
+ uint16_t ssid_last;
+ uint8_t status;
+ uint8_t padding;
+} __packed;
+
+struct diag_msg_mask_userspace_t {
+ uint32_t ssid_first;
+ uint32_t ssid_last;
+ uint32_t range;
+} __packed;
+
+struct diag_log_mask_userspace_t {
+ uint8_t equip_id;
+ uint32_t num_items;
+} __packed;
+
+#define MAX_EQUIP_ID 16
+#define MSG_MASK_SIZE (MSG_MASK_TBL_CNT * sizeof(struct diag_msg_mask_t))
+#define LOG_MASK_SIZE (MAX_EQUIP_ID * sizeof(struct diag_log_mask_t))
+#define EVENT_MASK_SIZE 513
+#define MAX_ITEMS_PER_EQUIP_ID 512
+#define MAX_ITEMS_ALLOWED 0xFFF
+
+#define LOG_MASK_CTRL_HEADER_LEN 11
+#define MSG_MASK_CTRL_HEADER_LEN 11
+#define EVENT_MASK_CTRL_HEADER_LEN 7
+
+#define LOG_STATUS_SUCCESS 0
+#define LOG_STATUS_INVALID 1
+#define LOG_STATUS_FAIL 2
+
+#define MSG_STATUS_FAIL 0
+#define MSG_STATUS_SUCCESS 1
+
+#define EVENT_STATUS_SUCCESS 0
+#define EVENT_STATUS_FAIL 1
+
+#define DIAG_CTRL_MASK_INVALID 0
+#define DIAG_CTRL_MASK_ALL_DISABLED 1
+#define DIAG_CTRL_MASK_ALL_ENABLED 2
+#define DIAG_CTRL_MASK_VALID 3
+
+extern struct diag_mask_info msg_mask;
+extern struct diag_mask_info msg_bt_mask;
+extern struct diag_mask_info log_mask;
+extern struct diag_mask_info event_mask;
+
+int diag_masks_init(void);
+void diag_masks_exit(void);
+int diag_log_mask_copy(struct diag_mask_info *dest,
+ struct diag_mask_info *src);
+int diag_msg_mask_copy(struct diag_mask_info *dest,
+ struct diag_mask_info *src);
+int diag_event_mask_copy(struct diag_mask_info *dest,
+ struct diag_mask_info *src);
+void diag_log_mask_free(struct diag_mask_info *mask_info);
+void diag_msg_mask_free(struct diag_mask_info *mask_info);
+void diag_event_mask_free(struct diag_mask_info *mask_info);
+int diag_process_apps_masks(unsigned char *buf, int len,
+ struct diag_md_session_t *info);
+void diag_send_updates_peripheral(uint8_t peripheral);
+
+extern int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+ struct diag_ssid_range_t *range);
+extern int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+ struct diag_md_session_t *info);
+extern int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+ struct diag_md_session_t *info);
+#endif
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
new file mode 100644
index 0000000..558e362
--- /dev/null
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -0,0 +1,379 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diag_memorydevice.h"
+#include "diagfwd_bridge.h"
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+
+struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
+ {
+ .id = DIAG_MD_LOCAL,
+ .ctx = 0,
+ .mempool = POOL_TYPE_MUX_APPS,
+ .num_tbl_entries = 0,
+ .tbl = NULL,
+ .ops = NULL,
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .id = DIAG_MD_MDM,
+ .ctx = 0,
+ .mempool = POOL_TYPE_MDM_MUX,
+ .num_tbl_entries = 0,
+ .tbl = NULL,
+ .ops = NULL,
+ },
+ {
+ .id = DIAG_MD_MDM2,
+ .ctx = 0,
+ .mempool = POOL_TYPE_MDM2_MUX,
+ .num_tbl_entries = 0,
+ .tbl = NULL,
+ .ops = NULL,
+ },
+ {
+ .id = DIAG_MD_SMUX,
+ .ctx = 0,
+ .mempool = POOL_TYPE_QSC_MUX,
+ .num_tbl_entries = 0,
+ .tbl = NULL,
+ .ops = NULL,
+ }
+#endif
+};
+
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops)
+{
+ if (id < 0 || id >= NUM_DIAG_MD_DEV || !ops)
+ return -EINVAL;
+
+ diag_md[id].ops = ops;
+ diag_md[id].ctx = ctx;
+ return 0;
+}
+
+void diag_md_open_all(void)
+{
+ int i;
+ struct diag_md_info *ch = NULL;
+
+ for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+ if (ch->ops && ch->ops->open)
+ ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+ }
+}
+
+void diag_md_close_all(void)
+{
+ int i, j;
+ unsigned long flags;
+ struct diag_md_info *ch = NULL;
+ struct diag_buf_tbl_t *entry = NULL;
+
+ for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+
+ if (ch->ops && ch->ops->close)
+ ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+
+ /*
+ * When we close the Memory device mode, make sure we flush the
+ * internal buffers in the table so that there are no stale
+ * entries.
+ */
+ spin_lock_irqsave(&ch->lock, flags);
+ for (j = 0; j < ch->num_tbl_entries; j++) {
+ entry = &ch->tbl[j];
+ if (entry->len <= 0)
+ continue;
+ if (ch->ops && ch->ops->write_done)
+ ch->ops->write_done(entry->buf, entry->len,
+ entry->ctx,
+ DIAG_MEMORY_DEVICE_MODE);
+ entry->buf = NULL;
+ entry->len = 0;
+ entry->ctx = 0;
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ diag_ws_reset(DIAG_WS_MUX);
+}
+
+int diag_md_write(int id, unsigned char *buf, int len, int ctx)
+{
+ int i;
+ uint8_t found = 0;
+ unsigned long flags;
+ struct diag_md_info *ch = NULL;
+ uint8_t peripheral;
+ struct diag_md_session_t *session_info = NULL;
+
+ if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+ return -EINVAL;
+
+ if (!buf || len < 0)
+ return -EINVAL;
+
+ peripheral = GET_BUF_PERIPHERAL(ctx);
+ if (peripheral > NUM_PERIPHERALS)
+ return -EINVAL;
+
+ session_info = diag_md_session_get_peripheral(peripheral);
+ if (!session_info)
+ return -EIO;
+
+ ch = &diag_md[id];
+
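+	/*
+	 * First pass: reject the write if this buffer is already
+	 * queued in the table. A second pass below claims the first
+	 * free slot for it.
+	 */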
+ spin_lock_irqsave(&ch->lock, flags);
+ for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+ if (ch->tbl[i].buf != buf)
+ continue;
+ found = 1;
+ pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, ctxt: %d len: %d at i: %d back to the table, proc: %d, mode: %d\n",
+ buf, ctx, ch->tbl[i].len,
+ i, id, driver->logging_mode);
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (found)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+ if (ch->tbl[i].len == 0) {
+ ch->tbl[i].buf = buf;
+ ch->tbl[i].len = len;
+ ch->tbl[i].ctx = ctx;
+ found = 1;
+ diag_ws_on_read(DIAG_WS_MUX, len);
+ }
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (!found) {
+ pr_err_ratelimited("diag: Unable to find an empty space in table, please reduce logging rate, proc: %d\n",
+ id);
+ return -ENOMEM;
+ }
+
+ found = 0;
+ for (i = 0; i < driver->num_clients && !found; i++) {
+ if ((driver->client_map[i].pid !=
+ session_info->pid) ||
+ (driver->client_map[i].pid == 0))
+ continue;
+
+ found = 1;
+ driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ pr_debug("diag: wake up logging process\n");
+ wake_up_interruptible(&driver->wait_q);
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+ struct diag_md_session_t *info)
+{
+ int i, j;
+ int err = 0;
+ int ret = *pret;
+ int num_data = 0;
+ int remote_token;
+ unsigned long flags;
+ struct diag_md_info *ch = NULL;
+ struct diag_buf_tbl_t *entry = NULL;
+ uint8_t drain_again = 0;
+ uint8_t peripheral = 0;
+ struct diag_md_session_t *session_info = NULL;
+
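+	/*
+	 * Each entry drained to user space is framed as
+	 * [remote token (remote procs only)][length][payload];
+	 * num_data, written at the end, tells the client how many
+	 * such frames follow the leading data_type field.
+	 */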
+ for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
+ ch = &diag_md[i];
+ for (j = 0; j < ch->num_tbl_entries && !err; j++) {
+ entry = &ch->tbl[j];
+ if (entry->len <= 0)
+ continue;
+ peripheral = GET_BUF_PERIPHERAL(entry->ctx);
+ /* Account for Apps data as well */
+ if (peripheral > NUM_PERIPHERALS)
+ goto drop_data;
+ session_info =
+ diag_md_session_get_peripheral(peripheral);
+ if (session_info && info &&
+ (session_info->pid != info->pid))
+ continue;
+			if (info && (info->peripheral_mask &
+			    MD_PERIPHERAL_MASK(peripheral)) == 0)
+ goto drop_data;
+ /*
+ * If the data is from remote processor, copy the remote
+ * token first
+ */
+ if (i > 0) {
+ if ((ret + (3 * sizeof(int)) + entry->len) >=
+ buf_size) {
+ drain_again = 1;
+ break;
+ }
+ } else {
+ if ((ret + (2 * sizeof(int)) + entry->len) >=
+ buf_size) {
+ drain_again = 1;
+ break;
+ }
+ }
+ if (i > 0) {
+ remote_token = diag_get_remote(i);
+ err = copy_to_user(buf + ret, &remote_token,
+ sizeof(int));
+ if (err)
+ goto drop_data;
+ ret += sizeof(int);
+ }
+
+ /* Copy the length of data being passed */
+ err = copy_to_user(buf + ret, (void *)&(entry->len),
+ sizeof(int));
+ if (err)
+ goto drop_data;
+ ret += sizeof(int);
+
+ /* Copy the actual data being passed */
+ err = copy_to_user(buf + ret, (void *)entry->buf,
+ entry->len);
+ if (err)
+ goto drop_data;
+ ret += entry->len;
+
+ /*
+ * The data is now copied to the user space client,
+ * Notify that the write is complete and delete its
+ * entry from the table
+ */
+ num_data++;
+drop_data:
+ spin_lock_irqsave(&ch->lock, flags);
+ if (ch->ops && ch->ops->write_done)
+ ch->ops->write_done(entry->buf, entry->len,
+ entry->ctx,
+ DIAG_MEMORY_DEVICE_MODE);
+ diag_ws_on_copy(DIAG_WS_MUX);
+ entry->buf = NULL;
+ entry->len = 0;
+ entry->ctx = 0;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+ }
+
+ *pret = ret;
+ err = copy_to_user(buf + sizeof(int), (void *)&num_data, sizeof(int));
+ diag_ws_on_copy_complete(DIAG_WS_MUX);
+ if (drain_again)
+ chk_logging_wakeup();
+
+ return err;
+}
+
+int diag_md_close_peripheral(int id, uint8_t peripheral)
+{
+ int i;
+ uint8_t found = 0;
+ unsigned long flags;
+ struct diag_md_info *ch = NULL;
+ struct diag_buf_tbl_t *entry = NULL;
+
+ if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+ return -EINVAL;
+
+ ch = &diag_md[id];
+
+ spin_lock_irqsave(&ch->lock, flags);
+ for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+ entry = &ch->tbl[i];
+ if (GET_BUF_PERIPHERAL(entry->ctx) != peripheral)
+ continue;
+ found = 1;
+ if (ch->ops && ch->ops->write_done) {
+ ch->ops->write_done(entry->buf, entry->len,
+ entry->ctx,
+ DIAG_MEMORY_DEVICE_MODE);
+ entry->buf = NULL;
+ entry->len = 0;
+ entry->ctx = 0;
+ }
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ return 0;
+}
+
+int diag_md_init(void)
+{
+ int i, j;
+ struct diag_md_info *ch = NULL;
+
+ for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+ ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+ ch->tbl = kzalloc(ch->num_tbl_entries *
+ sizeof(struct diag_buf_tbl_t),
+ GFP_KERNEL);
+ if (!ch->tbl)
+ goto fail;
+
+		spin_lock_init(&ch->lock);
+		for (j = 0; j < ch->num_tbl_entries; j++) {
+			ch->tbl[j].buf = NULL;
+			ch->tbl[j].len = 0;
+			ch->tbl[j].ctx = 0;
+		}
+ }
+
+ return 0;
+
+fail:
+ diag_md_exit();
+ return -ENOMEM;
+}
+
+void diag_md_exit(void)
+{
+ int i;
+ struct diag_md_info *ch = NULL;
+
+ for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+ ch = &diag_md[i];
+ kfree(ch->tbl);
+ ch->num_tbl_entries = 0;
+ ch->ops = NULL;
+ }
+}
diff --git a/drivers/char/diag/diag_memorydevice.h b/drivers/char/diag/diag_memorydevice.h
new file mode 100644
index 0000000..35a1ee3
--- /dev/null
+++ b/drivers/char/diag/diag_memorydevice.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MEMORYDEVICE_H
+#define DIAG_MEMORYDEVICE_H
+
+#define DIAG_MD_LOCAL 0
+#define DIAG_MD_LOCAL_LAST 1
+#define DIAG_MD_BRIDGE_BASE DIAG_MD_LOCAL_LAST
+#define DIAG_MD_MDM (DIAG_MD_BRIDGE_BASE)
+#define DIAG_MD_MDM2 (DIAG_MD_BRIDGE_BASE + 1)
+#define DIAG_MD_SMUX (DIAG_MD_BRIDGE_BASE + 2)
+#define DIAG_MD_BRIDGE_LAST (DIAG_MD_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_MD_DEV DIAG_MD_LOCAL_LAST
+#else
+#define NUM_DIAG_MD_DEV DIAG_MD_BRIDGE_LAST
+#endif
+
+struct diag_buf_tbl_t {
+ unsigned char *buf;
+ int len;
+ int ctx;
+};
+
+struct diag_md_info {
+ int id;
+ int ctx;
+ int mempool;
+ int num_tbl_entries;
+ spinlock_t lock;
+ struct diag_buf_tbl_t *tbl;
+ struct diag_mux_ops *ops;
+};
+
+extern struct diag_md_info diag_md[NUM_DIAG_MD_DEV];
+
+int diag_md_init(void);
+void diag_md_exit(void);
+void diag_md_open_all(void);
+void diag_md_close_all(void);
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops);
+int diag_md_close_peripheral(int id, uint8_t peripheral);
+int diag_md_write(int id, unsigned char *buf, int len, int ctx);
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+ struct diag_md_session_t *info);
+#endif
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
new file mode 100644
index 0000000..8f5a002
--- /dev/null
+++ b/drivers/char/diag/diag_mux.c
@@ -0,0 +1,243 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diag_mux.h"
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+
+struct diag_mux_state_t *diag_mux;
+static struct diag_logger_t usb_logger;
+static struct diag_logger_t md_logger;
+
+static struct diag_logger_ops usb_log_ops = {
+ .open = diag_usb_connect_all,
+ .close = diag_usb_disconnect_all,
+ .queue_read = diag_usb_queue_read,
+ .write = diag_usb_write,
+ .close_peripheral = NULL
+};
+
+static struct diag_logger_ops md_log_ops = {
+ .open = diag_md_open_all,
+ .close = diag_md_close_all,
+ .queue_read = NULL,
+ .write = diag_md_write,
+ .close_peripheral = diag_md_close_peripheral,
+};
+
+int diag_mux_init(void)
+{
+ diag_mux = kzalloc(sizeof(struct diag_mux_state_t),
+ GFP_KERNEL);
+ if (!diag_mux)
+ return -ENOMEM;
+ kmemleak_not_leak(diag_mux);
+
+ usb_logger.mode = DIAG_USB_MODE;
+ usb_logger.log_ops = &usb_log_ops;
+
+ md_logger.mode = DIAG_MEMORY_DEVICE_MODE;
+ md_logger.log_ops = &md_log_ops;
+ diag_md_init();
+
+ /*
+ * Set USB logging as the default logger. This is the mode
+ * Diag should be in when it initializes.
+ */
+ diag_mux->usb_ptr = &usb_logger;
+ diag_mux->md_ptr = &md_logger;
+ diag_mux->logger = &usb_logger;
+ diag_mux->mux_mask = 0;
+ diag_mux->mode = DIAG_USB_MODE;
+ return 0;
+}
+
+void diag_mux_exit(void)
+{
+ kfree(diag_mux);
+}
+
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops)
+{
+ int err = 0;
+
+ if (!ops)
+ return -EINVAL;
+
+ if (proc < 0 || proc >= NUM_MUX_PROC)
+ return 0;
+
+ /* Register with USB logger */
+ usb_logger.ops[proc] = ops;
+ err = diag_usb_register(proc, ctx, ops);
+ if (err) {
+ pr_err("diag: MUX: unable to register usb operations for proc: %d, err: %d\n",
+ proc, err);
+ return err;
+ }
+
+ md_logger.ops[proc] = ops;
+ err = diag_md_register(proc, ctx, ops);
+ if (err) {
+ pr_err("diag: MUX: unable to register md operations for proc: %d, err: %d\n",
+ proc, err);
+ return err;
+ }
+
+ return 0;
+}
+
+int diag_mux_queue_read(int proc)
+{
+ struct diag_logger_t *logger = NULL;
+
+ if (proc < 0 || proc >= NUM_MUX_PROC)
+ return -EINVAL;
+ if (!diag_mux)
+ return -EIO;
+
+ if (diag_mux->mode == DIAG_MULTI_MODE)
+ logger = diag_mux->usb_ptr;
+ else
+ logger = diag_mux->logger;
+
+ if (logger && logger->log_ops && logger->log_ops->queue_read)
+ return logger->log_ops->queue_read(proc);
+
+ return 0;
+}
+
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
+{
+ struct diag_logger_t *logger = NULL;
+ int peripheral;
+
+ if (proc < 0 || proc >= NUM_MUX_PROC)
+ return -EINVAL;
+ if (!diag_mux)
+ return -EIO;
+
+ peripheral = GET_BUF_PERIPHERAL(ctx);
+ if (peripheral > NUM_PERIPHERALS)
+ return -EINVAL;
+
+ if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+ logger = diag_mux->md_ptr;
+ else
+ logger = diag_mux->usb_ptr;
+
+ if (logger && logger->log_ops && logger->log_ops->write)
+ return logger->log_ops->write(proc, buf, len, ctx);
+ return 0;
+}
+
+int diag_mux_close_peripheral(int proc, uint8_t peripheral)
+{
+ struct diag_logger_t *logger = NULL;
+
+ if (proc < 0 || proc >= NUM_MUX_PROC)
+ return -EINVAL;
+ /* Peripheral should account for Apps data as well */
+ if (peripheral > NUM_PERIPHERALS)
+ return -EINVAL;
+ if (!diag_mux)
+ return -EIO;
+
+ if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+ logger = diag_mux->md_ptr;
+ else
+ logger = diag_mux->logger;
+
+ if (logger && logger->log_ops && logger->log_ops->close_peripheral)
+ return logger->log_ops->close_peripheral(proc, peripheral);
+ return 0;
+}
+
+int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
+{
+ unsigned int new_mask = 0;
+
+ if (!req_mode)
+ return -EINVAL;
+
+ if (*peripheral_mask <= 0 || *peripheral_mask > DIAG_CON_ALL) {
+ pr_err("diag: mask %d in %s\n", *peripheral_mask, __func__);
+ return -EINVAL;
+ }
+
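+	/*
+	 * mux_mask tracks the peripherals currently routed to memory
+	 * device mode. Requesting USB mode clears the given
+	 * peripherals from the mask; requesting memory device mode
+	 * sets them. If the result is neither all-USB nor all-memory
+	 * device, the effective mode is DIAG_MULTI_MODE.
+	 */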
+ switch (*req_mode) {
+ case DIAG_USB_MODE:
+ new_mask = ~(*peripheral_mask) & diag_mux->mux_mask;
+ if (new_mask != DIAG_CON_NONE)
+ *req_mode = DIAG_MULTI_MODE;
+ break;
+ case DIAG_MEMORY_DEVICE_MODE:
+ new_mask = (*peripheral_mask) | diag_mux->mux_mask;
+ if (new_mask != DIAG_CON_ALL)
+ *req_mode = DIAG_MULTI_MODE;
+ break;
+ default:
+ pr_err("diag: Invalid mode %d in %s\n", *req_mode, __func__);
+ return -EINVAL;
+ }
+
+ switch (diag_mux->mode) {
+ case DIAG_USB_MODE:
+ if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+ diag_mux->usb_ptr->log_ops->close();
+ diag_mux->logger = diag_mux->md_ptr;
+ diag_mux->md_ptr->log_ops->open();
+ } else if (*req_mode == DIAG_MULTI_MODE) {
+ diag_mux->md_ptr->log_ops->open();
+ diag_mux->logger = NULL;
+ }
+ break;
+ case DIAG_MEMORY_DEVICE_MODE:
+ if (*req_mode == DIAG_USB_MODE) {
+ diag_mux->md_ptr->log_ops->close();
+ diag_mux->logger = diag_mux->usb_ptr;
+ diag_mux->usb_ptr->log_ops->open();
+ } else if (*req_mode == DIAG_MULTI_MODE) {
+ diag_mux->usb_ptr->log_ops->open();
+ diag_mux->logger = NULL;
+ }
+ break;
+ case DIAG_MULTI_MODE:
+ if (*req_mode == DIAG_USB_MODE) {
+ diag_mux->md_ptr->log_ops->close();
+ diag_mux->logger = diag_mux->usb_ptr;
+ } else if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+ diag_mux->usb_ptr->log_ops->close();
+ diag_mux->logger = diag_mux->md_ptr;
+ }
+ break;
+ }
+ diag_mux->mode = *req_mode;
+ diag_mux->mux_mask = new_mask;
+ *peripheral_mask = new_mask;
+ return 0;
+}
diff --git a/drivers/char/diag/diag_mux.h b/drivers/char/diag/diag_mux.h
new file mode 100644
index 0000000..e1fcebb
--- /dev/null
+++ b/drivers/char/diag/diag_mux.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_MUX_H
+#define DIAG_MUX_H
+#include "diagchar.h"
+
+struct diag_mux_state_t {
+ struct diag_logger_t *logger;
+ struct diag_logger_t *usb_ptr;
+ struct diag_logger_t *md_ptr;
+ unsigned int mux_mask;
+ unsigned int mode;
+};
+
+struct diag_mux_ops {
+ int (*open)(int id, int mode);
+ int (*close)(int id, int mode);
+ int (*read_done)(unsigned char *buf, int len, int id);
+ int (*write_done)(unsigned char *buf, int len, int buf_ctx,
+ int id);
+};
+
+#define DIAG_USB_MODE 0
+#define DIAG_MEMORY_DEVICE_MODE 1
+#define DIAG_NO_LOGGING_MODE 2
+#define DIAG_MULTI_MODE 3
+
+#define DIAG_MUX_LOCAL 0
+#define DIAG_MUX_LOCAL_LAST 1
+#define DIAG_MUX_BRIDGE_BASE DIAG_MUX_LOCAL_LAST
+#define DIAG_MUX_MDM (DIAG_MUX_BRIDGE_BASE)
+#define DIAG_MUX_MDM2 (DIAG_MUX_BRIDGE_BASE + 1)
+#define DIAG_MUX_SMUX (DIAG_MUX_BRIDGE_BASE + 2)
+#define DIAG_MUX_BRIDGE_LAST (DIAG_MUX_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MUX_PROC DIAG_MUX_LOCAL_LAST
+#else
+#define NUM_MUX_PROC DIAG_MUX_BRIDGE_LAST
+#endif
+
+struct diag_logger_ops {
+ void (*open)(void);
+ void (*close)(void);
+ int (*queue_read)(int id);
+ int (*write)(int id, unsigned char *buf, int len, int ctx);
+ int (*close_peripheral)(int id, uint8_t peripheral);
+};
+
+struct diag_logger_t {
+ int mode;
+ struct diag_mux_ops *ops[NUM_MUX_PROC];
+ struct diag_logger_ops *log_ops;
+};
+
+extern struct diag_mux_state_t *diag_mux;
+
+int diag_mux_init(void);
+void diag_mux_exit(void);
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops);
+int diag_mux_queue_read(int proc);
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx);
+int diag_mux_close_peripheral(int proc, uint8_t peripheral);
+int diag_mux_open_all(struct diag_logger_t *logger);
+int diag_mux_close_all(void);
+int diag_mux_switch_logging(int *new_mode, int *peripheral_mask);
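+
+/*
+ * Typical registration flow (a minimal sketch; usb_open, usb_close,
+ * usb_read_done and usb_write_done stand in for a transport's real
+ * callbacks):
+ *
+ *	static struct diag_mux_ops ops = {
+ *		.open = usb_open,
+ *		.close = usb_close,
+ *		.read_done = usb_read_done,
+ *		.write_done = usb_write_done,
+ *	};
+ *
+ *	diag_mux_register(DIAG_MUX_LOCAL, 0, &ops);
+ *	diag_mux_write(DIAG_MUX_LOCAL, buf, len, ctx);
+ */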
+#endif
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
new file mode 100644
index 0000000..ac8a6d0
--- /dev/null
+++ b/drivers/char/diag/diag_usb.c
@@ -0,0 +1,684 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/list.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diag_usb.h"
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diag_ipc_logging.h"
+
+#define DIAG_USB_STRING_SZ 10
+#define DIAG_USB_MAX_SIZE 16384
+
+struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV] = {
+ {
+ .id = DIAG_USB_LOCAL,
+ .name = DIAG_LEGACY,
+ .enabled = 0,
+ .mempool = POOL_TYPE_MUX_APPS,
+ .hdl = NULL,
+ .ops = NULL,
+ .read_buf = NULL,
+ .read_ptr = NULL,
+ .usb_wq = NULL,
+ .read_cnt = 0,
+ .write_cnt = 0,
+ .max_size = DIAG_USB_MAX_SIZE,
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .id = DIAG_USB_MDM,
+ .name = DIAG_MDM,
+ .enabled = 0,
+ .mempool = POOL_TYPE_MDM_MUX,
+ .hdl = NULL,
+ .ops = NULL,
+ .read_buf = NULL,
+ .read_ptr = NULL,
+ .usb_wq = NULL,
+ .read_cnt = 0,
+ .write_cnt = 0,
+ .max_size = DIAG_USB_MAX_SIZE,
+ },
+ {
+ .id = DIAG_USB_MDM2,
+ .name = DIAG_MDM2,
+ .enabled = 0,
+ .mempool = POOL_TYPE_MDM2_MUX,
+ .hdl = NULL,
+ .ops = NULL,
+ .read_buf = NULL,
+ .read_ptr = NULL,
+ .usb_wq = NULL,
+ .read_cnt = 0,
+ .write_cnt = 0,
+ .max_size = DIAG_USB_MAX_SIZE,
+ },
+ {
+ .id = DIAG_USB_QSC,
+ .name = DIAG_QSC,
+ .enabled = 0,
+ .mempool = POOL_TYPE_QSC_MUX,
+ .hdl = NULL,
+ .ops = NULL,
+ .read_buf = NULL,
+ .read_ptr = NULL,
+ .usb_wq = NULL,
+ .read_cnt = 0,
+ .write_cnt = 0,
+ .max_size = DIAG_USB_MAX_SIZE,
+ }
+#endif
+};
+
+static int diag_usb_buf_tbl_add(struct diag_usb_info *usb_info,
+ unsigned char *buf, uint32_t len, int ctxt)
+{
+ struct list_head *start, *temp;
+ struct diag_usb_buf_tbl_t *entry = NULL;
+
+ list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+ entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+ if (entry->buf == buf) {
+ atomic_inc(&entry->ref_count);
+ return 0;
+ }
+ }
+
+ /* New buffer, not found in the list */
+ entry = kzalloc(sizeof(struct diag_usb_buf_tbl_t), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->buf = buf;
+ entry->ctxt = ctxt;
+ entry->len = len;
+ atomic_set(&entry->ref_count, 1);
+ INIT_LIST_HEAD(&entry->track);
+ list_add_tail(&entry->track, &usb_info->buf_tbl);
+
+ return 0;
+}
+
+static void diag_usb_buf_tbl_remove(struct diag_usb_info *usb_info,
+ unsigned char *buf)
+{
+ struct list_head *start, *temp;
+ struct diag_usb_buf_tbl_t *entry = NULL;
+
+ list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+ entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+ if (entry->buf == buf) {
+ DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+ atomic_dec(&entry->ref_count);
+ /*
+ * Remove reference from the table if it is the
+ * only instance of the buffer
+ */
+ if (atomic_read(&entry->ref_count) == 0)
+ list_del(&entry->track);
+ break;
+ }
+ }
+}
+
+static struct diag_usb_buf_tbl_t *diag_usb_buf_tbl_get(
+ struct diag_usb_info *usb_info,
+ unsigned char *buf)
+{
+ struct list_head *start, *temp;
+ struct diag_usb_buf_tbl_t *entry = NULL;
+
+ list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+ entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+ if (entry->buf == buf) {
+ DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+ atomic_dec(&entry->ref_count);
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * This function is called asynchronously when USB is connected and
+ * synchronously when Diag wants to connect to USB explicitly.
+ */
+static void usb_connect(struct diag_usb_info *ch)
+{
+ int err = 0;
+ int num_write = 0;
+ int num_read = 1; /* Only one read buffer for any USB channel */
+
+ if (!ch || !atomic_read(&ch->connected))
+ return;
+
+ num_write = diag_mempools[ch->mempool].poolsize;
+ err = usb_diag_alloc_req(ch->hdl, num_write, num_read);
+ if (err) {
+ pr_err("diag: Unable to allocate usb requests for %s, write: %d read: %d, err: %d\n",
+ ch->name, num_write, num_read, err);
+ return;
+ }
+
+ if (ch->ops && ch->ops->open) {
+ if (atomic_read(&ch->diag_state)) {
+ ch->ops->open(ch->ctxt, DIAG_USB_MODE);
+ } else {
+ /*
+ * This case indicates that the USB is connected
+ * but the logging is still happening in MEMORY
+ * DEVICE MODE. Continue the logging without
+ * resetting the buffers.
+ */
+ }
+ }
+ /* As soon as we open the channel, queue a read */
+ queue_work(ch->usb_wq, &(ch->read_work));
+}
+
+static void usb_connect_work_fn(struct work_struct *work)
+{
+ struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+ connect_work);
+ usb_connect(ch);
+}
+
+/*
+ * This function is called asynchronously when USB is disconnected
+ * and synchronously when Diag wants to disconnect from USB
+ * explicitly.
+ */
+static void usb_disconnect(struct diag_usb_info *ch)
+{
+ if (!ch)
+ return;
+
+ if (!atomic_read(&ch->connected) && driver->usb_connected)
+ diag_clear_masks(NULL);
+
+	if (ch->ops && ch->ops->close)
+ ch->ops->close(ch->ctxt, DIAG_USB_MODE);
+}
+
+static void usb_disconnect_work_fn(struct work_struct *work)
+{
+ struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+ disconnect_work);
+ usb_disconnect(ch);
+}
+
+static void usb_read_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ unsigned long flags;
+ struct diag_request *req = NULL;
+ struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+ read_work);
+
+	if (!ch)
+ return;
+
+ if (!atomic_read(&ch->connected) || !ch->enabled ||
+ atomic_read(&ch->read_pending) || !atomic_read(&ch->diag_state)) {
+ pr_debug_ratelimited("diag: Discarding USB read, ch: %s e: %d, c: %d, p: %d, d: %d\n",
+ ch->name, ch->enabled,
+ atomic_read(&ch->connected),
+ atomic_read(&ch->read_pending),
+ atomic_read(&ch->diag_state));
+ return;
+ }
+
+ spin_lock_irqsave(&ch->lock, flags);
+ req = ch->read_ptr;
+ if (req) {
+ atomic_set(&ch->read_pending, 1);
+ req->buf = ch->read_buf;
+ req->length = USB_MAX_OUT_BUF;
+ err = usb_diag_read(ch->hdl, req);
+ if (err) {
+ pr_debug("diag: In %s, error in reading from USB %s, err: %d\n",
+ __func__, ch->name, err);
+ atomic_set(&ch->read_pending, 0);
+ queue_work(ch->usb_wq, &(ch->read_work));
+ }
+ } else {
+ pr_err_ratelimited("diag: In %s invalid read req\n", __func__);
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+static void usb_read_done_work_fn(struct work_struct *work)
+{
+ struct diag_request *req = NULL;
+ struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+ read_done_work);
+
+	if (!ch)
+ return;
+
+ /*
+ * USB is disconnected/Disabled before the previous read completed.
+ * Discard the packet and don't do any further processing.
+ */
+ if (!atomic_read(&ch->connected) || !ch->enabled ||
+ !atomic_read(&ch->diag_state))
+ return;
+
+ req = ch->read_ptr;
+ ch->read_cnt++;
+
+ if (ch->ops && ch->ops->read_done && req->status >= 0)
+ ch->ops->read_done(req->buf, req->actual, ch->ctxt);
+}
+
+static void diag_usb_write_done(struct diag_usb_info *ch,
+ struct diag_request *req)
+{
+ int ctxt = 0;
+ int len = 0;
+ struct diag_usb_buf_tbl_t *entry = NULL;
+ unsigned char *buf = NULL;
+ unsigned long flags;
+
+ if (!ch || !req)
+ return;
+
+ ch->write_cnt++;
+ entry = diag_usb_buf_tbl_get(ch, req->context);
+ if (!entry) {
+ pr_err_ratelimited("diag: In %s, unable to find entry %pK in the table\n",
+ __func__, req->context);
+ return;
+ }
+ if (atomic_read(&entry->ref_count) != 0) {
+ DIAG_LOG(DIAG_DEBUG_MUX, "partial write_done ref %d\n",
+ atomic_read(&entry->ref_count));
+ diag_ws_on_copy_complete(DIAG_WS_MUX);
+ diagmem_free(driver, req, ch->mempool);
+ return;
+ }
+ DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %d\n",
+ ctxt);
+ spin_lock_irqsave(&ch->write_lock, flags);
+ list_del(&entry->track);
+ ctxt = entry->ctxt;
+ buf = entry->buf;
+ len = entry->len;
+ kfree(entry);
+ diag_ws_on_copy_complete(DIAG_WS_MUX);
+
+ if (ch->ops && ch->ops->write_done)
+ ch->ops->write_done(buf, len, ctxt, DIAG_USB_MODE);
+ buf = NULL;
+ len = 0;
+ ctxt = 0;
+ spin_unlock_irqrestore(&ch->write_lock, flags);
+ diagmem_free(driver, req, ch->mempool);
+}
+
+static void diag_usb_notifier(void *priv, unsigned int event,
+ struct diag_request *d_req)
+{
+ int id = 0;
+ unsigned long flags;
+ struct diag_usb_info *usb_info = NULL;
+
+ id = (int)(uintptr_t)priv;
+ if (id < 0 || id >= NUM_DIAG_USB_DEV)
+ return;
+ usb_info = &diag_usb[id];
+
+ switch (event) {
+ case USB_DIAG_CONNECT:
+ usb_info->max_size = usb_diag_request_size(usb_info->hdl);
+ atomic_set(&usb_info->connected, 1);
+ pr_info("diag: USB channel %s connected\n", usb_info->name);
+ queue_work(usb_info->usb_wq,
+ &usb_info->connect_work);
+ break;
+ case USB_DIAG_DISCONNECT:
+ atomic_set(&usb_info->connected, 0);
+ pr_info("diag: USB channel %s disconnected\n", usb_info->name);
+ queue_work(usb_info->usb_wq,
+ &usb_info->disconnect_work);
+ break;
+ case USB_DIAG_READ_DONE:
+ spin_lock_irqsave(&usb_info->lock, flags);
+ usb_info->read_ptr = d_req;
+ spin_unlock_irqrestore(&usb_info->lock, flags);
+ atomic_set(&usb_info->read_pending, 0);
+ queue_work(usb_info->usb_wq,
+ &usb_info->read_done_work);
+ break;
+ case USB_DIAG_WRITE_DONE:
+ diag_usb_write_done(usb_info, d_req);
+ break;
+ default:
+ pr_err_ratelimited("diag: Unknown event from USB diag\n");
+ break;
+ }
+}
+
+int diag_usb_queue_read(int id)
+{
+ if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+ pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+ queue_work(diag_usb[id].usb_wq, &(diag_usb[id].read_work));
+ return 0;
+}
+
+static int diag_usb_write_ext(struct diag_usb_info *usb_info,
+ unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+ int write_len = 0;
+ int bytes_remaining = len;
+ int offset = 0;
+ unsigned long flags;
+ struct diag_request *req = NULL;
+
+ if (!usb_info || !buf || len <= 0) {
+ pr_err_ratelimited("diag: In %s, usb_info: %pK buf: %pK, len: %d\n",
+ __func__, usb_info, buf, len);
+ return -EINVAL;
+ }
+
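+	/*
+	 * Payloads larger than max_size are split into max_size
+	 * chunks; each chunk gets its own diag_request, and the
+	 * buffer table entry holds one reference per outstanding
+	 * chunk so write_done fires only after the last chunk.
+	 */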
+ spin_lock_irqsave(&usb_info->write_lock, flags);
+ while (bytes_remaining > 0) {
+ req = diagmem_alloc(driver, sizeof(struct diag_request),
+ usb_info->mempool);
+ if (!req) {
+ /*
+ * This should never happen. It either means that we are
+ * trying to write more buffers than the max supported
+			 * by this particular diag USB channel at any given
+ * instance, or the previous write ptrs are stuck in
+ * the USB layer.
+ */
+ pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
+ __func__, usb_info->name);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return -ENOMEM;
+ }
+
+ write_len = (bytes_remaining > usb_info->max_size) ?
+ usb_info->max_size : (bytes_remaining);
+
+ req->buf = buf + offset;
+ req->length = write_len;
+ req->context = (void *)buf;
+
+ if (!usb_info->hdl || !atomic_read(&usb_info->connected) ||
+ !atomic_read(&usb_info->diag_state)) {
+ pr_debug_ratelimited("diag: USB ch %s is not connected\n",
+ usb_info->name);
+ diagmem_free(driver, req, usb_info->mempool);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return -ENODEV;
+ }
+
+ if (diag_usb_buf_tbl_add(usb_info, buf, len, ctxt)) {
+ diagmem_free(driver, req, usb_info->mempool);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return -ENOMEM;
+ }
+
+ diag_ws_on_read(DIAG_WS_MUX, len);
+ err = usb_diag_write(usb_info->hdl, req);
+ diag_ws_on_copy(DIAG_WS_MUX);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
+ __func__, usb_info->name, err);
+ DIAG_LOG(DIAG_DEBUG_MUX,
+ "ERR! unable to write t usb, err: %d\n", err);
+ diag_ws_on_copy_fail(DIAG_WS_MUX);
+ diag_usb_buf_tbl_remove(usb_info, buf);
+ diagmem_free(driver, req, usb_info->mempool);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return err;
+ }
+ offset += write_len;
+ bytes_remaining -= write_len;
+ DIAG_LOG(DIAG_DEBUG_MUX,
+ "bytes_remaining: %d write_len: %d, len: %d\n",
+ bytes_remaining, write_len, len);
+ }
+ DIAG_LOG(DIAG_DEBUG_MUX, "done writing!");
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+
+ return 0;
+}
+
+int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+ struct diag_request *req = NULL;
+ struct diag_usb_info *usb_info = NULL;
+ unsigned long flags;
+
+ if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+ pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+
+ usb_info = &diag_usb[id];
+
+ if (len > usb_info->max_size) {
+ DIAG_LOG(DIAG_DEBUG_MUX, "len: %d, max_size: %d\n",
+ len, usb_info->max_size);
+ return diag_usb_write_ext(usb_info, buf, len, ctxt);
+ }
+
+ req = diagmem_alloc(driver, sizeof(struct diag_request),
+ usb_info->mempool);
+ if (!req) {
+ /*
+ * This should never happen. It either means that we are
+ * trying to write more buffers than the max supported by
+		 * this particular diag USB channel at any given instance,
+ * or the previous write ptrs are stuck in the USB layer.
+ */
+ pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
+ __func__, usb_info->name);
+ return -ENOMEM;
+ }
+
+ req->buf = buf;
+ req->length = len;
+ req->context = (void *)buf;
+
+ if (!usb_info->hdl || !atomic_read(&usb_info->connected) ||
+ !atomic_read(&usb_info->diag_state)) {
+ pr_debug_ratelimited("diag: USB ch %s is not connected\n",
+ usb_info->name);
+ diagmem_free(driver, req, usb_info->mempool);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&usb_info->write_lock, flags);
+ if (diag_usb_buf_tbl_add(usb_info, buf, len, ctxt)) {
+ DIAG_LOG(DIAG_DEBUG_MUX,
+ "ERR! unable to add buf %pK to table\n",
+ buf);
+ diagmem_free(driver, req, usb_info->mempool);
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+ return -ENOMEM;
+ }
+
+ diag_ws_on_read(DIAG_WS_MUX, len);
+ err = usb_diag_write(usb_info->hdl, req);
+ diag_ws_on_copy(DIAG_WS_MUX);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
+ __func__, usb_info->name, err);
+ diag_ws_on_copy_fail(DIAG_WS_MUX);
+ DIAG_LOG(DIAG_DEBUG_MUX,
+ "ERR! unable to write t usb, err: %d\n", err);
+ diag_usb_buf_tbl_remove(usb_info, buf);
+ diagmem_free(driver, req, usb_info->mempool);
+ }
+ spin_unlock_irqrestore(&usb_info->write_lock, flags);
+
+ return err;
+}
+
+/*
+ * This function performs USB connect operations for Diag synchronously. It
+ * doesn't translate to an actual USB connect. This is used when Diag switches
+ * logging to USB mode and wants to mimic a USB connection.
+ */
+void diag_usb_connect_all(void)
+{
+ int i = 0;
+ struct diag_usb_info *usb_info = NULL;
+
+ for (i = 0; i < NUM_DIAG_USB_DEV; i++) {
+ usb_info = &diag_usb[i];
+ if (!usb_info->enabled)
+ continue;
+ atomic_set(&usb_info->diag_state, 1);
+ usb_connect(usb_info);
+ }
+}
+
+/*
+ * This function performs USB disconnect operations for Diag synchronously.
+ * It doesn't translate to an actual USB disconnect. This is used when Diag
+ * switches logging away from USB mode and wants to mimic a USB disconnect.
+ */
+void diag_usb_disconnect_all(void)
+{
+ int i = 0;
+ struct diag_usb_info *usb_info = NULL;
+
+ for (i = 0; i < NUM_DIAG_USB_DEV; i++) {
+ usb_info = &diag_usb[i];
+ if (!usb_info->enabled)
+ continue;
+ atomic_set(&usb_info->diag_state, 0);
+ usb_disconnect(usb_info);
+ }
+}
+
+int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
+{
+ struct diag_usb_info *ch = NULL;
+ unsigned char wq_name[DIAG_USB_NAME_SZ + DIAG_USB_STRING_SZ];
+
+ if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+ pr_err("diag: Unable to register with USB, id: %d\n", id);
+ return -EIO;
+ }
+
+ if (!ops) {
+ pr_err("diag: Invalid operations for USB\n");
+ return -EIO;
+ }
+
+ ch = &diag_usb[id];
+ ch->ops = ops;
+ ch->ctxt = ctxt;
+ spin_lock_init(&ch->lock);
+ spin_lock_init(&ch->write_lock);
+ ch->read_buf = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL);
+ if (!ch->read_buf)
+ goto err;
+ ch->read_ptr = kzalloc(sizeof(struct diag_request), GFP_KERNEL);
+ if (!ch->read_ptr)
+ goto err;
+ atomic_set(&ch->connected, 0);
+ atomic_set(&ch->read_pending, 0);
+ /*
+ * This function is called when the mux registers with Diag-USB.
+ * The registration happens during boot up and Diag always starts
+ * in USB mode. Set the state to 1.
+ */
+ atomic_set(&ch->diag_state, 1);
+ INIT_LIST_HEAD(&ch->buf_tbl);
+ diagmem_init(driver, ch->mempool);
+ INIT_WORK(&(ch->read_work), usb_read_work_fn);
+ INIT_WORK(&(ch->read_done_work), usb_read_done_work_fn);
+ INIT_WORK(&(ch->connect_work), usb_connect_work_fn);
+ INIT_WORK(&(ch->disconnect_work), usb_disconnect_work_fn);
+ strlcpy(wq_name, "DIAG_USB_", DIAG_USB_STRING_SZ);
+	strlcat(wq_name, ch->name, sizeof(wq_name));
+ ch->usb_wq = create_singlethread_workqueue(wq_name);
+ if (!ch->usb_wq)
+ goto err;
+ ch->hdl = usb_diag_open(ch->name, (void *)(uintptr_t)id,
+ diag_usb_notifier);
+ if (IS_ERR(ch->hdl)) {
+ pr_err("diag: Unable to open USB channel %s\n", ch->name);
+ goto err;
+ }
+ ch->enabled = 1;
+ pr_debug("diag: Successfully registered USB %s\n", ch->name);
+ return 0;
+
+err:
+ if (ch->usb_wq)
+ destroy_workqueue(ch->usb_wq);
+ kfree(ch->read_ptr);
+ kfree(ch->read_buf);
+ return -ENOMEM;
+}
+
+void diag_usb_exit(int id)
+{
+ struct diag_usb_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+ pr_err("diag: In %s, incorrect id %d\n", __func__, id);
+ return;
+ }
+
+ ch = &diag_usb[id];
+ ch->ops = NULL;
+ atomic_set(&ch->connected, 0);
+ atomic_set(&ch->read_pending, 0);
+ atomic_set(&ch->diag_state, 0);
+ ch->enabled = 0;
+ ch->ctxt = 0;
+ ch->read_cnt = 0;
+ ch->write_cnt = 0;
+ diagmem_exit(driver, ch->mempool);
+ ch->mempool = 0;
+ if (ch->hdl) {
+ usb_diag_close(ch->hdl);
+ ch->hdl = NULL;
+ }
+ if (ch->usb_wq)
+ destroy_workqueue(ch->usb_wq);
+ kfree(ch->read_ptr);
+ ch->read_ptr = NULL;
+ kfree(ch->read_buf);
+ ch->read_buf = NULL;
+}
+
diff --git a/drivers/char/diag/diag_usb.h b/drivers/char/diag/diag_usb.h
new file mode 100644
index 0000000..62ed7b3
--- /dev/null
+++ b/drivers/char/diag/diag_usb.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGUSB_H
+#define DIAGUSB_H
+
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar.h"
+#include "diag_mux.h"
+
+#define DIAG_USB_LOCAL 0
+#define DIAG_USB_LOCAL_LAST 1
+#define DIAG_USB_BRIDGE_BASE DIAG_USB_LOCAL_LAST
+#define DIAG_USB_MDM (DIAG_USB_BRIDGE_BASE)
+#define DIAG_USB_MDM2 (DIAG_USB_BRIDGE_BASE + 1)
+#define DIAG_USB_QSC (DIAG_USB_BRIDGE_BASE + 2)
+#define DIAG_USB_BRIDGE_LAST (DIAG_USB_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_USB_DEV DIAG_USB_LOCAL_LAST
+#else
+#define NUM_DIAG_USB_DEV DIAG_USB_BRIDGE_LAST
+#endif
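+/*
+ * Illustration (derived from the defines above): without the bridge,
+ * NUM_DIAG_USB_DEV resolves to DIAG_USB_LOCAL_LAST (1), so only the
+ * local channel exists. With CONFIG_DIAGFWD_BRIDGE_CODE it resolves to
+ * DIAG_USB_BRIDGE_LAST (4), adding the MDM, MDM2 and QSC channels.
+ */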
+
+#define DIAG_USB_NAME_SZ 24
+#define DIAG_USB_GET_NAME(x) (diag_usb[x].name)
+
+#define DIAG_USB_MODE 0
+
+struct diag_usb_buf_tbl_t {
+ struct list_head track;
+ unsigned char *buf;
+ uint32_t len;
+ atomic_t ref_count;
+ int ctxt;
+};
+
+struct diag_usb_info {
+ int id;
+ int ctxt;
+ char name[DIAG_USB_NAME_SZ];
+ atomic_t connected;
+ atomic_t diag_state;
+ atomic_t read_pending;
+ int enabled;
+ int mempool;
+ int max_size;
+ struct list_head buf_tbl;
+ unsigned long read_cnt;
+ unsigned long write_cnt;
+ spinlock_t lock;
+ spinlock_t write_lock;
+ struct usb_diag_ch *hdl;
+ struct diag_mux_ops *ops;
+ unsigned char *read_buf;
+ struct diag_request *read_ptr;
+ struct work_struct read_work;
+ struct work_struct read_done_work;
+ struct work_struct connect_work;
+ struct work_struct disconnect_work;
+ struct workqueue_struct *usb_wq;
+};
+
+#ifdef CONFIG_DIAG_OVER_USB
+extern struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV];
+int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops);
+int diag_usb_queue_read(int id);
+int diag_usb_write(int id, unsigned char *buf, int len, int ctxt);
+void diag_usb_connect_all(void);
+void diag_usb_disconnect_all(void);
+void diag_usb_exit(int id);
+#else
+static inline int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
+{
+	return 0;
+}
+static inline int diag_usb_queue_read(int id)
+{
+	return 0;
+}
+static inline int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	return 0;
+}
+static inline void diag_usb_connect_all(void)
+{
+}
+static inline void diag_usb_disconnect_all(void)
+{
+}
+static inline void diag_usb_exit(int id)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
new file mode 100644
index 0000000..768eb62
--- /dev/null
+++ b/drivers/char/diag/diagchar.h
@@ -0,0 +1,638 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_H
+#define DIAGCHAR_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wakelock.h>
+#include <linux/atomic.h>
+#include "diagfwd_bridge.h"
+
+/* Size of the USB buffers used for read and write */
+#define USB_MAX_OUT_BUF 4096
+#define APPS_BUF_SIZE 4096
+#define IN_BUF_SIZE 16384
+#define MAX_SYNC_OBJ_NAME_SIZE 32
+
+#define DIAG_MAX_REQ_SIZE (16 * 1024)
+#define DIAG_MAX_RSP_SIZE (16 * 1024)
+#define APF_DIAG_PADDING 256
+/*
+ * In the worst case, the HDLC buffer can be at most twice the size of the
+ * original packet. Add 3 bytes for the 16-bit CRC (2 bytes) and a delimiter
+ * (1 byte).
+ */
+#define DIAG_MAX_HDLC_BUF_SIZE ((DIAG_MAX_REQ_SIZE * 2) + 3)
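+/*
+ * Worked example (illustrative): with DIAG_MAX_REQ_SIZE = 16 * 1024, a
+ * request whose every byte needs HDLC escaping doubles to 32768 bytes;
+ * the 2 CRC bytes and 1 delimiter byte bring the bound to
+ * DIAG_MAX_HDLC_BUF_SIZE = 32771.
+ */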
+
+/* The header of the callback data type carries a remote processor token (of type int) */
+#define CALLBACK_HDR_SIZE (sizeof(int))
+#define CALLBACK_BUF_SIZE (DIAG_MAX_REQ_SIZE + CALLBACK_HDR_SIZE)
+
+#define MAX_SSID_PER_RANGE 200
+
+#define ALL_PROC -1
+
+#define REMOTE_DATA 4
+
+#define USER_SPACE_DATA 16384
+
+#define DIAG_CTRL_MSG_LOG_MASK 9
+#define DIAG_CTRL_MSG_EVENT_MASK 10
+#define DIAG_CTRL_MSG_F3_MASK 11
+#define CONTROL_CHAR 0x7E
+
+#define DIAG_CON_APSS (0x0001) /* Bit mask for APSS */
+#define DIAG_CON_MPSS (0x0002) /* Bit mask for MPSS */
+#define DIAG_CON_LPASS (0x0004) /* Bit mask for LPASS */
+#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
+#define DIAG_CON_SENSORS (0x0010) /* Bit mask for Sensors */
+#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
+
+#define DIAG_CON_NONE (0x0000) /* Bit mask for No SS*/
+#define DIAG_CON_ALL (DIAG_CON_APSS | DIAG_CON_MPSS \
+ | DIAG_CON_LPASS | DIAG_CON_WCNSS \
+ | DIAG_CON_SENSORS | DIAG_CON_WDSP)
+
+#define DIAG_STM_MODEM 0x01
+#define DIAG_STM_LPASS 0x02
+#define DIAG_STM_WCNSS 0x04
+#define DIAG_STM_APPS 0x08
+#define DIAG_STM_SENSORS 0x10
+#define DIAG_STM_WDSP 0x20
+
+#define INVALID_PID -1
+#define DIAG_CMD_FOUND 1
+#define DIAG_CMD_NOT_FOUND 0
+#define DIAG_CMD_POLLING 1
+#define DIAG_CMD_NOT_POLLING 0
+#define DIAG_CMD_ADD 1
+#define DIAG_CMD_REMOVE 0
+
+#define DIAG_CMD_VERSION 0
+#define DIAG_CMD_ERROR 0x13
+#define DIAG_CMD_DOWNLOAD 0x3A
+#define DIAG_CMD_DIAG_SUBSYS 0x4B
+#define DIAG_CMD_LOG_CONFIG 0x73
+#define DIAG_CMD_LOG_ON_DMND 0x78
+#define DIAG_CMD_EXT_BUILD 0x7c
+#define DIAG_CMD_MSG_CONFIG 0x7D
+#define DIAG_CMD_GET_EVENT_MASK 0x81
+#define DIAG_CMD_SET_EVENT_MASK 0x82
+#define DIAG_CMD_EVENT_TOGGLE 0x60
+#define DIAG_CMD_NO_SUBSYS 0xFF
+#define DIAG_CMD_STATUS 0x0C
+#define DIAG_SS_WCDMA 0x04
+#define DIAG_CMD_QUERY_CALL 0x0E
+#define DIAG_SS_GSM 0x08
+#define DIAG_CMD_QUERY_TMC 0x02
+#define DIAG_SS_TDSCDMA 0x57
+#define DIAG_CMD_TDSCDMA_STATUS 0x0E
+#define DIAG_CMD_DIAG_SUBSYS_DELAY 0x80
+
+#define DIAG_SS_DIAG 0x12
+#define DIAG_SS_PARAMS 0x32
+#define DIAG_SS_FILE_READ_MODEM 0x0816
+#define DIAG_SS_FILE_READ_ADSP 0x0E10
+#define DIAG_SS_FILE_READ_WCNSS 0x141F
+#define DIAG_SS_FILE_READ_SLPI 0x01A18
+#define DIAG_SS_FILE_READ_APPS 0x020F
+
+#define DIAG_DIAG_MAX_PKT_SZ 0x55
+#define DIAG_DIAG_STM 0x214
+#define DIAG_DIAG_POLL 0x03
+#define DIAG_DEL_RSP_WRAP 0x04
+#define DIAG_DEL_RSP_WRAP_CNT 0x05
+#define DIAG_EXT_MOBILE_ID 0x06
+#define DIAG_GET_TIME_API 0x21B
+#define DIAG_SET_TIME_API 0x21C
+#define DIAG_SWITCH_COMMAND 0x081B
+#define DIAG_BUFFERING_MODE 0x080C
+
+#define DIAG_CMD_OP_LOG_DISABLE 0
+#define DIAG_CMD_OP_GET_LOG_RANGE 1
+#define DIAG_CMD_OP_SET_LOG_MASK 3
+#define DIAG_CMD_OP_GET_LOG_MASK 4
+
+#define DIAG_CMD_OP_GET_SSID_RANGE 1
+#define DIAG_CMD_OP_GET_BUILD_MASK 2
+#define DIAG_CMD_OP_GET_MSG_MASK 3
+#define DIAG_CMD_OP_SET_MSG_MASK 4
+#define DIAG_CMD_OP_SET_ALL_MSG_MASK 5
+
+#define DIAG_CMD_OP_GET_MSG_ALLOC 0x33
+#define DIAG_CMD_OP_GET_MSG_DROP 0x30
+#define DIAG_CMD_OP_RESET_MSG_STATS 0x2F
+#define DIAG_CMD_OP_GET_LOG_ALLOC 0x31
+#define DIAG_CMD_OP_GET_LOG_DROP 0x2C
+#define DIAG_CMD_OP_RESET_LOG_STATS 0x2B
+#define DIAG_CMD_OP_GET_EVENT_ALLOC 0x32
+#define DIAG_CMD_OP_GET_EVENT_DROP 0x2E
+#define DIAG_CMD_OP_RESET_EVENT_STATS 0x2D
+
+#define DIAG_CMD_OP_HDLC_DISABLE 0x218
+
+#define BAD_PARAM_RESPONSE_MESSAGE 20
+
+#define PERSIST_TIME_SUCCESS 0
+#define PERSIST_TIME_FAILURE 1
+#define PERSIST_TIME_NOT_SUPPORTED 2
+
+#define MODE_CMD 41
+#define RESET_ID 2
+
+#define PKT_DROP 0
+#define PKT_ALLOC 1
+#define PKT_RESET 2
+
+#define FEATURE_MASK_LEN 2
+
+#define DIAG_MD_NONE 0
+#define DIAG_MD_PERIPHERAL 1
+
+/*
+ * The status bit masks when received in a signal handler are to be
+ * used in conjunction with the peripheral list bit mask to determine the
+ * status for a peripheral. For instance, 0x00010002 would denote an open
+ * status on the MPSS
+ */
+#define DIAG_STATUS_OPEN (0x00010000) /* DCI channel open status mask */
+#define DIAG_STATUS_CLOSED (0x00020000) /* DCI channel closed status mask */
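+/*
+ * Example (matching the comment above): a status word of 0x00010002 is
+ * DIAG_STATUS_OPEN combined with the MPSS bit (cf. DIAG_CON_MPSS), i.e.
+ * the DCI channel to the MPSS is open.
+ */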
+
+#define MODE_NONREALTIME 0
+#define MODE_REALTIME 1
+#define MODE_UNKNOWN 2
+
+#define DIAG_BUFFERING_MODE_STREAMING 0
+#define DIAG_BUFFERING_MODE_THRESHOLD 1
+#define DIAG_BUFFERING_MODE_CIRCULAR 2
+
+#define DIAG_MIN_WM_VAL 0
+#define DIAG_MAX_WM_VAL 100
+
+#define DEFAULT_LOW_WM_VAL 15
+#define DEFAULT_HIGH_WM_VAL 85
+
+#define TYPE_DATA 0
+#define TYPE_CNTL 1
+#define TYPE_DCI 2
+#define TYPE_CMD 3
+#define TYPE_DCI_CMD 4
+#define NUM_TYPES 5
+
+#define PERIPHERAL_MODEM 0
+#define PERIPHERAL_LPASS 1
+#define PERIPHERAL_WCNSS 2
+#define PERIPHERAL_SENSORS 3
+#define PERIPHERAL_WDSP 4
+#define NUM_PERIPHERALS 5
+#define APPS_DATA (NUM_PERIPHERALS)
+
+/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
+#define NUM_MD_SESSIONS (NUM_PERIPHERALS + 1)
+
+#define MD_PERIPHERAL_MASK(x) (1 << x)
+
+/*
+ * Number of STM processors includes all the peripherals and the apps
+ * processor. Added 1 below to account for apps.
+ */
+#define NUM_STM_PROCESSORS (NUM_PERIPHERALS + 1)
+/*
+ * Indicates the number of processors that can support DCI: all the
+ * peripherals plus the apps processor. This is an upper bound; it
+ * doesn't mean that a given peripheral actually has the feature.
+ */
+#define NUM_DCI_PERIPHERALS (NUM_PERIPHERALS + 1)
+
+#define DIAG_PROC_DCI 1
+#define DIAG_PROC_MEMORY_DEVICE 2
+
+/* Flags to vote the DCI or Memory device process up or down
+ * when it becomes active or inactive.
+ */
+#define VOTE_DOWN 0
+#define VOTE_UP 1
+
+#define DIAG_TS_SIZE 50
+
+#define DIAG_MDM_BUF_SIZE 2048
+/* The Maximum request size is 2k + DCI header + footer (6 bytes) */
+#define DIAG_MDM_DCI_BUF_SIZE (2048 + 6)
+
+#define DIAG_LOCAL_PROC 0
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Local Processor only */
+#define DIAG_NUM_PROC 1
+#else
+/* Local Processor + Remote Devices */
+#define DIAG_NUM_PROC (1 + NUM_REMOTE_DEV)
+#endif
+
+#define DIAG_WS_DCI 0
+#define DIAG_WS_MUX 1
+
+#define DIAG_DATA_TYPE 1
+#define DIAG_CNTL_TYPE 2
+#define DIAG_DCI_TYPE 3
+
+/* List of remote processor supported */
+enum remote_procs {
+ MDM = 1,
+ MDM2 = 2,
+ QSC = 5,
+};
+
+struct diag_pkt_header_t {
+ uint8_t cmd_code;
+ uint8_t subsys_id;
+ uint16_t subsys_cmd_code;
+} __packed;
+
+struct diag_cmd_ext_mobile_rsp_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+ uint8_t padding[3];
+ uint32_t family;
+ uint32_t chip_id;
+} __packed;
+
+struct diag_cmd_time_sync_query_req_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+};
+
+struct diag_cmd_time_sync_query_rsp_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+ uint8_t time_api;
+};
+
+struct diag_cmd_time_sync_switch_req_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+ uint8_t time_api;
+ uint8_t persist_time;
+};
+
+struct diag_cmd_time_sync_switch_rsp_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+ uint8_t time_api;
+ uint8_t time_api_status;
+ uint8_t persist_time_status;
+};
+
+struct diag_cmd_reg_entry_t {
+ uint16_t cmd_code;
+ uint16_t subsys_id;
+ uint16_t cmd_code_lo;
+ uint16_t cmd_code_hi;
+} __packed;
+
+struct diag_cmd_reg_t {
+ struct list_head link;
+ struct diag_cmd_reg_entry_t entry;
+ uint8_t proc;
+ int pid;
+};
+
+/*
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of registration entries in @entries
+ * @entries: the actual packet registrations
+ */
+struct diag_cmd_reg_tbl_t {
+ char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+ uint32_t count;
+ struct diag_cmd_reg_entry_t *entries;
+};
+
+struct diag_client_map {
+ char name[20];
+ int pid;
+};
+
+struct real_time_vote_t {
+ int client_id;
+ uint16_t proc;
+ uint8_t real_time_vote;
+} __packed;
+
+struct real_time_query_t {
+ int real_time;
+ int proc;
+} __packed;
+
+struct diag_buffering_mode_t {
+ uint8_t peripheral;
+ uint8_t mode;
+ uint8_t high_wm_val;
+ uint8_t low_wm_val;
+} __packed;
+
+struct diag_callback_reg_t {
+ int proc;
+} __packed;
+
+struct diag_ws_ref_t {
+ int ref_count;
+ int copy_count;
+ spinlock_t lock;
+};
+
+/* This structure is defined in the USB header file */
+#ifndef CONFIG_DIAG_OVER_USB
+struct diag_request {
+ char *buf;
+ int length;
+ int actual;
+ int status;
+ void *context;
+};
+#endif
+
+struct diag_pkt_stats_t {
+ uint32_t alloc_count;
+ uint32_t drop_count;
+};
+
+struct diag_cmd_stats_rsp_t {
+ struct diag_pkt_header_t header;
+ uint32_t payload;
+};
+
+struct diag_cmd_hdlc_disable_rsp_t {
+ struct diag_pkt_header_t header;
+ uint8_t framing_version;
+ uint8_t result;
+};
+
+struct diag_pkt_frame_t {
+ uint8_t start;
+ uint8_t version;
+ uint16_t length;
+};
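+/*
+ * Layout note (sketch): in a non-HDLC frame the payload follows this
+ * 4-byte header (start + version + 2-byte length), which matches the
+ * "buf + 4" header skip in diag_send_raw_data_remote() when HDLC is
+ * disabled.
+ */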
+
+struct diag_partial_pkt_t {
+ uint32_t total_len;
+ uint32_t read_len;
+ uint32_t remaining;
+ uint32_t capacity;
+ uint8_t processing;
+ unsigned char *data;
+} __packed;
+
+struct diag_logging_mode_param_t {
+ uint32_t req_mode;
+ uint32_t peripheral_mask;
+ uint8_t mode_param;
+} __packed;
+
+struct diag_md_session_t {
+ int pid;
+ int peripheral_mask;
+ uint8_t hdlc_disabled;
+ struct timer_list hdlc_reset_timer;
+ struct diag_mask_info *msg_mask;
+ struct diag_mask_info *log_mask;
+ struct diag_mask_info *event_mask;
+ struct task_struct *task;
+};
+
+/*
+ * High level structure for storing Diag masks.
+ *
+ * @ptr: Pointer to the buffer that stores the masks
+ * @mask_len: Length of the buffer pointed to by ptr
+ * @update_buf: Buffer for performing mask updates to peripherals
+ * @update_buf_len: Length of the buffer pointed to by update_buf
+ * @status: status of the mask - all enabled, disabled, or valid
+ * @lock: To protect access to the mask variables
+ */
+struct diag_mask_info {
+ uint8_t *ptr;
+ int mask_len;
+ uint8_t *update_buf;
+ int update_buf_len;
+ uint8_t status;
+ struct mutex lock;
+};
+
+struct diag_md_proc_info {
+ int pid;
+ struct task_struct *socket_process;
+ struct task_struct *callback_process;
+ struct task_struct *mdlog_process;
+};
+
+struct diag_feature_t {
+ uint8_t feature_mask[FEATURE_MASK_LEN];
+ uint8_t rcvd_feature_mask;
+ uint8_t log_on_demand;
+ uint8_t separate_cmd_rsp;
+ uint8_t encode_hdlc;
+ uint8_t peripheral_buffering;
+ uint8_t mask_centralization;
+ uint8_t stm_support;
+ uint8_t sockets_enabled;
+ uint8_t sent_feature_mask;
+};
+
+struct diagchar_dev {
+
+ /* State for the char driver */
+ unsigned int major;
+ unsigned int minor_start;
+ int num;
+ struct cdev *cdev;
+ char *name;
+ struct class *diagchar_class;
+ struct device *diag_dev;
+ int ref_count;
+ int mask_clear;
+ struct mutex diag_maskclear_mutex;
+ struct mutex diagchar_mutex;
+ struct mutex diag_file_mutex;
+ wait_queue_head_t wait_q;
+ struct diag_client_map *client_map;
+ int *data_ready;
+ int num_clients;
+ int polling_reg_flag;
+ int use_device_tree;
+ int supports_separate_cmdrsp;
+ int supports_apps_hdlc_encoding;
+ int supports_sockets;
+ /* The state requested in the STM command */
+ int stm_state_requested[NUM_STM_PROCESSORS];
+ /* The current STM state */
+ int stm_state[NUM_STM_PROCESSORS];
+ uint16_t stm_peripheral;
+ struct work_struct stm_update_work;
+ uint16_t mask_update;
+ struct work_struct mask_update_work;
+ uint16_t close_transport;
+ struct work_struct close_transport_work;
+ struct workqueue_struct *cntl_wq;
+ struct mutex cntl_lock;
+ /* Whether or not the peripheral supports STM */
+ /* Delayed response Variables */
+ uint16_t delayed_rsp_id;
+ struct mutex delayed_rsp_mutex;
+ /* DCI related variables */
+ struct list_head dci_req_list;
+ struct list_head dci_client_list;
+ int dci_tag;
+ int dci_client_id;
+ struct mutex dci_mutex;
+ int num_dci_client;
+ unsigned char *apps_dci_buf;
+ int dci_state;
+ struct workqueue_struct *diag_dci_wq;
+ struct list_head cmd_reg_list;
+ struct mutex cmd_reg_mutex;
+ uint32_t cmd_reg_count;
+ struct mutex diagfwd_channel_mutex;
+ /* Sizes that reflect memory pool sizes */
+ unsigned int poolsize;
+ unsigned int poolsize_hdlc;
+ unsigned int poolsize_dci;
+ unsigned int poolsize_user;
+ /* Buffers for masks */
+ struct mutex diag_cntl_mutex;
+ /* Members for Sending response */
+ unsigned char *encoded_rsp_buf;
+ int encoded_rsp_len;
+ uint8_t rsp_buf_busy;
+ spinlock_t rsp_buf_busy_lock;
+ int rsp_buf_ctxt;
+ struct diagfwd_info *diagfwd_data[NUM_PERIPHERALS];
+ struct diagfwd_info *diagfwd_cntl[NUM_PERIPHERALS];
+ struct diagfwd_info *diagfwd_dci[NUM_PERIPHERALS];
+ struct diagfwd_info *diagfwd_cmd[NUM_PERIPHERALS];
+ struct diagfwd_info *diagfwd_dci_cmd[NUM_PERIPHERALS];
+ struct diag_feature_t feature[NUM_PERIPHERALS];
+ struct diag_buffering_mode_t buffering_mode[NUM_PERIPHERALS];
+ uint8_t buffering_flag[NUM_PERIPHERALS];
+ struct mutex mode_lock;
+ unsigned char *user_space_data_buf;
+ uint8_t user_space_data_busy;
+ struct diag_pkt_stats_t msg_stats;
+ struct diag_pkt_stats_t log_stats;
+ struct diag_pkt_stats_t event_stats;
+ /* buffer for updating mask to peripherals */
+ unsigned char *buf_feature_mask_update;
+ uint8_t hdlc_disabled;
+ struct mutex hdlc_disable_mutex;
+ struct timer_list hdlc_reset_timer;
+ struct mutex diag_hdlc_mutex;
+ unsigned char *hdlc_buf;
+ uint32_t hdlc_buf_len;
+ unsigned char *apps_rsp_buf;
+ struct diag_partial_pkt_t incoming_pkt;
+ int in_busy_pktdata;
+ /* Variables for non real time mode */
+ int real_time_mode[DIAG_NUM_PROC];
+ int real_time_update_busy;
+ uint16_t proc_active_mask;
+ uint16_t proc_rt_vote_mask[DIAG_NUM_PROC];
+ struct mutex real_time_mutex;
+ struct work_struct diag_real_time_work;
+ struct workqueue_struct *diag_real_time_wq;
+#ifdef CONFIG_DIAG_OVER_USB
+ int usb_connected;
+#endif
+ struct workqueue_struct *diag_wq;
+ struct work_struct diag_drain_work;
+ struct work_struct update_user_clients;
+ struct work_struct update_md_clients;
+ struct workqueue_struct *diag_cntl_wq;
+ uint8_t log_on_demand_support;
+ uint8_t *apps_req_buf;
+ uint32_t apps_req_buf_len;
+ uint8_t *dci_pkt_buf; /* For Apps DCI packets */
+ uint32_t dci_pkt_length;
+ int in_busy_dcipktdata;
+ int logging_mode;
+ int logging_mask;
+ int mask_check;
+ uint32_t md_session_mask;
+ uint8_t md_session_mode;
+ struct diag_md_session_t *md_session_map[NUM_MD_SESSIONS];
+ struct mutex md_session_lock;
+ /* Power related variables */
+ struct diag_ws_ref_t dci_ws;
+ struct diag_ws_ref_t md_ws;
+ /* Pointers to Diag Masks */
+ struct diag_mask_info *msg_mask;
+ struct diag_mask_info *log_mask;
+ struct diag_mask_info *event_mask;
+ struct diag_mask_info *build_time_mask;
+ uint8_t msg_mask_tbl_count;
+ uint16_t event_mask_size;
+ uint16_t last_event_id;
+ /* Variables for Mask Centralization */
+ uint16_t num_event_id[NUM_PERIPHERALS];
+ uint32_t num_equip_id[NUM_PERIPHERALS];
+ uint32_t max_ssid_count[NUM_PERIPHERALS];
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ /* For sending command requests in callback mode */
+ unsigned char *hdlc_encode_buf;
+ int hdlc_encode_buf_len;
+#endif
+ int time_sync_enabled;
+ uint8_t uses_time_api;
+};
+
+extern struct diagchar_dev *driver;
+
+extern int wrap_enabled;
+extern uint16_t wrap_count;
+
+void diag_get_timestamp(char *time_str);
+void check_drain_timer(void);
+int diag_get_remote(int remote_info);
+
+void diag_ws_init(void);
+void diag_ws_on_notify(void);
+void diag_ws_on_read(int type, int pkt_len);
+void diag_ws_on_copy(int type);
+void diag_ws_on_copy_fail(int type);
+void diag_ws_on_copy_complete(int type);
+void diag_ws_reset(int type);
+void diag_ws_release(void);
+void chk_logging_wakeup(void);
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+ int pid);
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+ struct diag_cmd_reg_entry_t *entry,
+ int proc);
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc);
+void diag_cmd_remove_reg_by_pid(int pid);
+void diag_cmd_remove_reg_by_proc(int proc);
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
+void diag_clear_masks(struct diag_md_session_t *info);
+
+void diag_record_stats(int type, int flag);
+
+struct diag_md_session_t *diag_md_session_get_pid(int pid);
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral);
+
+#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
new file mode 100644
index 0000000..3b9f4e9
--- /dev/null
+++ b/drivers/char/diag/diagchar_core.c
@@ -0,0 +1,3553 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/timer.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <asm/current.h>
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_debugfs.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+#include "diagfwd_peripheral.h"
+
+#include <linux/coresight-stm.h>
+#include <linux/kernel.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+MODULE_DESCRIPTION("Diag Char Driver");
+MODULE_LICENSE("GPL v2");
+
+#define MIN_SIZ_ALLOW 4
+#define INIT 1
+#define EXIT -1
+struct diagchar_dev *driver;
+struct diagchar_priv {
+ int pid;
+};
+
+#define USER_SPACE_RAW_DATA 0
+#define USER_SPACE_HDLC_DATA 1
+
+/* Memory pool variables */
+/* Used for copying any incoming packet from user space clients. */
+static unsigned int poolsize = 12;
+module_param(poolsize, uint, 0000);
+
+/*
+ * Used for HDLC encoding packets coming from the user
+ * space.
+ */
+static unsigned int poolsize_hdlc = 10;
+module_param(poolsize_hdlc, uint, 0000);
+
+/*
+ * This is used for incoming DCI requests from the user space clients.
+ * Don't expose itemsize as it is internal.
+ */
+static unsigned int poolsize_user = 8;
+module_param(poolsize_user, uint, 0000);
+
+/*
+ * USB structures allocated for writing Diag data generated on the Apps to USB.
+ * Don't expose itemsize as it is constant.
+ */
+static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
+static unsigned int poolsize_usb_apps = 10;
+module_param(poolsize_usb_apps, uint, 0000);
+
+/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
+static unsigned int poolsize_dci = 10;
+module_param(poolsize_dci, uint, 0000);
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Used for reading data from the remote device. */
+static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm = 18;
+module_param(itemsize_mdm, uint, 0000);
+module_param(poolsize_mdm, uint, 0000);
+
+/*
+ * Used for reading DCI data from the remote device.
+ * Don't expose poolsize for DCI data. There is only one read buffer.
+ */
+static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm_dci = 1;
+module_param(itemsize_mdm_dci, uint, 0000);
+
+/*
+ * Used for USB structures associated with a remote device.
+ * Don't expose the itemsize since it is constant.
+ */
+static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
+static unsigned int poolsize_mdm_usb = 18;
+module_param(poolsize_mdm_usb, uint, 0000);
+
+/*
+ * Used for writing read DCI data to remote peripherals. Don't
+ * expose poolsize for DCI data. There is only one read
+ * buffer. Add 6 bytes for DCI header information: Start (1),
+ * Version (1), Length (2), Tag (2)
+ */
+static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
+static unsigned int poolsize_mdm_dci_write = 1;
+module_param(itemsize_mdm_dci_write, uint, 0000);
+
+/*
+ * Used for USB structures associated with a remote SMUX
+ * device. Don't expose the itemsize since it is constant.
+ */
+static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
+static unsigned int poolsize_qsc_usb = 8;
+module_param(poolsize_qsc_usb, uint, 0000);
+#endif
+
+/* This is the max number of user-space clients supported at initialization */
+static unsigned int max_clients = 15;
+static unsigned int threshold_client_limit = 50;
+module_param(max_clients, uint, 0000);
+
+/* Timer variables */
+static struct timer_list drain_timer;
+static int timer_in_progress;
+
+struct diag_apps_data_t {
+ void *buf;
+ uint32_t len;
+ int ctxt;
+};
+
+static struct diag_apps_data_t hdlc_data;
+static struct diag_apps_data_t non_hdlc_data;
+static struct mutex apps_data_mutex;
+
+#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
+
+#ifdef DIAG_DEBUG
+uint16_t diag_debug_mask;
+void *diag_ipc_log;
+#endif
+
+static void diag_md_session_close(struct diag_md_session_t *session_info);
+
+/*
+ * Returns the next delayed rsp id. Once the id reaches
+ * DIAGPKT_MAX_DELAYED_RSP it either wraps back to 1 (and increments
+ * wrap_count) if wrapping is enabled, or saturates at
+ * DIAGPKT_MAX_DELAYED_RSP otherwise.
+ */
+static uint16_t diag_get_next_delayed_rsp_id(void)
+{
+ uint16_t rsp_id = 0;
+
+ mutex_lock(&driver->delayed_rsp_mutex);
+ rsp_id = driver->delayed_rsp_id;
+ if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
+ rsp_id++;
+ else {
+ if (wrap_enabled) {
+ rsp_id = 1;
+ wrap_count++;
+ } else
+ rsp_id = DIAGPKT_MAX_DELAYED_RSP;
+ }
+ driver->delayed_rsp_id = rsp_id;
+ mutex_unlock(&driver->delayed_rsp_mutex);
+
+ return rsp_id;
+}
+
+static int diag_switch_logging(struct diag_logging_mode_param_t *param);
+
+/*
+ * Break out on failure so that ret stays at -EFAULT; otherwise the
+ * trailing ret += length would advance ret past the error value and
+ * callers checking for -EFAULT would miss the failure.
+ */
+#define COPY_USER_SPACE_OR_ERR(buf, data, length) \
+do { \
+	if ((count < ret+length) || (copy_to_user(buf, \
+			(void *)&data, length))) { \
+		ret = -EFAULT; \
+		break; \
+	} \
+	ret += length; \
+} while (0)
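+/*
+ * Usage sketch (hypothetical local names): with ret tracking the bytes
+ * copied so far, COPY_USER_SPACE_OR_ERR(buf + ret, data_type, 4) copies
+ * a 4-byte field to user space and advances ret, or leaves ret at
+ * -EFAULT if the copy fails or would overrun count; see the -EFAULT
+ * check after the invocation in diag_copy_dci().
+ */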
+
+static void drain_timer_func(unsigned long data)
+{
+ queue_work(driver->diag_wq, &(driver->diag_drain_work));
+}
+
+static void diag_drain_apps_data(struct diag_apps_data_t *data)
+{
+ int err = 0;
+
+ if (!data || !data->buf)
+ return;
+
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err)
+ diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+
+ data->buf = NULL;
+ data->len = 0;
+}
+
+void diag_update_user_client_work_fn(struct work_struct *work)
+{
+ diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
+}
+
+static void diag_update_md_client_work_fn(struct work_struct *work)
+{
+ diag_update_md_clients(HDLC_SUPPORT_TYPE);
+}
+
+void diag_drain_work_fn(struct work_struct *work)
+{
+ struct diag_md_session_t *session_info = NULL;
+ uint8_t hdlc_disabled = 0;
+
+ timer_in_progress = 0;
+ mutex_lock(&apps_data_mutex);
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ if (session_info)
+ hdlc_disabled = session_info->hdlc_disabled;
+ else
+ hdlc_disabled = driver->hdlc_disabled;
+
+ if (!hdlc_disabled)
+ diag_drain_apps_data(&hdlc_data);
+ else
+ diag_drain_apps_data(&non_hdlc_data);
+ mutex_unlock(&apps_data_mutex);
+}
+
+void check_drain_timer(void)
+{
+	if (!timer_in_progress) {
+		timer_in_progress = 1;
+		mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
+ }
+}
+
+void diag_add_client(int i, struct file *file)
+{
+ struct diagchar_priv *diagpriv_data;
+
+ driver->client_map[i].pid = current->tgid;
+ diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
+ GFP_KERNEL);
+ if (diagpriv_data)
+ diagpriv_data->pid = current->tgid;
+ file->private_data = diagpriv_data;
+ strlcpy(driver->client_map[i].name, current->comm, 20);
+ driver->client_map[i].name[19] = '\0';
+}
+
+static void diag_mempool_init(void)
+{
+ uint32_t itemsize = DIAG_MAX_REQ_SIZE;
+ uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
+ uint32_t itemsize_dci = IN_BUF_SIZE;
+ uint32_t itemsize_user = DCI_REQ_BUF_SIZE;
+
+ itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
+ CALLBACK_HDR_SIZE);
+ diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
+ diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
+ diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
+ diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);
+
+ diagmem_init(driver, POOL_TYPE_COPY);
+ diagmem_init(driver, POOL_TYPE_HDLC);
+ diagmem_init(driver, POOL_TYPE_USER);
+ diagmem_init(driver, POOL_TYPE_DCI);
+}
+
+static void diag_mempool_exit(void)
+{
+ diagmem_exit(driver, POOL_TYPE_COPY);
+ diagmem_exit(driver, POOL_TYPE_HDLC);
+ diagmem_exit(driver, POOL_TYPE_USER);
+ diagmem_exit(driver, POOL_TYPE_DCI);
+}
+
+static int diagchar_open(struct inode *inode, struct file *file)
+{
+ int i = 0;
+ void *temp;
+
+ if (driver) {
+ mutex_lock(&driver->diagchar_mutex);
+
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid == 0)
+ break;
+
+ if (i < driver->num_clients) {
+ diag_add_client(i, file);
+ } else {
+ if (i < threshold_client_limit) {
+ driver->num_clients++;
+				temp = krealloc(driver->client_map,
+					driver->num_clients *
+					sizeof(struct diag_client_map),
+					GFP_KERNEL);
+ if (!temp)
+ goto fail;
+ else
+ driver->client_map = temp;
+				temp = krealloc(driver->data_ready,
+					driver->num_clients * sizeof(int),
+					GFP_KERNEL);
+ if (!temp)
+ goto fail;
+ else
+ driver->data_ready = temp;
+ diag_add_client(i, file);
+ } else {
+ mutex_unlock(&driver->diagchar_mutex);
+ pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
+ pr_err_ratelimited("diag: Cannot open handle %s %d",
+ current->comm, current->tgid);
+ for (i = 0; i < driver->num_clients; i++)
+ pr_debug("%d) %s PID=%d", i, driver->
+ client_map[i].name,
+ driver->client_map[i].pid);
+ return -ENOMEM;
+ }
+ }
+ driver->data_ready[i] = 0x0;
+ driver->data_ready[i] |= MSG_MASKS_TYPE;
+ driver->data_ready[i] |= EVENT_MASKS_TYPE;
+ driver->data_ready[i] |= LOG_MASKS_TYPE;
+ driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+ driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+
+ if (driver->ref_count == 0)
+ diag_mempool_init();
+ driver->ref_count++;
+ mutex_unlock(&driver->diagchar_mutex);
+ return 0;
+ }
+ return -ENOMEM;
+
+fail:
+	driver->num_clients--;
+	mutex_unlock(&driver->diagchar_mutex);
+	pr_err_ratelimited("diag: Insufficient memory for new client\n");
+	return -ENOMEM;
+}
+
+static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
+{
+ uint32_t ret = 0;
+
+ if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
+ ret |= DIAG_CON_APSS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
+ ret |= DIAG_CON_MPSS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
+ ret |= DIAG_CON_LPASS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
+ ret |= DIAG_CON_WCNSS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
+ ret |= DIAG_CON_SENSORS;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
+ ret |= DIAG_CON_WDSP;
+
+ return ret;
+}
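+/*
+ * Example (illustrative): a kernel mask of MD_PERIPHERAL_MASK(APPS_DATA) |
+ * MD_PERIPHERAL_MASK(PERIPHERAL_MODEM) translates to the user-space mask
+ * DIAG_CON_APSS | DIAG_CON_MPSS (0x0003).
+ */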
+
+void diag_clear_masks(struct diag_md_session_t *info)
+{
+ int ret;
+ char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
+ char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
+ char cmd_disable_event_mask[] = { 0x60, 0};
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: %s: masks clear request upon %s\n", __func__,
+ ((info) ? "ODL exit" : "USB Disconnection"));
+
+ ret = diag_process_apps_masks(cmd_disable_log_mask,
+ sizeof(cmd_disable_log_mask), info);
+ ret = diag_process_apps_masks(cmd_disable_msg_mask,
+ sizeof(cmd_disable_msg_mask), info);
+ ret = diag_process_apps_masks(cmd_disable_event_mask,
+ sizeof(cmd_disable_event_mask), info);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:%s: masks cleared successfully\n", __func__);
+}
+
+static void diag_close_logging_process(const int pid)
+{
+ int i;
+ int session_peripheral_mask;
+ struct diag_md_session_t *session_info = NULL;
+ struct diag_logging_mode_param_t params;
+
+ session_info = diag_md_session_get_pid(pid);
+ if (!session_info)
+ return;
+
+ diag_clear_masks(session_info);
+
+ mutex_lock(&driver->diag_maskclear_mutex);
+ driver->mask_clear = 1;
+ mutex_unlock(&driver->diag_maskclear_mutex);
+
+ session_peripheral_mask = session_info->peripheral_mask;
+ diag_md_session_close(session_info);
+ for (i = 0; i < NUM_MD_SESSIONS; i++)
+ if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
+ diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
+
+ params.req_mode = USB_MODE;
+ params.mode_param = 0;
+ params.peripheral_mask =
+ diag_translate_kernel_to_user_mask(session_peripheral_mask);
+ mutex_lock(&driver->diagchar_mutex);
+	diag_switch_logging(&params);
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
+static int diag_remove_client_entry(struct file *file)
+{
+ int i = -1;
+ struct diagchar_priv *diagpriv_data = NULL;
+ struct diag_dci_client_tbl *dci_entry = NULL;
+
+ if (!driver)
+ return -ENOMEM;
+
+ mutex_lock(&driver->diag_file_mutex);
+ if (!file) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
+ mutex_unlock(&driver->diag_file_mutex);
+ return -ENOENT;
+ }
+ if (!(file->private_data)) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
+ mutex_unlock(&driver->diag_file_mutex);
+ return -EINVAL;
+ }
+
+ diagpriv_data = file->private_data;
+
+	/*
+	 * Clean up any DCI registrations if this is a DCI client.
+	 * This especially helps in the case of an ungraceful exit of a
+	 * DCI client, as it removes any registrations still pending for
+	 * that client.
+	 */
+ mutex_lock(&driver->dci_mutex);
+ dci_entry = dci_lookup_client_entry_pid(current->tgid);
+ if (dci_entry)
+ diag_dci_deinit_client(dci_entry);
+ mutex_unlock(&driver->dci_mutex);
+
+ diag_close_logging_process(current->tgid);
+
+ /* Delete the pkt response table entry for the exiting process */
+ diag_cmd_remove_reg_by_pid(current->tgid);
+
+ mutex_lock(&driver->diagchar_mutex);
+ driver->ref_count--;
+ if (driver->ref_count == 0)
+ diag_mempool_exit();
+
+ for (i = 0; i < driver->num_clients; i++) {
+ if (diagpriv_data && diagpriv_data->pid ==
+ driver->client_map[i].pid) {
+ driver->client_map[i].pid = 0;
+ kfree(diagpriv_data);
+ diagpriv_data = NULL;
+			file->private_data = NULL;
+ break;
+ }
+ }
+ mutex_unlock(&driver->diagchar_mutex);
+ mutex_unlock(&driver->diag_file_mutex);
+ return 0;
+}
+static int diagchar_close(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
+ current->comm);
+ ret = diag_remove_client_entry(file);
+ mutex_lock(&driver->diag_maskclear_mutex);
+ driver->mask_clear = 0;
+ mutex_unlock(&driver->diag_maskclear_mutex);
+ return ret;
+}
+
+void diag_record_stats(int type, int flag)
+{
+ struct diag_pkt_stats_t *pkt_stats = NULL;
+
+ switch (type) {
+ case DATA_TYPE_EVENT:
+ pkt_stats = &driver->event_stats;
+ break;
+ case DATA_TYPE_F3:
+ pkt_stats = &driver->msg_stats;
+ break;
+ case DATA_TYPE_LOG:
+ pkt_stats = &driver->log_stats;
+ break;
+ case DATA_TYPE_RESPONSE:
+ if (flag != PKT_DROP)
+ return;
+ pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
+ __func__);
+ return;
+ case DATA_TYPE_DELAYED_RESPONSE:
+ /* No counters to increase for Delayed responses */
+ return;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ switch (flag) {
+ case PKT_ALLOC:
+ atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
+ break;
+ case PKT_DROP:
+ atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
+ break;
+ case PKT_RESET:
+ atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
+ atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
+ __func__, flag);
+ return;
+ }
+}
+
+void diag_get_timestamp(char *time_str)
+{
+ struct timeval t;
+ struct tm broken_tm;
+
+	if (!time_str)
+		return;
+	do_gettimeofday(&t);
+	time_to_tm(t.tv_sec, 0, &broken_tm);
+ scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
+ broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
+}
+
+int diag_get_remote(int remote_info)
+{
+ int val = (remote_info < 0) ? -remote_info : remote_info;
+ int remote_val;
+
+ switch (val) {
+ case MDM:
+ case MDM2:
+ case QSC:
+ remote_val = -remote_info;
+ break;
+ default:
+ remote_val = 0;
+ break;
+ }
+
+ return remote_val;
+}
+
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
+{
+ int polling = DIAG_CMD_NOT_POLLING;
+
+ if (!entry)
+ return -EIO;
+
+ if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
+ if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
+ entry->cmd_code_hi >= DIAG_CMD_STATUS &&
+ entry->cmd_code_lo <= DIAG_CMD_STATUS)
+ polling = DIAG_CMD_POLLING;
+ else if (entry->subsys_id == DIAG_SS_WCDMA &&
+ entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
+ entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
+ polling = DIAG_CMD_POLLING;
+ else if (entry->subsys_id == DIAG_SS_GSM &&
+ entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
+ entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
+ polling = DIAG_CMD_POLLING;
+ else if (entry->subsys_id == DIAG_SS_PARAMS &&
+ entry->cmd_code_hi >= DIAG_DIAG_POLL &&
+ entry->cmd_code_lo <= DIAG_DIAG_POLL)
+ polling = DIAG_CMD_POLLING;
+ else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
+ entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
+ entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
+ polling = DIAG_CMD_POLLING;
+ }
+
+ return polling;
+}
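+/*
+ * Example (derived from the checks above): an entry registered with
+ * cmd_code DIAG_CMD_NO_SUBSYS, subsys_id DIAG_CMD_NO_SUBSYS and a code
+ * range that covers DIAG_CMD_STATUS (0x0C) is classified as a polling
+ * registration.
+ */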
+
+static void diag_cmd_invalidate_polling(int change_flag)
+{
+ int polling = DIAG_CMD_NOT_POLLING;
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+
+ if (change_flag == DIAG_CMD_ADD) {
+ if (driver->polling_reg_flag)
+ return;
+ }
+
+ driver->polling_reg_flag = 0;
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ polling = diag_cmd_chk_polling(&item->entry);
+ if (polling == DIAG_CMD_POLLING) {
+ driver->polling_reg_flag = 1;
+ break;
+ }
+ }
+}
+
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+ int pid)
+{
+ struct diag_cmd_reg_t *new_item = NULL;
+
+ if (!new_entry) {
+ pr_err("diag: In %s, invalid new entry\n", __func__);
+ return -EINVAL;
+ }
+
+ if (proc > APPS_DATA) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
+ return -EINVAL;
+ }
+
+ if (proc != APPS_DATA)
+ pid = INVALID_PID;
+
+ new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
+ if (!new_item)
+ return -ENOMEM;
+ kmemleak_not_leak(new_item);
+
+ new_item->pid = pid;
+ new_item->proc = proc;
+ memcpy(&new_item->entry, new_entry,
+ sizeof(struct diag_cmd_reg_entry_t));
+ INIT_LIST_HEAD(&new_item->link);
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ list_add_tail(&new_item->link, &driver->cmd_reg_list);
+ driver->cmd_reg_count++;
+ diag_cmd_invalidate_polling(DIAG_CMD_ADD);
+ mutex_unlock(&driver->cmd_reg_mutex);
+
+ return 0;
+}
+
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+ struct diag_cmd_reg_entry_t *entry, int proc)
+{
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+ struct diag_cmd_reg_entry_t *temp_entry = NULL;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid entry\n", __func__);
+ return NULL;
+ }
+
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ temp_entry = &item->entry;
+ if (temp_entry->cmd_code == entry->cmd_code &&
+ temp_entry->subsys_id == entry->subsys_id &&
+ temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+ temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+ (proc == item->proc || proc == ALL_PROC)) {
+ return &item->entry;
+ } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+ entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
+ if (temp_entry->subsys_id == entry->subsys_id &&
+ temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+ temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+ (proc == item->proc || proc == ALL_PROC)) {
+ return &item->entry;
+ }
+ } else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+ temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
+ if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
+ (temp_entry->cmd_code_lo <= entry->cmd_code) &&
+ (proc == item->proc || proc == ALL_PROC)) {
+ if (entry->cmd_code == MODE_CMD) {
+ if (entry->subsys_id == RESET_ID &&
+ item->proc != APPS_DATA) {
+ continue;
+ }
+ if (entry->subsys_id != RESET_ID &&
+ item->proc == APPS_DATA) {
+ continue;
+ }
+ }
+ return &item->entry;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
+{
+ struct diag_cmd_reg_t *item = NULL;
+ struct diag_cmd_reg_entry_t *temp_entry;
+
+ if (!entry) {
+ pr_err("diag: In %s, invalid entry\n", __func__);
+ return;
+ }
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ temp_entry = diag_cmd_search(entry, proc);
+ if (temp_entry) {
+ item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
+ if (!item) {
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return;
+ }
+ list_del(&item->link);
+ kfree(item);
+ driver->cmd_reg_count--;
+ }
+ diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+ mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+void diag_cmd_remove_reg_by_pid(int pid)
+{
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (item->pid == pid) {
+ list_del(&item->link);
+ kfree(item);
+ driver->cmd_reg_count--;
+ }
+ }
+ mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+void diag_cmd_remove_reg_by_proc(int proc)
+{
+ struct list_head *start;
+ struct list_head *temp;
+ struct diag_cmd_reg_t *item = NULL;
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+ item = list_entry(start, struct diag_cmd_reg_t, link);
+ if (item->proc == proc) {
+ list_del(&item->link);
+ kfree(item);
+ driver->cmd_reg_count--;
+ }
+ }
+ diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+ mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+static int diag_copy_dci(char __user *buf, size_t count,
+ struct diag_dci_client_tbl *entry, int *pret)
+{
+ int total_data_len = 0;
+ int ret = 0;
+ int exit_stat = 1;
+ uint8_t drain_again = 0;
+ struct diag_dci_buffer_t *buf_entry, *temp;
+
+ if (!buf || !entry || !pret)
+ return exit_stat;
+
+ ret = *pret;
+
+ ret += sizeof(int);
+ if (ret >= count) {
+ pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
+ __func__, ret, count);
+ return -EINVAL;
+ }
+
+ mutex_lock(&entry->write_buf_mutex);
+ list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+ buf_track) {
+
+ if ((ret + buf_entry->data_len) > count) {
+ drain_again = 1;
+ break;
+ }
+
+ list_del(&buf_entry->buf_track);
+ mutex_lock(&buf_entry->data_mutex);
+ if ((buf_entry->data_len > 0) &&
+ (buf_entry->in_busy) &&
+ (buf_entry->data)) {
+ if (copy_to_user(buf+ret, (void *)buf_entry->data,
+ buf_entry->data_len))
+ goto drop;
+ ret += buf_entry->data_len;
+ total_data_len += buf_entry->data_len;
+ diag_ws_on_copy(DIAG_WS_DCI);
+drop:
+ buf_entry->in_busy = 0;
+ buf_entry->data_len = 0;
+ buf_entry->in_list = 0;
+ if (buf_entry->buf_type == DCI_BUF_CMD) {
+ mutex_unlock(&buf_entry->data_mutex);
+ continue;
+ } else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+ diagmem_free(driver, buf_entry->data,
+ POOL_TYPE_DCI);
+ buf_entry->data = NULL;
+ mutex_unlock(&buf_entry->data_mutex);
+ kfree(buf_entry);
+ continue;
+ }
+
+ }
+ mutex_unlock(&buf_entry->data_mutex);
+ }
+
+ if (total_data_len > 0) {
+ /* Copy the total data length */
+ COPY_USER_SPACE_OR_ERR(buf+8, total_data_len, 4);
+ if (ret == -EFAULT)
+ goto exit;
+ ret -= 4;
+ } else {
+ pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
+ __func__, total_data_len);
+ }
+
+ exit_stat = 0;
+exit:
+ entry->in_service = 0;
+ mutex_unlock(&entry->write_buf_mutex);
+ *pret = ret;
+ if (drain_again)
+ dci_drain_data(0);
+
+ return exit_stat;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+static int diag_remote_init(void)
+{
+ diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
+ diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
+ diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
+ diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
+ poolsize_mdm_dci);
+ diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+ diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+ diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
+ poolsize_mdm_dci_write);
+ diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
+ poolsize_mdm_dci_write);
+ diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
+ poolsize_qsc_usb);
+ driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+ if (!driver->hdlc_encode_buf)
+ return -ENOMEM;
+ driver->hdlc_encode_buf_len = 0;
+ return 0;
+}
+
+static void diag_remote_exit(void)
+{
+ kfree(driver->hdlc_encode_buf);
+}
+
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+ uint8_t hdlc_flag)
+{
+ int err = 0;
+ int max_len = 0;
+ uint8_t retry_count = 0;
+ uint8_t max_retries = 3;
+ uint16_t payload = 0;
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+ int bridge_index = proc - 1;
+ struct diag_md_session_t *session_info = NULL;
+ uint8_t hdlc_disabled = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (len <= 0) {
+ pr_err("diag: In %s, invalid len: %d", __func__, len);
+ return -EBADMSG;
+ }
+
+	if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+ pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+ bridge_index);
+ return -EINVAL;
+ }
+
+ do {
+ if (driver->hdlc_encode_buf_len == 0)
+ break;
+ usleep_range(10000, 10100);
+ retry_count++;
+ } while (retry_count < max_retries);
+
+ if (driver->hdlc_encode_buf_len != 0)
+ return -EAGAIN;
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ if (session_info)
+ hdlc_disabled = session_info->hdlc_disabled;
+ else
+ hdlc_disabled = driver->hdlc_disabled;
+ if (hdlc_disabled) {
+ payload = *(uint16_t *)(buf + 2);
+ driver->hdlc_encode_buf_len = payload;
+		/*
+		 * Skip the 4 framing bytes: start (1 byte), version (1 byte)
+		 * and payload length (2 bytes).
+		 */
+ memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+ goto send_data;
+ }
+
+ if (hdlc_flag) {
+ if (len > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+ len);
+ return -EBADMSG;
+ }
+ driver->hdlc_encode_buf_len = len;
+ memcpy(driver->hdlc_encode_buf, buf, len);
+ goto send_data;
+ }
+
+ /*
+	 * The worst-case length is twice the incoming packet length.
+	 * Add 3 bytes for the CRC (2 bytes) and the delimiter (1 byte).
+ */
+ max_len = (2 * len) + 3;
+ if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+ max_len);
+ return -EBADMSG;
+ }
+
+ /* Perform HDLC encoding on incoming data */
+ send.state = DIAG_STATE_START;
+ send.pkt = (void *)(buf);
+ send.last = (void *)(buf + len - 1);
+ send.terminate = 1;
+
+ enc.dest = driver->hdlc_encode_buf;
+ enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
+ diag_hdlc_encode(&send, &enc);
+ driver->hdlc_encode_buf_len = (int)(enc.dest -
+ (void *)driver->hdlc_encode_buf);
+
+send_data:
+ err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
+ driver->hdlc_encode_buf_len);
+ if (err) {
+ pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
+ proc, err);
+ driver->hdlc_encode_buf_len = 0;
+ }
+
+ return err;
+}
+
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+ int bridge_index = proc - 1;
+
+ if (!buf || len < 0) {
+ pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+ __func__, buf, len);
+ return -EINVAL;
+ }
+
+	if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+ pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+ bridge_index);
+ return -EINVAL;
+ }
+
+ driver->user_space_data_busy = 1;
+ return diagfwd_bridge_write(bridge_index, buf, len);
+}
+#else
+static int diag_remote_init(void)
+{
+ return 0;
+}
+
+static void diag_remote_exit(void)
+{
+}
+
+int diagfwd_bridge_init(void)
+{
+ return 0;
+}
+
+void diagfwd_bridge_exit(void)
+{
+}
+
+uint16_t diag_get_remote_device_mask(void)
+{
+ return 0;
+}
+
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+ uint8_t hdlc_flag)
+{
+ return -EINVAL;
+}
+
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+ return 0;
+}
+#endif
+
+static int mask_request_validate(unsigned char mask_buf[])
+{
+ uint8_t packet_id;
+ uint8_t subsys_id;
+ uint16_t ss_cmd;
+
+ packet_id = mask_buf[0];
+
+ if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
+ subsys_id = mask_buf[1];
+ ss_cmd = *(uint16_t *)(mask_buf + 2);
+ switch (subsys_id) {
+ case DIAG_SS_DIAG:
+ if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
+ (ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
+ (ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
+ (ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
+ (ss_cmd == DIAG_SS_FILE_READ_APPS))
+ return 1;
+ break;
+ default:
+ return 0;
+ }
+ } else if (packet_id == 0x4B) {
+ subsys_id = mask_buf[1];
+ ss_cmd = *(uint16_t *)(mask_buf + 2);
+ /* Packets with SSID which are allowed */
+ switch (subsys_id) {
+ case 0x04: /* DIAG_SUBSYS_WCDMA */
+ if ((ss_cmd == 0) || (ss_cmd == 0xF))
+ return 1;
+ break;
+ case 0x08: /* DIAG_SUBSYS_GSM */
+ if ((ss_cmd == 0) || (ss_cmd == 0x1))
+ return 1;
+ break;
+ case 0x09: /* DIAG_SUBSYS_UMTS */
+ case 0x0F: /* DIAG_SUBSYS_CM */
+ if (ss_cmd == 0)
+ return 1;
+ break;
+ case 0x0C: /* DIAG_SUBSYS_OS */
+ if ((ss_cmd == 2) || (ss_cmd == 0x100))
+ return 1; /* MPU and APU */
+ break;
+ case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
+ if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
+ return 1;
+ else if (ss_cmd == 0x218) /* HDLC Disabled Command*/
+ return 0;
+ else if (ss_cmd == DIAG_GET_TIME_API)
+ return 1;
+ else if (ss_cmd == DIAG_SET_TIME_API)
+ return 1;
+ else if (ss_cmd == DIAG_SWITCH_COMMAND)
+ return 1;
+ else if (ss_cmd == DIAG_BUFFERING_MODE)
+ return 1;
+ break;
+ case 0x13: /* DIAG_SUBSYS_FS */
+ if ((ss_cmd == 0) || (ss_cmd == 0x1))
+ return 1;
+ break;
+ default:
+ return 0;
+ }
+ } else {
+ switch (packet_id) {
+ case 0x00: /* Version Number */
+ case 0x0C: /* CDMA status packet */
+ case 0x1C: /* Diag Version */
+ case 0x1D: /* Time Stamp */
+ case 0x60: /* Event Report Control */
+ case 0x63: /* Status snapshot */
+ case 0x73: /* Logging Configuration */
+ case 0x7C: /* Extended build ID */
+ case 0x7D: /* Extended Message configuration */
+ case 0x81: /* Event get mask */
+ case 0x82: /* Set the event mask */
+ return 1;
+ default:
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static void diag_md_session_init(void)
+{
+ int i;
+
+ mutex_init(&driver->md_session_lock);
+ driver->md_session_mask = 0;
+ driver->md_session_mode = DIAG_MD_NONE;
+ for (i = 0; i < NUM_MD_SESSIONS; i++)
+ driver->md_session_map[i] = NULL;
+}
+
+static void diag_md_session_exit(void)
+{
+ int i;
+ struct diag_md_session_t *session_info = NULL;
+
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i]) {
+ session_info = driver->md_session_map[i];
+ diag_log_mask_free(session_info->log_mask);
+ kfree(session_info->log_mask);
+ session_info->log_mask = NULL;
+ diag_msg_mask_free(session_info->msg_mask);
+ kfree(session_info->msg_mask);
+ session_info->msg_mask = NULL;
+ diag_event_mask_free(session_info->event_mask);
+ kfree(session_info->event_mask);
+ session_info->event_mask = NULL;
+ kfree(session_info);
+ session_info = NULL;
+ driver->md_session_map[i] = NULL;
+ }
+ }
+ mutex_destroy(&driver->md_session_lock);
+ driver->md_session_mask = 0;
+ driver->md_session_mode = DIAG_MD_NONE;
+}
+
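+/*
+ * diag_md_session_create - allocate a memory-device logging session owned
+ * by the calling process. The session gets private copies of the current
+ * log, event and msg masks; creation fails with -EEXIST if another session
+ * already claims any peripheral in peripheral_mask.
+ */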
+int diag_md_session_create(int mode, int peripheral_mask, int proc)
+{
+ int i;
+ int err = 0;
+ struct diag_md_session_t *new_session = NULL;
+
+ /*
+ * If a session is running with a peripheral mask and a new session
+ * request comes in with same peripheral mask value then return
+ * invalid param
+ */
+ if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
+ (driver->md_session_mask & peripheral_mask) != 0)
+ return -EINVAL;
+
+ mutex_lock(&driver->md_session_lock);
+ new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
+ if (!new_session) {
+ mutex_unlock(&driver->md_session_lock);
+ return -ENOMEM;
+ }
+
+ new_session->peripheral_mask = 0;
+ new_session->pid = current->tgid;
+ new_session->task = current;
+
+ new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
+ GFP_KERNEL);
+ if (!new_session->log_mask) {
+ err = -ENOMEM;
+ goto fail_peripheral;
+ }
+ new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
+ GFP_KERNEL);
+ if (!new_session->event_mask) {
+ err = -ENOMEM;
+ goto fail_peripheral;
+ }
+ new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
+ GFP_KERNEL);
+ if (!new_session->msg_mask) {
+ err = -ENOMEM;
+ goto fail_peripheral;
+ }
+
+ err = diag_log_mask_copy(new_session->log_mask, &log_mask);
+ if (err) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "failed to copy log mask, err: %d\n", err);
+ goto fail_peripheral;
+ }
+ err = diag_event_mask_copy(new_session->event_mask, &event_mask);
+ if (err) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "failed to copy event mask, err: %d\n", err);
+ goto fail_peripheral;
+ }
+ err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
+ if (err) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "failed to copy msg mask, err: %d\n", err);
+ goto fail_peripheral;
+ }
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
+ continue;
+ if (driver->md_session_map[i] != NULL) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "another instance present for %d\n", i);
+ err = -EEXIST;
+ goto fail_peripheral;
+ }
+ new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
+ driver->md_session_map[i] = new_session;
+ driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
+ }
+ setup_timer(&new_session->hdlc_reset_timer,
+ diag_md_hdlc_reset_timer_func,
+ new_session->pid);
+
+ driver->md_session_mode = DIAG_MD_PERIPHERAL;
+ mutex_unlock(&driver->md_session_lock);
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "created session in peripheral mode\n");
+ return 0;
+
+fail_peripheral:
+ diag_log_mask_free(new_session->log_mask);
+ kfree(new_session->log_mask);
+ new_session->log_mask = NULL;
+ diag_event_mask_free(new_session->event_mask);
+ kfree(new_session->event_mask);
+ new_session->event_mask = NULL;
+ diag_msg_mask_free(new_session->msg_mask);
+ kfree(new_session->msg_mask);
+ new_session->msg_mask = NULL;
+ kfree(new_session);
+ new_session = NULL;
+ mutex_unlock(&driver->md_session_lock);
+ return err;
+}
+
+static void diag_md_session_close(struct diag_md_session_t *session_info)
+{
+ int i;
+ uint8_t found = 0;
+
+ if (!session_info)
+ return;
+
+ mutex_lock(&driver->md_session_lock);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i] != session_info)
+ continue;
+ driver->md_session_map[i] = NULL;
+ driver->md_session_mask &= ~session_info->peripheral_mask;
+ }
+ diag_log_mask_free(session_info->log_mask);
+ kfree(session_info->log_mask);
+ session_info->log_mask = NULL;
+ diag_msg_mask_free(session_info->msg_mask);
+ kfree(session_info->msg_mask);
+ session_info->msg_mask = NULL;
+ diag_event_mask_free(session_info->event_mask);
+ kfree(session_info->event_mask);
+ session_info->event_mask = NULL;
+ del_timer(&session_info->hdlc_reset_timer);
+
+ for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
+ if (driver->md_session_map[i] != NULL)
+ found = 1;
+ }
+
+ driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
+ kfree(session_info);
+ session_info = NULL;
+ mutex_unlock(&driver->md_session_lock);
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
+}
+
+struct diag_md_session_t *diag_md_session_get_pid(int pid)
+{
+ int i;
+
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i] &&
+ driver->md_session_map[i]->pid == pid)
+ return driver->md_session_map[i];
+ }
+ return NULL;
+}
+
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
+{
+ if (peripheral >= NUM_MD_SESSIONS)
+ return NULL;
+ return driver->md_session_map[peripheral];
+}
+
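+/*
+ * diag_md_peripheral_switch - move the peripherals in peripheral_mask out
+ * of the given session (req_mode == DIAG_USB_MODE) or into it (req_mode ==
+ * DIAG_MEMORY_DEVICE_MODE), updating the global session map and mask under
+ * md_session_lock.
+ */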
+static int diag_md_peripheral_switch(struct diag_md_session_t *session_info,
+ int peripheral_mask, int req_mode)
+{
+ int i, bit = 0;
+
+ if (!session_info)
+ return -EINVAL;
+ if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+ return -EINVAL;
+
+ /*
+ * Check that md_session_map[i] still matches session_info for each
+ * requested peripheral; if not, a race occurred and we bail out.
+ */
+ mutex_lock(&driver->md_session_lock);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
+ if (!bit)
+ continue;
+ if (req_mode == DIAG_USB_MODE) {
+ if (driver->md_session_map[i] != session_info) {
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ driver->md_session_map[i] = NULL;
+ driver->md_session_mask &= ~bit;
+ session_info->peripheral_mask &= ~bit;
+
+ } else {
+ if (driver->md_session_map[i] != NULL) {
+ mutex_unlock(&driver->md_session_lock);
+ return -EINVAL;
+ }
+ driver->md_session_map[i] = session_info;
+ driver->md_session_mask |= bit;
+ session_info->peripheral_mask |= bit;
+
+ }
+ }
+
+ driver->md_session_mode = DIAG_MD_PERIPHERAL;
+ mutex_unlock(&driver->md_session_lock);
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
+ peripheral_mask, req_mode);
+ return 0;
+}
+
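+/*
+ * diag_md_session_check - decide whether a logging mode switch needs any
+ * session bookkeeping. On success, *change_mode tells the caller whether
+ * the mux layer must actually perform the switch; sessions are created,
+ * trimmed or closed here as required by the requested mode and mask.
+ */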
+static int diag_md_session_check(int curr_mode, int req_mode,
+ const struct diag_logging_mode_param_t *param,
+ uint8_t *change_mode)
+{
+ int i, bit = 0, err = 0;
+ int change_mask = 0;
+ struct diag_md_session_t *session_info = NULL;
+
+ if (!param || !change_mode)
+ return -EIO;
+
+ *change_mode = 0;
+
+ switch (curr_mode) {
+ case DIAG_USB_MODE:
+ case DIAG_MEMORY_DEVICE_MODE:
+ case DIAG_MULTI_MODE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+ return -EINVAL;
+
+ if (req_mode == DIAG_USB_MODE) {
+ if (curr_mode == DIAG_USB_MODE)
+ return 0;
+ if (driver->md_session_mode == DIAG_MD_NONE
+ && driver->md_session_mask == 0 && driver->logging_mask) {
+ *change_mode = 1;
+ return 0;
+ }
+
+ /*
+ * curr_mode is either DIAG_MULTI_MODE or DIAG_MEMORY_DEVICE_MODE.
+ * Check if the requested peripherals are already in USB mode.
+ */
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
+ if (!bit)
+ continue;
+ if (bit & driver->logging_mask)
+ change_mask |= bit;
+ }
+ if (!change_mask)
+ return 0;
+
+ /*
+ * Change is needed. Check if this md_session has set all the
+ * requested peripherals. If another md session set a requested
+ * peripheral then we cannot switch that peripheral to USB.
+ * If this session owns all the requested peripherals, then
+ * call function to switch the modes/masks for the md_session
+ */
+ session_info = diag_md_session_get_pid(current->tgid);
+ if (!session_info) {
+ *change_mode = 1;
+ return 0;
+ }
+ if ((change_mask & session_info->peripheral_mask)
+ != change_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Another MD Session owns a requested peripheral\n");
+ return -EINVAL;
+ }
+ *change_mode = 1;
+
+ /*
+ * If the session keeps peripherals that are not being switched,
+ * move only the requested ones to USB mode; if every peripheral
+ * it owns is being set to USB mode, close the session instead.
+ */
+ if (~change_mask & session_info->peripheral_mask)
+ err = diag_md_peripheral_switch(session_info,
+ change_mask, DIAG_USB_MODE);
+ else
+ diag_md_session_close(session_info);
+
+ return err;
+
+ } else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
+ /*
+ * Get bit mask that represents what peripherals already have
+ * been set. Check that requested peripherals already set are
+ * owned by this md session
+ */
+ change_mask = driver->md_session_mask & param->peripheral_mask;
+ session_info = diag_md_session_get_pid(current->tgid);
+
+ if (session_info) {
+ if ((session_info->peripheral_mask & change_mask)
+ != change_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Another MD Session owns a requested peripheral\n");
+ return -EINVAL;
+ }
+ err = diag_md_peripheral_switch(session_info,
+ change_mask, DIAG_USB_MODE);
+ } else {
+ if (change_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Another MD Session owns a requested peripheral\n");
+ return -EINVAL;
+ }
+ err = diag_md_session_create(DIAG_MD_PERIPHERAL,
+ param->peripheral_mask, DIAG_LOCAL_PROC);
+ }
+ *change_mode = 1;
+ return err;
+ }
+ return -EINVAL;
+}
+
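+/*
+ * diag_translate_mask - convert the userspace-visible DIAG_CON_*
+ * connection bits into the kernel's internal peripheral bit positions.
+ */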
+static uint32_t diag_translate_mask(uint32_t peripheral_mask)
+{
+ uint32_t ret = 0;
+
+ if (peripheral_mask & DIAG_CON_APSS)
+ ret |= (1 << APPS_DATA);
+ if (peripheral_mask & DIAG_CON_MPSS)
+ ret |= (1 << PERIPHERAL_MODEM);
+ if (peripheral_mask & DIAG_CON_LPASS)
+ ret |= (1 << PERIPHERAL_LPASS);
+ if (peripheral_mask & DIAG_CON_WCNSS)
+ ret |= (1 << PERIPHERAL_WCNSS);
+ if (peripheral_mask & DIAG_CON_SENSORS)
+ ret |= (1 << PERIPHERAL_SENSORS);
+ if (peripheral_mask & DIAG_CON_WDSP)
+ ret |= (1 << PERIPHERAL_WDSP);
+
+ return ret;
+}
+
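+/*
+ * diag_switch_logging - top-level handler for DIAG_IOCTL_SWITCH_LOGGING.
+ * Translates the requested mode and peripheral mask, consults
+ * diag_md_session_check(), and then asks the mux layer to switch before
+ * updating the real time vote.
+ */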
+static int diag_switch_logging(struct diag_logging_mode_param_t *param)
+{
+ int new_mode;
+ int curr_mode;
+ int err = 0;
+ uint8_t do_switch = 1;
+ uint32_t peripheral_mask = 0;
+
+ if (!param)
+ return -EINVAL;
+
+ if (!param->peripheral_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "asking for mode switch with no peripheral mask set\n");
+ return -EINVAL;
+ }
+
+ peripheral_mask = diag_translate_mask(param->peripheral_mask);
+ param->peripheral_mask = peripheral_mask;
+
+ switch (param->req_mode) {
+ case CALLBACK_MODE:
+ case UART_MODE:
+ case SOCKET_MODE:
+ case MEMORY_DEVICE_MODE:
+ new_mode = DIAG_MEMORY_DEVICE_MODE;
+ break;
+ case USB_MODE:
+ new_mode = DIAG_USB_MODE;
+ break;
+ default:
+ pr_err("diag: In %s, request to switch to invalid mode: %d\n",
+ __func__, param->req_mode);
+ return -EINVAL;
+ }
+
+ curr_mode = driver->logging_mode;
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "request to switch logging from %d mask:%0x to %d mask:%0x\n",
+ curr_mode, driver->md_session_mask, new_mode, peripheral_mask);
+
+ err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
+ if (err) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "err from diag_md_session_check, err: %d\n", err);
+ return err;
+ }
+
+ if (do_switch == 0) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "not switching modes c: %d n: %d\n",
+ curr_mode, new_mode);
+ return 0;
+ }
+
+ diag_ws_reset(DIAG_WS_MUX);
+ err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
+ if (err) {
+ pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
+ __func__, curr_mode, new_mode, err);
+ driver->logging_mode = curr_mode;
+ goto fail;
+ }
+ driver->logging_mode = new_mode;
+ driver->logging_mask = peripheral_mask;
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);
+
+ /* TODO: update this vote handling to take peripheral_mask into account */
+ if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
+ diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
+ MODE_REALTIME, ALL_PROC);
+ } else {
+ diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
+ ALL_PROC);
+ }
+
+ if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
+ curr_mode == DIAG_USB_MODE)) {
+ queue_work(driver->diag_real_time_wq,
+ &driver->diag_real_time_work);
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+static int diag_ioctl_dci_reg(unsigned long ioarg)
+{
+ int result = -EINVAL;
+ struct diag_dci_reg_tbl_t dci_reg_params;
+
+ if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
+ sizeof(struct diag_dci_reg_tbl_t)))
+ return -EFAULT;
+
+ result = diag_dci_register_client(&dci_reg_params);
+
+ return result;
+}
+
+static int diag_ioctl_dci_health_stats(unsigned long ioarg)
+{
+ int result = -EINVAL;
+ struct diag_dci_health_stats_proc stats;
+
+ if (copy_from_user(&stats, (void __user *)ioarg,
+ sizeof(struct diag_dci_health_stats_proc)))
+ return -EFAULT;
+
+ result = diag_dci_copy_health_stats(&stats);
+ if (result == DIAG_DCI_NO_ERROR) {
+ if (copy_to_user((void __user *)ioarg, &stats,
+ sizeof(struct diag_dci_health_stats_proc)))
+ return -EFAULT;
+ }
+
+ return result;
+}
+
+static int diag_ioctl_dci_log_status(unsigned long ioarg)
+{
+ struct diag_log_event_stats le_stats;
+ struct diag_dci_client_tbl *dci_client = NULL;
+
+ if (copy_from_user(&le_stats, (void __user *)ioarg,
+ sizeof(struct diag_log_event_stats)))
+ return -EFAULT;
+
+ dci_client = diag_dci_get_client_entry(le_stats.client_id);
+ if (!dci_client)
+ return DIAG_DCI_NOT_SUPPORTED;
+ le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
+ if (copy_to_user((void __user *)ioarg, &le_stats,
+ sizeof(struct diag_log_event_stats)))
+ return -EFAULT;
+
+ return DIAG_DCI_NO_ERROR;
+}
+
+static int diag_ioctl_dci_event_status(unsigned long ioarg)
+{
+ struct diag_log_event_stats le_stats;
+ struct diag_dci_client_tbl *dci_client = NULL;
+
+ if (copy_from_user(&le_stats, (void __user *)ioarg,
+ sizeof(struct diag_log_event_stats)))
+ return -EFAULT;
+
+ dci_client = diag_dci_get_client_entry(le_stats.client_id);
+ if (!dci_client)
+ return DIAG_DCI_NOT_SUPPORTED;
+
+ le_stats.is_set = diag_dci_query_event_mask(dci_client, le_stats.code);
+ if (copy_to_user((void __user *)ioarg, &le_stats,
+ sizeof(struct diag_log_event_stats)))
+ return -EFAULT;
+
+ return DIAG_DCI_NO_ERROR;
+}
+
+static int diag_ioctl_lsm_deinit(void)
+{
+ int i;
+
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid == current->tgid)
+ break;
+
+ if (i == driver->num_clients)
+ return -EINVAL;
+
+ driver->data_ready[i] |= DEINIT_TYPE;
+ wake_up_interruptible(&driver->wait_q);
+
+ return 1;
+}
+
+static int diag_ioctl_vote_real_time(unsigned long ioarg)
+{
+ int real_time = 0;
+ int temp_proc = ALL_PROC;
+ struct real_time_vote_t vote;
+ struct diag_dci_client_tbl *dci_client = NULL;
+
+ if (copy_from_user(&vote, (void __user *)ioarg,
+ sizeof(struct real_time_vote_t)))
+ return -EFAULT;
+
+ if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
+ vote.real_time_vote > MODE_UNKNOWN ||
+ vote.client_id < 0) {
+ pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
+ __func__, vote.proc, vote.real_time_vote,
+ vote.client_id);
+ return -EINVAL;
+ }
+
+ driver->real_time_update_busy++;
+ if (vote.proc == DIAG_PROC_DCI) {
+ dci_client = diag_dci_get_client_entry(vote.client_id);
+ if (!dci_client) {
+ driver->real_time_update_busy--;
+ return DIAG_DCI_NOT_SUPPORTED;
+ }
+ diag_dci_set_real_time(dci_client, vote.real_time_vote);
+ real_time = diag_dci_get_cumulative_real_time(
+ dci_client->client_info.token);
+ diag_update_real_time_vote(vote.proc, real_time,
+ dci_client->client_info.token);
+ } else {
+ real_time = vote.real_time_vote;
+ temp_proc = vote.client_id;
+ diag_update_real_time_vote(vote.proc, real_time,
+ temp_proc);
+ }
+ queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+ return 0;
+}
+
+static int diag_ioctl_get_real_time(unsigned long ioarg)
+{
+ int i;
+ int retry_count = 0;
+ int timer = 0;
+ struct real_time_query_t rt_query;
+
+ if (copy_from_user(&rt_query, (void __user *)ioarg,
+ sizeof(struct real_time_query_t)))
+ return -EFAULT;
+ while (retry_count < 3) {
+ if (driver->real_time_update_busy > 0) {
+ retry_count++;
+ /*
+ * The value 10000 was chosen empirically as an
+ * optimum value in order to give the work in
+ * diag_real_time_wq to complete processing.
+ */
+ for (timer = 0; timer < 5; timer++)
+ usleep_range(10000, 10100);
+ } else {
+ break;
+ }
+ }
+
+ if (driver->real_time_update_busy > 0)
+ return -EAGAIN;
+
+ if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
+ pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
+ __func__);
+ return -EINVAL;
+ }
+ rt_query.real_time = driver->real_time_mode[rt_query.proc];
+ /*
+ * For the local processor, if any of the peripherals is in buffering
+ * mode, overwrite the value of real time with UNKNOWN_MODE
+ */
+ if (rt_query.proc == DIAG_LOCAL_PROC) {
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!driver->feature[i].peripheral_buffering)
+ continue;
+ switch (driver->buffering_mode[i].mode) {
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ rt_query.real_time = MODE_UNKNOWN;
+ break;
+ }
+ }
+ }
+
+ if (copy_to_user((void __user *)ioarg, &rt_query,
+ sizeof(struct real_time_query_t)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
+{
+ struct diag_buffering_mode_t params;
+
+ if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
+ return -EFAULT;
+
+ if (params.peripheral >= NUM_PERIPHERALS)
+ return -EINVAL;
+
+ mutex_lock(&driver->mode_lock);
+ driver->buffering_flag[params.peripheral] = 1;
+ mutex_unlock(&driver->mode_lock);
+
+ return diag_send_peripheral_buffering_mode(&params);
+}
+
+static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
+{
+ uint8_t peripheral;
+
+ if (copy_from_user(&peripheral, (void __user *)ioarg, sizeof(uint8_t)))
+ return -EFAULT;
+
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ return -EIO;
+ }
+
+ return diag_send_peripheral_drain_immediate(peripheral);
+}
+
+static int diag_ioctl_dci_support(unsigned long ioarg)
+{
+ struct diag_dci_peripherals_t dci_support;
+ int result = -EINVAL;
+
+ if (copy_from_user(&dci_support, (void __user *)ioarg,
+ sizeof(struct diag_dci_peripherals_t)))
+ return -EFAULT;
+
+ result = diag_dci_get_support_list(&dci_support);
+ if (result == DIAG_DCI_NO_ERROR)
+ if (copy_to_user((void __user *)ioarg, &dci_support,
+ sizeof(struct diag_dci_peripherals_t)))
+ return -EFAULT;
+
+ return result;
+}
+
+static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
+{
+ uint8_t hdlc_support;
+ struct diag_md_session_t *session_info = NULL;
+
+ session_info = diag_md_session_get_pid(current->tgid);
+ if (copy_from_user(&hdlc_support, (void __user *)ioarg,
+ sizeof(uint8_t)))
+ return -EFAULT;
+ mutex_lock(&driver->hdlc_disable_mutex);
+ if (session_info) {
+ mutex_lock(&driver->md_session_lock);
+ session_info->hdlc_disabled = hdlc_support;
+ mutex_unlock(&driver->md_session_lock);
+ } else
+ driver->hdlc_disabled = hdlc_support;
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+ return 0;
+}
+
+static int diag_ioctl_register_callback(unsigned long ioarg)
+{
+ int err = 0;
+ struct diag_callback_reg_t reg;
+
+ if (copy_from_user(&reg, (void __user *)ioarg,
+ sizeof(struct diag_callback_reg_t))) {
+ return -EFAULT;
+ }
+
+ if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
+ pr_err("diag: In %s, invalid proc %d for callback registration\n",
+ __func__, reg.proc);
+ return -EINVAL;
+ }
+
+ if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
+ return -EIO;
+
+ return err;
+}
+
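+/*
+ * diag_cmd_register_tbl - copy a userspace command registration table and
+ * add each entry to the apps registration list for the calling process.
+ */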
+static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
+{
+ int i;
+ int err = 0;
+ uint32_t count = 0;
+ struct diag_cmd_reg_entry_t *entries = NULL;
+ const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
+
+ if (!reg_tbl) {
+ pr_err("diag: In %s, invalid registration table\n", __func__);
+ return -EINVAL;
+ }
+
+ count = reg_tbl->count;
+ if ((UINT_MAX / entry_len) < count) {
+ pr_warn("diag: In %s, possible integer overflow.\n", __func__);
+ return -EFAULT;
+ }
+
+ entries = kzalloc(count * entry_len, GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
+ if (err) {
+ pr_err("diag: In %s, error copying data from userspace, err: %d\n",
+ __func__, err);
+ kfree(entries);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
+ if (err) {
+ pr_err("diag: In %s, unable to register command, err: %d\n",
+ __func__, err);
+ break;
+ }
+ }
+
+ kfree(entries);
+ return err;
+}
+
+static int diag_ioctl_cmd_reg(unsigned long ioarg)
+{
+ struct diag_cmd_reg_tbl_t reg_tbl;
+
+ if (copy_from_user(&reg_tbl, (void __user *)ioarg,
+ sizeof(struct diag_cmd_reg_tbl_t))) {
+ return -EFAULT;
+ }
+
+ return diag_cmd_register_tbl(&reg_tbl);
+}
+
+static int diag_ioctl_cmd_dereg(void)
+{
+ diag_cmd_remove_reg_by_pid(current->tgid);
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of entries in the bind
+ * @params: the actual packet registrations
+ */
+struct diag_cmd_reg_tbl_compat_t {
+ char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+ uint32_t count;
+ compat_uptr_t entries;
+};
+
+static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
+{
+ struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
+ struct diag_cmd_reg_tbl_t reg_tbl;
+
+ if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
+ sizeof(struct diag_cmd_reg_tbl_compat_t))) {
+ return -EFAULT;
+ }
+
+ strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
+ MAX_SYNC_OBJ_NAME_SIZE);
+ reg_tbl.count = reg_tbl_compat.count;
+ reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
+ (uintptr_t)reg_tbl_compat.entries;
+
+ return diag_cmd_register_tbl(&reg_tbl);
+}
+
+long diagchar_compat_ioctl(struct file *filp,
+ unsigned int iocmd, unsigned long ioarg)
+{
+ int result = -EINVAL;
+ int client_id = 0;
+ uint16_t delayed_rsp_id = 0;
+ uint16_t remote_dev;
+ struct diag_dci_client_tbl *dci_client = NULL;
+ struct diag_logging_mode_param_t mode_param;
+
+ switch (iocmd) {
+ case DIAG_IOCTL_COMMAND_REG:
+ result = diag_ioctl_cmd_reg_compat(ioarg);
+ break;
+ case DIAG_IOCTL_COMMAND_DEREG:
+ result = diag_ioctl_cmd_dereg();
+ break;
+ case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+ delayed_rsp_id = diag_get_next_delayed_rsp_id();
+ if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+ sizeof(uint16_t)))
+ result = -EFAULT;
+ else
+ result = 0;
+ break;
+ case DIAG_IOCTL_DCI_REG:
+ result = diag_ioctl_dci_reg(ioarg);
+ break;
+ case DIAG_IOCTL_DCI_DEINIT:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ dci_client = diag_dci_get_client_entry(client_id);
+ if (!dci_client) {
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NOT_SUPPORTED;
+ }
+ result = diag_dci_deinit_client(dci_client);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_SUPPORT:
+ result = diag_ioctl_dci_support(ioarg);
+ break;
+ case DIAG_IOCTL_DCI_HEALTH_STATS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_health_stats(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_LOG_STATUS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_log_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_EVENT_STATUS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_event_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_CLEAR_LOGS:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ result = diag_dci_clear_log_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user(&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ result = diag_dci_clear_event_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_LSM_DEINIT:
+ result = diag_ioctl_lsm_deinit();
+ break;
+ case DIAG_IOCTL_SWITCH_LOGGING:
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ mutex_lock(&driver->diagchar_mutex);
+ result = diag_switch_logging(&mode_param);
+ mutex_unlock(&driver->diagchar_mutex);
+ break;
+ case DIAG_IOCTL_REMOTE_DEV:
+ remote_dev = diag_get_remote_device_mask();
+ if (copy_to_user((void __user *)ioarg, &remote_dev,
+ sizeof(uint16_t)))
+ result = -EFAULT;
+ else
+ result = 1;
+ break;
+ case DIAG_IOCTL_VOTE_REAL_TIME:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_vote_real_time(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_GET_REAL_TIME:
+ result = diag_ioctl_get_real_time(ioarg);
+ break;
+ case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+ result = diag_ioctl_set_buffering_mode(ioarg);
+ break;
+ case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+ result = diag_ioctl_peripheral_drain_immediate(ioarg);
+ break;
+ case DIAG_IOCTL_REGISTER_CALLBACK:
+ result = diag_ioctl_register_callback(ioarg);
+ break;
+ case DIAG_IOCTL_HDLC_TOGGLE:
+ result = diag_ioctl_hdlc_toggle(ioarg);
+ break;
+ }
+ return result;
+}
+#endif
+
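+/*
+ * diagchar_ioctl - native ioctl entry point. The handlers mirror
+ * diagchar_compat_ioctl() above; the compat path differs only in
+ * translating the 32-bit command registration table layout.
+ */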
+long diagchar_ioctl(struct file *filp,
+ unsigned int iocmd, unsigned long ioarg)
+{
+ int result = -EINVAL;
+ int client_id = 0;
+ uint16_t delayed_rsp_id;
+ uint16_t remote_dev;
+ struct diag_dci_client_tbl *dci_client = NULL;
+ struct diag_logging_mode_param_t mode_param;
+
+ switch (iocmd) {
+ case DIAG_IOCTL_COMMAND_REG:
+ result = diag_ioctl_cmd_reg(ioarg);
+ break;
+ case DIAG_IOCTL_COMMAND_DEREG:
+ result = diag_ioctl_cmd_dereg();
+ break;
+ case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+ delayed_rsp_id = diag_get_next_delayed_rsp_id();
+ if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+ sizeof(uint16_t)))
+ result = -EFAULT;
+ else
+ result = 0;
+ break;
+ case DIAG_IOCTL_DCI_REG:
+ result = diag_ioctl_dci_reg(ioarg);
+ break;
+ case DIAG_IOCTL_DCI_DEINIT:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ dci_client = diag_dci_get_client_entry(client_id);
+ if (!dci_client) {
+ mutex_unlock(&driver->dci_mutex);
+ return DIAG_DCI_NOT_SUPPORTED;
+ }
+ result = diag_dci_deinit_client(dci_client);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_SUPPORT:
+ result = diag_ioctl_dci_support(ioarg);
+ break;
+ case DIAG_IOCTL_DCI_HEALTH_STATS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_health_stats(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_LOG_STATUS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_log_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_EVENT_STATUS:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_dci_event_status(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_CLEAR_LOGS:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ result = diag_dci_clear_log_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+ mutex_lock(&driver->dci_mutex);
+ if (copy_from_user(&client_id, (void __user *)ioarg,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ return -EFAULT;
+ }
+ result = diag_dci_clear_event_mask(client_id);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_LSM_DEINIT:
+ result = diag_ioctl_lsm_deinit();
+ break;
+ case DIAG_IOCTL_SWITCH_LOGGING:
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ mutex_lock(&driver->diagchar_mutex);
+ result = diag_switch_logging(&mode_param);
+ mutex_unlock(&driver->diagchar_mutex);
+ break;
+ case DIAG_IOCTL_REMOTE_DEV:
+ remote_dev = diag_get_remote_device_mask();
+ if (copy_to_user((void __user *)ioarg, &remote_dev,
+ sizeof(uint16_t)))
+ result = -EFAULT;
+ else
+ result = 1;
+ break;
+ case DIAG_IOCTL_VOTE_REAL_TIME:
+ mutex_lock(&driver->dci_mutex);
+ result = diag_ioctl_vote_real_time(ioarg);
+ mutex_unlock(&driver->dci_mutex);
+ break;
+ case DIAG_IOCTL_GET_REAL_TIME:
+ result = diag_ioctl_get_real_time(ioarg);
+ break;
+ case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+ result = diag_ioctl_set_buffering_mode(ioarg);
+ break;
+ case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+ result = diag_ioctl_peripheral_drain_immediate(ioarg);
+ break;
+ case DIAG_IOCTL_REGISTER_CALLBACK:
+ result = diag_ioctl_register_callback(ioarg);
+ break;
+ case DIAG_IOCTL_HDLC_TOGGLE:
+ result = diag_ioctl_hdlc_toggle(ioarg);
+ break;
+ }
+ return result;
+}
+
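+/*
+ * diag_process_apps_data_hdlc - HDLC encode an apps packet and aggregate
+ * it into the shared hdlc_data buffer. The buffer is flushed to the mux
+ * layer when it cannot hold the worst case encoded size or when the packet
+ * is a response, then a fresh buffer is taken from the HDLC mempool.
+ */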
+static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
+ int pkt_type)
+{
+ int err = 0;
+ int ret = PKT_DROP;
+ struct diag_apps_data_t *data = &hdlc_data;
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+ /*
+ * The maximum encoded size of the buffer can be at most twice the
+ * length of the packet. Add three bytes for the footer - 16 bit CRC
+ * (2 bytes) + delimiter (1 byte).
+ */
+ const uint32_t max_encoded_size = ((2 * len) + 3);
+
+ if (!buf || len <= 0) {
+ pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+ __func__, buf, len);
+ return -EIO;
+ }
+
+ if (max_encoded_size > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err_ratelimited("diag: In %s, encoded data size %d is larger than the buffer size %d\n",
+ __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
+ return -EBADMSG;
+ }
+
+ send.state = DIAG_STATE_START;
+ send.pkt = buf;
+ send.last = (void *)(buf + len - 1);
+ send.terminate = 1;
+
+ if (!data->buf)
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+
+ if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+ }
+
+ enc.dest = data->buf + data->len;
+ enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
+ diag_hdlc_encode(&send, &enc);
+
+ /*
+ * This is to check if after HDLC encoding, we are still within
+ * the limits of aggregation buffer. If not, we write out the
+ * current buffer and start aggregation in a newly allocated
+ * buffer.
+ */
+ if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
+ DIAG_MAX_HDLC_BUF_SIZE)) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+
+ enc.dest = data->buf + data->len;
+ enc.dest_last = (void *)(data->buf + data->len +
+ max_encoded_size);
+ diag_hdlc_encode(&send, &enc);
+ }
+
+ data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
+ DIAG_MAX_HDLC_BUF_SIZE) ?
+ ((uintptr_t)enc.dest - (uintptr_t)data->buf) :
+ DIAG_MAX_HDLC_BUF_SIZE;
+
+ if (pkt_type == DATA_TYPE_RESPONSE) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ }
+
+ return PKT_ALLOC;
+
+fail_free_buf:
+ diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+ data->buf = NULL;
+ data->len = 0;
+
+fail_ret:
+ return ret;
+}
+
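+/*
+ * diag_process_apps_data_non_hdlc - frame an apps packet without HDLC
+ * encoding and aggregate it into non_hdlc_data. Each frame is laid out as:
+ *
+ *   | 0x7E | version (1 byte) | length (2 bytes) | payload | 0x7E |
+ *
+ * where the leading fields come from struct diag_pkt_frame_t and the
+ * trailing CONTROL_CHAR (0x7E) terminates the frame.
+ */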
+static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
+ int pkt_type)
+{
+ int err = 0;
+ int ret = PKT_DROP;
+ struct diag_pkt_frame_t header;
+ struct diag_apps_data_t *data = &non_hdlc_data;
+ /*
+ * The maximum packet size, when the data is non hdlc encoded is equal
+ * to the size of the packet frame header and the length. Add 1 for the
+ * delimiter 0x7E at the end.
+ */
+ const uint32_t max_pkt_size = sizeof(header) + len + 1;
+
+ if (!buf || len <= 0) {
+ pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+ __func__, buf, len);
+ return -EIO;
+ }
+
+ if (!data->buf) {
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+ }
+
+ if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING,
+ POOL_TYPE_HDLC);
+ if (!data->buf) {
+ ret = PKT_DROP;
+ goto fail_ret;
+ }
+ }
+
+ header.start = CONTROL_CHAR;
+ header.version = 1;
+ header.length = len;
+ memcpy(data->buf + data->len, &header, sizeof(header));
+ data->len += sizeof(header);
+ memcpy(data->buf + data->len, buf, len);
+ data->len += len;
+ *(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
+ data->len += sizeof(uint8_t);
+ if (pkt_type == DATA_TYPE_RESPONSE) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+ data->ctxt);
+ if (err) {
+ ret = -EIO;
+ goto fail_free_buf;
+ }
+ data->buf = NULL;
+ data->len = 0;
+ }
+
+ return PKT_ALLOC;
+
+fail_free_buf:
+ diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+ data->buf = NULL;
+ data->len = 0;
+
+fail_ret:
+ return ret;
+}
+
+static int diag_user_process_dci_data(const char __user *buf, int len)
+{
+ int err = 0;
+ const int mempool = POOL_TYPE_USER;
+ unsigned char *user_space_data = NULL;
+
+ if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ user_space_data = diagmem_alloc(driver, len, mempool);
+ if (!user_space_data)
+ return -ENOMEM;
+
+ err = copy_from_user(user_space_data, buf, len);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
+ __func__, err);
+ err = DIAG_DCI_SEND_DATA_FAIL;
+ goto fail;
+ }
+
+ err = diag_process_dci_transaction(user_space_data, len);
+fail:
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return err;
+}
+
+static int diag_user_process_dci_apps_data(const char __user *buf, int len,
+ int pkt_type)
+{
+ int err = 0;
+ const int mempool = POOL_TYPE_COPY;
+ unsigned char *user_space_data = NULL;
+
+ if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
+ if (!pkt_type) {
+ pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+ __func__, pkt_type);
+ return -EBADMSG;
+ }
+
+ user_space_data = diagmem_alloc(driver, len, mempool);
+ if (!user_space_data)
+ return -ENOMEM;
+
+ err = copy_from_user(user_space_data, buf, len);
+ if (err) {
+ pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+ __func__, err);
+ goto fail;
+ }
+
+ diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
+fail:
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return err;
+}
+
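+/*
+ * diag_user_process_raw_data - handle USER_SPACE_RAW_DATA_TYPE writes. The
+ * payload may start with a 4 byte proc token selecting a remote processor;
+ * local packets are run through diag_process_apps_pkt() while remote ones
+ * are forwarded over the bridge.
+ */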
+static int diag_user_process_raw_data(const char __user *buf, int len)
+{
+ int err = 0;
+ int ret = 0;
+ int token_offset = 0;
+ int remote_proc = 0;
+ const int mempool = POOL_TYPE_COPY;
+ unsigned char *user_space_data = NULL;
+ struct diag_md_session_t *info = NULL;
+
+ if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ user_space_data = diagmem_alloc(driver, len, mempool);
+ if (!user_space_data)
+ return -ENOMEM;
+
+ err = copy_from_user(user_space_data, buf, len);
+ if (err) {
+ pr_err("diag: copy failed for user space data\n");
+ goto fail;
+ }
+
+ /* Check for proc_type */
+ remote_proc = diag_get_remote(*(int *)user_space_data);
+ if (remote_proc) {
+ token_offset = sizeof(int);
+ if (len <= MIN_SIZ_ALLOW) {
+ pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
+ __func__, len);
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return -EBADMSG;
+ }
+ len -= sizeof(int);
+ }
+ if (driver->mask_check) {
+ if (!mask_request_validate(user_space_data +
+ token_offset)) {
+ pr_alert("diag: invalid mask request\n");
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return -EFAULT;
+ }
+ }
+ if (remote_proc) {
+ ret = diag_send_raw_data_remote(remote_proc,
+ (void *)(user_space_data + token_offset),
+ len, USER_SPACE_RAW_DATA);
+ if (ret) {
+ pr_err("diag: Error sending data to remote proc %d, err: %d\n",
+ remote_proc, ret);
+ }
+ } else {
+ wait_event_interruptible(driver->wait_q,
+ (driver->in_busy_pktdata == 0));
+ info = diag_md_session_get_pid(current->tgid);
+ ret = diag_process_apps_pkt(user_space_data, len, info);
+ if (ret == 1)
+ diag_send_error_rsp((void *)(user_space_data), len);
+ }
+fail:
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ return ret;
+}
+
+static int diag_user_process_userspace_data(const char __user *buf, int len)
+{
+ int err = 0;
+ int max_retries = 3;
+ int retry_count = 0;
+ int remote_proc = 0;
+ int token_offset = 0;
+ struct diag_md_session_t *session_info = NULL;
+ uint8_t hdlc_disabled;
+
+ if (!buf || len <= 0 || len > USER_SPACE_DATA) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ do {
+ if (!driver->user_space_data_busy)
+ break;
+ retry_count++;
+ usleep_range(10000, 10100);
+ } while (retry_count < max_retries);
+
+ if (driver->user_space_data_busy)
+ return -EAGAIN;
+
+ err = copy_from_user(driver->user_space_data_buf, buf, len);
+ if (err) {
+ pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
+ __func__, err);
+ return -EIO;
+ }
+
+ /* Check for proc_type */
+ remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
+ if (remote_proc) {
+ if (len <= MIN_SIZ_ALLOW) {
+ pr_err("diag: Integer underflow in %s, payload size: %d\n",
+ __func__, len);
+ return -EBADMSG;
+ }
+ token_offset = sizeof(int);
+ len -= sizeof(int);
+ }
+
+ /* Check masks for On-Device logging */
+ if (driver->mask_check) {
+ if (!mask_request_validate(driver->user_space_data_buf +
+ token_offset)) {
+ pr_alert("diag: invalid mask request\n");
+ return -EFAULT;
+ }
+ }
+
+ /* send masks to local processor now */
+ if (!remote_proc) {
+ session_info = diag_md_session_get_pid(current->tgid);
+ if (!session_info) {
+ pr_err("diag: In %s, request came from invalid md session, pid: %d\n",
+ __func__, current->tgid);
+ return -EINVAL;
+ }
+ hdlc_disabled = session_info->hdlc_disabled;
+ if (!hdlc_disabled)
+ diag_process_hdlc_pkt((void *)
+ (driver->user_space_data_buf),
+ len, session_info);
+ else
+ diag_process_non_hdlc_pkt((char *)
+ (driver->user_space_data_buf),
+ len, session_info);
+ return 0;
+ }
+
+ err = diag_process_userspace_remote(remote_proc,
+ driver->user_space_data_buf +
+ token_offset, len);
+ if (err) {
+ driver->user_space_data_busy = 0;
+ pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
+ remote_proc, err);
+ }
+
+ return err;
+}
+
+static int diag_user_process_apps_data(const char __user *buf, int len,
+ int pkt_type)
+{
+ int ret = 0;
+ int stm_size = 0;
+ const int mempool = POOL_TYPE_COPY;
+ unsigned char *user_space_data = NULL;
+ struct diag_md_session_t *session_info = NULL;
+ uint8_t hdlc_disabled;
+
+ if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
+ pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+ __func__, buf, len);
+ return -EBADMSG;
+ }
+
+ switch (pkt_type) {
+ case DATA_TYPE_EVENT:
+ case DATA_TYPE_F3:
+ case DATA_TYPE_LOG:
+ case DATA_TYPE_RESPONSE:
+ case DATA_TYPE_DELAYED_RESPONSE:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+ __func__, pkt_type);
+ return -EBADMSG;
+ }
+
+ user_space_data = diagmem_alloc(driver, len, mempool);
+ if (!user_space_data) {
+ diag_record_stats(pkt_type, PKT_DROP);
+ return -ENOMEM;
+ }
+
+ ret = copy_from_user(user_space_data, buf, len);
+ if (ret) {
+ pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+ __func__, ret);
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+ diag_record_stats(pkt_type, PKT_DROP);
+ return -EBADMSG;
+ }
+
+ if (driver->stm_state[APPS_DATA] &&
+ (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
+ stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
+ len);
+ if (stm_size == 0) {
+ pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
+ __func__);
+ }
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+
+ return 0;
+ }
+
+ mutex_lock(&apps_data_mutex);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ if (session_info)
+ hdlc_disabled = session_info->hdlc_disabled;
+ else
+ hdlc_disabled = driver->hdlc_disabled;
+ if (hdlc_disabled)
+ ret = diag_process_apps_data_non_hdlc(user_space_data, len,
+ pkt_type);
+ else
+ ret = diag_process_apps_data_hdlc(user_space_data, len,
+ pkt_type);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ mutex_unlock(&apps_data_mutex);
+
+ diagmem_free(driver, user_space_data, mempool);
+ user_space_data = NULL;
+
+ check_drain_timer();
+
+ if (ret == PKT_DROP)
+ diag_record_stats(pkt_type, PKT_DROP);
+ else if (ret == PKT_ALLOC)
+ diag_record_stats(pkt_type, PKT_ALLOC);
+ else
+ return ret;
+
+ return 0;
+}
+
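+/*
+ * diagchar_read - hand pending data to the client. data_ready[index] is a
+ * bitmask of pending types; the first matching type is copied out,
+ * prefixed by a 4 byte type header, and its bit is cleared. DCI data is
+ * handled last, outside diagchar_mutex, since it takes dci_mutex instead.
+ */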
+static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct diag_dci_client_tbl *entry;
+ struct list_head *start, *temp;
+ int index = -1, i = 0, ret = 0;
+ int data_type;
+ int copy_dci_data = 0;
+ int exit_stat = 0;
+ int write_len = 0;
+ struct diag_md_session_t *session_info = NULL;
+
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid == current->tgid)
+ index = i;
+
+ if (index == -1) {
+ pr_err("diag: Client PID not found in table\n");
+ return -EINVAL;
+ }
+ if (!buf) {
+ pr_err("diag: bad address from user side\n");
+ return -EFAULT;
+ }
+ wait_event_interruptible(driver->wait_q, driver->data_ready[index]);
+
+ mutex_lock(&driver->diagchar_mutex);
+
+ if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
+ (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+ driver->logging_mode == DIAG_MULTI_MODE)) {
+ pr_debug("diag: process woken up\n");
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
+ driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+ if (ret == -EFAULT)
+ goto exit;
+ /* Placeholder for the number of data fields */
+ ret += sizeof(int);
+ session_info = diag_md_session_get_pid(current->tgid);
+ exit_stat = diag_md_copy_to_user(buf, &ret, count,
+ session_info);
+ goto exit;
+ } else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
+ /* In case, the thread wakes up and the logging mode is not
+ * memory device any more, the condition needs to be cleared.
+ */
+ driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ }
+
+ if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
+ data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
+ driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+ COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+ if (ret == -EFAULT)
+ goto exit;
+
+ session_info = diag_md_session_get_pid(current->tgid);
+ if (session_info) {
+ COPY_USER_SPACE_OR_ERR(buf+4,
+ session_info->hdlc_disabled,
+ sizeof(uint8_t));
+ if (ret == -EFAULT)
+ goto exit;
+ }
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DEINIT_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DEINIT_TYPE;
+ COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+ if (ret == -EFAULT)
+ goto exit;
+ driver->data_ready[index] ^= DEINIT_TYPE;
+ mutex_unlock(&driver->diagchar_mutex);
+ diag_remove_client_entry(file);
+ return ret;
+ }
+
+ if (driver->data_ready[index] & MSG_MASKS_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+ if (ret == -EFAULT)
+ goto exit;
+ write_len = diag_copy_to_user_msg_mask(buf + ret, count,
+ session_info);
+ if (write_len > 0)
+ ret += write_len;
+ driver->data_ready[index] ^= MSG_MASKS_TYPE;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+ if (ret == -EFAULT)
+ goto exit;
+
+ if (session_info && session_info->event_mask &&
+ session_info->event_mask->ptr) {
+ COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+ *(session_info->event_mask->ptr),
+ session_info->event_mask->mask_len);
+ if (ret == -EFAULT)
+ goto exit;
+ } else {
+ COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+ *(event_mask.ptr),
+ event_mask.mask_len);
+ if (ret == -EFAULT)
+ goto exit;
+ }
+ driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & LOG_MASKS_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+ if (ret == -EFAULT)
+ goto exit;
+
+ write_len = diag_copy_to_user_log_mask(buf + ret, count,
+ session_info);
+ if (write_len > 0)
+ ret += write_len;
+ driver->data_ready[index] ^= LOG_MASKS_TYPE;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & PKT_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & PKT_TYPE;
+ COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(data_type));
+ if (ret == -EFAULT)
+ goto exit;
+
+ COPY_USER_SPACE_OR_ERR(buf + sizeof(data_type),
+ *(driver->apps_req_buf),
+ driver->apps_req_buf_len);
+ if (ret == -EFAULT)
+ goto exit;
+ driver->data_ready[index] ^= PKT_TYPE;
+ driver->in_busy_pktdata = 0;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DCI_PKT_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DCI_PKT_TYPE;
+ COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+ if (ret == -EFAULT)
+ goto exit;
+
+ COPY_USER_SPACE_OR_ERR(buf+4, *(driver->dci_pkt_buf),
+ driver->dci_pkt_length);
+ if (ret == -EFAULT)
+ goto exit;
+
+ driver->data_ready[index] ^= DCI_PKT_TYPE;
+ driver->in_busy_dcipktdata = 0;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
+ COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+ if (ret == -EFAULT)
+ goto exit;
+
+ COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
+ if (ret == -EFAULT)
+ goto exit;
+
+ COPY_USER_SPACE_OR_ERR(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
+ event_mask_composite), DCI_EVENT_MASK_SIZE);
+ if (ret == -EFAULT)
+ goto exit;
+
+ driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+ goto exit;
+ }
+
+ if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
+ COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+ if (ret == -EFAULT)
+ goto exit;
+
+ COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
+ if (ret == -EFAULT)
+ goto exit;
+
+ COPY_USER_SPACE_OR_ERR(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
+ log_mask_composite), DCI_LOG_MASK_SIZE);
+ if (ret == -EFAULT)
+ goto exit;
+ driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+ goto exit;
+ }
+
+exit:
+ mutex_unlock(&driver->diagchar_mutex);
+ if (driver->data_ready[index] & DCI_DATA_TYPE) {
+ mutex_lock(&driver->dci_mutex);
+ /* Copy the type of data being passed */
+ data_type = driver->data_ready[index] & DCI_DATA_TYPE;
+ list_for_each_safe(start, temp, &driver->dci_client_list) {
+ entry = list_entry(start, struct diag_dci_client_tbl,
+ track);
+ if (entry->client->tgid != current->tgid)
+ continue;
+ if (!entry->in_service)
+ continue;
+ if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ goto end;
+ }
+ ret += sizeof(int);
+ if (copy_to_user(buf + ret, &entry->client_info.token,
+ sizeof(int))) {
+ mutex_unlock(&driver->dci_mutex);
+ goto end;
+ }
+ ret += sizeof(int);
+ copy_dci_data = 1;
+ exit_stat = diag_copy_dci(buf, count, entry, &ret);
+ mutex_lock(&driver->diagchar_mutex);
+ driver->data_ready[index] ^= DCI_DATA_TYPE;
+ mutex_unlock(&driver->diagchar_mutex);
+ if (exit_stat == 1) {
+ mutex_unlock(&driver->dci_mutex);
+ goto end;
+ }
+ }
+ mutex_unlock(&driver->dci_mutex);
+ goto end;
+ }
+end:
+ /*
+ * Flush any read that is currently pending on DCI data and
+ * command channels. This will ensure that the next read is not
+ * missed.
+ */
+ if (copy_dci_data) {
+ diag_ws_on_copy_complete(DIAG_WS_DCI);
+ flush_workqueue(driver->diag_dci_wq);
+ }
+ return ret;
+}
+
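+/*
+ * diagchar_write - every write from userspace starts with a 4 byte
+ * pkt_type header that selects the handler for the remaining payload:
+ *
+ *   | pkt_type (4 bytes) | payload (count - 4 bytes) |
+ *
+ * DCI traffic is accepted even when USB is disconnected; everything else
+ * is dropped in USB mode until the host connects.
+ */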
+static ssize_t diagchar_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int err = 0;
+ int pkt_type = 0;
+ int payload_len = 0;
+ const char __user *payload_buf = NULL;
+
+ /*
+ * The data coming from the user space should at least have the
+ * packet type header.
+ */
+ if (count < sizeof(int)) {
+ pr_err("diag: In %s, client is sending short data, len: %d\n",
+ __func__, (int)count);
+ return -EBADMSG;
+ }
+
+ err = copy_from_user((&pkt_type), buf, sizeof(int));
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
+ __func__, err);
+ return -EIO;
+ }
+
+ if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
+ if (!((pkt_type == DCI_DATA_TYPE) ||
+ (pkt_type == DCI_PKT_TYPE) ||
+ (pkt_type & DATA_TYPE_DCI_LOG) ||
+ (pkt_type & DATA_TYPE_DCI_EVENT))) {
+ pr_debug("diag: In %s, Dropping non DCI packet type\n",
+ __func__);
+ return -EIO;
+ }
+ }
+
+ payload_buf = buf + sizeof(int);
+ payload_len = count - sizeof(int);
+
+ if (pkt_type == DCI_PKT_TYPE)
+ return diag_user_process_dci_apps_data(payload_buf,
+ payload_len,
+ pkt_type);
+ else if (pkt_type == DCI_DATA_TYPE)
+ return diag_user_process_dci_data(payload_buf, payload_len);
+ else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
+ return diag_user_process_raw_data(payload_buf,
+ payload_len);
+ else if (pkt_type == USER_SPACE_DATA_TYPE)
+ return diag_user_process_userspace_data(payload_buf,
+ payload_len);
+ if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
+ err = diag_user_process_dci_apps_data(payload_buf, payload_len,
+ pkt_type);
+ if (pkt_type & DATA_TYPE_DCI_LOG)
+ pkt_type ^= DATA_TYPE_DCI_LOG;
+ if (pkt_type & DATA_TYPE_DCI_EVENT)
+ pkt_type ^= DATA_TYPE_DCI_EVENT;
+ /*
+ * Check if the log or event is selected even on the regular
+ * stream. If USB is not connected and we are not in memory
+ * device mode, we should not process these logs/events.
+ */
+ if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
+ !driver->usb_connected)
+ return err;
+ }
+
+ switch (pkt_type) {
+ case DATA_TYPE_EVENT:
+ case DATA_TYPE_F3:
+ case DATA_TYPE_LOG:
+ case DATA_TYPE_DELAYED_RESPONSE:
+ case DATA_TYPE_RESPONSE:
+ return diag_user_process_apps_data(payload_buf, payload_len,
+ pkt_type);
+ default:
+ pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+ __func__, pkt_type);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
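+/*
+ * Wakeup source accounting: ref_count tracks packets read from a
+ * transport but not yet copied to userspace, copy_count tracks completed
+ * copies. diag_ws_release() lets the device sleep only when both the DCI
+ * and MD counts drop to zero.
+ */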
+void diag_ws_init(void)
+{
+ driver->dci_ws.ref_count = 0;
+ driver->dci_ws.copy_count = 0;
+ spin_lock_init(&driver->dci_ws.lock);
+
+ driver->md_ws.ref_count = 0;
+ driver->md_ws.copy_count = 0;
+ spin_lock_init(&driver->md_ws.lock);
+}
+
+static void diag_stats_init(void)
+{
+ if (!driver)
+ return;
+
+ driver->msg_stats.alloc_count = 0;
+ driver->msg_stats.drop_count = 0;
+
+ driver->log_stats.alloc_count = 0;
+ driver->log_stats.drop_count = 0;
+
+ driver->event_stats.alloc_count = 0;
+ driver->event_stats.drop_count = 0;
+}
+
+void diag_ws_on_notify(void)
+{
+ /*
+ * Do not deal with reference count here as there can be spurious
+ * interrupts.
+ */
+ pm_stay_awake(driver->diag_dev);
+}
+
+void diag_ws_on_read(int type, int pkt_len)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ if (pkt_len > 0) {
+ ws_ref->ref_count++;
+ } else {
+ if (ws_ref->ref_count < 1) {
+ ws_ref->ref_count = 0;
+ ws_ref->copy_count = 0;
+ }
+ diag_ws_release();
+ }
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+void diag_ws_on_copy(int type)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ ws_ref->copy_count++;
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+void diag_ws_on_copy_fail(int type)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ ws_ref->ref_count--;
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+ diag_ws_release();
+}
+
+void diag_ws_on_copy_complete(int type)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ ws_ref->ref_count -= ws_ref->copy_count;
+ if (ws_ref->ref_count < 1)
+ ws_ref->ref_count = 0;
+ ws_ref->copy_count = 0;
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+ diag_ws_release();
+}
+
+void diag_ws_reset(int type)
+{
+ unsigned long flags;
+ struct diag_ws_ref_t *ws_ref = NULL;
+
+ switch (type) {
+ case DIAG_WS_DCI:
+ ws_ref = &driver->dci_ws;
+ break;
+ case DIAG_WS_MUX:
+ ws_ref = &driver->md_ws;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ws_ref->lock, flags);
+ ws_ref->ref_count = 0;
+ ws_ref->copy_count = 0;
+ spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+ diag_ws_release();
+}
+
+void diag_ws_release(void)
+{
+ if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
+ pm_relax(driver->diag_dev);
+}
+
+#ifdef DIAG_DEBUG
+static void diag_debug_init(void)
+{
+ diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
+ if (!diag_ipc_log)
+ pr_err("diag: Failed to create IPC logging context\n");
+ /*
+ * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
+ * to be logged to IPC
+ */
+ diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
+ DIAG_DEBUG_BRIDGE;
+}
+#else
+static void diag_debug_init(void)
+{
+}
+#endif
+
+static int diag_real_time_info_init(void)
+{
+ int i;
+
+ if (!driver)
+ return -EIO;
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ driver->real_time_mode[i] = 1;
+ driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
+ driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
+ }
+ driver->real_time_update_busy = 0;
+ driver->proc_active_mask = 0;
+ driver->diag_real_time_wq = create_singlethread_workqueue(
+ "diag_real_time_wq");
+ if (!driver->diag_real_time_wq)
+ return -ENOMEM;
+ INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
+ mutex_init(&driver->real_time_mutex);
+ return 0;
+}
+
+static const struct file_operations diagcharfops = {
+ .owner = THIS_MODULE,
+ .read = diagchar_read,
+ .write = diagchar_write,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = diagchar_compat_ioctl,
+#endif
+ .unlocked_ioctl = diagchar_ioctl,
+ .open = diagchar_open,
+ .release = diagchar_close
+};
+
+static int diagchar_setup_cdev(dev_t devno)
+{
+ int err;
+
+ cdev_init(driver->cdev, &diagcharfops);
+
+ driver->cdev->owner = THIS_MODULE;
+ driver->cdev->ops = &diagcharfops;
+
+ err = cdev_add(driver->cdev, devno, 1);
+
+ if (err) {
+ pr_info("diagchar cdev registration failed !\n");
+ return err;
+ }
+
+ driver->diagchar_class = class_create(THIS_MODULE, "diag");
+
+ if (IS_ERR(driver->diagchar_class)) {
+ pr_err("Error creating diagchar class.\n");
+ return PTR_ERR(driver->diagchar_class);
+ }
+
+ driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
+ (void *)driver, "diag");
+
+ if (IS_ERR(driver->diag_dev))
+ return PTR_ERR(driver->diag_dev);
+
+ driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
+ return 0;
+
+}
+
+static int diagchar_cleanup(void)
+{
+ if (driver) {
+ if (driver->cdev) {
+ /* TODO - Check if device exists before deleting */
+ device_destroy(driver->diagchar_class,
+ MKDEV(driver->major,
+ driver->minor_start));
+ cdev_del(driver->cdev);
+ }
+ if (!IS_ERR(driver->diagchar_class))
+ class_destroy(driver->diagchar_class);
+ kfree(driver);
+ }
+ return 0;
+}
+
+static int __init diagchar_init(void)
+{
+ dev_t dev;
+ int ret;
+
+ pr_debug("diagfwd initializing ..\n");
+ ret = 0;
+ driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
+ if (!driver)
+ return -ENOMEM;
+ kmemleak_not_leak(driver);
+
+ timer_in_progress = 0;
+ driver->delayed_rsp_id = 0;
+ driver->hdlc_disabled = 0;
+ driver->dci_state = DIAG_DCI_NO_ERROR;
+ setup_timer(&drain_timer, drain_timer_func, 1234);
+ driver->supports_sockets = 1;
+ driver->time_sync_enabled = 0;
+ driver->uses_time_api = 0;
+ driver->poolsize = poolsize;
+ driver->poolsize_hdlc = poolsize_hdlc;
+ driver->poolsize_dci = poolsize_dci;
+ driver->poolsize_user = poolsize_user;
+ /*
+ * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
+ * The number of buffers encompasses Diag data generated on
+ * the Apps processor + 1 for the responses generated exclusively on
+ * the Apps processor + data from data channels (4 channels per
+ * peripheral) + data from command channels (2 per peripheral)
+ */
+ diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
+ poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
+ driver->num_clients = max_clients;
+ driver->logging_mode = DIAG_USB_MODE;
+ driver->mask_check = 0;
+ driver->in_busy_pktdata = 0;
+ driver->in_busy_dcipktdata = 0;
+ driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
+ hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+ hdlc_data.len = 0;
+ non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+ non_hdlc_data.len = 0;
+ mutex_init(&driver->hdlc_disable_mutex);
+ mutex_init(&driver->diagchar_mutex);
+ mutex_init(&driver->diag_maskclear_mutex);
+ mutex_init(&driver->diag_file_mutex);
+ mutex_init(&driver->delayed_rsp_mutex);
+ mutex_init(&apps_data_mutex);
+ mutex_init(&driver->diagfwd_channel_mutex);
+ init_waitqueue_head(&driver->wait_q);
+ INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
+ INIT_WORK(&(driver->update_user_clients),
+ diag_update_user_client_work_fn);
+ INIT_WORK(&(driver->update_md_clients),
+ diag_update_md_client_work_fn);
+ diag_ws_init();
+ diag_stats_init();
+ diag_debug_init();
+ diag_md_session_init();
+
+ driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
+ driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+ if (!driver->incoming_pkt.data) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ kmemleak_not_leak(driver->incoming_pkt.data);
+ driver->incoming_pkt.processing = 0;
+ driver->incoming_pkt.read_len = 0;
+ driver->incoming_pkt.remaining = 0;
+ driver->incoming_pkt.total_len = 0;
+
+ ret = diag_real_time_info_init();
+ if (ret)
+ goto fail;
+ ret = diag_debugfs_init();
+ if (ret)
+ goto fail;
+ ret = diag_masks_init();
+ if (ret)
+ goto fail;
+ ret = diag_remote_init();
+ if (ret)
+ goto fail;
+ ret = diag_mux_init();
+ if (ret)
+ goto fail;
+ ret = diagfwd_init();
+ if (ret)
+ goto fail;
+ ret = diagfwd_cntl_init();
+ if (ret)
+ goto fail;
+ driver->dci_state = diag_dci_init();
+ ret = diagfwd_peripheral_init();
+ if (ret)
+ goto fail;
+ diagfwd_cntl_channel_init();
+ if (driver->dci_state == DIAG_DCI_NO_ERROR)
+ diag_dci_channel_init();
+ pr_debug("diagchar initializing ..\n");
+ driver->num = 1;
+ driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
+ strlcpy(driver->name, "diag", 5);
+ /* Get major number from kernel and initialize */
+ ret = alloc_chrdev_region(&dev, driver->minor_start,
+ driver->num, driver->name);
+ if (!ret) {
+ driver->major = MAJOR(dev);
+ driver->minor_start = MINOR(dev);
+ } else {
+ pr_err("diag: Major number not allocated\n");
+ goto fail;
+ }
+ driver->cdev = cdev_alloc();
+ ret = diagchar_setup_cdev(dev);
+ if (ret)
+ goto fail;
+
+ pr_debug("diagchar initialized now");
+ ret = diagfwd_bridge_init();
+ if (ret)
+ diagfwd_bridge_exit();
+ return 0;
+
+fail:
+ pr_err("diagchar is not initialized, ret: %d\n", ret);
+ diag_debugfs_cleanup();
+ diagchar_cleanup();
+ diag_mux_exit();
+ diagfwd_peripheral_exit();
+ diagfwd_bridge_exit();
+ diagfwd_exit();
+ diagfwd_cntl_exit();
+ diag_dci_exit();
+ diag_masks_exit();
+ diag_remote_exit();
+ return ret;
+
+}
+
+static void diagchar_exit(void)
+{
+ pr_info("diagchar exiting...\n");
+ diag_mempool_exit();
+ diag_mux_exit();
+ diagfwd_peripheral_exit();
+ diagfwd_exit();
+ diagfwd_cntl_exit();
+ diag_dci_exit();
+ diag_masks_exit();
+ diag_md_session_exit();
+ diag_remote_exit();
+ diag_debugfs_cleanup();
+ diagchar_cleanup();
+ pr_info("done diagchar exit\n");
+}
+
+module_init(diagchar_init);
+module_exit(diagchar_exit);
diff --git a/drivers/char/diag/diagchar_hdlc.c b/drivers/char/diag/diagchar_hdlc.c
new file mode 100644
index 0000000..6dd571f
--- /dev/null
+++ b/drivers/char/diag/diagchar_hdlc.c
@@ -0,0 +1,251 @@
+/* Copyright (c) 2008-2009, 2012-2014, 2016 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
+#include <linux/crc-ccitt.h>
+#include "diagchar_hdlc.h"
+#include "diagchar.h"
+
+MODULE_LICENSE("GPL v2");
+
+#define CRC_16_L_SEED 0xFFFF
+
+#define CRC_16_L_STEP(xx_crc, xx_c) \
+ crc_ccitt_byte(xx_crc, xx_c)
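+
+/*
+ * For reference: the HDLC trailer CRC is CRC-CCITT (LSB-first,
+ * polynomial 0x8408) seeded with 0xFFFF. diag_hdlc_encode() inverts
+ * the accumulated CRC before appending it, and crc_check() verifies a
+ * received packet by XORing the recomputed CRC with the same seed.
+ */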
+
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+ struct diag_hdlc_dest_type *enc)
+{
+ uint8_t *dest;
+ uint8_t *dest_last;
+ const uint8_t *src;
+ const uint8_t *src_last;
+ uint16_t crc;
+ unsigned char src_byte = 0;
+ enum diag_send_state_enum_type state;
+ unsigned int used = 0;
+
+ if (!src_desc || !enc)
+ return;
+
+ /* Copy parts to local variables. */
+ src = src_desc->pkt;
+ src_last = src_desc->last;
+ state = src_desc->state;
+ dest = enc->dest;
+ dest_last = enc->dest_last;
+
+ if (state == DIAG_STATE_START) {
+ crc = CRC_16_L_SEED;
+ state++;
+ } else {
+ /* Get a local copy of the CRC */
+ crc = enc->crc;
+ }
+
+ /* dest or dest_last may be NULL to trigger a
+ * state transition only.
+ */
+ if (dest && dest_last) {
+ /* This condition needs to include the possibility
+ * of 2 dest bytes for an escaped byte
+ */
+ while (src <= src_last && dest <= dest_last) {
+
+ src_byte = *src++;
+ if ((src_byte == CONTROL_CHAR) ||
+ (src_byte == ESC_CHAR)) {
+ /* If the escape character is not the
+ * last byte
+ */
+ if (dest != dest_last) {
+ crc = CRC_16_L_STEP(crc, src_byte);
+ *dest++ = ESC_CHAR;
+ used++;
+ *dest++ = src_byte ^ ESC_MASK;
+ used++;
+ } else {
+ src--;
+ break;
+ }
+ } else {
+ crc = CRC_16_L_STEP(crc, src_byte);
+ *dest++ = src_byte;
+ used++;
+ }
+ }
+
+ if (src > src_last) {
+ if (state == DIAG_STATE_BUSY) {
+ if (src_desc->terminate) {
+ crc = ~crc;
+ state++;
+ } else {
+ /* Done with fragment */
+ state = DIAG_STATE_COMPLETE;
+ }
+ }
+
+ while (dest <= dest_last && state >= DIAG_STATE_CRC1
+ && state < DIAG_STATE_TERM) {
+ /* Encode a byte of the CRC next */
+ src_byte = crc & 0xFF;
+
+ if ((src_byte == CONTROL_CHAR)
+ || (src_byte == ESC_CHAR)) {
+
+ if (dest != dest_last) {
+ *dest++ = ESC_CHAR;
+ used++;
+ *dest++ = src_byte ^ ESC_MASK;
+ used++;
+ crc >>= 8;
+ } else
+ break;
+ } else {
+
+ crc >>= 8;
+ *dest++ = src_byte;
+ used++;
+ }
+ state++;
+ }
+
+ if (state == DIAG_STATE_TERM) {
+ if (dest_last >= dest) {
+ *dest++ = CONTROL_CHAR;
+ used++;
+ state++; /* Complete */
+ }
+ }
+ }
+ }
+
+ /* Copy local variables back into the encode structure. */
+ enc->dest = dest;
+ enc->dest_last = dest_last;
+ enc->crc = crc;
+ src_desc->pkt = src;
+ src_desc->last = src_last;
+ src_desc->state = state;
+}
+
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc)
+{
+ uint8_t *src_ptr = NULL, *dest_ptr = NULL;
+ unsigned int src_length = 0, dest_length = 0;
+
+ unsigned int len = 0;
+ unsigned int i;
+ uint8_t src_byte;
+
+ int pkt_bnd = HDLC_INCOMPLETE;
+ int msg_start;
+
+ if (hdlc && hdlc->src_ptr && hdlc->dest_ptr &&
+ (hdlc->src_size > hdlc->src_idx) &&
+ (hdlc->dest_size > hdlc->dest_idx)) {
+
+ msg_start = (hdlc->src_idx == 0) ? 1 : 0;
+
+ src_ptr = hdlc->src_ptr;
+ src_ptr = &src_ptr[hdlc->src_idx];
+ src_length = hdlc->src_size - hdlc->src_idx;
+
+ dest_ptr = hdlc->dest_ptr;
+ dest_ptr = &dest_ptr[hdlc->dest_idx];
+ dest_length = hdlc->dest_size - hdlc->dest_idx;
+
+ for (i = 0; i < src_length; i++) {
+
+ src_byte = src_ptr[i];
+
+ if (hdlc->escaping) {
+ dest_ptr[len++] = src_byte ^ ESC_MASK;
+ hdlc->escaping = 0;
+ } else if (src_byte == ESC_CHAR) {
+ if (i == (src_length - 1)) {
+ hdlc->escaping = 1;
+ i++;
+ break;
+ }
+ dest_ptr[len++] = src_ptr[++i] ^ ESC_MASK;
+ } else if (src_byte == CONTROL_CHAR) {
+ if (msg_start && i == 0 && src_length > 1)
+ continue;
+ /* Byte 0x7E will be considered as end of
+ * packet
+ */
+ dest_ptr[len++] = src_byte;
+ i++;
+ pkt_bnd = HDLC_COMPLETE;
+ break;
+ } else {
+ dest_ptr[len++] = src_byte;
+ }
+
+ if (len >= dest_length) {
+ i++;
+ break;
+ }
+ }
+
+ hdlc->src_idx += i;
+ hdlc->dest_idx += len;
+ }
+
+ return pkt_bnd;
+}
+
+int crc_check(uint8_t *buf, uint16_t len)
+{
+ uint16_t crc = CRC_16_L_SEED;
+ uint8_t sent_crc[2] = {0, 0};
+
+ /*
+ * The minimum length of a valid incoming packet is 4: 1 byte of
+ * data plus a 3-byte trailer (2 CRC bytes and the 0x7E terminator).
+ */
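+ /* e.g. the smallest valid packet is [cmd][CRC low][CRC high][0x7E] */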
+ if (!buf || len < 4) {
+ pr_err_ratelimited("diag: In %s, invalid packet or length, buf: 0x%p, len: %d",
+ __func__, buf, len);
+ return -EIO;
+ }
+
+ /*
+ * Run CRC check for the original input. Skip the last 3 CRC
+ * bytes
+ */
+ crc = crc_ccitt(crc, buf, len-3);
+ crc ^= CRC_16_L_SEED;
+
+ /* Check the computed CRC against the original CRC bytes. */
+ sent_crc[0] = buf[len-3];
+ sent_crc[1] = buf[len-2];
+ if (crc != *((uint16_t *)sent_crc)) {
+ pr_debug("diag: In %s, crc mismatch. expected: %x, sent %x.\n",
+ __func__, crc, *((uint16_t *)sent_crc));
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/char/diag/diagchar_hdlc.h b/drivers/char/diag/diagchar_hdlc.h
new file mode 100644
index 0000000..357651eb
--- /dev/null
+++ b/drivers/char/diag/diagchar_hdlc.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2008-2009, 2012-2014, 2016 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_HDLC
+#define DIAGCHAR_HDLC
+
+enum diag_send_state_enum_type {
+ DIAG_STATE_START,
+ DIAG_STATE_BUSY,
+ DIAG_STATE_CRC1,
+ DIAG_STATE_CRC2,
+ DIAG_STATE_TERM,
+ DIAG_STATE_COMPLETE
+};
+
+struct diag_send_desc_type {
+ const void *pkt;
+ const void *last; /* Address of last byte to send. */
+ enum diag_send_state_enum_type state;
+ /* True if this fragment terminates the packet */
+ unsigned char terminate;
+};
+
+struct diag_hdlc_dest_type {
+ void *dest;
+ void *dest_last;
+ /* Below: internal use only */
+ uint16_t crc;
+};
+
+struct diag_hdlc_decode_type {
+ uint8_t *src_ptr;
+ unsigned int src_idx;
+ unsigned int src_size;
+ uint8_t *dest_ptr;
+ unsigned int dest_idx;
+ unsigned int dest_size;
+ int escaping;
+};
+
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+ struct diag_hdlc_dest_type *enc);
+
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc);
+
+int crc_check(uint8_t *buf, uint16_t len);
+
+#define ESC_CHAR 0x7D
+#define ESC_MASK 0x20
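+
+/*
+ * Escaping example: a payload byte equal to CONTROL_CHAR (0x7E) or
+ * ESC_CHAR is sent as ESC_CHAR followed by the byte XORed with
+ * ESC_MASK, so 0x7E is encoded as 0x7D 0x5E and 0x7D as 0x7D 0x5D.
+ */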
+
+#define HDLC_INCOMPLETE 0
+#define HDLC_COMPLETE 1
+
+#define HDLC_FOOTER_LEN 3
+#endif
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
new file mode 100644
index 0000000..e132f36
--- /dev/null
+++ b/drivers/char/diag/diagfwd.c
@@ -0,0 +1,1686 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/restart.h>
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diagchar_hdlc.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#include "diag_usb.h"
+#include "diag_mux.h"
+
+#define STM_CMD_VERSION_OFFSET 4
+#define STM_CMD_MASK_OFFSET 5
+#define STM_CMD_DATA_OFFSET 6
+#define STM_CMD_NUM_BYTES 7
+
+#define STM_RSP_SUPPORTED_INDEX 7
+#define STM_RSP_STATUS_INDEX 8
+#define STM_RSP_NUM_BYTES 9
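+
+/*
+ * STM command layout, for reference: bytes 0-3 carry the subsystem
+ * command header (0x4B, the subsystem id and a 16-bit subsystem
+ * command code), byte 4 is the version (must be 2), byte 5 is the
+ * processor mask and byte 6 is the STM command. The response echoes
+ * the request and adds the supported mask at byte 7 and the status
+ * mask at byte 8.
+ */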
+
+static int timestamp_switch;
+module_param(timestamp_switch, int, 0644);
+
+int wrap_enabled;
+uint16_t wrap_count;
+static struct diag_hdlc_decode_type *hdlc_decode;
+
+#define DIAG_NUM_COMMON_CMD 1
+static uint8_t common_cmds[DIAG_NUM_COMMON_CMD] = {
+ DIAG_CMD_LOG_ON_DMND
+};
+
+static uint8_t hdlc_timer_in_progress;
+
+/* Determine if this device uses a device tree */
+#ifdef CONFIG_OF
+static int has_device_tree(void)
+{
+ struct device_node *node;
+
+ node = of_find_node_by_path("/");
+ if (node) {
+ of_node_put(node);
+ return 1;
+ }
+ return 0;
+}
+#else
+static int has_device_tree(void)
+{
+ return 0;
+}
+#endif
+
+int chk_config_get_id(void)
+{
+ switch (socinfo_get_msm_cpu()) {
+ case MSM_CPU_8X60:
+ return APQ8060_TOOLS_ID;
+ case MSM_CPU_8960:
+ case MSM_CPU_8960AB:
+ return AO8960_TOOLS_ID;
+ case MSM_CPU_8064:
+ case MSM_CPU_8064AB:
+ case MSM_CPU_8064AA:
+ return APQ8064_TOOLS_ID;
+ case MSM_CPU_8930:
+ case MSM_CPU_8930AA:
+ case MSM_CPU_8930AB:
+ return MSM8930_TOOLS_ID;
+ case MSM_CPU_8974:
+ return MSM8974_TOOLS_ID;
+ case MSM_CPU_8625:
+ return MSM8625_TOOLS_ID;
+ case MSM_CPU_8084:
+ return APQ8084_TOOLS_ID;
+ case MSM_CPU_8916:
+ return MSM8916_TOOLS_ID;
+ case MSM_CPU_8939:
+ return MSM8939_TOOLS_ID;
+ case MSM_CPU_8994:
+ return MSM8994_TOOLS_ID;
+ case MSM_CPU_8226:
+ return APQ8026_TOOLS_ID;
+ case MSM_CPU_8909:
+ return MSM8909_TOOLS_ID;
+ case MSM_CPU_8992:
+ return MSM8992_TOOLS_ID;
+ case MSM_CPU_8996:
+ return MSM_8996_TOOLS_ID;
+ default:
+ if (driver->use_device_tree) {
+ if (machine_is_msm8974())
+ return MSM8974_TOOLS_ID;
+ else if (machine_is_apq8074())
+ return APQ8074_TOOLS_ID;
+ else
+ return 0;
+ } else {
+ return 0;
+ }
+ }
+}
+
+/*
+ * This will return TRUE for targets which support apps only mode and hence SSR.
+ * This applies to 8960 and newer targets.
+ */
+int chk_apps_only(void)
+{
+ if (driver->use_device_tree)
+ return 1;
+
+ switch (socinfo_get_msm_cpu()) {
+ case MSM_CPU_8960:
+ case MSM_CPU_8960AB:
+ case MSM_CPU_8064:
+ case MSM_CPU_8064AB:
+ case MSM_CPU_8064AA:
+ case MSM_CPU_8930:
+ case MSM_CPU_8930AA:
+ case MSM_CPU_8930AB:
+ case MSM_CPU_8627:
+ case MSM_CPU_9615:
+ case MSM_CPU_8974:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * This will return TRUE for targets which support apps as master.
+ * Thus, SW DLOAD and Mode Reset are supported on the apps processor.
+ * This applies to 8960 and newer targets.
+ */
+int chk_apps_master(void)
+{
+ if (driver->use_device_tree)
+ return 1;
+ else
+ return 0;
+}
+
+int chk_polling_response(void)
+{
+ if (!(driver->polling_reg_flag) && chk_apps_master())
+ /*
+ * If the apps processor is master and no other processor
+ * has registered to respond for polling
+ */
+ return 1;
+ else if (!(driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
+ driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+ (driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask))
+ /*
+ * If the apps processor is not the master and the modem
+ * is not up or we did not receive the feature masks from Modem
+ */
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * This function should be called if you feel that the logging process may
+ * need to be woken up. For instance, if the logging mode is
+ * MEMORY_DEVICE mode and there are no buffers available to read data
+ * from a data channel into, then this function should be called to
+ * determine if the logging process needs to be woken up.
+ */
+void chk_logging_wakeup(void)
+{
+ int i;
+ int j;
+ int pid = 0;
+
+ for (j = 0; j < NUM_MD_SESSIONS; j++) {
+ if (!driver->md_session_map[j])
+ continue;
+ pid = driver->md_session_map[j]->pid;
+
+ /* Find the index of the logging process */
+ for (i = 0; i < driver->num_clients; i++) {
+ if (driver->client_map[i].pid != pid)
+ continue;
+ if (driver->data_ready[i] & USER_SPACE_DATA_TYPE)
+ continue;
+ /*
+ * At very high logging rates a race condition can
+ * occur where the buffers containing the data from
+ * a channel are all in use, but the data_ready flag
+ * is cleared. In this case, the buffers never have
+ * their data read/logged. Detect and remedy this
+ * situation.
+ */
+ driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ pr_debug("diag: Force wakeup of logging process\n");
+ wake_up_interruptible(&driver->wait_q);
+ break;
+ }
+ /*
+ * Diag Memory Device is in normal mode. Check only the first
+ * index as all the indices point to the same session
+ * structure.
+ */
+ if ((driver->md_session_mask == DIAG_CON_ALL) && (j == 0))
+ break;
+ }
+}
+
+static void pack_rsp_and_send(unsigned char *buf, int len)
+{
+ int err;
+ int retry_count = 0;
+ uint32_t write_len = 0;
+ unsigned long flags;
+ unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+ struct diag_pkt_frame_t header;
+
+ if (!rsp_ptr || !buf)
+ return;
+
+ if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+ pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+ __func__, len, DIAG_MAX_RSP_SIZE);
+ return;
+ }
+
+ /*
+ * Keep trying till we get the buffer back. It should probably
+ * take one or two iterations. When this loops till UINT_MAX, it
+ * means we did not get a write complete for the previous
+ * response.
+ */
+ while (retry_count < UINT_MAX) {
+ if (!driver->rsp_buf_busy)
+ break;
+ /*
+ * Wait for some time and try again. The value 10000 was chosen
+ * empirically as an optimum value for USB to complete a write.
+ */
+ usleep_range(10000, 10100);
+ retry_count++;
+
+ /*
+ * There can be a race conditon that clears the data ready flag
+ * for responses. Make sure we don't miss previous wakeups for
+ * draining responses when we are in Memory Device Mode.
+ */
+ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+ driver->logging_mode == DIAG_MULTI_MODE)
+ chk_logging_wakeup();
+ }
+ if (driver->rsp_buf_busy) {
+ pr_err("diag: unable to get hold of response buffer\n");
+ return;
+ }
+
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 1;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+ header.start = CONTROL_CHAR;
+ header.version = 1;
+ header.length = len;
+ memcpy(rsp_ptr, &header, sizeof(header));
+ write_len += sizeof(header);
+ memcpy(rsp_ptr + write_len, buf, len);
+ write_len += len;
+ *(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
+ write_len += sizeof(uint8_t);
+
+ err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len,
+ driver->rsp_buf_ctxt);
+ if (err) {
+ pr_err("diag: In %s, unable to write to mux, err: %d\n",
+ __func__, err);
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 0;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+ }
+}
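+
+/*
+ * With HDLC disabled, responses use the raw framing built above: a
+ * diag_pkt_frame_t header (start byte 0x7E, version 1 and the payload
+ * length) followed by the payload and a trailing 0x7E. With HDLC
+ * enabled, encode_rsp_and_send() below runs the payload through
+ * diag_hdlc_encode() instead, escaping 0x7E/0x7D bytes and appending
+ * the CRC trailer.
+ */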
+
+static void encode_rsp_and_send(unsigned char *buf, int len)
+{
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+ unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+ int err, retry_count = 0;
+ unsigned long flags;
+
+ if (!rsp_ptr || !buf)
+ return;
+
+ if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+ pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+ __func__, len, DIAG_MAX_RSP_SIZE);
+ return;
+ }
+
+ /*
+ * Keep trying till we get the buffer back. It should probably
+ * take one or two iterations. When this loops till UINT_MAX, it
+ * means we did not get a write complete for the previous
+ * response.
+ */
+ while (retry_count < UINT_MAX) {
+ if (!driver->rsp_buf_busy)
+ break;
+ /*
+ * Wait for some time and try again. The value 10000 was chosen
+ * empirically as an optimum value for USB to complete a write.
+ */
+ usleep_range(10000, 10100);
+ retry_count++;
+
+ /*
+ * There can be a race condition that clears the data ready flag
+ * for responses. Make sure we don't miss previous wakeups for
+ * draining responses when we are in Memory Device Mode.
+ */
+ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+ driver->logging_mode == DIAG_MULTI_MODE)
+ chk_logging_wakeup();
+ }
+
+ if (driver->rsp_buf_busy) {
+ pr_err("diag: unable to get hold of response buffer\n");
+ return;
+ }
+
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 1;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+ send.state = DIAG_STATE_START;
+ send.pkt = buf;
+ send.last = (void *)(buf + len - 1);
+ send.terminate = 1;
+ enc.dest = rsp_ptr;
+ enc.dest_last = (void *)(rsp_ptr + DIAG_MAX_HDLC_BUF_SIZE - 1);
+ diag_hdlc_encode(&send, &enc);
+ driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
+ err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
+ driver->rsp_buf_ctxt);
+ if (err) {
+ pr_err("diag: In %s, Unable to write to device, err: %d\n",
+ __func__, err);
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 0;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+ }
+ memset(buf, '\0', DIAG_MAX_RSP_SIZE);
+}
+
+void diag_send_rsp(unsigned char *buf, int len)
+{
+ struct diag_md_session_t *session_info = NULL;
+ uint8_t hdlc_disabled;
+
+ session_info = diag_md_session_get_peripheral(APPS_DATA);
+ if (session_info)
+ hdlc_disabled = session_info->hdlc_disabled;
+ else
+ hdlc_disabled = driver->hdlc_disabled;
+
+ if (hdlc_disabled)
+ pack_rsp_and_send(buf, len);
+ else
+ encode_rsp_and_send(buf, len);
+}
+
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
+{
+ unsigned char *ptr = NULL;
+ unsigned char *temp = buf;
+ int *in_busy = NULL;
+ uint32_t *length = NULL;
+ uint32_t max_len = 0;
+
+ if (!buf || len == 0) {
+ pr_err("diag: In %s, Invalid ptr %pK and length %d\n",
+ __func__, buf, len);
+ return;
+ }
+
+ switch (type) {
+ case PKT_TYPE:
+ ptr = driver->apps_req_buf;
+ length = &driver->apps_req_buf_len;
+ max_len = DIAG_MAX_REQ_SIZE;
+ in_busy = &driver->in_busy_pktdata;
+ break;
+ case DCI_PKT_TYPE:
+ ptr = driver->dci_pkt_buf;
+ length = &driver->dci_pkt_length;
+ max_len = DCI_BUF_SIZE;
+ in_busy = &driver->in_busy_dcipktdata;
+ break;
+ default:
+ pr_err("diag: Invalid type %d in %s\n", type, __func__);
+ return;
+ }
+
+ mutex_lock(&driver->diagchar_mutex);
+ if (CHK_OVERFLOW(ptr, ptr, ptr + max_len, len)) {
+ memcpy(ptr, temp, len);
+ *length = len;
+ *in_busy = 1;
+ } else {
+ pr_alert("diag: In %s, no space for response packet, len: %d, type: %d\n",
+ __func__, len, type);
+ }
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_userspace_clients(unsigned int type)
+{
+ int i;
+
+ mutex_lock(&driver->diagchar_mutex);
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid != 0)
+ driver->data_ready[i] |= type;
+ wake_up_interruptible(&driver->wait_q);
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_md_clients(unsigned int type)
+{
+ int i, j;
+
+ mutex_lock(&driver->diagchar_mutex);
+ for (i = 0; i < NUM_MD_SESSIONS; i++) {
+ if (driver->md_session_map[i] != NULL)
+ for (j = 0; j < driver->num_clients; j++) {
+ if (driver->client_map[j].pid != 0 &&
+ driver->client_map[j].pid ==
+ driver->md_session_map[i]->pid) {
+ driver->data_ready[j] |= type;
+ break;
+ }
+ }
+ }
+ wake_up_interruptible(&driver->wait_q);
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
+void diag_update_sleeping_process(int process_id, int data_type)
+{
+ int i;
+
+ mutex_lock(&driver->diagchar_mutex);
+ for (i = 0; i < driver->num_clients; i++)
+ if (driver->client_map[i].pid == process_id) {
+ driver->data_ready[i] |= data_type;
+ break;
+ }
+ wake_up_interruptible(&driver->wait_q);
+ mutex_unlock(&driver->diagchar_mutex);
+}
+
+static int diag_send_data(struct diag_cmd_reg_t *entry, unsigned char *buf,
+ int len)
+{
+ if (!entry)
+ return -EIO;
+
+ if (entry->proc == APPS_DATA) {
+ diag_update_pkt_buffer(buf, len, PKT_TYPE);
+ diag_update_sleeping_process(entry->pid, PKT_TYPE);
+ return 0;
+ }
+
+ return diagfwd_write(entry->proc, TYPE_CMD, buf, len);
+}
+
+void diag_process_stm_mask(uint8_t cmd, uint8_t data_mask, int data_type)
+{
+ int status = 0;
+
+ if (data_type >= PERIPHERAL_MODEM && data_type <= PERIPHERAL_SENSORS) {
+ if (driver->feature[data_type].stm_support) {
+ status = diag_send_stm_state(data_type, cmd);
+ if (status == 0)
+ driver->stm_state[data_type] = cmd;
+ }
+ driver->stm_state_requested[data_type] = cmd;
+ } else if (data_type == APPS_DATA) {
+ driver->stm_state[data_type] = cmd;
+ driver->stm_state_requested[data_type] = cmd;
+ }
+}
+
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
+{
+ uint8_t version, mask, cmd;
+ uint8_t rsp_supported = 0;
+ uint8_t rsp_status = 0;
+ int i;
+
+ if (!buf || !dest_buf) {
+ pr_err("diag: Invalid pointers buf: %pK, dest_buf %pK in %s\n",
+ buf, dest_buf, __func__);
+ return -EIO;
+ }
+
+ version = *(buf + STM_CMD_VERSION_OFFSET);
+ mask = *(buf + STM_CMD_MASK_OFFSET);
+ cmd = *(buf + STM_CMD_DATA_OFFSET);
+
+ /*
+ * Check if command is valid. If the command is asking for
+ * status, then the processor mask field is to be ignored.
+ */
+ if ((version != 2) || (cmd > STATUS_STM) ||
+ ((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
+ /* Command is invalid. Send bad param message response */
+ dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+ for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+ dest_buf[i+1] = *(buf + i);
+ return STM_CMD_NUM_BYTES+1;
+ } else if (cmd != STATUS_STM) {
+ if (mask & DIAG_STM_MODEM)
+ diag_process_stm_mask(cmd, DIAG_STM_MODEM,
+ PERIPHERAL_MODEM);
+
+ if (mask & DIAG_STM_LPASS)
+ diag_process_stm_mask(cmd, DIAG_STM_LPASS,
+ PERIPHERAL_LPASS);
+
+ if (mask & DIAG_STM_WCNSS)
+ diag_process_stm_mask(cmd, DIAG_STM_WCNSS,
+ PERIPHERAL_WCNSS);
+
+ if (mask & DIAG_STM_SENSORS)
+ diag_process_stm_mask(cmd, DIAG_STM_SENSORS,
+ PERIPHERAL_SENSORS);
+ if (mask & DIAG_STM_WDSP)
+ diag_process_stm_mask(cmd, DIAG_STM_WDSP,
+ PERIPHERAL_WDSP);
+
+ if (mask & DIAG_STM_APPS)
+ diag_process_stm_mask(cmd, DIAG_STM_APPS, APPS_DATA);
+ }
+
+ for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+ dest_buf[i] = *(buf + i);
+
+ /* Set mask denoting which peripherals support STM */
+ if (driver->feature[PERIPHERAL_MODEM].stm_support)
+ rsp_supported |= DIAG_STM_MODEM;
+
+ if (driver->feature[PERIPHERAL_LPASS].stm_support)
+ rsp_supported |= DIAG_STM_LPASS;
+
+ if (driver->feature[PERIPHERAL_WCNSS].stm_support)
+ rsp_supported |= DIAG_STM_WCNSS;
+
+ if (driver->feature[PERIPHERAL_SENSORS].stm_support)
+ rsp_supported |= DIAG_STM_SENSORS;
+
+ if (driver->feature[PERIPHERAL_WDSP].stm_support)
+ rsp_supported |= DIAG_STM_WDSP;
+
+ rsp_supported |= DIAG_STM_APPS;
+
+ /* Set mask denoting STM state/status for each peripheral/APSS */
+ if (driver->stm_state[PERIPHERAL_MODEM])
+ rsp_status |= DIAG_STM_MODEM;
+
+ if (driver->stm_state[PERIPHERAL_LPASS])
+ rsp_status |= DIAG_STM_LPASS;
+
+ if (driver->stm_state[PERIPHERAL_WCNSS])
+ rsp_status |= DIAG_STM_WCNSS;
+
+ if (driver->stm_state[PERIPHERAL_SENSORS])
+ rsp_status |= DIAG_STM_SENSORS;
+
+ if (driver->stm_state[PERIPHERAL_WDSP])
+ rsp_status |= DIAG_STM_WDSP;
+
+ if (driver->stm_state[APPS_DATA])
+ rsp_status |= DIAG_STM_APPS;
+
+ dest_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
+ dest_buf[STM_RSP_STATUS_INDEX] = rsp_status;
+
+ return STM_RSP_NUM_BYTES;
+}
+
+int diag_process_time_sync_query_cmd(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ int write_len = 0;
+ struct diag_cmd_time_sync_query_req_t *req = NULL;
+ struct diag_cmd_time_sync_query_rsp_t rsp;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ req = (struct diag_cmd_time_sync_query_req_t *)src_buf;
+ rsp.header.cmd_code = req->header.cmd_code;
+ rsp.header.subsys_id = req->header.subsys_id;
+ rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+ rsp.version = req->version;
+ rsp.time_api = driver->uses_time_api;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len = sizeof(rsp);
+ return write_len;
+}
+
+int diag_process_time_sync_switch_cmd(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ uint8_t peripheral, status = 0;
+ struct diag_cmd_time_sync_switch_req_t *req = NULL;
+ struct diag_cmd_time_sync_switch_rsp_t rsp;
+ struct diag_ctrl_msg_time_sync time_sync_msg;
+ int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+ int err = 0, write_len = 0;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ req = (struct diag_cmd_time_sync_switch_req_t *)src_buf;
+ rsp.header.cmd_code = req->header.cmd_code;
+ rsp.header.subsys_id = req->header.subsys_id;
+ rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+ rsp.version = req->version;
+ rsp.time_api = req->time_api;
+ if ((req->version > 1) || (req->time_api > 1) ||
+ (req->persist_time > 0)) {
+ dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+ rsp.time_api_status = 0;
+ rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+ memcpy(dest_buf + 1, &rsp, sizeof(rsp));
+ write_len = sizeof(rsp) + 1;
+ timestamp_switch = 0;
+ return write_len;
+ }
+
+ time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+ time_sync_msg.ctrl_pkt_data_len = 5;
+ time_sync_msg.version = 1;
+ time_sync_msg.time_api = req->time_api;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg,
+ msg_size);
+ if (err && err != -ENODEV) {
+ pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ msg_size, err);
+ status |= (1 << peripheral);
+ }
+ }
+
+ driver->time_sync_enabled = 1;
+ driver->uses_time_api = req->time_api;
+
+ switch (req->time_api) {
+ case 0:
+ timestamp_switch = 0;
+ break;
+ case 1:
+ timestamp_switch = 1;
+ break;
+ default:
+ timestamp_switch = 0;
+ break;
+ }
+
+ rsp.time_api_status = status;
+ rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len = sizeof(rsp);
+ return write_len;
+}
+
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ int write_len = 0;
+ struct diag_log_on_demand_rsp_t header;
+
+ if (!driver->diagfwd_cntl[PERIPHERAL_MODEM] ||
+ !driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open ||
+ !driver->log_on_demand_support)
+ return 0;
+
+ if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+ pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+ __func__, src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ header.cmd_code = DIAG_CMD_LOG_ON_DMND;
+ header.log_code = *(uint16_t *)(src_buf + 1);
+ header.status = 1;
+ memcpy(dest_buf, &header, sizeof(struct diag_log_on_demand_rsp_t));
+ write_len += sizeof(struct diag_log_on_demand_rsp_t);
+
+ return write_len;
+}
+
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ int write_len = 0;
+ struct diag_pkt_header_t *header = NULL;
+ struct diag_cmd_ext_mobile_rsp_t rsp;
+
+ if (!src_buf || src_len != sizeof(*header) || !dest_buf ||
+ dest_len < sizeof(rsp))
+ return -EIO;
+
+ header = (struct diag_pkt_header_t *)src_buf;
+ rsp.header.cmd_code = header->cmd_code;
+ rsp.header.subsys_id = header->subsys_id;
+ rsp.header.subsys_cmd_code = header->subsys_cmd_code;
+ rsp.version = 2;
+ rsp.padding[0] = 0;
+ rsp.padding[1] = 0;
+ rsp.padding[2] = 0;
+ rsp.family = 0;
+ rsp.chip_id = (uint32_t)socinfo_get_id();
+
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ write_len += sizeof(rsp);
+
+ return write_len;
+}
+
+int diag_check_common_cmd(struct diag_pkt_header_t *header)
+{
+ int i;
+
+ if (!header)
+ return -EIO;
+
+ for (i = 0; i < DIAG_NUM_COMMON_CMD; i++) {
+ if (header->cmd_code == common_cmds[i])
+ return 1;
+ }
+
+ return 0;
+}
+
+static int diag_cmd_chk_stats(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ int payload = 0;
+ int write_len = 0;
+ struct diag_pkt_header_t *header = NULL;
+ struct diag_cmd_stats_rsp_t rsp;
+
+ if (!src_buf || src_len < sizeof(struct diag_pkt_header_t) ||
+ !dest_buf || dest_len < sizeof(rsp))
+ return -EINVAL;
+
+ header = (struct diag_pkt_header_t *)src_buf;
+
+ if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+ header->subsys_id != DIAG_SS_DIAG)
+ return -EINVAL;
+
+ switch (header->subsys_cmd_code) {
+ case DIAG_CMD_OP_GET_MSG_ALLOC:
+ payload = driver->msg_stats.alloc_count;
+ break;
+ case DIAG_CMD_OP_GET_MSG_DROP:
+ payload = driver->msg_stats.drop_count;
+ break;
+ case DIAG_CMD_OP_RESET_MSG_STATS:
+ diag_record_stats(DATA_TYPE_F3, PKT_RESET);
+ break;
+ case DIAG_CMD_OP_GET_LOG_ALLOC:
+ payload = driver->log_stats.alloc_count;
+ break;
+ case DIAG_CMD_OP_GET_LOG_DROP:
+ payload = driver->log_stats.drop_count;
+ break;
+ case DIAG_CMD_OP_RESET_LOG_STATS:
+ diag_record_stats(DATA_TYPE_LOG, PKT_RESET);
+ break;
+ case DIAG_CMD_OP_GET_EVENT_ALLOC:
+ payload = driver->event_stats.alloc_count;
+ break;
+ case DIAG_CMD_OP_GET_EVENT_DROP:
+ payload = driver->event_stats.drop_count;
+ break;
+ case DIAG_CMD_OP_RESET_EVENT_STATS:
+ diag_record_stats(DATA_TYPE_EVENT, PKT_RESET);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+ rsp.payload = payload;
+ write_len = sizeof(rsp);
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+
+ return write_len;
+}
+
+static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ struct diag_pkt_header_t *header = NULL;
+ struct diag_cmd_hdlc_disable_rsp_t rsp;
+ int write_len = 0;
+
+ if (!src_buf || src_len < sizeof(*header) ||
+ !dest_buf || dest_len < sizeof(rsp)) {
+ return -EIO;
+ }
+
+ header = (struct diag_pkt_header_t *)src_buf;
+ if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+ header->subsys_id != DIAG_SS_DIAG ||
+ header->subsys_cmd_code != DIAG_CMD_OP_HDLC_DISABLE) {
+ return -EINVAL;
+ }
+
+ memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+ rsp.framing_version = 1;
+ rsp.result = 0;
+ write_len = sizeof(rsp);
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+
+ return write_len;
+}
+
+void diag_send_error_rsp(unsigned char *buf, int len)
+{
+ /* -1 to accommodate the first byte 0x13 */
+ if (len > (DIAG_MAX_RSP_SIZE - 1)) {
+ pr_err("diag: cannot send err rsp, huge length: %d\n", len);
+ return;
+ }
+
+ *(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
+ memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
+ diag_send_rsp(driver->apps_rsp_buf, len + 1);
+}
+
+int diag_process_apps_pkt(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ int mask_ret;
+ int write_len = 0;
+ unsigned char *temp = NULL;
+ struct diag_cmd_reg_entry_t entry;
+ struct diag_cmd_reg_entry_t *temp_entry = NULL;
+ struct diag_cmd_reg_t *reg_item = NULL;
+
+ if (!buf)
+ return -EIO;
+
+ /* Check if the command is a supported mask command */
+ mask_ret = diag_process_apps_masks(buf, len, info);
+ if (mask_ret > 0) {
+ diag_send_rsp(driver->apps_rsp_buf, mask_ret);
+ return 0;
+ }
+
+ temp = buf;
+ entry.cmd_code = (uint16_t)(*(uint8_t *)temp);
+ temp += sizeof(uint8_t);
+ entry.subsys_id = (uint16_t)(*(uint8_t *)temp);
+ temp += sizeof(uint8_t);
+ entry.cmd_code_hi = (uint16_t)(*(uint16_t *)temp);
+ entry.cmd_code_lo = (uint16_t)(*(uint16_t *)temp);
+ temp += sizeof(uint16_t);
+
+ pr_debug("diag: In %s, received cmd %02x %02x %02x\n",
+ __func__, entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
+
+ if (*buf == DIAG_CMD_LOG_ON_DMND && driver->log_on_demand_support &&
+ driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
+ write_len = diag_cmd_log_on_demand(buf, len,
+ driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0)
+ diag_send_rsp(driver->apps_rsp_buf, write_len);
+ return 0;
+ }
+
+ mutex_lock(&driver->cmd_reg_mutex);
+ temp_entry = diag_cmd_search(&entry, ALL_PROC);
+ if (temp_entry) {
+ reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+ entry);
+ if (info) {
+ if (MD_PERIPHERAL_MASK(reg_item->proc) &
+ info->peripheral_mask)
+ write_len = diag_send_data(reg_item, buf, len);
+ } else {
+ if (MD_PERIPHERAL_MASK(reg_item->proc) &
+ driver->logging_mask)
+ diag_send_error_rsp(buf, len);
+ else
+ write_len = diag_send_data(reg_item, buf, len);
+ }
+ mutex_unlock(&driver->cmd_reg_mutex);
+ return write_len;
+ }
+ mutex_unlock(&driver->cmd_reg_mutex);
+
+#if defined(CONFIG_DIAG_OVER_USB)
+ /* Check for the command/respond msg for the maximum packet length */
+ if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+ (*(uint16_t *)(buf+2) == 0x0055)) {
+ for (i = 0; i < 4; i++)
+ *(driver->apps_rsp_buf+i) = *(buf+i);
+ *(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
+ diag_send_rsp(driver->apps_rsp_buf, 8);
+ return 0;
+ } else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+ (*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
+ len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
+ if (len > 0) {
+ diag_send_rsp(driver->apps_rsp_buf, len);
+ return 0;
+ }
+ return len;
+ }
+ /* Check for time sync query command */
+ else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+ (*(buf+1) == DIAG_SS_DIAG) &&
+ (*(uint16_t *)(buf+2) == DIAG_GET_TIME_API)) {
+ write_len = diag_process_time_sync_query_cmd(buf, len,
+ driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0)
+ diag_send_rsp(driver->apps_rsp_buf, write_len);
+ return 0;
+ }
+ /* Check for time sync switch command */
+ else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+ (*(buf+1) == DIAG_SS_DIAG) &&
+ (*(uint16_t *)(buf+2) == DIAG_SET_TIME_API)) {
+ write_len = diag_process_time_sync_switch_cmd(buf, len,
+ driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0)
+ diag_send_rsp(driver->apps_rsp_buf, write_len);
+ return 0;
+ }
+ /* Check for download command */
+ else if ((chk_apps_master()) && (*buf == 0x3A)) {
+ /* send response back */
+ driver->apps_rsp_buf[0] = *buf;
+ diag_send_rsp(driver->apps_rsp_buf, 1);
+ msleep(5000);
+ /* call download API */
+ msm_set_restart_mode(RESTART_DLOAD);
+ pr_crit("diag: download mode set, Rebooting SoC..\n");
+ kernel_restart(NULL);
+ /* Not required, represents that command isn't sent to modem */
+ return 0;
+ }
+ /* Check for polling for Apps only DIAG */
+ else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+ (*(buf+2) == 0x03)) {
+ /* If no one has registered for polling */
+ if (chk_polling_response()) {
+ /* Respond to polling for Apps only DIAG */
+ for (i = 0; i < 3; i++)
+ driver->apps_rsp_buf[i] = *(buf+i);
+ for (i = 0; i < 13; i++)
+ driver->apps_rsp_buf[i+3] = 0;
+
+ diag_send_rsp(driver->apps_rsp_buf, 16);
+ return 0;
+ }
+ }
+ /* Return the Delayed Response Wrap Status */
+ else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+ (*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
+ memcpy(driver->apps_rsp_buf, buf, 4);
+ driver->apps_rsp_buf[4] = wrap_enabled;
+ diag_send_rsp(driver->apps_rsp_buf, 5);
+ return 0;
+ }
+ /* Wrap the Delayed Rsp ID */
+ else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+ (*(buf+2) == 0x05) && (*(buf+3) == 0x0)) {
+ wrap_enabled = true;
+ memcpy(driver->apps_rsp_buf, buf, 4);
+ *(uint16_t *)(driver->apps_rsp_buf + 4) = wrap_count;
+ diag_send_rsp(driver->apps_rsp_buf, 6);
+ return 0;
+ }
+ /* Mobile ID Rsp */
+ else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+ (*(buf+1) == DIAG_SS_PARAMS) &&
+ (*(buf+2) == DIAG_EXT_MOBILE_ID) && (*(buf+3) == 0x0)) {
+ write_len = diag_cmd_get_mobile_id(buf, len,
+ driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0) {
+ diag_send_rsp(driver->apps_rsp_buf, write_len);
+ return 0;
+ }
+ }
+ /*
+ * If the apps processor is master, no other processor has
+ * registered for the polling command, the modem is not up, and we
+ * have not received the feature mask update from the modem, then
+ * the apps processor should respond to the 0x00 and 0x7C commands.
+ */
+ else if (chk_apps_master() &&
+ !(driver->polling_reg_flag) &&
+ !(driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+ !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+ /* respond to 0x0 command */
+ if (*buf == 0x00) {
+ for (i = 0; i < 55; i++)
+ driver->apps_rsp_buf[i] = 0;
+
+ diag_send_rsp(driver->apps_rsp_buf, 55);
+ return 0;
+ }
+ /* respond to 0x7c command */
+ else if (*buf == 0x7c) {
+ driver->apps_rsp_buf[0] = 0x7c;
+ for (i = 1; i < 8; i++)
+ driver->apps_rsp_buf[i] = 0;
+ /* Tools ID for APQ 8060 */
+ *(int *)(driver->apps_rsp_buf + 8) =
+ chk_config_get_id();
+ *(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
+ *(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
+ diag_send_rsp(driver->apps_rsp_buf, 14);
+ return 0;
+ }
+ }
+ write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0) {
+ diag_send_rsp(driver->apps_rsp_buf, write_len);
+ return 0;
+ }
+ write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0) {
+ /*
+ * This mutex lock is necessary since we need to drain all the
+ * pending buffers from peripherals which may be HDLC encoded
+ * before disabling HDLC encoding on Apps processor.
+ */
+ mutex_lock(&driver->hdlc_disable_mutex);
+ diag_send_rsp(driver->apps_rsp_buf, write_len);
+ /*
+ * Set the value of hdlc_disabled after sending the response to
+ * the tool. This is required since the tool expects an
+ * HDLC-encoded response for this request.
+ */
+ pr_debug("diag: In %s, disabling HDLC encoding\n",
+ __func__);
+ if (info)
+ info->hdlc_disabled = 1;
+ else
+ driver->hdlc_disabled = 1;
+ diag_update_md_clients(HDLC_SUPPORT_TYPE);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ return 0;
+ }
+#endif
+
+ /* We have now come to the end of the function. */
+ if (chk_apps_only())
+ diag_send_error_rsp(buf, len);
+
+ return 0;
+}
+
+void diag_process_hdlc_pkt(void *data, unsigned int len,
+ struct diag_md_session_t *info)
+{
+ int err = 0;
+ int ret = 0;
+
+ if (len > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err("diag: In %s, invalid length: %d\n", __func__, len);
+ return;
+ }
+
+ mutex_lock(&driver->diag_hdlc_mutex);
+ pr_debug("diag: In %s, received packet of length: %d, req_buf_len: %d\n",
+ __func__, len, driver->hdlc_buf_len);
+
+ if (driver->hdlc_buf_len >= DIAG_MAX_REQ_SIZE) {
+ pr_err("diag: In %s, request length is more than supported len. Dropping packet.\n",
+ __func__);
+ goto fail;
+ }
+
+ hdlc_decode->dest_ptr = driver->hdlc_buf + driver->hdlc_buf_len;
+ hdlc_decode->dest_size = DIAG_MAX_HDLC_BUF_SIZE - driver->hdlc_buf_len;
+ hdlc_decode->src_ptr = data;
+ hdlc_decode->src_size = len;
+ hdlc_decode->src_idx = 0;
+ hdlc_decode->dest_idx = 0;
+
+ ret = diag_hdlc_decode(hdlc_decode);
+ /*
+ * driver->hdlc_buf is of size DIAG_MAX_HDLC_BUF_SIZE. But the decoded
+ * packet should be within DIAG_MAX_REQ_SIZE.
+ */
+ if (driver->hdlc_buf_len + hdlc_decode->dest_idx <= DIAG_MAX_REQ_SIZE) {
+ driver->hdlc_buf_len += hdlc_decode->dest_idx;
+ } else {
+ pr_err_ratelimited("diag: In %s, Dropping packet. pkt_size: %d, max: %d\n",
+ __func__,
+ driver->hdlc_buf_len + hdlc_decode->dest_idx,
+ DIAG_MAX_REQ_SIZE);
+ goto fail;
+ }
+
+ if (ret == HDLC_COMPLETE) {
+ err = crc_check(driver->hdlc_buf, driver->hdlc_buf_len);
+ if (err) {
+ /* CRC check failed. */
+ pr_err_ratelimited("diag: In %s, bad CRC. Dropping packet\n",
+ __func__);
+ goto fail;
+ }
+ driver->hdlc_buf_len -= HDLC_FOOTER_LEN;
+
+ if (driver->hdlc_buf_len < 1) {
+ pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
+ __func__, driver->hdlc_buf_len,
+ hdlc_decode->dest_idx);
+ goto fail;
+ }
+
+ err = diag_process_apps_pkt(driver->hdlc_buf,
+ driver->hdlc_buf_len, info);
+ if (err < 0)
+ goto fail;
+ } else {
+ goto end;
+ }
+
+ driver->hdlc_buf_len = 0;
+ mutex_unlock(&driver->diag_hdlc_mutex);
+ return;
+
+fail:
+ /*
+ * Tools needs to get a response in order to start its
+ * recovery algorithm. Send an error response if the
+ * packet is not in expected format.
+ */
+ diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len);
+ driver->hdlc_buf_len = 0;
+end:
+ mutex_unlock(&driver->diag_hdlc_mutex);
+}
+
+static int diagfwd_mux_open(int id, int mode)
+{
+ uint8_t i;
+ unsigned long flags;
+
+ switch (mode) {
+ case DIAG_USB_MODE:
+ driver->usb_connected = 1;
+ break;
+ case DIAG_MEMORY_DEVICE_MODE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (driver->rsp_buf_busy) {
+ /*
+ * When a client switches from callback mode to USB mode
+ * explicitly, there can be a situation when the last response
+ * is not drained to the user space application. Reset the
+ * in_busy flag in this case.
+ */
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 0;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+ }
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ diagfwd_open(i, TYPE_DATA);
+ diagfwd_open(i, TYPE_CMD);
+ }
+ queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+ return 0;
+}
+
+static int diagfwd_mux_close(int id, int mode)
+{
+ switch (mode) {
+ case DIAG_USB_MODE:
+ driver->usb_connected = 0;
+ break;
+ case DIAG_MEMORY_DEVICE_MODE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((driver->logging_mode == DIAG_MULTI_MODE &&
+ driver->md_session_mode == DIAG_MD_NONE) ||
+ (driver->md_session_mode == DIAG_MD_PERIPHERAL)) {
+ /*
+ * This case indicates that the USB is removed
+ * but there is a client running in background
+ * with Memory Device mode.
+ */
+ } else {
+ /*
+ * Since the masks are cleared on ODL exit and on USB
+ * disconnection, closing the channel is not needed. This
+ * enables stale packets to be read and dropped.
+ */
+ pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+ __func__);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ if (driver->md_session_mode == DIAG_MD_NONE)
+ driver->hdlc_disabled = 0;
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ queue_work(driver->diag_wq,
+ &(driver->update_user_clients));
+ }
+ queue_work(driver->diag_real_time_wq,
+ &driver->diag_real_time_work);
+ return 0;
+}
+
+static uint8_t hdlc_reset;
+
+static void hdlc_reset_timer_start(struct diag_md_session_t *info)
+{
+ if (!hdlc_timer_in_progress) {
+ hdlc_timer_in_progress = 1;
+ if (info)
+ mod_timer(&info->hdlc_reset_timer,
+ jiffies + msecs_to_jiffies(200));
+ else
+ mod_timer(&driver->hdlc_reset_timer,
+ jiffies + msecs_to_jiffies(200));
+ }
+}
+
+static void hdlc_reset_timer_func(unsigned long data)
+{
+ pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+ __func__);
+ if (hdlc_reset) {
+ driver->hdlc_disabled = 0;
+ queue_work(driver->diag_wq,
+ &(driver->update_user_clients));
+ }
+ hdlc_timer_in_progress = 0;
+}
+
+void diag_md_hdlc_reset_timer_func(unsigned long pid)
+{
+ struct diag_md_session_t *session_info = NULL;
+
+ pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+ __func__);
+ if (hdlc_reset) {
+ session_info = diag_md_session_get_pid(pid);
+ if (session_info)
+ session_info->hdlc_disabled = 0;
+ queue_work(driver->diag_wq,
+ &(driver->update_md_clients));
+ }
+ hdlc_timer_in_progress = 0;
+}
+
+static void diag_hdlc_start_recovery(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
+{
+ int i;
+ static uint32_t bad_byte_counter;
+ unsigned char *start_ptr = NULL;
+ struct diag_pkt_frame_t *actual_pkt = NULL;
+
+ hdlc_reset = 1;
+ hdlc_reset_timer_start(info);
+
+ actual_pkt = (struct diag_pkt_frame_t *)buf;
+ for (i = 0; i < len; i++) {
+ if (actual_pkt->start == CONTROL_CHAR &&
+ actual_pkt->version == 1 &&
+ actual_pkt->length < len &&
+ (*(uint8_t *)(buf +
+ sizeof(struct diag_pkt_frame_t) +
+ actual_pkt->length) == CONTROL_CHAR)) {
+ start_ptr = &buf[i];
+ break;
+ }
+ bad_byte_counter++;
+ if (bad_byte_counter > (DIAG_MAX_REQ_SIZE +
+ sizeof(struct diag_pkt_frame_t) + 1)) {
+ bad_byte_counter = 0;
+ pr_err("diag: In %s, re-enabling HDLC encoding\n",
+ __func__);
+ mutex_lock(&driver->hdlc_disable_mutex);
+ if (info)
+ info->hdlc_disabled = 0;
+ else
+ driver->hdlc_disabled = 0;
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+ return;
+ }
+ }
+
+ if (start_ptr) {
+ /* Discard any partial packet reads */
+ driver->incoming_pkt.processing = 0;
+ diag_process_non_hdlc_pkt(start_ptr, len - i, info);
+ }
+}
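+
+/*
+ * Recovery behaviour, for reference: on a framing error the buffer is
+ * scanned for the next plausible frame (0x7E start byte, version 1 and
+ * a terminating 0x7E), a 200 ms timer is armed to re-enable HDLC
+ * encoding, and the scan gives up after roughly DIAG_MAX_REQ_SIZE bad
+ * bytes.
+ */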
+
+void diag_process_non_hdlc_pkt(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
+{
+ int err = 0;
+ uint16_t pkt_len = 0;
+ uint32_t read_bytes = 0;
+ const uint32_t header_len = sizeof(struct diag_pkt_frame_t);
+ struct diag_pkt_frame_t *actual_pkt = NULL;
+ unsigned char *data_ptr = NULL;
+ struct diag_partial_pkt_t *partial_pkt = &driver->incoming_pkt;
+
+ if (!buf || len <= 0)
+ return;
+
+ if (!partial_pkt->processing)
+ goto start;
+
+ if (partial_pkt->remaining > len) {
+ if ((partial_pkt->read_len + len) > partial_pkt->capacity) {
+ pr_err("diag: Invalid length %d, %d received in %s\n",
+ partial_pkt->read_len, len, __func__);
+ goto end;
+ }
+ memcpy(partial_pkt->data + partial_pkt->read_len, buf, len);
+ read_bytes += len;
+ buf += read_bytes;
+ partial_pkt->read_len += len;
+ partial_pkt->remaining -= len;
+ } else {
+ if ((partial_pkt->read_len + partial_pkt->remaining) >
+ partial_pkt->capacity) {
+ pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+ partial_pkt->read_len,
+ partial_pkt->remaining, __func__);
+ goto end;
+ }
+ memcpy(partial_pkt->data + partial_pkt->read_len, buf,
+ partial_pkt->remaining);
+ read_bytes += partial_pkt->remaining;
+ buf += read_bytes;
+ partial_pkt->read_len += partial_pkt->remaining;
+ partial_pkt->remaining = 0;
+ }
+
+ if (partial_pkt->remaining == 0) {
+ actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data);
+ data_ptr = partial_pkt->data + header_len;
+ if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+ diag_hdlc_start_recovery(buf, len, info);
+ err = diag_process_apps_pkt(data_ptr,
+ actual_pkt->length, info);
+ if (err) {
+ pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
+ __func__, err);
+ goto end;
+ }
+ partial_pkt->read_len = 0;
+ partial_pkt->total_len = 0;
+ partial_pkt->processing = 0;
+ goto start;
+ }
+ goto end;
+
+start:
+ while (read_bytes < len) {
+ actual_pkt = (struct diag_pkt_frame_t *)buf;
+ pkt_len = actual_pkt->length;
+
+ if (actual_pkt->start != CONTROL_CHAR) {
+ diag_hdlc_start_recovery(buf, len, info);
+ diag_send_error_rsp(buf, len);
+ goto end;
+ }
+
+ if (pkt_len + header_len > partial_pkt->capacity) {
+ pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
+ __func__, pkt_len);
+ diag_hdlc_start_recovery(buf, len, info);
+ break;
+ }
+
+ if ((pkt_len + header_len) > (len - read_bytes)) {
+ partial_pkt->read_len = len - read_bytes;
+ partial_pkt->total_len = pkt_len + header_len;
+ partial_pkt->remaining = partial_pkt->total_len -
+ partial_pkt->read_len;
+ partial_pkt->processing = 1;
+ memcpy(partial_pkt->data, buf, partial_pkt->read_len);
+ break;
+ }
+ data_ptr = buf + header_len;
+ if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+ diag_hdlc_start_recovery(buf, len, info);
+ else
+ hdlc_reset = 0;
+ err = diag_process_apps_pkt(data_ptr,
+ actual_pkt->length, info);
+ if (err)
+ break;
+ read_bytes += header_len + pkt_len + 1;
+ buf += header_len + pkt_len + 1; /* advance to next pkt */
+ }
+end:
+ return;
+}
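+
+/*
+ * Note on reassembly: a non-HDLC frame may span multiple reads. When a
+ * read ends mid-frame, the bytes seen so far are stashed in
+ * driver->incoming_pkt and 'remaining' tracks how much of the frame is
+ * still expected; subsequent reads append to the buffer until
+ * 'remaining' reaches zero and the completed frame is handed to
+ * diag_process_apps_pkt().
+ */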
+
+static int diagfwd_mux_read_done(unsigned char *buf, int len, int ctxt)
+{
+ if (!buf || len <= 0)
+ return -EINVAL;
+
+ if (!driver->hdlc_disabled)
+ diag_process_hdlc_pkt(buf, len, NULL);
+ else
+ diag_process_non_hdlc_pkt(buf, len, NULL);
+
+ diag_mux_queue_read(ctxt);
+ return 0;
+}
+
+static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
+ int ctxt)
+{
+ unsigned long flags;
+ int peripheral = -1;
+ int type = -1;
+ int num = -1;
+
+ if (!buf || len < 0)
+ return -EINVAL;
+
+ peripheral = GET_BUF_PERIPHERAL(buf_ctxt);
+ type = GET_BUF_TYPE(buf_ctxt);
+ num = GET_BUF_NUM(buf_ctxt);
+
+ switch (type) {
+ case TYPE_DATA:
+ if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+ diagfwd_write_done(peripheral, type, num);
+ diag_ws_on_copy(DIAG_WS_MUX);
+ } else if (peripheral == APPS_DATA) {
+ diagmem_free(driver, (unsigned char *)buf,
+ POOL_TYPE_HDLC);
+ buf = NULL;
+ } else {
+ pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
+ peripheral, __func__, type);
+ }
+ break;
+ case TYPE_CMD:
+ if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+ diagfwd_write_done(peripheral, type, num);
+ } else if (peripheral == APPS_DATA) {
+ spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+ driver->rsp_buf_busy = 0;
+ driver->encoded_rsp_len = 0;
+ spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
+ flags);
+ } else {
+ pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
+ peripheral, __func__, type);
+ }
+ break;
+ default:
+ pr_err_ratelimited("diag: Incorrect data type %d, buf_ctxt: %d in %s\n",
+ type, buf_ctxt, __func__);
+ break;
+ }
+
+ return 0;
+}
+
+static struct diag_mux_ops diagfwd_mux_ops = {
+ .open = diagfwd_mux_open,
+ .close = diagfwd_mux_close,
+ .read_done = diagfwd_mux_read_done,
+ .write_done = diagfwd_mux_write_done
+};
+
+int diagfwd_init(void)
+{
+ int ret;
+ int i;
+
+ wrap_enabled = 0;
+ wrap_count = 0;
+ driver->use_device_tree = has_device_tree();
+ for (i = 0; i < DIAG_NUM_PROC; i++)
+ driver->real_time_mode[i] = 1;
+ driver->supports_separate_cmdrsp = 1;
+ driver->supports_apps_hdlc_encoding = 1;
+ mutex_init(&driver->diag_hdlc_mutex);
+ mutex_init(&driver->diag_cntl_mutex);
+ mutex_init(&driver->mode_lock);
+ driver->encoded_rsp_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE +
+ APF_DIAG_PADDING, GFP_KERNEL);
+ if (!driver->encoded_rsp_buf)
+ goto err;
+ kmemleak_not_leak(driver->encoded_rsp_buf);
+ hdlc_decode = kzalloc(sizeof(struct diag_hdlc_decode_type),
+ GFP_KERNEL);
+ if (!hdlc_decode)
+ goto err;
+ setup_timer(&driver->hdlc_reset_timer, hdlc_reset_timer_func, 0);
+ kmemleak_not_leak(hdlc_decode);
+ driver->encoded_rsp_len = 0;
+ driver->rsp_buf_busy = 0;
+ spin_lock_init(&driver->rsp_buf_busy_lock);
+ driver->user_space_data_busy = 0;
+ driver->hdlc_buf_len = 0;
+ INIT_LIST_HEAD(&driver->cmd_reg_list);
+ driver->cmd_reg_count = 0;
+ mutex_init(&driver->cmd_reg_mutex);
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ driver->feature[i].separate_cmd_rsp = 0;
+ driver->feature[i].stm_support = DISABLE_STM;
+ driver->feature[i].rcvd_feature_mask = 0;
+ driver->feature[i].peripheral_buffering = 0;
+ driver->feature[i].encode_hdlc = 0;
+ driver->feature[i].mask_centralization = 0;
+ driver->feature[i].log_on_demand = 0;
+ driver->feature[i].sent_feature_mask = 0;
+ driver->buffering_mode[i].peripheral = i;
+ driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
+ driver->buffering_mode[i].high_wm_val = DEFAULT_HIGH_WM_VAL;
+ driver->buffering_mode[i].low_wm_val = DEFAULT_LOW_WM_VAL;
+ }
+
+ for (i = 0; i < NUM_STM_PROCESSORS; i++) {
+ driver->stm_state_requested[i] = DISABLE_STM;
+ driver->stm_state[i] = DISABLE_STM;
+ }
+
+ if (driver->hdlc_buf == NULL) {
+ driver->hdlc_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+ if (!driver->hdlc_buf)
+ goto err;
+ kmemleak_not_leak(driver->hdlc_buf);
+ }
+ if (driver->user_space_data_buf == NULL)
+ driver->user_space_data_buf = kzalloc(USER_SPACE_DATA,
+ GFP_KERNEL);
+ if (driver->user_space_data_buf == NULL)
+ goto err;
+ kmemleak_not_leak(driver->user_space_data_buf);
+
+ if (!driver->client_map) {
+ driver->client_map = kcalloc(driver->num_clients,
+ sizeof(struct diag_client_map), GFP_KERNEL);
+ if (!driver->client_map)
+ goto err;
+ }
+ kmemleak_not_leak(driver->client_map);
+
+ if (!driver->data_ready) {
+ driver->data_ready = kcalloc(driver->num_clients,
+ sizeof(int), GFP_KERNEL);
+ if (!driver->data_ready)
+ goto err;
+ }
+ kmemleak_not_leak(driver->data_ready);
+
+ if (driver->apps_req_buf == NULL) {
+ driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+ if (!driver->apps_req_buf)
+ goto err;
+ kmemleak_not_leak(driver->apps_req_buf);
+ }
+ if (driver->dci_pkt_buf == NULL) {
+ driver->dci_pkt_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
+ if (!driver->dci_pkt_buf)
+ goto err;
+ kmemleak_not_leak(driver->dci_pkt_buf);
+ }
+ if (driver->apps_rsp_buf == NULL) {
+ driver->apps_rsp_buf = kzalloc(DIAG_MAX_RSP_SIZE, GFP_KERNEL);
+ if (driver->apps_rsp_buf == NULL)
+ goto err;
+ kmemleak_not_leak(driver->apps_rsp_buf);
+ }
+ driver->diag_wq = create_singlethread_workqueue("diag_wq");
+ if (!driver->diag_wq)
+ goto err;
+ ret = diag_mux_register(DIAG_LOCAL_PROC, DIAG_LOCAL_PROC,
+ &diagfwd_mux_ops);
+ if (ret) {
+ pr_err("diag: Unable to register with USB, err: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ pr_err("diag: In %s, couldn't initialize diag\n", __func__);
+
+ diag_usb_exit(DIAG_USB_LOCAL);
+ kfree(driver->encoded_rsp_buf);
+ kfree(driver->hdlc_buf);
+ kfree(driver->client_map);
+ kfree(driver->data_ready);
+ kfree(driver->apps_req_buf);
+ kfree(driver->dci_pkt_buf);
+ kfree(driver->apps_rsp_buf);
+ kfree(hdlc_decode);
+ kfree(driver->user_space_data_buf);
+ if (driver->diag_wq)
+ destroy_workqueue(driver->diag_wq);
+ return -ENOMEM;
+}
+
+void diagfwd_exit(void)
+{
+ kfree(driver->encoded_rsp_buf);
+ kfree(driver->hdlc_buf);
+ kfree(hdlc_decode);
+ kfree(driver->client_map);
+ kfree(driver->data_ready);
+ kfree(driver->apps_req_buf);
+ kfree(driver->dci_pkt_buf);
+ kfree(driver->apps_rsp_buf);
+ kfree(driver->user_space_data_buf);
+ destroy_workqueue(driver->diag_wq);
+}
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
new file mode 100644
index 0000000..47c8555
--- /dev/null
+++ b/drivers/char/diag/diagfwd.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_H
+#define DIAGFWD_H
+
+/*
+ * The context applies to Diag peripheral data buffers. It is used to
+ * identify the buffer once these buffers are written to USB.
+ */
+#define SET_BUF_CTXT(p, d, n) \
+ ((((p) & 0xFF) << 16) | (((d) & 0xFF) << 8) | ((n) & 0xFF))
+#define GET_BUF_PERIPHERAL(p) (((p) & 0xFF0000) >> 16)
+#define GET_BUF_TYPE(d) (((d) & 0x00FF00) >> 8)
+#define GET_BUF_NUM(n) ((n) & 0x0000FF)
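+
+/*
+ * Worked example: SET_BUF_CTXT(1, 0, 2) packs peripheral 1, buffer
+ * type 0 and buffer number 2 into 0x010002. GET_BUF_PERIPHERAL,
+ * GET_BUF_TYPE and GET_BUF_NUM then recover 1, 0 and 2 from that
+ * context value.
+ */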
+
+#define CHK_OVERFLOW(bufStart, start, end, length) \
+ ((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0)
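+
+/*
+ * Worked example: for a 16 byte buffer buf, CHK_OVERFLOW(buf,
+ * buf + 10, buf + 16, 4) evaluates to 1 since 4 more bytes still fit,
+ * while CHK_OVERFLOW(buf, buf + 10, buf + 16, 8) evaluates to 0.
+ */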
+
+int diagfwd_init(void);
+void diagfwd_exit(void);
+void diag_process_hdlc_pkt(void *data, unsigned int len,
+ struct diag_md_session_t *info);
+void diag_process_non_hdlc_pkt(unsigned char *data, int len,
+ struct diag_md_session_t *info);
+int chk_config_get_id(void);
+int chk_apps_only(void);
+int chk_apps_master(void);
+int chk_polling_response(void);
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len);
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len);
+int diag_check_common_cmd(struct diag_pkt_header_t *header);
+void diag_update_userspace_clients(unsigned int type);
+void diag_update_sleeping_process(int process_id, int data_type);
+int diag_process_apps_pkt(unsigned char *buf, int len,
+ struct diag_md_session_t *info);
+void diag_send_error_rsp(unsigned char *buf, int len);
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
+void diag_md_hdlc_reset_timer_func(unsigned long pid);
+void diag_update_md_clients(unsigned int type);
+#endif
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
new file mode 100644
index 0000000..3684b8d
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -0,0 +1,317 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/ratelimit.h>
+#include <linux/platform_device.h>
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+#include <linux/smux.h>
+#endif
+#include "diag_mux.h"
+#include "diagfwd_bridge.h"
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+#include "diagfwd_hsic.h"
+#include "diagfwd_smux.h"
+#endif
+#include "diagfwd_mhi.h"
+#include "diag_dci.h"
+
+#ifdef CONFIG_MSM_MHI
+#define diag_mdm_init diag_mhi_init
+#else
+#define diag_mdm_init diag_hsic_init
+#endif
+
+#define BRIDGE_TO_MUX(x) ((x) + DIAG_MUX_BRIDGE_BASE)
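+/* e.g. BRIDGE_TO_MUX(DIAGFWD_MDM) yields DIAG_MUX_BRIDGE_BASE + 0 */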
+
+struct diagfwd_bridge_info bridge_info[NUM_REMOTE_DEV] = {
+ {
+ .id = DIAGFWD_MDM,
+ .type = DIAG_DATA_TYPE,
+ .name = "MDM",
+ .inited = 0,
+ .ctxt = 0,
+ .dev_ops = NULL,
+ .dci_read_ptr = NULL,
+ .dci_read_buf = NULL,
+ .dci_read_len = 0,
+ .dci_wq = NULL,
+ },
+ {
+ .id = DIAGFWD_SMUX,
+ .type = DIAG_DATA_TYPE,
+ .name = "SMUX",
+ .inited = 0,
+ .ctxt = 0,
+ .dev_ops = NULL,
+ .dci_read_ptr = NULL,
+ .dci_read_buf = NULL,
+ .dci_read_len = 0,
+ .dci_wq = NULL,
+ },
+ {
+ .id = DIAGFWD_MDM_DCI,
+ .type = DIAG_DCI_TYPE,
+ .name = "MDM_DCI",
+ .inited = 0,
+ .ctxt = 0,
+ .dev_ops = NULL,
+ .dci_read_ptr = NULL,
+ .dci_read_buf = NULL,
+ .dci_read_len = 0,
+ .dci_wq = NULL,
+ },
+};
+
+static int diagfwd_bridge_mux_connect(int id, int mode)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->open)
+ bridge_info[id].dev_ops->open(bridge_info[id].ctxt);
+ return 0;
+}
+
+static int diagfwd_bridge_mux_disconnect(int id, int mode)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->close)
+ bridge_info[id].dev_ops->close(bridge_info[id].ctxt);
+ return 0;
+}
+
+static int diagfwd_bridge_mux_read_done(unsigned char *buf, int len, int id)
+{
+ return diagfwd_bridge_write(id, buf, len);
+}
+
+static int diagfwd_bridge_mux_write_done(unsigned char *buf, int len,
+ int buf_ctx, int id)
+{
+ struct diagfwd_bridge_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ if (buf_ctx < 0 || buf_ctx >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ ch = &bridge_info[buf_ctx];
+ if (ch->dev_ops && ch->dev_ops->fwd_complete)
+ ch->dev_ops->fwd_complete(ch->ctxt, buf, len, 0);
+ return 0;
+}
+
+static struct diag_mux_ops diagfwd_bridge_mux_ops = {
+ .open = diagfwd_bridge_mux_connect,
+ .close = diagfwd_bridge_mux_disconnect,
+ .read_done = diagfwd_bridge_mux_read_done,
+ .write_done = diagfwd_bridge_mux_write_done
+};
+
+static void bridge_dci_read_work_fn(struct work_struct *work)
+{
+ struct diagfwd_bridge_info *ch = container_of(work,
+ struct diagfwd_bridge_info,
+ dci_read_work);
+ if (!ch)
+ return;
+ diag_process_remote_dci_read_data(ch->id, ch->dci_read_buf,
+ ch->dci_read_len);
+ if (ch->dev_ops && ch->dev_ops->fwd_complete) {
+ ch->dev_ops->fwd_complete(ch->ctxt, ch->dci_read_ptr,
+ ch->dci_read_len, 0);
+ }
+}
+
+int diagfwd_bridge_register(int id, int ctxt, struct diag_remote_dev_ops *ops)
+{
+ int err = 0;
+ struct diagfwd_bridge_info *ch = NULL;
+ char wq_name[DIAG_BRIDGE_NAME_SZ + 10];
+
+ if (!ops) {
+ pr_err("diag: Invalid pointers ops: %pK ctxt: %d\n", ops, ctxt);
+ return -EINVAL;
+ }
+
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+
+ ch = &bridge_info[id];
+ ch->ctxt = ctxt;
+ ch->dev_ops = ops;
+ switch (ch->type) {
+ case DIAG_DATA_TYPE:
+ err = diag_mux_register(BRIDGE_TO_MUX(id), id,
+ &diagfwd_bridge_mux_ops);
+ if (err)
+ return err;
+ break;
+ case DIAG_DCI_TYPE:
+ ch->dci_read_buf = kzalloc(DIAG_MDM_BUF_SIZE, GFP_KERNEL);
+ if (!ch->dci_read_buf)
+ return -ENOMEM;
+ ch->dci_read_len = 0;
+ strlcpy(wq_name, "diag_dci_", sizeof(wq_name));
+ strlcat(wq_name, ch->name, sizeof(wq_name));
+ INIT_WORK(&(ch->dci_read_work), bridge_dci_read_work_fn);
+ ch->dci_wq = create_singlethread_workqueue(wq_name);
+ if (!ch->dci_wq) {
+ kfree(ch->dci_read_buf);
+ return -ENOMEM;
+ }
+ break;
+ default:
+ pr_err("diag: Invalid channel type %d in %s\n", ch->type,
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int diag_remote_dev_open(int id)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ bridge_info[id].inited = 1;
+ if (bridge_info[id].type == DIAG_DATA_TYPE)
+ return diag_mux_queue_read(BRIDGE_TO_MUX(id));
+ else if (bridge_info[id].type == DIAG_DCI_TYPE)
+ return diag_dci_send_handshake_pkt(bridge_info[id].id);
+
+ return 0;
+}
+
+void diag_remote_dev_close(int id)
+{
+}
+
+int diag_remote_dev_read_done(int id, unsigned char *buf, int len)
+{
+ int err = 0;
+ struct diagfwd_bridge_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ ch = &bridge_info[id];
+ if (ch->type == DIAG_DATA_TYPE) {
+ err = diag_mux_write(BRIDGE_TO_MUX(id), buf, len, id);
+ if (ch->dev_ops && ch->dev_ops->queue_read)
+ ch->dev_ops->queue_read(ch->ctxt);
+ return err;
+ }
+ /*
+ * For DCI channels copy to the internal buffer. Don't queue any
+ * further reads. A read should be queued once we are done processing
+ * the current packet
+ */
+ if (len <= 0 || len > DIAG_MDM_BUF_SIZE) {
+ pr_err_ratelimited("diag: Invalid len %d in %s, ch: %s\n",
+ len, __func__, ch->name);
+ return -EINVAL;
+ }
+ ch->dci_read_ptr = buf;
+ memcpy(ch->dci_read_buf, buf, len);
+ ch->dci_read_len = len;
+ queue_work(ch->dci_wq, &ch->dci_read_work);
+ return 0;
+}
+
+int diag_remote_dev_write_done(int id, unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+
+ if (bridge_info[id].type == DIAG_DATA_TYPE) {
+ if (buf == driver->hdlc_encode_buf)
+ driver->hdlc_encode_buf_len = 0;
+ /*
+ * For remote processor, the token offset is stripped from the
+ * buffer. Account for the token offset while checking against
+ * the original buffer
+ */
+ if (buf == (driver->user_space_data_buf + sizeof(int)))
+ driver->user_space_data_busy = 0;
+ err = diag_mux_queue_read(BRIDGE_TO_MUX(id));
+ } else {
+ err = diag_dci_write_done_bridge(id, buf, len);
+ }
+ return err;
+}
+
+int diagfwd_bridge_init(void)
+{
+ int err = 0;
+
+ err = diag_mdm_init();
+ if (err)
+ goto fail;
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+ err = diag_smux_init();
+ if (err)
+ goto fail;
+#endif
+ return 0;
+
+fail:
+ pr_err("diag: Unable to initialze diagfwd bridge, err: %d\n", err);
+ return err;
+}
+
+void diagfwd_bridge_exit(void)
+{
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+ diag_hsic_exit();
+ diag_smux_exit();
+#endif
+}
+
+int diagfwd_bridge_close(int id)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->close)
+ return bridge_info[id].dev_ops->close(bridge_info[id].ctxt);
+ return 0;
+}
+
+int diagfwd_bridge_write(int id, unsigned char *buf, int len)
+{
+ if (id < 0 || id >= NUM_REMOTE_DEV)
+ return -EINVAL;
+ if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->write) {
+ return bridge_info[id].dev_ops->write(bridge_info[id].ctxt,
+ buf, len, 0);
+ }
+ return 0;
+}
+
+uint16_t diag_get_remote_device_mask(void)
+{
+ int i;
+ uint16_t remote_dev = 0;
+
+ for (i = 0; i < NUM_REMOTE_DEV; i++) {
+ if (bridge_info[i].inited &&
+ bridge_info[i].type == DIAG_DATA_TYPE) {
+ remote_dev |= 1 << i;
+ }
+ }
+
+ return remote_dev;
+}
diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h
new file mode 100644
index 0000000..62d6b08
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_BRIDGE_H
+#define DIAGFWD_BRIDGE_H
+
+/*
+ * Add Data channels at the top half and the DCI channels at the
+ * bottom half of this list.
+ */
+#define DIAGFWD_MDM 0
+#define DIAGFWD_SMUX 1
+#define NUM_REMOTE_DATA_DEV 2
+#define DIAGFWD_MDM_DCI NUM_REMOTE_DATA_DEV
+#define NUM_REMOTE_DCI_DEV (DIAGFWD_MDM_DCI - NUM_REMOTE_DATA_DEV + 1)
+#define NUM_REMOTE_DEV (NUM_REMOTE_DATA_DEV + NUM_REMOTE_DCI_DEV)
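+/*
+ * With the channels listed above this works out to: 2 data channels,
+ * 1 DCI channel (NUM_REMOTE_DCI_DEV = 2 - 2 + 1 = 1), and
+ * NUM_REMOTE_DEV = 3.
+ */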
+
+#define DIAG_BRIDGE_NAME_SZ 24
+#define DIAG_BRIDGE_GET_NAME(x) (bridge_info[x].name)
+
+struct diag_remote_dev_ops {
+ int (*open)(int id);
+ int (*close)(int id);
+ int (*queue_read)(int id);
+ int (*write)(int id, unsigned char *buf, int len, int ctxt);
+ int (*fwd_complete)(int id, unsigned char *buf, int len, int ctxt);
+};
+
+struct diagfwd_bridge_info {
+ int id;
+ int type;
+ int inited;
+ int ctxt;
+ char name[DIAG_BRIDGE_NAME_SZ];
+ struct diag_remote_dev_ops *dev_ops;
+ /* DCI related variables. These would be NULL for data channels */
+ void *dci_read_ptr;
+ unsigned char *dci_read_buf;
+ int dci_read_len;
+ struct workqueue_struct *dci_wq;
+ struct work_struct dci_read_work;
+};
+
+extern struct diagfwd_bridge_info bridge_info[NUM_REMOTE_DEV];
+int diagfwd_bridge_init(void);
+void diagfwd_bridge_exit(void);
+int diagfwd_bridge_close(int id);
+int diagfwd_bridge_write(int id, unsigned char *buf, int len);
+uint16_t diag_get_remote_device_mask(void);
+
+/* The following functions must be called by Diag remote devices only. */
+int diagfwd_bridge_register(int id, int ctxt, struct diag_remote_dev_ops *ops);
+int diag_remote_dev_open(int id);
+void diag_remote_dev_close(int id);
+int diag_remote_dev_read_done(int id, unsigned char *buf, int len);
+int diag_remote_dev_write_done(int id, unsigned char *buf, int len, int ctxt);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
new file mode 100644
index 0000000..4cbd9da
--- /dev/null
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -0,0 +1,1321 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_bridge.h"
+#include "diag_dci.h"
+#include "diagmem.h"
+#include "diag_masks.h"
+#include "diag_ipc_logging.h"
+#include "diag_mux.h"
+
+#define FEATURE_SUPPORTED(x) ((feature_mask << (i * 8)) & (1 << (x)))
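+/*
+ * Worked example: while parsing mask byte i == 1,
+ * FEATURE_SUPPORTED(F_DIAG_STM) expands to
+ * (feature_mask << 8) & (1 << 9), i.e. it tests bit 1 of the second
+ * mask byte, which corresponds to the global STM feature bit 9.
+ */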
+
+/* tracks which peripheral is undergoing SSR */
+static uint16_t reg_dirty;
+static void diag_notify_md_client(uint8_t peripheral, int data);
+
+static void diag_mask_update_work_fn(struct work_struct *work)
+{
+ uint8_t peripheral;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (!(driver->mask_update & PERIPHERAL_MASK(peripheral)))
+ continue;
+ mutex_lock(&driver->cntl_lock);
+ driver->mask_update ^= PERIPHERAL_MASK(peripheral);
+ mutex_unlock(&driver->cntl_lock);
+ diag_send_updates_peripheral(peripheral);
+ }
+}
+
+void diag_cntl_channel_open(struct diagfwd_info *p_info)
+{
+ if (!p_info)
+ return;
+ driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
+ queue_work(driver->cntl_wq, &driver->mask_update_work);
+ diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
+}
+
+void diag_cntl_channel_close(struct diagfwd_info *p_info)
+{
+ uint8_t peripheral;
+
+ if (!p_info)
+ return;
+
+ peripheral = p_info->peripheral;
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ driver->feature[peripheral].sent_feature_mask = 0;
+ driver->feature[peripheral].rcvd_feature_mask = 0;
+ flush_workqueue(driver->cntl_wq);
+ reg_dirty |= PERIPHERAL_MASK(peripheral);
+ diag_cmd_remove_reg_by_proc(peripheral);
+ driver->feature[peripheral].stm_support = DISABLE_STM;
+ driver->feature[peripheral].log_on_demand = 0;
+ driver->stm_state[peripheral] = DISABLE_STM;
+ driver->stm_state_requested[peripheral] = DISABLE_STM;
+ reg_dirty ^= PERIPHERAL_MASK(peripheral);
+ diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED);
+}
+
+static void diag_stm_update_work_fn(struct work_struct *work)
+{
+ uint8_t i;
+ uint16_t peripheral_mask = 0;
+ int err = 0;
+
+ mutex_lock(&driver->cntl_lock);
+ peripheral_mask = driver->stm_peripheral;
+ driver->stm_peripheral = 0;
+ mutex_unlock(&driver->cntl_lock);
+
+ if (peripheral_mask == 0)
+ return;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!driver->feature[i].stm_support)
+ continue;
+ if (peripheral_mask & PERIPHERAL_MASK(i)) {
+ err = diag_send_stm_state(i,
+ (uint8_t)(driver->stm_state_requested[i]));
+ if (!err) {
+ driver->stm_state[i] =
+ driver->stm_state_requested[i];
+ }
+ }
+ }
+}
+
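+/*
+ * Notify the memory device client that a peripheral changed state.
+ * The signal payload (si_int) carries PERIPHERAL_MASK(peripheral)
+ * ORed with the DIAG_STATUS_* value, so the client can decode both
+ * the peripheral and the new state from a single SIGCONT.
+ */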
+void diag_notify_md_client(uint8_t peripheral, int data)
+{
+ int stat = 0;
+ struct siginfo info;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
+ return;
+
+ mutex_lock(&driver->md_session_lock);
+ memset(&info, 0, sizeof(struct siginfo));
+ info.si_code = SI_QUEUE;
+ info.si_int = (PERIPHERAL_MASK(peripheral) | data);
+ info.si_signo = SIGCONT;
+ if (driver->md_session_map[peripheral] &&
+ driver->md_session_map[peripheral]->task) {
+ if (driver->md_session_map[peripheral]->pid ==
+ driver->md_session_map[peripheral]->task->tgid) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "md_session %d pid = %d, md_session %d task tgid = %d\n",
+ peripheral,
+ driver->md_session_map[peripheral]->pid,
+ peripheral,
+ driver->md_session_map[peripheral]->task->tgid);
+ stat = send_sig_info(info.si_signo, &info,
+ driver->md_session_map[peripheral]->task);
+ if (stat)
+ pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
+ info.si_int, stat);
+ } else
+ pr_err("diag: md_session_map[%d] data is corrupted, signal data: 0x%x, stat: %d\n",
+ peripheral, info.si_int, stat);
+ }
+ mutex_unlock(&driver->md_session_lock);
+}
+
+static void process_pd_status(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ struct diag_ctrl_msg_pd_status *pd_msg = NULL;
+ uint32_t pd;
+ int status = DIAG_STATUS_CLOSED;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg))
+ return;
+
+ pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
+ pd = pd_msg->pd_id;
+ status = (pd_msg->status == 0) ? DIAG_STATUS_OPEN : DIAG_STATUS_CLOSED;
+ diag_notify_md_client(peripheral, status);
+}
+
+static void enable_stm_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ mutex_lock(&driver->cntl_lock);
+ driver->feature[peripheral].stm_support = ENABLE_STM;
+ driver->stm_peripheral |= PERIPHERAL_MASK(peripheral);
+ mutex_unlock(&driver->cntl_lock);
+
+ queue_work(driver->cntl_wq, &(driver->stm_update_work));
+}
+
+static void enable_socket_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (driver->supports_sockets)
+ driver->feature[peripheral].sockets_enabled = 1;
+ else
+ driver->feature[peripheral].sockets_enabled = 0;
+}
+
+static void process_hdlc_encoding_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (driver->supports_apps_hdlc_encoding) {
+ driver->feature[peripheral].encode_hdlc =
+ ENABLE_APPS_HDLC_ENCODING;
+ } else {
+ driver->feature[peripheral].encode_hdlc =
+ DISABLE_APPS_HDLC_ENCODING;
+ }
+}
+
+static void process_command_deregistration(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ uint8_t *ptr = buf;
+ int i;
+ int header_len = sizeof(struct diag_ctrl_cmd_dereg);
+ int read_len = 0;
+ struct diag_ctrl_cmd_dereg *dereg = NULL;
+ struct cmd_code_range *range = NULL;
+ struct diag_cmd_reg_entry_t del_entry;
+
+ /*
+ * Perform basic sanity checks. The len field is the size of the data
+ * payload; it does not include the header size.
+ */
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+ return;
+
+ dereg = (struct diag_ctrl_cmd_dereg *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
+ if (dereg->count_entries == 0) {
+ pr_debug("diag: In %s, received reg tbl with no entries\n",
+ __func__);
+ return;
+ }
+
+ for (i = 0; i < dereg->count_entries && read_len < len; i++) {
+ range = (struct cmd_code_range *)ptr;
+ /* Dereg entries omit the trailing 'data' word of cmd_code_range */
+ ptr += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+ read_len += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+ del_entry.cmd_code = dereg->cmd_code;
+ del_entry.subsys_id = dereg->subsysid;
+ del_entry.cmd_code_hi = range->cmd_code_hi;
+ del_entry.cmd_code_lo = range->cmd_code_lo;
+ diag_cmd_remove_reg(&del_entry, peripheral);
+ }
+
+ if (i != dereg->count_entries) {
+ pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+ __func__, read_len, len, dereg->count_entries);
+ }
+}
+
+static void process_command_registration(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ uint8_t *ptr = buf;
+ int i;
+ int header_len = sizeof(struct diag_ctrl_cmd_reg);
+ int read_len = 0;
+ struct diag_ctrl_cmd_reg *reg = NULL;
+ struct cmd_code_range *range = NULL;
+ struct diag_cmd_reg_entry_t new_entry;
+
+ /*
+ * Perform basic sanity checks. The len field is the size of the data
+ * payload; it does not include the header size.
+ */
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+ return;
+
+ reg = (struct diag_ctrl_cmd_reg *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
+ if (reg->count_entries == 0) {
+ pr_debug("diag: In %s, received reg tbl with no entries\n",
+ __func__);
+ return;
+ }
+
+ for (i = 0; i < reg->count_entries && read_len < len; i++) {
+ range = (struct cmd_code_range *)ptr;
+ ptr += sizeof(struct cmd_code_range);
+ read_len += sizeof(struct cmd_code_range);
+ new_entry.cmd_code = reg->cmd_code;
+ new_entry.subsys_id = reg->subsysid;
+ new_entry.cmd_code_hi = range->cmd_code_hi;
+ new_entry.cmd_code_lo = range->cmd_code_lo;
+ diag_cmd_add_reg(&new_entry, peripheral, INVALID_PID);
+ }
+
+ if (i != reg->count_entries) {
+ pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+ __func__, read_len, len, reg->count_entries);
+ }
+}
+
+static void diag_close_transport_work_fn(struct work_struct *work)
+{
+ uint8_t transport;
+ uint8_t peripheral;
+
+ mutex_lock(&driver->cntl_lock);
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (!(driver->close_transport & PERIPHERAL_MASK(peripheral)))
+ continue;
+ driver->close_transport ^= PERIPHERAL_MASK(peripheral);
+ transport = driver->feature[peripheral].sockets_enabled ?
+ TRANSPORT_GLINK : TRANSPORT_SOCKET;
+ diagfwd_close_transport(transport, peripheral);
+ }
+ mutex_unlock(&driver->cntl_lock);
+}
+
+static void process_socket_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ mutex_lock(&driver->cntl_lock);
+ driver->close_transport |= PERIPHERAL_MASK(peripheral);
+ queue_work(driver->cntl_wq, &driver->close_transport_work);
+ mutex_unlock(&driver->cntl_lock);
+}
+
+static void process_log_on_demand_feature(uint8_t peripheral)
+{
+ /* Log On Demand command is registered only on Modem */
+ if (peripheral != PERIPHERAL_MODEM)
+ return;
+
+ if (driver->feature[PERIPHERAL_MODEM].log_on_demand)
+ driver->log_on_demand_support = 1;
+ else
+ driver->log_on_demand_support = 0;
+}
+
+static void process_incoming_feature_mask(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ int i;
+ int header_len = sizeof(struct diag_ctrl_feature_mask);
+ int read_len = 0;
+ struct diag_ctrl_feature_mask *header = NULL;
+ uint32_t feature_mask_len = 0;
+ uint32_t feature_mask = 0;
+ uint8_t *ptr = buf;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+ return;
+
+ header = (struct diag_ctrl_feature_mask *)ptr;
+ ptr += header_len;
+ feature_mask_len = header->feature_mask_len;
+
+ if (feature_mask_len == 0) {
+ pr_debug("diag: In %s, received invalid feature mask from peripheral %d\n",
+ __func__, peripheral);
+ return;
+ }
+
+ if (feature_mask_len > FEATURE_MASK_LEN) {
+ pr_alert("diag: Received feature mask longer than Apps supports\n");
+ feature_mask_len = FEATURE_MASK_LEN;
+ }
+
+ driver->feature[peripheral].rcvd_feature_mask = 1;
+
+ for (i = 0; i < feature_mask_len && read_len < len; i++) {
+ feature_mask = *(uint8_t *)ptr;
+ driver->feature[peripheral].feature_mask[i] = feature_mask;
+ ptr += sizeof(uint8_t);
+ read_len += sizeof(uint8_t);
+
+ if (FEATURE_SUPPORTED(F_DIAG_LOG_ON_DEMAND_APPS))
+ driver->feature[peripheral].log_on_demand = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_REQ_RSP_SUPPORT))
+ driver->feature[peripheral].separate_cmd_rsp = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_APPS_HDLC_ENCODE))
+ process_hdlc_encoding_feature(peripheral);
+ if (FEATURE_SUPPORTED(F_DIAG_STM))
+ enable_stm_feature(peripheral);
+ if (FEATURE_SUPPORTED(F_DIAG_MASK_CENTRALIZATION))
+ driver->feature[peripheral].mask_centralization = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_PERIPHERAL_BUFFERING))
+ driver->feature[peripheral].peripheral_buffering = 1;
+ if (FEATURE_SUPPORTED(F_DIAG_SOCKETS_ENABLED))
+ enable_socket_feature(peripheral);
+ }
+
+ process_socket_feature(peripheral);
+ process_log_on_demand_feature(peripheral);
+}
+
+static void process_last_event_report(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ struct diag_ctrl_last_event_report *header = NULL;
+ uint8_t *ptr = buf;
+ uint8_t *temp = NULL;
+ uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
+ uint16_t event_size = 0;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len)
+ return;
+
+ mutex_lock(&event_mask.lock);
+ header = (struct diag_ctrl_last_event_report *)ptr;
+ event_size = ((header->event_last_id / 8) + 1);
+ if (event_size >= driver->event_mask_size) {
+ pr_debug("diag: In %s, receiving event mask size more that Apps can handle\n",
+ __func__);
+ temp = krealloc(driver->event_mask->ptr, event_size,
+ GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: In %s, unable to reallocate event mask to support events from %d\n",
+ __func__, peripheral);
+ goto err;
+ }
+ driver->event_mask->ptr = temp;
+ driver->event_mask_size = event_size;
+ }
+
+ driver->num_event_id[peripheral] = header->event_last_id;
+ if (header->event_last_id > driver->last_event_id)
+ driver->last_event_id = header->event_last_id;
+err:
+ mutex_unlock(&event_mask.lock);
+}
+
+static void process_log_range_report(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ int i;
+ int read_len = 0;
+ int header_len = sizeof(struct diag_ctrl_log_range_report);
+ uint8_t *ptr = buf;
+ struct diag_ctrl_log_range_report *header = NULL;
+ struct diag_ctrl_log_range *log_range = NULL;
+ struct diag_log_mask_t *mask_ptr = NULL;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+ return;
+
+ header = (struct diag_ctrl_log_range_report *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
+ driver->num_equip_id[peripheral] = header->num_ranges;
+ for (i = 0; i < header->num_ranges && read_len < len; i++) {
+ log_range = (struct diag_ctrl_log_range *)ptr;
+ ptr += sizeof(struct diag_ctrl_log_range);
+ read_len += sizeof(struct diag_ctrl_log_range);
+
+ if (log_range->equip_id >= MAX_EQUIP_ID) {
+ pr_err("diag: receiving log equip id %d more than supported equip id: %d from peripheral: %d\n",
+ log_range->equip_id, MAX_EQUIP_ID, peripheral);
+ continue;
+ }
+ mask_ptr = (struct diag_log_mask_t *)log_mask.ptr;
+ mask_ptr = &mask_ptr[log_range->equip_id];
+
+ mutex_lock(&(mask_ptr->lock));
+ mask_ptr->num_items = log_range->num_items;
+ mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
+ mutex_unlock(&(mask_ptr->lock));
+ }
+}
+
+static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
+ struct diag_ssid_range_t *range)
+{
+ uint32_t temp_range;
+
+ if (!mask || !range)
+ return -EIO;
+ if (range->ssid_last < range->ssid_first) {
+ pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+ __func__, range->ssid_first, range->ssid_last);
+ return -EINVAL;
+ }
+ if (range->ssid_last >= mask->ssid_last) {
+ temp_range = range->ssid_last - mask->ssid_first + 1;
+ mask->ssid_last = range->ssid_last;
+ mask->range = temp_range;
+ }
+
+ return 0;
+}
+
+static void process_ssid_range_report(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ int i;
+ int j;
+ int read_len = 0;
+ int found = 0;
+ int new_size = 0;
+ int err = 0;
+ struct diag_ctrl_ssid_range_report *header = NULL;
+ struct diag_ssid_range_t *ssid_range = NULL;
+ int header_len = sizeof(struct diag_ctrl_ssid_range_report);
+ struct diag_msg_mask_t *mask_ptr = NULL;
+ uint8_t *ptr = buf;
+ uint8_t *temp = NULL;
+ uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len)
+ return;
+
+ header = (struct diag_ctrl_ssid_range_report *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
+ driver->max_ssid_count[peripheral] = header->count;
+ for (i = 0; i < header->count && read_len < len; i++) {
+ ssid_range = (struct diag_ssid_range_t *)ptr;
+ ptr += sizeof(struct diag_ssid_range_t);
+ read_len += sizeof(struct diag_ssid_range_t);
+ mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
+ found = 0;
+ for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
+ if (mask_ptr->ssid_first != ssid_range->ssid_first)
+ continue;
+ mutex_lock(&mask_ptr->lock);
+ err = update_msg_mask_tbl_entry(mask_ptr, ssid_range);
+ mutex_unlock(&mask_ptr->lock);
+ if (err == -ENOMEM) {
+ pr_err("diag: In %s, unable to increase the msg mask table range\n",
+ __func__);
+ }
+ found = 1;
+ break;
+ }
+
+ if (found)
+ continue;
+
+ new_size = (driver->msg_mask_tbl_count + 1) *
+ sizeof(struct diag_msg_mask_t);
+ temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n",
+ __func__, ssid_range->ssid_first,
+ ssid_range->ssid_last);
+ continue;
+ }
+ msg_mask.ptr = temp;
+ /* krealloc may have moved the table; point at the new slot */
+ mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr +
+ driver->msg_mask_tbl_count;
+ err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range);
+ if (err) {
+ pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n",
+ __func__, ssid_range->ssid_first,
+ ssid_range->ssid_last, err);
+ continue;
+ }
+ driver->msg_mask_tbl_count += 1;
+ }
+}
+
+static void diag_build_time_mask_update(uint8_t *buf,
+ struct diag_ssid_range_t *range)
+{
+ int i;
+ int j;
+ int err = 0;
+ int found = 0;
+ int new_size = 0;
+ uint8_t *temp = NULL;
+ uint32_t *mask_ptr = (uint32_t *)buf;
+ uint32_t *dest_ptr = NULL;
+ struct diag_msg_mask_t *build_mask = NULL;
+
+ if (!range || !buf)
+ return;
+
+ if (range->ssid_last < range->ssid_first) {
+ pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+ __func__, range->ssid_first, range->ssid_last);
+ return;
+ }
+
+ build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
+
+ for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+ if (build_mask->ssid_first != range->ssid_first)
+ continue;
+ found = 1;
+ mutex_lock(&build_mask->lock);
+ err = update_msg_mask_tbl_entry(build_mask, range);
+ if (err == -ENOMEM) {
+ pr_err("diag: In %s, unable to increase the msg build mask table range\n",
+ __func__);
+ }
+ dest_ptr = build_mask->ptr;
+ for (j = 0; j < build_mask->range; j++, mask_ptr++, dest_ptr++)
+ *(uint32_t *)dest_ptr |= *mask_ptr;
+ mutex_unlock(&build_mask->lock);
+ break;
+ }
+
+ if (found)
+ goto end;
+ new_size = (driver->msg_mask_tbl_count + 1) *
+ sizeof(struct diag_msg_mask_t);
+ temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
+ if (!temp) {
+ pr_err("diag: In %s, unable to create a new entry for build time mask\n",
+ __func__);
+ goto end;
+ }
+ driver->build_time_mask->ptr = temp;
+ /* krealloc may have moved the table; point at the new slot */
+ build_mask = (struct diag_msg_mask_t *)driver->build_time_mask->ptr +
+ driver->msg_mask_tbl_count;
+ err = diag_create_msg_mask_table_entry(build_mask, range);
+ if (err) {
+ pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n",
+ __func__, err);
+ goto end;
+ }
+ driver->msg_mask_tbl_count += 1;
+end:
+ return;
+}
+
+static void process_build_mask_report(uint8_t *buf, uint32_t len,
+ uint8_t peripheral)
+{
+ int i;
+ int read_len = 0;
+ int num_items = 0;
+ int header_len = sizeof(struct diag_ctrl_build_mask_report);
+ uint8_t *ptr = buf;
+ struct diag_ctrl_build_mask_report *header = NULL;
+ struct diag_ssid_range_t *range = NULL;
+
+ if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len)
+ return;
+
+ header = (struct diag_ctrl_build_mask_report *)ptr;
+ ptr += header_len;
+ /* Don't account for pkt_id and length */
+ read_len += header_len - (2 * sizeof(uint32_t));
+
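+ /*
+ * Each report entry is a diag_ssid_range_t immediately followed by
+ * (ssid_last - ssid_first + 1) uint32 build-time masks.
+ */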
+ for (i = 0; i < header->count && read_len < len; i++) {
+ range = (struct diag_ssid_range_t *)ptr;
+ ptr += sizeof(struct diag_ssid_range_t);
+ read_len += sizeof(struct diag_ssid_range_t);
+ num_items = range->ssid_last - range->ssid_first + 1;
+ diag_build_time_mask_update(ptr, range);
+ ptr += num_items * sizeof(uint32_t);
+ read_len += num_items * sizeof(uint32_t);
+ }
+}
+
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+ int len)
+{
+ uint32_t read_len = 0;
+ uint32_t header_len = sizeof(struct diag_ctrl_pkt_header_t);
+ uint8_t *ptr = buf;
+ struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
+
+ if (!buf || len <= 0 || !p_info)
+ return;
+
+ if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
+ pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
+ p_info->peripheral);
+ return;
+ }
+
+ while (read_len + header_len < len) {
+ ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
+ switch (ctrl_pkt->pkt_id) {
+ case DIAG_CTRL_MSG_REG:
+ process_command_registration(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_DEREG:
+ process_command_deregistration(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_FEATURE:
+ process_incoming_feature_mask(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_LAST_EVENT_REPORT:
+ process_last_event_report(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_LOG_RANGE_REPORT:
+ process_log_range_report(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_SSID_RANGE_REPORT:
+ process_ssid_range_report(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_BUILD_MASK_REPORT:
+ process_build_mask_report(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ case DIAG_CTRL_MSG_PD_STATUS:
+ process_pd_status(ptr, ctrl_pkt->len,
+ p_info->peripheral);
+ break;
+ default:
+ pr_debug("diag: Control packet %d not supported\n",
+ ctrl_pkt->pkt_id);
+ }
+ ptr += header_len + ctrl_pkt->len;
+ read_len += header_len + ctrl_pkt->len;
+ }
+}
+
+static int diag_compute_real_time(int idx)
+{
+ int real_time = MODE_REALTIME;
+
+ if (driver->proc_active_mask == 0) {
+ /*
+ * There are no DCI or Memory Device processes. Diag should
+ * be in Real Time mode irrespective of USB connection
+ */
+ real_time = MODE_REALTIME;
+ } else if (driver->proc_rt_vote_mask[idx] & driver->proc_active_mask) {
+ /*
+ * At least one process is alive and is voting for Real Time
+ * data - Diag should be in real time mode irrespective of USB
+ * connection.
+ */
+ real_time = MODE_REALTIME;
+ } else if (driver->usb_connected) {
+ /*
+ * If USB is connected, check individual process. If Memory
+ * Device Mode is active, set the mode requested by Memory
+ * Device process. Set to realtime mode otherwise.
+ */
+ if ((driver->proc_rt_vote_mask[idx] &
+ DIAG_PROC_MEMORY_DEVICE) == 0)
+ real_time = MODE_NONREALTIME;
+ else
+ real_time = MODE_REALTIME;
+ } else {
+ /*
+ * We come here if USB is not connected and the active
+ * processes are voting for Non realtime mode.
+ */
+ real_time = MODE_NONREALTIME;
+ }
+ return real_time;
+}
+
+static void diag_create_diag_mode_ctrl_pkt(unsigned char *dest_buf,
+ int real_time)
+{
+ struct diag_ctrl_msg_diagmode diagmode;
+ int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+
+ if (!dest_buf)
+ return;
+
+ diagmode.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
+ diagmode.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN;
+ diagmode.version = 1;
+ diagmode.sleep_vote = real_time ? 1 : 0;
+ /*
+ * 0 - Disables real-time logging (to prevent
+ * frequent APPS wake-ups, etc.).
+ * 1 - Enable real-time logging
+ */
+ diagmode.real_time = real_time;
+ diagmode.use_nrt_values = 0;
+ diagmode.commit_threshold = 0;
+ diagmode.sleep_threshold = 0;
+ diagmode.sleep_time = 0;
+ diagmode.drain_timer_val = 0;
+ diagmode.event_stale_timer_val = 0;
+
+ memcpy(dest_buf, &diagmode, msg_size);
+}
+
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index)
+{
+ int i;
+
+ mutex_lock(&driver->real_time_mutex);
+ if (vote)
+ driver->proc_active_mask |= proc;
+ else {
+ driver->proc_active_mask &= ~proc;
+ if (index == ALL_PROC) {
+ for (i = 0; i < DIAG_NUM_PROC; i++)
+ driver->proc_rt_vote_mask[i] |= proc;
+ } else {
+ driver->proc_rt_vote_mask[index] |= proc;
+ }
+ }
+ mutex_unlock(&driver->real_time_mutex);
+}
+
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index)
+{
+ int i;
+
+ if (index >= DIAG_NUM_PROC) {
+ pr_err("diag: In %s, invalid index %d\n", __func__, index);
+ return;
+ }
+
+ mutex_lock(&driver->real_time_mutex);
+ if (index == ALL_PROC) {
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ if (real_time)
+ driver->proc_rt_vote_mask[i] |= proc;
+ else
+ driver->proc_rt_vote_mask[i] &= ~proc;
+ }
+ } else {
+ if (real_time)
+ driver->proc_rt_vote_mask[index] |= proc;
+ else
+ driver->proc_rt_vote_mask[index] &= ~proc;
+ }
+ mutex_unlock(&driver->real_time_mutex);
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
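+/*
+ * The packet written to the remote DCI channel below is framed as:
+ * [diag_dci_header_t][diag_ctrl_msg_diagmode][CONTROL_CHAR terminator].
+ */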
+static void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+ unsigned char *buf = NULL;
+ int err = 0;
+ struct diag_dci_header_t dci_header;
+ int dci_header_size = sizeof(struct diag_dci_header_t);
+ int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+ uint32_t write_len = 0;
+
+ if (token < 0 || token >= NUM_DCI_PROC) {
+ pr_err("diag: Invalid remote device channel in %s, token: %d\n",
+ __func__, token);
+ return;
+ }
+
+ if (real_time != MODE_REALTIME && real_time != MODE_NONREALTIME) {
+ pr_err("diag: Invalid real time value in %s, type: %d\n",
+ __func__, real_time);
+ return;
+ }
+
+ buf = dci_get_buffer_from_bridge(token);
+ if (!buf) {
+ pr_err("diag: In %s, unable to get dci buffers to write data\n",
+ __func__);
+ return;
+ }
+ /* Frame the DCI header */
+ dci_header.start = CONTROL_CHAR;
+ dci_header.version = 1;
+ dci_header.length = msg_size + 1;
+ dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+ memcpy(buf + write_len, &dci_header, dci_header_size);
+ write_len += dci_header_size;
+ diag_create_diag_mode_ctrl_pkt(buf + write_len, real_time);
+ write_len += msg_size;
+ *(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+ write_len += sizeof(uint8_t);
+ err = diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, write_len);
+ if (err != write_len) {
+ pr_err("diag: cannot send nrt mode ctrl pkt, err: %d\n", err);
+ diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+ } else {
+ driver->real_time_mode[token + 1] = real_time;
+ }
+}
+#else
+static inline void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+}
+#endif
+
+#ifdef CONFIG_DIAG_OVER_USB
+void diag_real_time_work_fn(struct work_struct *work)
+{
+ int temp_real_time = MODE_REALTIME, i, j;
+ uint8_t send_update = 1;
+
+ /*
+ * If any peripheral in the local processor is in either threshold or
+ * circular buffering mode, don't send the real time mode control
+ * packet.
+ */
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!driver->feature[i].peripheral_buffering)
+ continue;
+ switch (driver->buffering_mode[i].mode) {
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ send_update = 0;
+ break;
+ }
+ }
+
+ mutex_lock(&driver->mode_lock);
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ temp_real_time = diag_compute_real_time(i);
+ if (temp_real_time == driver->real_time_mode[i]) {
+ pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+ i, temp_real_time);
+ continue;
+ }
+
+ if (i == DIAG_LOCAL_PROC) {
+ if (!send_update) {
+ pr_debug("diag: In %s, cannot send real time mode pkt since one of the periperhal is in buffering mode\n",
+ __func__);
+ break;
+ }
+ for (j = 0; j < NUM_PERIPHERALS; j++)
+ diag_send_real_time_update(j,
+ temp_real_time);
+ } else {
+ diag_send_diag_mode_update_remote(i - 1,
+ temp_real_time);
+ }
+ }
+ mutex_unlock(&driver->mode_lock);
+
+ if (driver->real_time_update_busy > 0)
+ driver->real_time_update_busy--;
+}
+#else
+void diag_real_time_work_fn(struct work_struct *work)
+{
+ int temp_real_time = MODE_REALTIME, i, j;
+
+ for (i = 0; i < DIAG_NUM_PROC; i++) {
+ if (driver->proc_active_mask == 0) {
+ /*
+ * There are no DCI or Memory Device processes.
+ * Diag should be in Real Time mode.
+ */
+ temp_real_time = MODE_REALTIME;
+ } else if (!(driver->proc_rt_vote_mask[i] &
+ driver->proc_active_mask)) {
+ /* No active process is voting for real time mode */
+ temp_real_time = MODE_NONREALTIME;
+ }
+ if (temp_real_time == driver->real_time_mode[i]) {
+ pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+ i, temp_real_time);
+ continue;
+ }
+
+ if (i == DIAG_LOCAL_PROC) {
+ for (j = 0; j < NUM_PERIPHERALS; j++)
+ diag_send_real_time_update(
+ j, temp_real_time);
+ } else {
+ diag_send_diag_mode_update_remote(i - 1,
+ temp_real_time);
+ }
+ }
+
+ if (driver->real_time_update_busy > 0)
+ driver->real_time_update_busy--;
+}
+#endif
+
+static int __diag_send_real_time_update(uint8_t peripheral, int real_time)
+{
+ char buf[sizeof(struct diag_ctrl_msg_diagmode)];
+ int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+ int err = 0;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return -EINVAL;
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return err;
+ }
+
+ if (real_time != MODE_NONREALTIME && real_time != MODE_REALTIME) {
+ pr_err("diag: In %s, invalid real time mode %d, peripheral: %d\n",
+ __func__, real_time, peripheral);
+ return -EINVAL;
+ }
+
+ diag_create_diag_mode_ctrl_pkt(buf, real_time);
+
+ mutex_lock(&driver->diag_cntl_mutex);
+ err = diagfwd_write(peripheral, TYPE_CNTL, buf, msg_size);
+ if (err && err != -ENODEV) {
+ pr_err("diag: In %s, unable to write to socket, peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ msg_size, err);
+ } else {
+ driver->real_time_mode[DIAG_LOCAL_PROC] = real_time;
+ }
+
+ mutex_unlock(&driver->diag_cntl_mutex);
+
+ return err;
+}
+
+int diag_send_real_time_update(uint8_t peripheral, int real_time)
+{
+ int i;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ if (!driver->buffering_flag[i])
+ continue;
+ /*
+ * One of the peripherals is in buffering mode. Don't set
+ * the RT value.
+ */
+ return -EINVAL;
+ }
+
+ return __diag_send_real_time_update(peripheral, real_time);
+}
+
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
+{
+ int err = 0;
+ int mode = MODE_REALTIME;
+ uint8_t peripheral = 0;
+
+ if (!params)
+ return -EIO;
+
+ peripheral = params->peripheral;
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+ peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->buffering_flag[peripheral])
+ return -EINVAL;
+
+ switch (params->mode) {
+ case DIAG_BUFFERING_MODE_STREAMING:
+ mode = MODE_REALTIME;
+ break;
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ mode = MODE_NONREALTIME;
+ break;
+ default:
+ pr_err("diag: In %s, invalid tx mode %d\n", __func__,
+ params->mode);
+ return -EINVAL;
+ }
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ driver->buffering_flag[peripheral] = 0;
+ return -EIO;
+ }
+
+ /*
+ * Sanity check the watermark values. These must be
+ * checked irrespective of the buffering mode.
+ */
+ if (((params->high_wm_val > DIAG_MAX_WM_VAL) ||
+ (params->low_wm_val > DIAG_MAX_WM_VAL)) ||
+ (params->low_wm_val > params->high_wm_val) ||
+ ((params->low_wm_val == params->high_wm_val) &&
+ (params->low_wm_val != DIAG_MIN_WM_VAL))) {
+ pr_err("diag: In %s, invalid watermark values, high: %d, low: %d, peripheral: %d\n",
+ __func__, params->high_wm_val, params->low_wm_val,
+ peripheral);
+ return -EINVAL;
+ }
+
+ mutex_lock(&driver->mode_lock);
+ err = diag_send_buffering_tx_mode_pkt(peripheral, params);
+ if (err) {
+ pr_err("diag: In %s, unable to send buffering mode packet to peripheral %d, err: %d\n",
+ __func__, peripheral, err);
+ goto fail;
+ }
+ err = diag_send_buffering_wm_values(peripheral, params);
+ if (err) {
+ pr_err("diag: In %s, unable to send buffering wm value packet to peripheral %d, err: %d\n",
+ __func__, peripheral, err);
+ goto fail;
+ }
+ err = __diag_send_real_time_update(peripheral, mode);
+ if (err) {
+ pr_err("diag: In %s, unable to send mode update to peripheral %d, mode: %d, err: %d\n",
+ __func__, peripheral, mode, err);
+ goto fail;
+ }
+ driver->buffering_mode[peripheral].peripheral = peripheral;
+ driver->buffering_mode[peripheral].mode = params->mode;
+ driver->buffering_mode[peripheral].low_wm_val = params->low_wm_val;
+ driver->buffering_mode[peripheral].high_wm_val = params->high_wm_val;
+ if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
+ driver->buffering_flag[peripheral] = 0;
+fail:
+ mutex_unlock(&driver->mode_lock);
+ return err;
+}
+
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data)
+{
+ struct diag_ctrl_msg_stm stm_msg;
+ int msg_size = sizeof(struct diag_ctrl_msg_stm);
+ int err = 0;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return -EIO;
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return -ENODEV;
+ }
+
+ if (driver->feature[peripheral].stm_support == DISABLE_STM)
+ return -EINVAL;
+
+ stm_msg.ctrl_pkt_id = 21; /* STM control packet id */
+ /* Data length covers the version and control_data fields */
+ stm_msg.ctrl_pkt_data_len = 5;
+ stm_msg.version = 1;
+ stm_msg.control_data = stm_control_data;
+ err = diagfwd_write(peripheral, TYPE_CNTL, &stm_msg, msg_size);
+ if (err && err != -ENODEV) {
+ pr_err("diag: In %s, unable to write to socket, peripheral: %d, type: %d, len: %d, err: %d\n",
+ __func__, peripheral, TYPE_CNTL,
+ msg_size, err);
+ }
+
+ return err;
+}
+
+int diag_send_peripheral_drain_immediate(uint8_t peripheral)
+{
+ int err = 0;
+ struct diag_ctrl_drain_immediate ctrl_pkt;
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return -ENODEV;
+ }
+
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+ /* The length of the ctrl pkt is size of version and stream id */
+ ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+
+ return err;
+}
+
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+ struct diag_buffering_mode_t *params)
+{
+ int err = 0;
+ struct diag_ctrl_peripheral_tx_mode ctrl_pkt;
+
+ if (!params)
+ return -EIO;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return -EINVAL;
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ return -EINVAL;
+ }
+
+ if (params->peripheral != peripheral)
+ return -EINVAL;
+
+ switch (params->mode) {
+ case DIAG_BUFFERING_MODE_STREAMING:
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ break;
+ default:
+ pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+ params->mode);
+ return -EINVAL;
+ }
+
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+ /* Control packet length is size of version, stream_id and tx_mode */
+ ctrl_pkt.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.tx_mode = params->mode;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+ peripheral, err);
+ goto fail;
+ }
+ driver->buffering_mode[peripheral].mode = params->mode;
+
+fail:
+ return err;
+}
+
+int diag_send_buffering_wm_values(uint8_t peripheral,
+ struct diag_buffering_mode_t *params)
+{
+ int err = 0;
+ struct diag_ctrl_set_wq_val ctrl_pkt;
+
+ if (!params)
+ return -EIO;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return -EINVAL;
+
+ if (!driver->feature[peripheral].peripheral_buffering) {
+ pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
+ __func__, peripheral);
+ return -EINVAL;
+ }
+
+ if (!driver->diagfwd_cntl[peripheral] ||
+ !driver->diagfwd_cntl[peripheral]->ch_open) {
+ pr_debug("diag: In %s, control channel is not open, p: %d\n",
+ __func__, peripheral);
+ return -ENODEV;
+ }
+
+ if (params->peripheral != peripheral)
+ return -EINVAL;
+
+ switch (params->mode) {
+ case DIAG_BUFFERING_MODE_STREAMING:
+ case DIAG_BUFFERING_MODE_THRESHOLD:
+ case DIAG_BUFFERING_MODE_CIRCULAR:
+ break;
+ default:
+ pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+ params->mode);
+ return -EINVAL;
+ }
+
+ ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+ /* Control packet length is size of version, stream_id and wmq values */
+ ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+ ctrl_pkt.version = 1;
+ ctrl_pkt.stream_id = 1;
+ ctrl_pkt.high_wm_val = params->high_wm_val;
+ ctrl_pkt.low_wm_val = params->low_wm_val;
+
+ err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+ sizeof(ctrl_pkt));
+ if (err && err != -ENODEV) {
+ pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+ peripheral, err);
+ }
+
+ return err;
+}
+
+int diagfwd_cntl_init(void)
+{
+ uint8_t peripheral = 0;
+
+ reg_dirty = 0;
+ driver->polling_reg_flag = 0;
+ driver->log_on_demand_support = 1;
+ driver->stm_peripheral = 0;
+ driver->close_transport = 0;
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
+ driver->buffering_flag[peripheral] = 0;
+
+ mutex_init(&driver->cntl_lock);
+ INIT_WORK(&(driver->stm_update_work), diag_stm_update_work_fn);
+ INIT_WORK(&(driver->mask_update_work), diag_mask_update_work_fn);
+ INIT_WORK(&(driver->close_transport_work),
+ diag_close_transport_work_fn);
+
+ driver->cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
+ if (!driver->cntl_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void diagfwd_cntl_channel_init(void)
+{
+ uint8_t peripheral;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ diagfwd_early_open(peripheral);
+ diagfwd_open(peripheral, TYPE_CNTL);
+ }
+}
+
+void diagfwd_cntl_exit(void)
+{
+ if (driver->cntl_wq)
+ destroy_workqueue(driver->cntl_wq);
+}
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
new file mode 100644
index 0000000..129cb1f
--- /dev/null
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -0,0 +1,282 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_CNTL_H
+#define DIAGFWD_CNTL_H
+
+/* Message registration commands */
+#define DIAG_CTRL_MSG_REG 1
+/* Message passing for DTR events */
+#define DIAG_CTRL_MSG_DTR 2
+/* Control Diag sleep vote, buffering etc */
+#define DIAG_CTRL_MSG_DIAGMODE 3
+/* Diag data based on "light" diag mask */
+#define DIAG_CTRL_MSG_DIAGDATA 4
+/* Send diag internal feature mask 'diag_int_feature_mask' */
+#define DIAG_CTRL_MSG_FEATURE 8
+/* Send Diag log mask for a particular equip id */
+#define DIAG_CTRL_MSG_EQUIP_LOG_MASK 9
+/* Send Diag event mask */
+#define DIAG_CTRL_MSG_EVENT_MASK_V2 10
+/* Send Diag F3 mask */
+#define DIAG_CTRL_MSG_F3_MASK_V2 11
+#define DIAG_CTRL_MSG_NUM_PRESETS 12
+#define DIAG_CTRL_MSG_SET_PRESET_ID 13
+#define DIAG_CTRL_MSG_LOG_MASK_WITH_PRESET_ID 14
+#define DIAG_CTRL_MSG_EVENT_MASK_WITH_PRESET_ID 15
+#define DIAG_CTRL_MSG_F3_MASK_WITH_PRESET_ID 16
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE 17
+#define DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM 18
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL 19
+#define DIAG_CTRL_MSG_DCI_CONNECTION_STATUS 20
+#define DIAG_CTRL_MSG_LAST_EVENT_REPORT 22
+#define DIAG_CTRL_MSG_LOG_RANGE_REPORT 23
+#define DIAG_CTRL_MSG_SSID_RANGE_REPORT 24
+#define DIAG_CTRL_MSG_BUILD_MASK_REPORT 25
+#define DIAG_CTRL_MSG_DEREG 27
+#define DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT 29
+#define DIAG_CTRL_MSG_PD_STATUS 30
+#define DIAG_CTRL_MSG_TIME_SYNC_PKT 31
+
+/*
+ * Feature Mask Definitions: Feature mask is used to specify Diag features
+ * supported by the Apps processor
+ *
+ * F_DIAG_FEATURE_MASK_SUPPORT - Denotes we support sending and receiving
+ * feature masks
+ * F_DIAG_LOG_ON_DEMAND_APPS - Apps responds to Log on Demand request
+ * F_DIAG_REQ_RSP_SUPPORT - Apps supports a dedicated request/response channel
+ * F_DIAG_APPS_HDLC_ENCODE - HDLC encoding is done on the forward channel
+ * F_DIAG_STM - Denotes Apps supports Diag over STM
+ */
+#define F_DIAG_FEATURE_MASK_SUPPORT 0
+#define F_DIAG_LOG_ON_DEMAND_APPS 2
+#define F_DIAG_REQ_RSP_SUPPORT 4
+#define F_DIAG_APPS_HDLC_ENCODE 6
+#define F_DIAG_STM 9
+#define F_DIAG_PERIPHERAL_BUFFERING 10
+#define F_DIAG_MASK_CENTRALIZATION 11
+#define F_DIAG_SOCKETS_ENABLED 13
+#define F_DIAG_DCI_EXTENDED_HEADER_SUPPORT 14
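+
+/*
+ * Illustrative only: these values are bit positions in the byte-wise
+ * feature mask, so support for e.g. STM is tested as
+ * mask[F_DIAG_STM / 8] & (1 << (F_DIAG_STM % 8)).
+ */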
+
+#define ENABLE_SEPARATE_CMDRSP 1
+#define DISABLE_SEPARATE_CMDRSP 0
+
+#define DISABLE_STM 0
+#define ENABLE_STM 1
+#define STATUS_STM 2
+
+#define UPDATE_PERIPHERAL_STM_STATE 1
+#define CLEAR_PERIPHERAL_STM_STATE 2
+
+#define ENABLE_APPS_HDLC_ENCODING 1
+#define DISABLE_APPS_HDLC_ENCODING 0
+
+#define DIAG_MODE_PKT_LEN 36
+
+struct diag_ctrl_pkt_header_t {
+ uint32_t pkt_id;
+ uint32_t len;
+};
+
+struct cmd_code_range {
+ uint16_t cmd_code_lo;
+ uint16_t cmd_code_hi;
+ uint32_t data;
+};
+
+struct diag_ctrl_cmd_reg {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint16_t cmd_code;
+ uint16_t subsysid;
+ uint16_t count_entries;
+ uint16_t port;
+};
+
+struct diag_ctrl_cmd_dereg {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint16_t cmd_code;
+ uint16_t subsysid;
+ uint16_t count_entries;
+} __packed;
+
+struct diag_ctrl_event_mask {
+ uint32_t cmd_type;
+ uint32_t data_len;
+ uint8_t stream_id;
+ uint8_t status;
+ uint8_t event_config;
+ uint32_t event_mask_size;
+ /* Copy event mask here */
+} __packed;
+
+struct diag_ctrl_log_mask {
+ uint32_t cmd_type;
+ uint32_t data_len;
+ uint8_t stream_id;
+ uint8_t status;
+ uint8_t equip_id;
+ uint32_t num_items; /* Last log code for this equip_id */
+ uint32_t log_mask_size; /* Size of log mask stored in log_mask[] */
+ /* Copy log mask here */
+} __packed;
+
+struct diag_ctrl_msg_mask {
+ uint32_t cmd_type;
+ uint32_t data_len;
+ uint8_t stream_id;
+ uint8_t status;
+ uint8_t msg_mode;
+ uint16_t ssid_first; /* Start of range of supported SSIDs */
+ uint16_t ssid_last; /* Last SSID in range */
+ uint32_t msg_mask_size; /* ssid_last - ssid_first + 1 */
+ /* Copy msg mask here */
+} __packed;
+
+struct diag_ctrl_feature_mask {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t feature_mask_len;
+ /* Copy feature mask here */
+} __packed;
+
+struct diag_ctrl_msg_diagmode {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint32_t sleep_vote;
+ uint32_t real_time;
+ uint32_t use_nrt_values;
+ uint32_t commit_threshold;
+ uint32_t sleep_threshold;
+ uint32_t sleep_time;
+ uint32_t drain_timer_val;
+ uint32_t event_stale_timer_val;
+} __packed;
+
+struct diag_ctrl_msg_stm {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint8_t control_data;
+} __packed;
+
+struct diag_ctrl_msg_time_sync {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint8_t time_api;
+} __packed;
+
+struct diag_ctrl_dci_status {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint8_t count;
+} __packed;
+
+struct diag_ctrl_dci_handshake_pkt {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint32_t magic;
+} __packed;
+
+struct diag_ctrl_msg_pd_status {
+ uint32_t ctrl_pkt_id;
+ uint32_t ctrl_pkt_data_len;
+ uint32_t version;
+ uint32_t pd_id;
+ uint8_t status;
+} __packed;
+
+struct diag_ctrl_last_event_report {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint16_t event_last_id;
+} __packed;
+
+struct diag_ctrl_log_range_report {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint32_t last_equip_id;
+ uint32_t num_ranges;
+} __packed;
+
+struct diag_ctrl_log_range {
+ uint32_t equip_id;
+ uint32_t num_items;
+} __packed;
+
+struct diag_ctrl_ssid_range_report {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint32_t count;
+} __packed;
+
+struct diag_ctrl_build_mask_report {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint32_t count;
+} __packed;
+
+struct diag_ctrl_peripheral_tx_mode {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t stream_id;
+ uint8_t tx_mode;
+} __packed;
+
+struct diag_ctrl_drain_immediate {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t stream_id;
+} __packed;
+
+struct diag_ctrl_set_wq_val {
+ uint32_t pkt_id;
+ uint32_t len;
+ uint32_t version;
+ uint8_t stream_id;
+ uint8_t high_wm_val;
+ uint8_t low_wm_val;
+} __packed;
+
+int diagfwd_cntl_init(void);
+void diagfwd_cntl_channel_init(void);
+void diagfwd_cntl_exit(void);
+void diag_cntl_channel_open(struct diagfwd_info *p_info);
+void diag_cntl_channel_close(struct diagfwd_info *p_info);
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+ int len);
+int diag_send_real_time_update(uint8_t peripheral, int real_time);
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params);
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index);
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index);
+void diag_real_time_work_fn(struct work_struct *work);
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data);
+int diag_send_peripheral_drain_immediate(uint8_t peripheral);
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+ struct diag_buffering_mode_t *params);
+int diag_send_buffering_wm_values(uint8_t peripheral,
+ struct diag_buffering_mode_t *params);
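+
+/*
+ * Usage sketch (illustrative only; the real callers live outside this
+ * header): peripheral buffering is configured in two steps, tx mode
+ * first, then the watermark values:
+ *
+ *	struct diag_buffering_mode_t params = {
+ *		.peripheral = PERIPHERAL_MODEM,
+ *		.mode = DIAG_BUFFERING_MODE_THRESHOLD,
+ *		.high_wm_val = 70,	(assumed percentage thresholds)
+ *		.low_wm_val = 20,
+ *	};
+ *
+ *	err = diag_send_buffering_tx_mode_pkt(PERIPHERAL_MODEM, &params);
+ *	if (!err)
+ *		err = diag_send_buffering_wm_values(PERIPHERAL_MODEM, &params);
+ */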
+#endif
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
new file mode 100644
index 0000000..74f7dc7
--- /dev/null
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -0,0 +1,706 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <soc/qcom/glink.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_glink.h"
+#include "diag_ipc_logging.h"
+
+struct diag_glink_info glink_data[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DATA,
+ .edge = "mpss",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DATA,
+ .edge = "lpass",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DATA,
+ .edge = "wcnss",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DATA,
+ .edge = "dsps",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DATA,
+ .edge = "wdsp",
+ .name = "DIAG_DATA",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_cntl[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CNTL,
+ .edge = "mpss",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CNTL,
+ .edge = "lpass",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CNTL,
+ .edge = "wcnss",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CNTL,
+ .edge = "dsps",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CNTL,
+ .edge = "wdsp",
+ .name = "DIAG_CTRL",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_dci[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI,
+ .edge = "mpss",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI,
+ .edge = "lpass",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI,
+ .edge = "wcnss",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI,
+ .edge = "dsps",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI,
+ .edge = "wdsp",
+ .name = "DIAG_DCI_DATA",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CMD,
+ .edge = "mpss",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CMD,
+ .edge = "lpass",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CMD,
+ .edge = "wcnss",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CMD,
+ .edge = "dsps",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CMD,
+ .edge = "wdsp",
+ .name = "DIAG_CMD",
+ .hdl = NULL
+ }
+};
+
+struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI_CMD,
+ .edge = "mpss",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI_CMD,
+ .edge = "lpass",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI_CMD,
+ .edge = "wcnss",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI_CMD,
+ .edge = "dsps",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI_CMD,
+ .edge = "wdsp",
+ .name = "DIAG_DCI_CMD",
+ .hdl = NULL
+ }
+};
+
+static void diag_state_open_glink(void *ctxt);
+static void diag_state_close_glink(void *ctxt);
+static int diag_glink_write(void *ctxt, unsigned char *buf, int len);
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_glink_queue_read(void *ctxt);
+
+static struct diag_peripheral_ops glink_ops = {
+ .open = diag_state_open_glink,
+ .close = diag_state_close_glink,
+ .write = diag_glink_write,
+ .read = diag_glink_read,
+ .queue_read = diag_glink_queue_read
+};
+
+static void diag_state_open_glink(void *ctxt)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ glink_info = (struct diag_glink_info *)(ctxt);
+ atomic_set(&glink_info->diag_state, 1);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 1", glink_info->name);
+}
+
+static void diag_glink_queue_read(void *ctxt)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ glink_info = (struct diag_glink_info *)ctxt;
+ if (glink_info->hdl && glink_info->wq &&
+ atomic_read(&glink_info->opened))
+ queue_work(glink_info->wq, &(glink_info->read_work));
+}
+
+static void diag_state_close_glink(void *ctxt)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ if (!ctxt)
+ return;
+
+ glink_info = (struct diag_glink_info *)(ctxt);
+ atomic_set(&glink_info->diag_state, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 0", glink_info->name);
+ wake_up_interruptible(&glink_info->read_wait_q);
+ flush_workqueue(glink_info->wq);
+}
+
+int diag_glink_check_state(void *ctxt)
+{
+ struct diag_glink_info *info = NULL;
+
+ if (!ctxt)
+ return 0;
+
+ info = (struct diag_glink_info *)ctxt;
+ return (int)(atomic_read(&info->diag_state));
+}
+
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+ struct diag_glink_info *glink_info = NULL;
+ int ret_val = 0;
+
+ if (!ctxt || !buf || buf_len <= 0)
+ return -EIO;
+
+ glink_info = (struct diag_glink_info *)ctxt;
+ if (!glink_info || !atomic_read(&glink_info->opened) ||
+ !glink_info->hdl || !glink_info->inited) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:Glink channel not opened");
+ return -EIO;
+ }
+
+ ret_val = glink_queue_rx_intent(glink_info->hdl, buf, buf_len);
+ if (ret_val == 0)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: queued an rx intent ch:%s perip:%d buf:%pK of len:%d\n",
+ glink_info->name, glink_info->peripheral, buf, buf_len);
+
+ return ret_val;
+}
+
+static void diag_glink_read_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ read_work);
+
+ if (!glink_info || !atomic_read(&glink_info->opened))
+ return;
+
+ if (!glink_info->inited) {
+ diag_ws_release();
+ return;
+ }
+
+ diagfwd_channel_read(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_notify_rx(void *hdl, const void *priv,
+ const void *pkt_priv, const void *ptr,
+ size_t size)
+{
+ struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+ int err = 0;
+
+ if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl)
+ return;
+
+ if (size <= 0)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: received a packet %pK of len:%d from periph:%d ch:%d\n",
+ ptr, (int)size, glink_info->peripheral, glink_info->type);
+
+ memcpy((void *)pkt_priv, ptr, size);
+ err = diagfwd_channel_read_done(glink_info->fwd_ctxt,
+ (unsigned char *)pkt_priv, size);
+ glink_rx_done(glink_info->hdl, ptr, false);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Rx done for packet %pK of len:%d periph:%d ch:%d\n",
+ ptr, (int)size, glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_notify_remote_rx_intent(void *hdl, const void *priv,
+ size_t size)
+{
+ struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+ if (!glink_info)
+ return;
+
+ atomic_inc(&glink_info->tx_intent_ready);
+ wake_up_interruptible(&glink_info->wait_q);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag:received remote rx intent for %d type %d\n",
+ glink_info->peripheral, glink_info->type);
+}
+
+static void diag_glink_notify_tx_done(void *hdl, const void *priv,
+ const void *pkt_priv,
+ const void *ptr)
+{
+ struct diag_glink_info *glink_info = NULL;
+ struct diagfwd_info *fwd_info = NULL;
+ int found = 0;
+
+ glink_info = (struct diag_glink_info *)priv;
+ if (!glink_info)
+ return;
+
+ fwd_info = glink_info->fwd_ctxt;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Received glink tx done notify for ptr%pK pkt_priv %pK\n",
+ ptr, pkt_priv);
+ found = diagfwd_write_buffer_done(fwd_info, ptr);
+ if (!found)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Received Tx done on invalid buffer ptr %pK\n", ptr);
+}
+
+static int diag_glink_write(void *ctxt, unsigned char *buf, int len)
+{
+ struct diag_glink_info *glink_info = NULL;
+ int err = 0;
+ uint32_t tx_flags = GLINK_TX_REQ_INTENT;
+
+ if (!ctxt || !buf)
+ return -EIO;
+
+ glink_info = (struct diag_glink_info *)ctxt;
+ if (!glink_info || len <= 0) {
+ pr_err_ratelimited("diag: In %s, invalid params, glink_info: %pK, buf: %pK, len: %d\n",
+ __func__, glink_info, buf, len);
+ return -EINVAL;
+ }
+
+ if (!glink_info->inited || !glink_info->hdl ||
+ !atomic_read(&glink_info->opened)) {
+ pr_err_ratelimited("diag: In %s, glink not inited, glink_info: %pK, buf: %pK, len: %d\n",
+ __func__, glink_info, buf, len);
+ return -ENODEV;
+ }
+
+ if (atomic_read(&glink_info->tx_intent_ready)) {
+ atomic_dec(&glink_info->tx_intent_ready);
+ err = glink_tx(glink_info->hdl, glink_info, buf, len, tx_flags);
+ if (!err) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s wrote to glink, len: %d\n",
+ glink_info->name, len);
+ }
+ } else {
+ err = -ENOMEM;
+ }
+
+ return err;
+}
+
+static void diag_glink_transport_notify_state(void *handle, const void *priv,
+ unsigned int event)
+{
+ struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+ if (!glink_info)
+ return;
+
+ switch (event) {
+ case GLINK_CONNECTED:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received channel connect for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ atomic_set(&glink_info->opened, 1);
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ diagfwd_late_open(glink_info->fwd_ctxt);
+ break;
+ case GLINK_LOCAL_DISCONNECTED:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received channel disconnect for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+
+ break;
+ case GLINK_REMOTE_DISCONNECTED:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received channel remote disconnect for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ atomic_set(&glink_info->opened, 0);
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+ atomic_set(&glink_info->tx_intent_ready, 0);
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s received invalid notification\n",
+ glink_info->name);
+ break;
+ }
+}
+
+static void diag_glink_open_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ open_work);
+ struct glink_open_config open_cfg;
+ void *handle = NULL;
+
+ if (!glink_info || glink_info->hdl)
+ return;
+
+ memset(&open_cfg, 0, sizeof(struct glink_open_config));
+ open_cfg.priv = glink_info;
+ open_cfg.edge = glink_info->edge;
+ open_cfg.name = glink_info->name;
+ open_cfg.notify_rx = diag_glink_notify_rx;
+ open_cfg.notify_tx_done = diag_glink_notify_tx_done;
+ open_cfg.notify_state = diag_glink_transport_notify_state;
+ open_cfg.notify_remote_rx_intent = diag_glink_notify_remote_rx_intent;
+ handle = glink_open(&open_cfg);
+ if (IS_ERR_OR_NULL(handle)) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "error opening channel %s",
+ glink_info->name);
+ } else {
+ glink_info->hdl = handle;
+ }
+}
+
+static void diag_glink_close_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ close_work);
+ if (!glink_info || !glink_info->inited || !glink_info->hdl)
+ return;
+
+ glink_close(glink_info->hdl);
+ atomic_set(&glink_info->opened, 0);
+ atomic_set(&glink_info->tx_intent_ready, 0);
+ glink_info->hdl = NULL;
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_notify_cb(struct glink_link_state_cb_info *cb_info,
+ void *priv)
+{
+ struct diag_glink_info *glink_info = NULL;
+
+ glink_info = (struct diag_glink_info *)priv;
+ if (!glink_info)
+ return;
+ if (!cb_info)
+ return;
+
+ switch (cb_info->link_state) {
+ case GLINK_LINK_STATE_UP:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s channel opened for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ queue_work(glink_info->wq, &glink_info->open_work);
+ break;
+ case GLINK_LINK_STATE_DOWN:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s channel closed for periph:%d\n",
+ glink_info->name, glink_info->peripheral);
+ queue_work(glink_info->wq, &glink_info->close_work);
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Invalid link state notification for ch:%s\n",
+ glink_info->name);
+ break;
+ }
+}
+
+static void glink_late_init(struct diag_glink_info *glink_info)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (!glink_info)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+ glink_info->name);
+
+ diagfwd_register(TRANSPORT_GLINK, glink_info->peripheral,
+ glink_info->type, (void *)glink_info,
+ &glink_ops, &glink_info->fwd_ctxt);
+ fwd_info = glink_info->fwd_ctxt;
+ if (!fwd_info)
+ return;
+
+ glink_info->inited = 1;
+
+ if (atomic_read(&glink_info->opened))
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ glink_info->name);
+}
+
+int diag_glink_init_peripheral(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS) {
+ pr_err("diag: In %s, invalid peripheral %d\n",
+ __func__, peripheral);
+ return -EINVAL;
+ }
+
+ glink_late_init(&glink_data[peripheral]);
+ glink_late_init(&glink_dci[peripheral]);
+ glink_late_init(&glink_cmd[peripheral]);
+ glink_late_init(&glink_dci_cmd[peripheral]);
+
+ return 0;
+}
+
+static void __diag_glink_init(struct diag_glink_info *glink_info)
+{
+ char wq_name[DIAG_GLINK_NAME_SZ + 12];
+ struct glink_link_info link_info;
+ void *link_state_handle = NULL;
+
+ if (!glink_info)
+ return;
+
+ init_waitqueue_head(&glink_info->wait_q);
+ init_waitqueue_head(&glink_info->read_wait_q);
+ mutex_init(&glink_info->lock);
+ strlcpy(wq_name, "DIAG_GLINK_", 12);
+ strlcat(wq_name, glink_info->name, sizeof(glink_info->name));
+ glink_info->wq = create_singlethread_workqueue(wq_name);
+ if (!glink_info->wq) {
+ pr_err("diag: In %s, unable to create workqueue for glink ch:%s\n",
+ __func__, glink_info->name);
+ return;
+ }
+ INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
+ INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
+ INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+ link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
+ link_info.transport = NULL;
+ link_info.edge = glink_info->edge;
+ glink_info->link_state_handle = NULL;
+ link_state_handle = glink_register_link_state_cb(&link_info,
+ (void *)glink_info);
+ if (IS_ERR_OR_NULL(link_state_handle)) {
+ pr_err("diag: In %s, unable to register for glink channel %s\n",
+ __func__, glink_info->name);
+ destroy_workqueue(glink_info->wq);
+ return;
+ }
+ glink_info->link_state_handle = link_state_handle;
+ glink_info->fwd_ctxt = NULL;
+ atomic_set(&glink_info->tx_intent_ready, 0);
+ atomic_set(&glink_info->opened, 0);
+ atomic_set(&glink_info->diag_state, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s initialized fwd_ctxt: %pK hdl: %pK\n",
+ glink_info->name, glink_info->fwd_ctxt,
+ glink_info->link_state_handle);
+}
+
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+ struct diag_glink_info *info = NULL;
+
+ if (!ctxt || !fwd_ctxt)
+ return;
+
+ info = (struct diag_glink_info *)ctxt;
+ info->fwd_ctxt = fwd_ctxt;
+}
+
+int diag_glink_init(void)
+{
+ uint8_t peripheral;
+ struct diag_glink_info *glink_info = NULL;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ glink_info = &glink_cntl[peripheral];
+ __diag_glink_init(glink_info);
+ diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
+ (void *)glink_info, &glink_ops,
+ &(glink_info->fwd_ctxt));
+ glink_info->inited = 1;
+ __diag_glink_init(&glink_data[peripheral]);
+ __diag_glink_init(&glink_cmd[peripheral]);
+ __diag_glink_init(&glink_dci[peripheral]);
+ __diag_glink_init(&glink_dci_cmd[peripheral]);
+ }
+ return 0;
+}
+
+static void __diag_glink_exit(struct diag_glink_info *glink_info)
+{
+ if (!glink_info)
+ return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+ glink_info->name);
+
+ diagfwd_deregister(glink_info->peripheral, glink_info->type,
+ (void *)glink_info);
+ glink_info->fwd_ctxt = NULL;
+ glink_info->hdl = NULL;
+ if (glink_info->wq)
+ destroy_workqueue(glink_info->wq);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+ glink_info->name);
+}
+
+void diag_glink_early_exit(void)
+{
+ int peripheral = 0;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ __diag_glink_exit(&glink_cntl[peripheral]);
+ glink_unregister_link_state_cb(&glink_cntl[peripheral].hdl);
+ }
+}
+
+void diag_glink_exit(void)
+{
+ int peripheral = 0;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ __diag_glink_exit(&glink_data[peripheral]);
+ __diag_glink_exit(&glink_cmd[peripheral]);
+ __diag_glink_exit(&glink_dci[peripheral]);
+ __diag_glink_exit(&glink_dci_cmd[peripheral]);
+ glink_unregister_link_state_cb(&glink_data[peripheral].hdl);
+ glink_unregister_link_state_cb(&glink_cmd[peripheral].hdl);
+ glink_unregister_link_state_cb(&glink_dci[peripheral].hdl);
+ glink_unregister_link_state_cb(&glink_dci_cmd[peripheral].hdl);
+ }
+}
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
new file mode 100644
index 0000000..bad4629
--- /dev/null
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_GLINK_H
+#define DIAGFWD_GLINK_H
+
+#define DIAG_GLINK_NAME_SZ 24
+#define GLINK_DRAIN_BUF_SIZE 4096
+
+struct diag_glink_info {
+ uint8_t peripheral;
+ uint8_t type;
+ uint8_t inited;
+ atomic_t opened;
+ atomic_t diag_state;
+ uint32_t fifo_size;
+ atomic_t tx_intent_ready;
+ void *hdl;
+ void *link_state_handle;
+ char edge[DIAG_GLINK_NAME_SZ];
+ char name[DIAG_GLINK_NAME_SZ];
+ struct mutex lock;
+ wait_queue_head_t read_wait_q;
+ wait_queue_head_t wait_q;
+ struct workqueue_struct *wq;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct work_struct read_work;
+ struct diagfwd_info *fwd_ctxt;
+};
+
+extern struct diag_glink_info glink_data[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cntl[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci[NUM_PERIPHERALS];
+
+int diag_glink_init_peripheral(uint8_t peripheral);
+void diag_glink_exit(void);
+int diag_glink_init(void);
+void diag_glink_early_exit(void);
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_glink_check_state(void *ctxt);
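+
+/*
+ * Sketch (illustrative): each peripheral runs five logical channels
+ * (DATA, CNTL, CMD, DCI, DCI_CMD) over a single G-Link edge. Callers in
+ * the forwarding layer gate traffic on the per-channel diag_state, e.g.:
+ *
+ *	struct diag_glink_info *info = &glink_data[PERIPHERAL_MODEM];
+ *
+ *	if (diag_glink_check_state(info))
+ *		... transport is in the open diag state, safe to forward ...
+ */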
+
+#endif
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
new file mode 100644
index 0000000..81afcae
--- /dev/null
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -0,0 +1,451 @@
+/* Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <asm/current.h>
+#include "diagmem.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_hsic.h"
+
+#define DIAG_HSIC_STRING_SZ 11
+
+struct diag_hsic_info diag_hsic[NUM_HSIC_DEV] = {
+ {
+ .id = HSIC_1,
+ .dev_id = DIAGFWD_MDM,
+ .name = "MDM",
+ .mempool = POOL_TYPE_MDM,
+ .opened = 0,
+ .enabled = 0,
+ .suspended = 0,
+ .hsic_wq = NULL
+ },
+ {
+ .id = HSIC_2,
+ .dev_id = DIAGFWD_MDM_DCI,
+ .name = "MDM_DCI",
+ .mempool = POOL_TYPE_MDM_DCI,
+ .opened = 0,
+ .enabled = 0,
+ .suspended = 0,
+ .hsic_wq = NULL
+ }
+};
+
+static void diag_hsic_read_complete(void *ctxt, char *buf, int len,
+ int actual_size)
+{
+ int err = 0;
+ int index = (int)(uintptr_t)ctxt;
+ struct diag_hsic_info *ch = NULL;
+
+ if (index < 0 || index >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+ __func__, index);
+ return;
+ }
+ ch = &diag_hsic[index];
+
+ /*
+ * Don't pass on the buffer if the channel is closed when a pending read
+ * completes. Also, actual size can be negative error codes - do not
+ * pass on the buffer.
+ */
+ if (!ch->opened || actual_size <= 0)
+ goto fail;
+ err = diag_remote_dev_read_done(ch->dev_id, buf, actual_size);
+ if (err)
+ goto fail;
+ return;
+
+fail:
+ diagmem_free(driver, buf, ch->mempool);
+ queue_work(ch->hsic_wq, &ch->read_work);
+}
+
+static void diag_hsic_write_complete(void *ctxt, char *buf, int len,
+ int actual_size)
+{
+ int index = (int)(uintptr_t)ctxt;
+ struct diag_hsic_info *ch = NULL;
+
+ if (index < 0 || index >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+ __func__, index);
+ return;
+ }
+
+ ch = &diag_hsic[index];
+ diag_remote_dev_write_done(ch->dev_id, buf, actual_size, ch->id);
+}
+
+static int diag_hsic_suspend(void *ctxt)
+{
+ int index = (int)(uintptr_t)ctxt;
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (index < 0 || index >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ ch = &diag_hsic[index];
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->suspended = 1;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ return 0;
+}
+
+static void diag_hsic_resume(void *ctxt)
+{
+ int index = (int)(uintptr_t)ctxt;
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (index < 0 || index >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+ __func__, index);
+ return;
+ }
+ ch = &diag_hsic[index];
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->suspended = 0;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ queue_work(ch->hsic_wq, &(ch->read_work));
+}
+
+static struct diag_bridge_ops diag_hsic_ops[NUM_HSIC_DEV] = {
+ {
+ .ctxt = (void *)HSIC_1,
+ .read_complete_cb = diag_hsic_read_complete,
+ .write_complete_cb = diag_hsic_write_complete,
+ .suspend = diag_hsic_suspend,
+ .resume = diag_hsic_resume,
+ },
+ {
+ .ctxt = (void *)HSIC_2,
+ .read_complete_cb = diag_hsic_read_complete,
+ .write_complete_cb = diag_hsic_write_complete,
+ .suspend = diag_hsic_suspend,
+ .resume = diag_hsic_resume,
+ }
+};
+
+static int hsic_open(int id)
+{
+ int err = 0;
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err("diag: Invalid index %d in %s\n", id, __func__);
+ return -EINVAL;
+ }
+
+ ch = &diag_hsic[id];
+ if (!ch->enabled)
+ return -ENODEV;
+
+ if (ch->opened) {
+ pr_debug("diag: HSIC channel %d is already opened\n", ch->id);
+ return -ENODEV;
+ }
+
+ err = diag_bridge_open(ch->id, &diag_hsic_ops[ch->id]);
+ if (err) {
+ pr_err("diag: Unable to open HSIC channel %d, err: %d",
+ ch->id, err);
+ return err;
+ }
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->opened = 1;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ diagmem_init(driver, ch->mempool);
+ /* Notify the bridge that the channel is open */
+ diag_remote_dev_open(ch->dev_id);
+ queue_work(ch->hsic_wq, &(ch->read_work));
+ return 0;
+}
+
+static void hsic_open_work_fn(struct work_struct *work)
+{
+ struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
+ open_work);
+ if (ch)
+ hsic_open(ch->id);
+}
+
+static int hsic_close(int id)
+{
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err("diag: Invalid index %d in %s\n", id, __func__);
+ return -EINVAL;
+ }
+
+ ch = &diag_hsic[id];
+ if (!ch->enabled)
+ return -ENODEV;
+
+ if (!ch->opened) {
+ pr_debug("diag: HSIC channel %d is already closed\n", ch->id);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->opened = 0;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ diag_bridge_close(ch->id);
+ diagmem_exit(driver, ch->mempool);
+ diag_remote_dev_close(ch->dev_id);
+ return 0;
+}
+
+static void hsic_close_work_fn(struct work_struct *work)
+{
+ struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
+ close_work);
+ if (ch)
+ hsic_close(ch->id);
+}
+
+static void hsic_read_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ unsigned char *buf = NULL;
+ struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
+ read_work);
+ if (!ch || !ch->enabled || !ch->opened)
+ return;
+
+ do {
+ buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE, ch->mempool);
+ if (!buf) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = diag_bridge_read(ch->id, buf, DIAG_MDM_BUF_SIZE);
+ if (err) {
+ diagmem_free(driver, buf, ch->mempool);
+ pr_err_ratelimited("diag: Unable to read from HSIC channel %d, err: %d\n",
+ ch->id, err);
+ break;
+ }
+ } while (buf);
+
+ /* Read from the HSIC channel continuously if the channel is present */
+ if (!err)
+ queue_work(ch->hsic_wq, &ch->read_work);
+}
+
+static int diag_hsic_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct diag_hsic_info *ch = NULL;
+
+ if (!pdev)
+ return -EIO;
+
+ pr_debug("diag: hsic probe pdev: %d\n", pdev->id);
+ if (pdev->id >= NUM_HSIC_DEV) {
+ pr_err("diag: No support for HSIC device %d\n", pdev->id);
+ return -EIO;
+ }
+
+ ch = &diag_hsic[pdev->id];
+ if (!ch->enabled) {
+ spin_lock_irqsave(&ch->lock, flags);
+ ch->enabled = 1;
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+ queue_work(ch->hsic_wq, &(ch->open_work));
+ return 0;
+}
+
+static int diag_hsic_remove(struct platform_device *pdev)
+{
+ struct diag_hsic_info *ch = NULL;
+
+ if (!pdev)
+ return -EIO;
+
+ pr_debug("diag: hsic close pdev: %d\n", pdev->id);
+ if (pdev->id >= NUM_HSIC_DEV) {
+ pr_err("diag: No support for HSIC device %d\n", pdev->id);
+ return -EIO;
+ }
+
+ ch = &diag_hsic[pdev->id];
+ queue_work(ch->hsic_wq, &(ch->close_work));
+ return 0;
+}
+
+static int diagfwd_hsic_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: suspending...\n");
+ return 0;
+}
+
+static int diagfwd_hsic_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: resuming...\n");
+ return 0;
+}
+
+static const struct dev_pm_ops diagfwd_hsic_dev_pm_ops = {
+ .runtime_suspend = diagfwd_hsic_runtime_suspend,
+ .runtime_resume = diagfwd_hsic_runtime_resume,
+};
+
+static struct platform_driver msm_hsic_ch_driver = {
+ .probe = diag_hsic_probe,
+ .remove = diag_hsic_remove,
+ .driver = {
+ .name = "diag_bridge",
+ .owner = THIS_MODULE,
+ .pm = &diagfwd_hsic_dev_pm_ops,
+ },
+};
+
+static int hsic_queue_read(int id)
+{
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+ queue_work(diag_hsic[id].hsic_wq, &(diag_hsic[id].read_work));
+ return 0;
+}
+
+static int hsic_write(int id, unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+ struct diag_hsic_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+ if (!buf || len <= 0) {
+ pr_err_ratelimited("diag: In %s, ch %d, invalid buf %pK len %d\n",
+ __func__, id, buf, len);
+ return -EINVAL;
+ }
+
+ ch = &diag_hsic[id];
+ if (!ch->opened || !ch->enabled) {
+ pr_debug_ratelimited("diag: In %s, ch %d is disabled. opened %d enabled: %d\n",
+ __func__, id, ch->opened, ch->enabled);
+ return -EIO;
+ }
+
+ err = diag_bridge_write(ch->id, buf, len);
+ if (err) {
+ pr_err_ratelimited("diag: cannot write to HSIC ch %d, err: %d\n",
+ ch->id, err);
+ }
+ return err;
+}
+
+static int hsic_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+ if (id < 0 || id >= NUM_HSIC_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n",
+ __func__, id);
+ return -EINVAL;
+ }
+ if (!buf)
+ return -EIO;
+ diagmem_free(driver, buf, diag_hsic[id].mempool);
+ queue_work(diag_hsic[id].hsic_wq, &(diag_hsic[id].read_work));
+ return 0;
+}
+
+static struct diag_remote_dev_ops diag_hsic_fwd_ops = {
+ .open = hsic_open,
+ .close = hsic_close,
+ .queue_read = hsic_queue_read,
+ .write = hsic_write,
+ .fwd_complete = hsic_fwd_complete,
+};
+
+int diag_hsic_init(void)
+{
+ int i;
+ int err = 0;
+ struct diag_hsic_info *ch = NULL;
+ char wq_name[DIAG_HSIC_NAME_SZ + DIAG_HSIC_STRING_SZ];
+
+ for (i = 0; i < NUM_HSIC_DEV; i++) {
+ ch = &diag_hsic[i];
+ spin_lock_init(&ch->lock);
+ INIT_WORK(&(ch->read_work), hsic_read_work_fn);
+ INIT_WORK(&(ch->open_work), hsic_open_work_fn);
+ INIT_WORK(&(ch->close_work), hsic_close_work_fn);
+ strlcpy(wq_name, "DIAG_HSIC_", DIAG_HSIC_STRING_SZ);
+ strlcat(wq_name, ch->name, sizeof(ch->name));
+ ch->hsic_wq = create_singlethread_workqueue(wq_name);
+ if (!ch->hsic_wq)
+ goto fail;
+ err = diagfwd_bridge_register(ch->dev_id, ch->id,
+ &diag_hsic_fwd_ops);
+ if (err) {
+ pr_err("diag: Unable to register HSIC channel %d with bridge, err: %d\n",
+ i, err);
+ goto fail;
+ }
+ }
+
+ err = platform_driver_register(&msm_hsic_ch_driver);
+ if (err) {
+ pr_err("diag: could not register HSIC device, err: %d\n", err);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ diag_hsic_exit();
+ return -ENOMEM;
+}
+
+void diag_hsic_exit(void)
+{
+ int i;
+ struct diag_hsic_info *ch = NULL;
+
+ for (i = 0; i < NUM_HSIC_DEV; i++) {
+ ch = &diag_hsic[i];
+ ch->enabled = 0;
+ ch->opened = 0;
+ ch->suspended = 0;
+ if (ch->hsic_wq)
+ destroy_workqueue(ch->hsic_wq);
+ }
+ platform_driver_unregister(&msm_hsic_ch_driver);
+}
diff --git a/drivers/char/diag/diagfwd_hsic.h b/drivers/char/diag/diagfwd_hsic.h
new file mode 100644
index 0000000..c4d87a2
--- /dev/null
+++ b/drivers/char/diag/diagfwd_hsic.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_HSIC_H
+#define DIAGFWD_HSIC_H
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <linux/usb/diag_bridge.h>
+
+#define HSIC_1 0
+#define HSIC_2 1
+#define NUM_HSIC_DEV 2
+
+#define DIAG_HSIC_NAME_SZ 24
+
+struct diag_hsic_info {
+ int id;
+ int dev_id;
+ int mempool;
+ uint8_t opened;
+ uint8_t enabled;
+ uint8_t suspended;
+ char name[DIAG_HSIC_NAME_SZ];
+ struct work_struct read_work;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct workqueue_struct *hsic_wq;
+ spinlock_t lock;
+};
+
+extern struct diag_hsic_info diag_hsic[NUM_HSIC_DEV];
+
+int diag_hsic_init(void);
+void diag_hsic_exit(void);
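+
+/*
+ * Lifecycle sketch (illustrative): diagfwd_bridge drives HSIC through the
+ * diag_remote_dev_ops registered in diag_hsic_init():
+ *
+ *	ops->open(id)               -> hsic_open(): diag_bridge_open + first read
+ *	ops->write(id, buf, len, 0) -> hsic_write(): diag_bridge_write
+ *	ops->fwd_complete(id, ...)  -> recycle the read buffer, queue next read
+ *	ops->close(id)              -> hsic_close(): diag_bridge_close
+ */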
+
+#endif
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
new file mode 100644
index 0000000..f27f358
--- /dev/null
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -0,0 +1,733 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/msm_mhi.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <asm/current.h>
+#include <linux/atomic.h>
+#include "diagmem.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_mhi.h"
+#include "diag_ipc_logging.h"
+
+#define SET_CH_CTXT(index, type) ((((index) & 0xFF) << 8) | ((type) & 0xFF))
+#define GET_INFO_INDEX(val) (((val) & 0xFF00) >> 8)
+#define GET_CH_TYPE(val) ((val) & 0x00FF)
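+/*
+ * e.g. SET_CH_CTXT(MHI_DCI_1, TYPE_MHI_WRITE_CH) packs index 1 and type 1
+ * into 0x0101; GET_INFO_INDEX()/GET_CH_TYPE() recover the two fields.
+ */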
+
+#define CHANNELS_OPENED 0
+#define OPEN_CHANNELS 1
+#define CHANNELS_CLOSED 0
+#define CLOSE_CHANNELS 1
+
+#define DIAG_MHI_STRING_SZ 11
+
+struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
+ {
+ .id = MHI_1,
+ .dev_id = DIAGFWD_MDM,
+ .name = "MDM",
+ .enabled = 0,
+ .num_read = 0,
+ .mempool = POOL_TYPE_MDM,
+ .mempool_init = 0,
+ .mhi_wq = NULL,
+ .read_ch = {
+ .chan = MHI_CLIENT_DIAG_IN,
+ .type = TYPE_MHI_READ_CH,
+ .hdl = NULL,
+ },
+ .write_ch = {
+ .chan = MHI_CLIENT_DIAG_OUT,
+ .type = TYPE_MHI_WRITE_CH,
+ .hdl = NULL,
+ }
+ },
+ {
+ .id = MHI_DCI_1,
+ .dev_id = DIAGFWD_MDM_DCI,
+ .name = "MDM_DCI",
+ .enabled = 0,
+ .num_read = 0,
+ .mempool = POOL_TYPE_MDM_DCI,
+ .mempool_init = 0,
+ .mhi_wq = NULL,
+ .read_ch = {
+ .chan = MHI_CLIENT_DCI_IN,
+ .type = TYPE_MHI_READ_CH,
+ .hdl = NULL,
+ },
+ .write_ch = {
+ .chan = MHI_CLIENT_DCI_OUT,
+ .type = TYPE_MHI_WRITE_CH,
+ .hdl = NULL,
+ }
+ }
+};
+
+static int mhi_ch_open(struct diag_mhi_ch_t *ch)
+{
+ int err = 0;
+
+ if (!ch)
+ return -EINVAL;
+
+ if (atomic_read(&ch->opened)) {
+ pr_debug("diag: In %s, channel is already opened, id: %d\n",
+ __func__, ch->type);
+ return 0;
+ }
+ err = mhi_open_channel(ch->hdl);
+ if (err) {
+ pr_err("diag: In %s, unable to open ch, type: %d, err: %d\n",
+ __func__, ch->type, err);
+ return err;
+ }
+
+ atomic_set(&ch->opened, 1);
+ INIT_LIST_HEAD(&ch->buf_tbl);
+ return 0;
+}
+
+static int mhi_buf_tbl_add(struct diag_mhi_info *mhi_info, int type,
+ void *buf, int len)
+{
+ unsigned long flags;
+ struct diag_mhi_buf_tbl_t *item;
+ struct diag_mhi_ch_t *ch = NULL;
+
+ if (!mhi_info || !buf || len < 0)
+ return -EINVAL;
+
+ switch (type) {
+ case TYPE_MHI_READ_CH:
+ ch = &mhi_info->read_ch;
+ break;
+ case TYPE_MHI_WRITE_CH:
+ ch = &mhi_info->write_ch;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return -EINVAL;
+ }
+
+ item = kzalloc(sizeof(struct diag_mhi_buf_tbl_t), GFP_KERNEL);
+ if (!item)
+ return -ENOMEM;
+ kmemleak_not_leak(item);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ item->buf = buf;
+ item->len = len;
+ list_add_tail(&item->link, &ch->buf_tbl);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ return 0;
+}
+
+static void mhi_buf_tbl_remove(struct diag_mhi_info *mhi_info, int type,
+ void *buf, int len)
+{
+ int found = 0;
+ unsigned long flags;
+ struct list_head *start, *temp;
+ struct diag_mhi_buf_tbl_t *item = NULL;
+ struct diag_mhi_ch_t *ch = NULL;
+
+ if (!mhi_info || !buf || len < 0)
+ return;
+
+ switch (type) {
+ case TYPE_MHI_READ_CH:
+ ch = &mhi_info->read_ch;
+ break;
+ case TYPE_MHI_WRITE_CH:
+ ch = &mhi_info->write_ch;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+ __func__, type);
+ return;
+ }
+
+ spin_lock_irqsave(&ch->lock, flags);
+ list_for_each_safe(start, temp, &ch->buf_tbl) {
+ item = list_entry(start, struct diag_mhi_buf_tbl_t, link);
+ if (item->buf != buf)
+ continue;
+ list_del(&item->link);
+ if (type == TYPE_MHI_READ_CH)
+ diagmem_free(driver, item->buf, mhi_info->mempool);
+ kfree(item);
+ found = 1;
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ if (!found) {
+ pr_err_ratelimited("diag: In %s, unable to find buffer, ch: %pK, type: %d, buf: %pK\n",
+ __func__, ch, ch->type, buf);
+ }
+}
+
+static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info)
+{
+ unsigned long flags;
+ struct list_head *start, *temp;
+ struct diag_mhi_buf_tbl_t *item = NULL;
+ struct diag_mhi_ch_t *ch = NULL;
+
+ if (!mhi_info || !mhi_info->enabled)
+ return;
+
+ /* Clear all the pending reads */
+ ch = &mhi_info->read_ch;
+ /* At this point, the channel should already be closed */
+ if (!(atomic_read(&ch->opened))) {
+ spin_lock_irqsave(&ch->lock, flags);
+ list_for_each_safe(start, temp, &ch->buf_tbl) {
+ item = list_entry(start, struct diag_mhi_buf_tbl_t,
+ link);
+ list_del(&item->link);
+ diagmem_free(driver, item->buf, mhi_info->mempool);
+ kfree(item);
+
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ /* Clear all the pending writes */
+ ch = &mhi_info->write_ch;
+ /* At this point, the channel should already be closed */
+ if (!(atomic_read(&ch->opened))) {
+ spin_lock_irqsave(&ch->lock, flags);
+ list_for_each_safe(start, temp, &ch->buf_tbl) {
+ item = list_entry(start, struct diag_mhi_buf_tbl_t,
+ link);
+ list_del(&item->link);
+ diag_remote_dev_write_done(mhi_info->dev_id, item->buf,
+ item->len, mhi_info->id);
+ kfree(item);
+
+ }
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+}
+
+static int __mhi_close(struct diag_mhi_info *mhi_info, int close_flag)
+{
+ if (!mhi_info)
+ return -EIO;
+
+ if (!mhi_info->enabled)
+ return -ENODEV;
+
+ if (close_flag == CLOSE_CHANNELS) {
+ atomic_set(&(mhi_info->read_ch.opened), 0);
+ atomic_set(&(mhi_info->write_ch.opened), 0);
+ }
+
+ if (!(atomic_read(&(mhi_info->read_ch.opened)))) {
+ flush_workqueue(mhi_info->mhi_wq);
+ mhi_close_channel(mhi_info->read_ch.hdl);
+ }
+
+ if (!(atomic_read(&(mhi_info->write_ch.opened)))) {
+ flush_workqueue(mhi_info->mhi_wq);
+ mhi_close_channel(mhi_info->write_ch.hdl);
+ }
+
+ mhi_buf_tbl_clear(mhi_info);
+ diag_remote_dev_close(mhi_info->dev_id);
+ return 0;
+}
+
+static int mhi_close(int id)
+{
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err("diag: In %s, invalid index %d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ if (!diag_mhi[id].enabled)
+ return -ENODEV;
+ /*
+ * This function is called whenever the channel needs to be closed
+ * explicitly by Diag. Close both the read and write channels (denoted
+ * by CLOSE_CHANNELS flag)
+ */
+ return __mhi_close(&diag_mhi[id], CLOSE_CHANNELS);
+}
+
+static void mhi_close_work_fn(struct work_struct *work)
+{
+ struct diag_mhi_info *mhi_info = container_of(work,
+ struct diag_mhi_info,
+ close_work);
+ /*
+ * This is a part of work function which is queued after the channels
+ * are explicitly closed. Do not close channels again (denoted by
+ * CHANNELS_CLOSED flag)
+ */
+ if (mhi_info)
+ __mhi_close(mhi_info, CHANNELS_CLOSED);
+}
+
+static int __mhi_open(struct diag_mhi_info *mhi_info, int open_flag)
+{
+ int err = 0;
+ unsigned long flags;
+
+ if (!mhi_info)
+ return -EIO;
+
+ if (open_flag == OPEN_CHANNELS) {
+ if (!atomic_read(&mhi_info->read_ch.opened)) {
+ err = mhi_ch_open(&mhi_info->read_ch);
+ if (err)
+ goto fail;
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "opened mhi read channel, port: %d\n",
+ mhi_info->id);
+ }
+ if (!atomic_read(&mhi_info->write_ch.opened)) {
+ err = mhi_ch_open(&mhi_info->write_ch);
+ if (err)
+ goto fail;
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "opened mhi write channel, port: %d\n",
+ mhi_info->id);
+ }
+ } else if (open_flag == CHANNELS_OPENED) {
+ if (!atomic_read(&(mhi_info->read_ch.opened)) ||
+ !atomic_read(&(mhi_info->write_ch.opened))) {
+ return -ENODEV;
+ }
+ }
+
+ spin_lock_irqsave(&mhi_info->lock, flags);
+ mhi_info->enabled = 1;
+ spin_unlock_irqrestore(&mhi_info->lock, flags);
+ diag_remote_dev_open(mhi_info->dev_id);
+ queue_work(mhi_info->mhi_wq, &(mhi_info->read_work));
+ return 0;
+
+fail:
+ pr_err("diag: Failed to open mhi channlels, err: %d\n", err);
+ mhi_close(mhi_info->id);
+ return err;
+}
+
+static int mhi_open(int id)
+{
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err("diag: In %s, invalid index %d\n", __func__, id);
+ return -EINVAL;
+ }
+
+ if (!diag_mhi[id].enabled)
+ return -ENODEV;
+ /*
+ * This function is called whenever the channel needs to be opened
+ * explicitly by Diag. Open both the read and write channels (denoted by
+ * OPEN_CHANNELS flag)
+ */
+ __mhi_open(&diag_mhi[id], OPEN_CHANNELS);
+ diag_remote_dev_open(diag_mhi[id].dev_id);
+ queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+
+ return 0;
+}
+
+static void mhi_open_work_fn(struct work_struct *work)
+{
+ struct diag_mhi_info *mhi_info = container_of(work,
+ struct diag_mhi_info,
+ open_work);
+ /*
+ * This is a part of work function which is queued after the channels
+ * are explicitly opened. Do not open channels again (denoted by
+ * CHANNELS_OPENED flag)
+ */
+ if (mhi_info) {
+ diag_remote_dev_open(mhi_info->dev_id);
+ queue_work(mhi_info->mhi_wq, &(mhi_info->read_work));
+ }
+}
+
+static void mhi_read_done_work_fn(struct work_struct *work)
+{
+ unsigned char *buf = NULL;
+ struct mhi_result result;
+ int err = 0;
+ struct diag_mhi_info *mhi_info = container_of(work,
+ struct diag_mhi_info,
+ read_done_work);
+ if (!mhi_info)
+ return;
+
+ do {
+ if (!(atomic_read(&(mhi_info->read_ch.opened))))
+ break;
+ err = mhi_poll_inbound(mhi_info->read_ch.hdl, &result);
+ if (err) {
+ pr_debug("diag: In %s, err %d\n", __func__, err);
+ break;
+ }
+ buf = result.buf_addr;
+ if (!buf)
+ break;
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "read from mhi port %d buf %pK\n",
+ mhi_info->id, buf);
+ /*
+ * The read buffers can come after the MHI channels are closed.
+ * If the channels are closed at the time of read, discard the
+ * buffers here and do not forward them to the mux layer.
+ */
+ if ((atomic_read(&(mhi_info->read_ch.opened)))) {
+ err = diag_remote_dev_read_done(mhi_info->dev_id, buf,
+ result.bytes_xferd);
+ if (err)
+ mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH,
+ buf, result.bytes_xferd);
+ } else {
+ mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf,
+ result.bytes_xferd);
+ }
+ } while (buf);
+}
+
+static void mhi_read_work_fn(struct work_struct *work)
+{
+ int err = 0;
+ unsigned char *buf = NULL;
+ enum MHI_FLAGS mhi_flags = MHI_EOT;
+ struct diag_mhi_ch_t *read_ch = NULL;
+ unsigned long flags;
+ struct diag_mhi_info *mhi_info = container_of(work,
+ struct diag_mhi_info,
+ read_work);
+ if (!mhi_info)
+ return;
+
+ read_ch = &mhi_info->read_ch;
+ do {
+ if (!(atomic_read(&(read_ch->opened))))
+ break;
+
+ buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
+ mhi_info->mempool);
+ if (!buf)
+ break;
+
+ err = mhi_buf_tbl_add(mhi_info, TYPE_MHI_READ_CH, buf,
+ DIAG_MDM_BUF_SIZE);
+ if (err)
+ goto fail;
+
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "queueing a read buf %pK, ch: %s\n",
+ buf, mhi_info->name);
+ spin_lock_irqsave(&read_ch->lock, flags);
+ err = mhi_queue_xfer(read_ch->hdl, buf, DIAG_MDM_BUF_SIZE,
+ mhi_flags);
+ spin_unlock_irqrestore(&read_ch->lock, flags);
+ if (err) {
+ pr_err_ratelimited("diag: Unable to read from MHI channel %s, err: %d\n",
+ mhi_info->name, err);
+ goto fail;
+ }
+ } while (buf);
+
+ return;
+fail:
+ mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf, DIAG_MDM_BUF_SIZE);
+ queue_work(mhi_info->mhi_wq, &mhi_info->read_work);
+}
+
+static int mhi_queue_read(int id)
+{
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
+ id);
+ return -EINVAL;
+ }
+ queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+ return 0;
+}
+
+static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
+{
+ int err = 0;
+ enum MHI_FLAGS mhi_flags = MHI_EOT;
+ unsigned long flags;
+ struct diag_mhi_ch_t *ch = NULL;
+
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
+ id);
+ return -EINVAL;
+ }
+
+ if (!buf || len <= 0) {
+ pr_err("diag: In %s, ch %d, invalid buf %pK len %d\n",
+ __func__, id, buf, len);
+ return -EINVAL;
+ }
+
+ if (!diag_mhi[id].enabled) {
+ pr_err_ratelimited("diag: In %s, MHI channel %s is not enabled\n",
+ __func__, diag_mhi[id].name);
+ return -EIO;
+ }
+
+ ch = &diag_mhi[id].write_ch;
+ if (!(atomic_read(&(ch->opened)))) {
+ pr_err_ratelimited("diag: In %s, MHI write channel %s is not open\n",
+ __func__, diag_mhi[id].name);
+ return -EIO;
+ }
+
+ err = mhi_buf_tbl_add(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf,
+ len);
+ if (err)
+ goto fail;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ err = mhi_queue_xfer(ch->hdl, buf, len, mhi_flags);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, cannot write to MHI channel %pK, len %d, err: %d\n",
+ __func__, diag_mhi[id].name, len, err);
+ mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return err;
+}
+
+static int mhi_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+ if (id < 0 || id >= NUM_MHI_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
+ id);
+ return -EINVAL;
+ }
+
+ if (!buf)
+ return -EINVAL;
+
+ mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_READ_CH, buf, len);
+ queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+ return 0;
+}
+
+static void mhi_notifier(struct mhi_cb_info *cb_info)
+{
+ int index;
+ int type;
+ int err = 0;
+ struct mhi_result *result = NULL;
+ struct diag_mhi_ch_t *ch = NULL;
+ void *buf = NULL;
+
+ if (!cb_info)
+ return;
+
+ result = cb_info->result;
+ if (!result) {
+ pr_err_ratelimited("diag: failed to obtain mhi result from callback\n");
+ return;
+ }
+
+ index = GET_INFO_INDEX((uintptr_t)cb_info->result->user_data);
+ if (index < 0 || index >= NUM_MHI_DEV) {
+ pr_err_ratelimited("diag: In %s, invalid MHI index %d\n",
+ __func__, index);
+ return;
+ }
+
+ type = GET_CH_TYPE((uintptr_t)cb_info->result->user_data);
+ switch (type) {
+ case TYPE_MHI_READ_CH:
+ ch = &diag_mhi[index].read_ch;
+ break;
+ case TYPE_MHI_WRITE_CH:
+ ch = &diag_mhi[index].write_ch;
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid channel type %d\n",
+ __func__, type);
+ return;
+ }
+
+ switch (cb_info->cb_reason) {
+ case MHI_CB_MHI_ENABLED:
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "received mhi enabled notifiation port: %d ch: %d\n",
+ index, ch->type);
+ err = mhi_ch_open(ch);
+ if (err)
+ break;
+ if (ch->type == TYPE_MHI_READ_CH) {
+ diag_mhi[index].num_read = mhi_get_free_desc(ch->hdl);
+ if (diag_mhi[index].num_read <= 0) {
+ pr_err("diag: In %s, invalid number of descriptors %d\n",
+ __func__, diag_mhi[index].num_read);
+ break;
+ }
+ }
+ __mhi_open(&diag_mhi[index], CHANNELS_OPENED);
+ queue_work(diag_mhi[index].mhi_wq,
+ &(diag_mhi[index].open_work));
+ break;
+ case MHI_CB_MHI_DISABLED:
+ DIAG_LOG(DIAG_DEBUG_BRIDGE,
+ "received mhi disabled notifiation port: %d ch: %d\n",
+ index, ch->type);
+ atomic_set(&(ch->opened), 0);
+ __mhi_close(&diag_mhi[index], CHANNELS_CLOSED);
+ break;
+ case MHI_CB_XFER:
+ /*
+ * If the channel is a read channel, this is a read
+ * complete notification - write complete if the channel is
+ * a write channel.
+ */
+ if (type == TYPE_MHI_READ_CH) {
+ if (!atomic_read(&(diag_mhi[index].read_ch.opened)))
+ break;
+
+ queue_work(diag_mhi[index].mhi_wq,
+ &(diag_mhi[index].read_done_work));
+ break;
+ }
+ buf = result->buf_addr;
+ if (!buf) {
+ pr_err_ratelimited("diag: In %s, unable to de-serialize the data\n",
+ __func__);
+ break;
+ }
+ mhi_buf_tbl_remove(&diag_mhi[index], TYPE_MHI_WRITE_CH, buf,
+ result->bytes_xferd);
+ diag_remote_dev_write_done(diag_mhi[index].dev_id, buf,
+ result->bytes_xferd,
+ diag_mhi[index].id);
+ break;
+ default:
+ pr_err("diag: In %s, invalid cb reason 0x%x\n", __func__,
+ cb_info->cb_reason);
+ break;
+ }
+}
+
+static struct diag_remote_dev_ops diag_mhi_fwd_ops = {
+ .open = mhi_open,
+ .close = mhi_close,
+ .queue_read = mhi_queue_read,
+ .write = mhi_write,
+ .fwd_complete = mhi_fwd_complete,
+};
+
+static int diag_mhi_register_ch(int id, struct diag_mhi_ch_t *ch)
+{
+ int ctxt = 0;
+
+ if (!ch)
+ return -EIO;
+ if (id < 0 || id >= NUM_MHI_DEV)
+ return -EINVAL;
+ spin_lock_init(&ch->lock);
+ atomic_set(&(ch->opened), 0);
+ ctxt = SET_CH_CTXT(id, ch->type);
+ ch->client_info.mhi_client_cb = mhi_notifier;
+ return mhi_register_channel(&ch->hdl, ch->chan, 0, &ch->client_info,
+ (void *)(uintptr_t)ctxt);
+}
+
+int diag_mhi_init(void)
+{
+ int i;
+ int err = 0;
+ struct diag_mhi_info *mhi_info = NULL;
+ char wq_name[DIAG_MHI_NAME_SZ + DIAG_MHI_STRING_SZ];
+
+ for (i = 0; i < NUM_MHI_DEV; i++) {
+ mhi_info = &diag_mhi[i];
+ spin_lock_init(&mhi_info->lock);
+ INIT_WORK(&(mhi_info->read_work), mhi_read_work_fn);
+ INIT_WORK(&(mhi_info->read_done_work), mhi_read_done_work_fn);
+ INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn);
+ INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
+ strlcpy(wq_name, "diag_mhi_", DIAG_MHI_STRING_SZ);
+ strlcat(wq_name, mhi_info->name, sizeof(mhi_info->name));
+ diagmem_init(driver, mhi_info->mempool);
+ mhi_info->mempool_init = 1;
+ mhi_info->mhi_wq = create_singlethread_workqueue(wq_name);
+ if (!mhi_info->mhi_wq)
+ goto fail;
+ err = diagfwd_bridge_register(mhi_info->dev_id, mhi_info->id,
+ &diag_mhi_fwd_ops);
+ if (err) {
+ pr_err("diag: Unable to register MHI channel %d with bridge, err: %d\n",
+ i, err);
+ goto fail;
+ }
+ err = diag_mhi_register_ch(mhi_info->id, &mhi_info->read_ch);
+ if (err) {
+ pr_err("diag: Unable to register MHI read channel for %d, err: %d\n",
+ i, err);
+ goto fail;
+ }
+ err = diag_mhi_register_ch(mhi_info->id, &mhi_info->write_ch);
+ if (err) {
+ pr_err("diag: Unable to register MHI write channel for %d, err: %d\n",
+ i, err);
+ goto fail;
+ }
+ DIAG_LOG(DIAG_DEBUG_BRIDGE, "mhi port %d is initailzed\n", i);
+ }
+
+ return 0;
+fail:
+ diag_mhi_exit();
+ return err ? err : -ENOMEM;
+}
+
+void diag_mhi_exit(void)
+{
+ int i;
+ struct diag_mhi_info *mhi_info = NULL;
+
+ for (i = 0; i < NUM_MHI_DEV; i++) {
+ mhi_info = &diag_mhi[i];
+ if (mhi_info->mhi_wq)
+ destroy_workqueue(mhi_info->mhi_wq);
+ mhi_close(mhi_info->id);
+ if (mhi_info->mempool_init)
+ diagmem_exit(driver, mhi_info->mempool);
+ }
+}
+
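
mhi_notifier() demultiplexes every callback through GET_INFO_INDEX() and GET_CH_TYPE(), the counterparts of the SET_CH_CTXT() value that diag_mhi_register_ch() hands to mhi_register_channel(). The real macros are defined elsewhere in the driver, outside this hunk; the following is only a minimal sketch, assuming the device index sits in the upper bits of a single context word:

    /*
     * Hypothetical packing only; the driver's actual SET_CH_CTXT,
     * GET_INFO_INDEX and GET_CH_TYPE macros may use different widths.
     */
    #define EX_CH_TYPE_BITS		8
    #define EX_SET_CH_CTXT(index, type) \
    	(((index) << EX_CH_TYPE_BITS) | \
    	 ((type) & ((1 << EX_CH_TYPE_BITS) - 1)))
    #define EX_GET_INFO_INDEX(ctxt)	((int)((ctxt) >> EX_CH_TYPE_BITS))
    #define EX_GET_CH_TYPE(ctxt) \
    	((int)((ctxt) & ((1 << EX_CH_TYPE_BITS) - 1)))
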
diff --git a/drivers/char/diag/diagfwd_mhi.h b/drivers/char/diag/diagfwd_mhi.h
new file mode 100644
index 0000000..a446697
--- /dev/null
+++ b/drivers/char/diag/diagfwd_mhi.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_MHI_H
+#define DIAGFWD_MHI_H
+
+#include "diagchar.h"
+#include <linux/msm_mhi.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/ipc_logging.h>
+
+#define MHI_1 0
+#define MHI_DCI_1 1
+#define NUM_MHI_DEV 2
+
+#define TYPE_MHI_READ_CH 0
+#define TYPE_MHI_WRITE_CH 1
+
+#define DIAG_MHI_NAME_SZ 24
+
+struct diag_mhi_buf_tbl_t {
+ struct list_head link;
+ unsigned char *buf;
+ int len;
+};
+
+struct diag_mhi_ch_t {
+ uint8_t type;
+ u32 channel;
+ enum MHI_CLIENT_CHANNEL chan;
+ atomic_t opened;
+ spinlock_t lock;
+ struct mhi_client_info_t client_info;
+ struct mhi_client_handle *hdl;
+ struct list_head buf_tbl;
+};
+
+struct diag_mhi_info {
+ int id;
+ int dev_id;
+ int mempool;
+ int mempool_init;
+ int num_read;
+ uint8_t enabled;
+ char name[DIAG_MHI_NAME_SZ];
+ struct work_struct read_work;
+ struct work_struct read_done_work;
+ struct work_struct open_work;
+ struct work_struct close_work;
+ struct workqueue_struct *mhi_wq;
+ wait_queue_head_t mhi_wait_q;
+ struct diag_mhi_ch_t read_ch;
+ struct diag_mhi_ch_t write_ch;
+ spinlock_t lock;
+};
+
+extern struct diag_mhi_info diag_mhi[NUM_MHI_DEV];
+
+int diag_mhi_init(void);
+void diag_mhi_exit(void);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
new file mode 100644
index 0000000..4f7c1e0
--- /dev/null
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -0,0 +1,1250 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "diagchar.h"
+#include "diagchar_hdlc.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diag_dci.h"
+#include "diagfwd.h"
+#include "diagfwd_socket.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+#include "diagfwd_glink.h"
+
+struct data_header {
+ uint8_t control_char;
+ uint8_t version;
+ uint16_t length;
+};
+
+static struct diagfwd_info *early_init_info[NUM_TRANSPORT];
+
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info);
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info);
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
+static void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info);
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info);
+struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+static struct diag_channel_ops data_ch_ops = {
+ .open = NULL,
+ .close = NULL,
+ .read_done = diagfwd_data_read_done
+};
+
+static struct diag_channel_ops cntl_ch_ops = {
+ .open = diagfwd_cntl_open,
+ .close = diagfwd_cntl_close,
+ .read_done = diagfwd_cntl_read_done
+};
+
+static struct diag_channel_ops dci_ch_ops = {
+ .open = diagfwd_dci_open,
+ .close = diagfwd_dci_close,
+ .read_done = diagfwd_dci_read_done
+};
+
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+ diag_cntl_channel_open(fwd_info);
+}
+
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+ diag_cntl_channel_close(fwd_info);
+}
+
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+
+ diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+ DIAG_STATUS_OPEN, DCI_LOCAL_PROC);
+}
+
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+
+ diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+ DIAG_STATUS_CLOSED, DCI_LOCAL_PROC);
+}
+
+static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
+ unsigned char *buf, int len)
+{
+ struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+ struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+ struct data_header *header;
+ int header_size = sizeof(struct data_header);
+ uint8_t *end_control_char = NULL;
+ uint8_t *payload = NULL;
+ uint8_t *temp_buf = NULL;
+ uint8_t *temp_encode_buf = NULL;
+ int src_pkt_len;
+ int encoded_pkt_length;
+ int max_size;
+ int total_processed = 0;
+ int bytes_remaining;
+ int err = 0;
+ uint8_t loop_count = 0;
+
+ if (!dest_buf || !dest_len || !buf)
+ return -EIO;
+
+ temp_buf = buf;
+ temp_encode_buf = dest_buf;
+ bytes_remaining = *dest_len;
+
+ while (total_processed < len) {
+ loop_count++;
+ header = (struct data_header *)temp_buf;
+ /* Perform initial error checking */
+ if (header->control_char != CONTROL_CHAR ||
+ header->version != 1) {
+ err = -EINVAL;
+ break;
+ }
+
+ if (header->length >= bytes_remaining)
+ break;
+
+ payload = temp_buf + header_size;
+ end_control_char = payload + header->length;
+ if (*end_control_char != CONTROL_CHAR) {
+ err = -EINVAL;
+ break;
+ }
+
+ max_size = 2 * header->length + 3;
+ if (bytes_remaining < max_size) {
+ err = -EINVAL;
+ break;
+ }
+
+ /* Prepare for encoding the data */
+ send.state = DIAG_STATE_START;
+ send.pkt = payload;
+ send.last = (void *)(payload + header->length - 1);
+ send.terminate = 1;
+
+ enc.dest = temp_encode_buf;
+ enc.dest_last = (void *)(temp_encode_buf + max_size);
+ enc.crc = 0;
+ diag_hdlc_encode(&send, &enc);
+
+ /* Prepare for next packet */
+ src_pkt_len = (header_size + header->length + 1);
+ total_processed += src_pkt_len;
+ temp_buf += src_pkt_len;
+
+ encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
+ bytes_remaining -= encoded_pkt_length;
+ temp_encode_buf = enc.dest;
+ }
+
+ *dest_len = (int)(temp_encode_buf - dest_buf);
+
+ return err;
+}
+
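
The bound max_size = 2 * header->length + 3 computed above is the usual HDLC worst case: every payload byte may expand to a two-byte escape sequence, with the remaining three bytes covering the CRC and the trailing CONTROL_CHAR. A minimal sketch of the per-byte escaping, assuming the conventional 0x7E/0x7D/0x20 scheme; the authoritative implementation is diag_hdlc_encode() in diagchar_hdlc.c:

    #include <stdint.h>

    /* Bytes that collide with the frame delimiter (0x7E) or the escape
     * character (0x7D) are sent as an escape pair, so n payload bytes
     * can occupy up to 2n output bytes before CRC and terminator.
     */
    static inline int ex_hdlc_escape_byte(uint8_t in, uint8_t *out)
    {
    	if (in == 0x7E || in == 0x7D) {
    		out[0] = 0x7D;
    		out[1] = in ^ 0x20;
    		return 2;
    	}
    	out[0] = in;
    	return 1;
    }
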
+static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
+{
+ uint32_t max_size = 0;
+ unsigned char *temp_buf = NULL;
+
+ if (!buf || len == 0)
+ return -EINVAL;
+
+ max_size = (2 * len) + 3;
+ if (max_size > PERIPHERAL_BUF_SZ) {
+ if (max_size > MAX_PERIPHERAL_HDLC_BUF_SZ) {
+ pr_err("diag: In %s, max_size is going beyond limit %d\n",
+ __func__, max_size);
+ max_size = MAX_PERIPHERAL_HDLC_BUF_SZ;
+ }
+
+ if (buf->len < max_size) {
+ temp_buf = krealloc(buf->data, max_size +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (!temp_buf)
+ return -ENOMEM;
+ buf->data = temp_buf;
+ buf->len = max_size;
+ }
+ }
+
+ return buf->len;
+}
+
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len)
+{
+ int err = 0;
+ int write_len = 0;
+ unsigned char *write_buf = NULL;
+ struct diagfwd_buf_t *temp_buf = NULL;
+ struct diag_md_session_t *session_info = NULL;
+ uint8_t hdlc_disabled = 0;
+
+ if (!fwd_info || !buf || len <= 0) {
+ diag_ws_release();
+ return;
+ }
+
+ switch (fwd_info->type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type,
+ fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&fwd_info->data_mutex);
+ session_info = diag_md_session_get_peripheral(fwd_info->peripheral);
+ if (session_info)
+ hdlc_disabled = session_info->hdlc_disabled;
+ else
+ hdlc_disabled = driver->hdlc_disabled;
+
+ if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+ if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
+ temp_buf = fwd_info->buf_1;
+ write_buf = fwd_info->buf_1->data;
+ } else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf) {
+ temp_buf = fwd_info->buf_2;
+ write_buf = fwd_info->buf_2->data;
+ } else {
+ pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ write_len = len;
+ } else if (hdlc_disabled) {
+ /* The data is raw and HDLC is disabled on the APPS side */
+ if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+ temp_buf = fwd_info->buf_1;
+ } else if (fwd_info->buf_2 &&
+ fwd_info->buf_2->data_raw == buf) {
+ temp_buf = fwd_info->buf_2;
+ } else {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ if (len > PERIPHERAL_BUF_SZ) {
+ pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
+ __func__, len, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ write_len = len;
+ write_buf = buf;
+ } else {
+ if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+ temp_buf = fwd_info->buf_1;
+ } else if (fwd_info->buf_2 &&
+ fwd_info->buf_2->data_raw == buf) {
+ temp_buf = fwd_info->buf_2;
+ } else {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ write_len = check_bufsize_for_encoding(temp_buf, len);
+ if (write_len <= 0) {
+ pr_err("diag: error in checking buf for encoding\n");
+ goto end;
+ }
+ write_buf = temp_buf->data;
+ err = diag_add_hdlc_encoding(write_buf, &write_len, buf, len);
+ if (err) {
+ pr_err("diag: error in adding hdlc encoding\n");
+ goto end;
+ }
+ }
+
+ if (write_len > 0) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
+ temp_buf->ctxt);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
+ __func__, err);
+ goto end;
+ }
+ }
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diagfwd_queue_read(fwd_info);
+ return;
+
+end:
+ diag_ws_release();
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ if (temp_buf) {
+ diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_buf->ctxt));
+ }
+ diagfwd_queue_read(fwd_info);
+}
+
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len)
+{
+ if (!fwd_info) {
+ diag_ws_release();
+ return;
+ }
+
+ if (fwd_info->type != TYPE_CNTL) {
+ pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type, fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+
+ diag_ws_on_read(DIAG_WS_MUX, len);
+ diag_cntl_process_read_data(fwd_info, buf, len);
+ /*
+ * Control packets are not consumed by the clients. Mimic
+ * consumption by setting and clearing the wakeup source copy_count
+ * explicitly.
+ */
+ diag_ws_on_copy_fail(DIAG_WS_MUX);
+ /* Reset the buffer in_busy value after processing the data */
+ if (fwd_info->buf_1)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+ diagfwd_queue_read(fwd_info);
+ diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]);
+ diagfwd_queue_read(&peripheral_info[TYPE_CMD][fwd_info->peripheral]);
+}
+
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len)
+{
+ if (!fwd_info)
+ return;
+
+ switch (fwd_info->type) {
+ case TYPE_DCI:
+ case TYPE_DCI_CMD:
+ break;
+ default:
+ pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type, fwd_info->peripheral);
+ return;
+ }
+
+ diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
+ /* Reset the buffer in_busy value after processing the data */
+ if (fwd_info->buf_1)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+ diagfwd_queue_read(fwd_info);
+}
+
+static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
+ unsigned char *buf)
+{
+ if (!fwd_info || !buf)
+ return;
+
+ if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+ if (fwd_info->buf_1 && fwd_info->buf_1->data == buf)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ } else {
+ if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ }
+}
+
+int diagfwd_peripheral_init(void)
+{
+ uint8_t peripheral;
+ uint8_t transport;
+ uint8_t type;
+ struct diagfwd_info *fwd_info = NULL;
+
+ for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+ early_init_info[transport] = kcalloc(NUM_PERIPHERALS,
+ sizeof(struct diagfwd_info), GFP_KERNEL);
+ if (!early_init_info[transport])
+ return -ENOMEM;
+ kmemleak_not_leak(early_init_info[transport]);
+ }
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+ fwd_info = &early_init_info[transport][peripheral];
+ fwd_info->peripheral = peripheral;
+ fwd_info->type = TYPE_CNTL;
+ fwd_info->transport = transport;
+ fwd_info->ctxt = NULL;
+ fwd_info->p_ops = NULL;
+ fwd_info->ch_open = 0;
+ fwd_info->inited = 1;
+ fwd_info->read_bytes = 0;
+ fwd_info->write_bytes = 0;
+ spin_lock_init(&fwd_info->buf_lock);
+ spin_lock_init(&fwd_info->write_buf_lock);
+ mutex_init(&fwd_info->data_mutex);
+ }
+ }
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ for (type = 0; type < NUM_TYPES; type++) {
+ fwd_info = &peripheral_info[type][peripheral];
+ fwd_info->peripheral = peripheral;
+ fwd_info->type = type;
+ fwd_info->ctxt = NULL;
+ fwd_info->p_ops = NULL;
+ fwd_info->ch_open = 0;
+ fwd_info->read_bytes = 0;
+ fwd_info->write_bytes = 0;
+ spin_lock_init(&fwd_info->buf_lock);
+ spin_lock_init(&fwd_info->write_buf_lock);
+ mutex_init(&fwd_info->data_mutex);
+ /*
+ * This state shouldn't be set for Control channels
+ * during initialization. This is set when the feature
+ * mask is received for the first time.
+ */
+ if (type != TYPE_CNTL)
+ fwd_info->inited = 1;
+ }
+ driver->diagfwd_data[peripheral] =
+ &peripheral_info[TYPE_DATA][peripheral];
+ driver->diagfwd_cntl[peripheral] =
+ &peripheral_info[TYPE_CNTL][peripheral];
+ driver->diagfwd_dci[peripheral] =
+ &peripheral_info[TYPE_DCI][peripheral];
+ driver->diagfwd_cmd[peripheral] =
+ &peripheral_info[TYPE_CMD][peripheral];
+ driver->diagfwd_dci_cmd[peripheral] =
+ &peripheral_info[TYPE_DCI_CMD][peripheral];
+ }
+
+ if (driver->supports_sockets)
+ diag_socket_init();
+ diag_glink_init();
+
+ return 0;
+}
+
+void diagfwd_peripheral_exit(void)
+{
+ uint8_t peripheral;
+ uint8_t type;
+ uint8_t transport;
+ struct diagfwd_info *fwd_info = NULL;
+
+ diag_socket_exit();
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ for (type = 0; type < NUM_TYPES; type++) {
+ fwd_info = &peripheral_info[type][peripheral];
+ fwd_info->ctxt = NULL;
+ fwd_info->p_ops = NULL;
+ fwd_info->ch_open = 0;
+ diagfwd_buffers_exit(fwd_info);
+ }
+ }
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ driver->diagfwd_data[peripheral] = NULL;
+ driver->diagfwd_cntl[peripheral] = NULL;
+ driver->diagfwd_dci[peripheral] = NULL;
+ driver->diagfwd_cmd[peripheral] = NULL;
+ driver->diagfwd_dci_cmd[peripheral] = NULL;
+ }
+
+ for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+ kfree(early_init_info[transport]);
+ early_init_info[transport] = NULL;
+ }
+}
+
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+ struct diag_peripheral_ops *ops,
+ struct diagfwd_info **fwd_ctxt)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (!ctxt || !ops)
+ return -EIO;
+
+ if (transport >= NUM_TRANSPORT || peripheral >= NUM_PERIPHERALS)
+ return -EINVAL;
+
+ fwd_info = &early_init_info[transport][peripheral];
+ *fwd_ctxt = &early_init_info[transport][peripheral];
+ fwd_info->ctxt = ctxt;
+ fwd_info->p_ops = ops;
+ fwd_info->c_ops = &cntl_ch_ops;
+
+ return 0;
+}
+
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+ void *ctxt, struct diag_peripheral_ops *ops,
+ struct diagfwd_info **fwd_ctxt)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES ||
+ !ctxt || !ops || transport >= NUM_TRANSPORT) {
+ pr_err("diag: In %s, returning error\n", __func__);
+ return -EIO;
+ }
+
+ fwd_info = &peripheral_info[type][peripheral];
+ *fwd_ctxt = &peripheral_info[type][peripheral];
+ fwd_info->ctxt = ctxt;
+ fwd_info->p_ops = ops;
+ fwd_info->transport = transport;
+ fwd_info->ch_open = 0;
+
+ switch (type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ fwd_info->c_ops = &data_ch_ops;
+ break;
+ case TYPE_DCI:
+ case TYPE_DCI_CMD:
+ fwd_info->c_ops = &dci_ch_ops;
+ break;
+ default:
+ pr_err("diag: In %s, invalid type: %d\n", __func__, type);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&fwd_info->opened) &&
+ fwd_info->p_ops && fwd_info->p_ops->open) {
+ /*
+ * The registration can happen late, like in the case of
+ * sockets. fwd_info->opened reflects diag_state. Propagate the
+ * state to the peripherals.
+ */
+ fwd_info->p_ops->open(fwd_info->ctxt);
+ }
+
+ return 0;
+}
+
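
For orientation, a hypothetical transport hooking a DATA channel into the forwarding core via diagfwd_register(): it supplies a context pointer plus a diag_peripheral_ops table and keeps the returned diagfwd_info for later calls. All example_* names below are placeholders, not the driver's real socket or glink code:

    static void example_open(void *ctxt) { }
    static void example_close(void *ctxt) { }
    static void example_queue_read(void *ctxt) { }
    static int example_write(void *ctxt, unsigned char *buf, int len)
    {
    	return 0;	/* pretend the transport accepted the buffer */
    }
    static int example_read(void *ctxt, unsigned char *buf, int len)
    {
    	return 0;	/* pretend a read was queued */
    }

    static struct diag_peripheral_ops example_ops = {
    	.open = example_open,
    	.close = example_close,
    	.write = example_write,
    	.read = example_read,
    	.queue_read = example_queue_read,
    };

    static int example_ctxt;
    static struct diagfwd_info *example_fwd;

    static int example_transport_init(void)
    {
    	return diagfwd_register(TRANSPORT_SOCKET, PERIPHERAL_MODEM,
    				TYPE_DATA, &example_ctxt, &example_ops,
    				&example_fwd);
    }
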
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !ctxt)
+ return;
+
+ fwd_info = &peripheral_info[type][peripheral];
+ if (fwd_info->ctxt != ctxt) {
+ pr_err("diag: In %s, unable to find a match for p: %d t: %d\n",
+ __func__, peripheral, type);
+ return;
+ }
+ fwd_info->ctxt = NULL;
+ fwd_info->p_ops = NULL;
+ fwd_info->ch_open = 0;
+ diagfwd_buffers_exit(fwd_info);
+
+ switch (type) {
+ case TYPE_DATA:
+ driver->diagfwd_data[peripheral] = NULL;
+ break;
+ case TYPE_CNTL:
+ driver->diagfwd_cntl[peripheral] = NULL;
+ break;
+ case TYPE_DCI:
+ driver->diagfwd_dci[peripheral] = NULL;
+ break;
+ case TYPE_CMD:
+ driver->diagfwd_cmd[peripheral] = NULL;
+ break;
+ case TYPE_DCI_CMD:
+ driver->diagfwd_dci_cmd[peripheral] = NULL;
+ break;
+ }
+}
+
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
+{
+ struct diagfwd_info *fwd_info = NULL;
+ struct diagfwd_info *dest_info = NULL;
+ int (*init_fn)(uint8_t) = NULL;
+ void (*invalidate_fn)(void *, struct diagfwd_info *) = NULL;
+ int (*check_channel_state)(void *) = NULL;
+ uint8_t transport_open = 0;
+ int i = 0;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ switch (transport) {
+ case TRANSPORT_GLINK:
+ transport_open = TRANSPORT_SOCKET;
+ init_fn = diag_socket_init_peripheral;
+ invalidate_fn = diag_socket_invalidate;
+ check_channel_state = diag_socket_check_state;
+ break;
+ case TRANSPORT_SOCKET:
+ transport_open = TRANSPORT_GLINK;
+ init_fn = diag_glink_init_peripheral;
+ invalidate_fn = diag_glink_invalidate;
+ check_channel_state = diag_glink_check_state;
+ break;
+ default:
+ return;
+
+ }
+
+ mutex_lock(&driver->diagfwd_channel_mutex);
+ fwd_info = &early_init_info[transport][peripheral];
+ if (fwd_info->p_ops && fwd_info->p_ops->close)
+ fwd_info->p_ops->close(fwd_info->ctxt);
+ fwd_info = &early_init_info[transport_open][peripheral];
+ dest_info = &peripheral_info[TYPE_CNTL][peripheral];
+ dest_info->inited = 1;
+ dest_info->ctxt = fwd_info->ctxt;
+ dest_info->p_ops = fwd_info->p_ops;
+ dest_info->c_ops = fwd_info->c_ops;
+ dest_info->ch_open = fwd_info->ch_open;
+ dest_info->read_bytes = fwd_info->read_bytes;
+ dest_info->write_bytes = fwd_info->write_bytes;
+ dest_info->inited = fwd_info->inited;
+ dest_info->buf_1 = fwd_info->buf_1;
+ dest_info->buf_2 = fwd_info->buf_2;
+ dest_info->transport = fwd_info->transport;
+ invalidate_fn(dest_info->ctxt, dest_info);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++)
+ dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
+ if (!check_channel_state(dest_info->ctxt))
+ diagfwd_late_open(dest_info);
+ diagfwd_cntl_open(dest_info);
+ init_fn(peripheral);
+ mutex_unlock(&driver->diagfwd_channel_mutex);
+ diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
+ diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
+}
+
+void *diagfwd_request_write_buf(struct diagfwd_info *fwd_info)
+{
+ void *buf = NULL;
+ int index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+ for (index = 0; index < NUM_WRITE_BUFFERS; index++) {
+ if (!atomic_read(&(fwd_info->buf_ptr[index]->in_busy))) {
+ atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 1);
+ buf = fwd_info->buf_ptr[index]->data;
+ if (!buf) {
+ atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 0);
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+ return NULL;
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+ return buf;
+}
+
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
+{
+ struct diagfwd_info *fwd_info = NULL;
+ int err = 0;
+ uint8_t retry_count = 0;
+ uint8_t max_retries = 3;
+ void *buf_ptr = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+ return -EINVAL;
+
+ if (type == TYPE_CMD || type == TYPE_DCI_CMD) {
+ if (!driver->feature[peripheral].rcvd_feature_mask ||
+ !driver->feature[peripheral].sent_feature_mask) {
+ pr_debug_ratelimited("diag: In %s, feature mask for peripheral: %d not received or sent yet\n",
+ __func__, peripheral);
+ return 0;
+ }
+ if (!driver->feature[peripheral].separate_cmd_rsp)
+ type = (type == TYPE_CMD) ? TYPE_DATA : TYPE_DCI;
+ }
+
+ fwd_info = &peripheral_info[type][peripheral];
+ if (!fwd_info->inited || !atomic_read(&fwd_info->opened))
+ return -ENODEV;
+
+ if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
+ return -EIO;
+
+ if (fwd_info->transport == TRANSPORT_GLINK) {
+ buf_ptr = diagfwd_request_write_buf(fwd_info);
+ if (buf_ptr) {
+ memcpy(buf_ptr, buf, len);
+ } else {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: buffer not found for writing\n");
+ return -EIO;
+ }
+ } else {
+ buf_ptr = buf;
+ }
+
+ while (retry_count < max_retries) {
+ err = fwd_info->p_ops->write(fwd_info->ctxt, buf_ptr, len);
+ if (err && err != -ENODEV) {
+ usleep_range(100000, 101000);
+ retry_count++;
+ continue;
+ }
+ break;
+ }
+
+ if (!err)
+ fwd_info->write_bytes += len;
+ else if (fwd_info->transport == TRANSPORT_GLINK)
+ diagfwd_write_buffer_done(fwd_info, buf_ptr);
+ return err;
+}
+
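
An illustrative (hypothetical) caller of diagfwd_write(): the function already retries transient transport errors up to three times with a ~100 ms back-off, so callers only need to act on the final status:

    static int example_send_cmd(unsigned char *pkt, int len)
    {
    	int err = diagfwd_write(PERIPHERAL_MODEM, TYPE_CMD, pkt, len);

    	if (err && err != -ENODEV)
    		pr_debug("diag: example forward failed, err: %d\n", err);
    	return err;
    }
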
+static void __diag_fwd_open(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+
+ atomic_set(&fwd_info->opened, 1);
+ if (!fwd_info->inited)
+ return;
+
+ if (fwd_info->buf_1)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ if (fwd_info->buf_2)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+
+ if (fwd_info->p_ops && fwd_info->p_ops->open)
+ fwd_info->p_ops->open(fwd_info->ctxt);
+
+ diagfwd_queue_read(fwd_info);
+}
+
+void diagfwd_early_open(uint8_t peripheral)
+{
+ uint8_t transport = 0;
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+ fwd_info = &early_init_info[transport][peripheral];
+ __diag_fwd_open(fwd_info);
+ }
+}
+
+void diagfwd_open(uint8_t peripheral, uint8_t type)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+ return;
+
+ fwd_info = &peripheral_info[type][peripheral];
+ __diag_fwd_open(fwd_info);
+}
+
+void diagfwd_late_open(struct diagfwd_info *fwd_info)
+{
+ __diag_fwd_open(fwd_info);
+}
+
+void diagfwd_close(uint8_t peripheral, uint8_t type)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+ return;
+
+ fwd_info = &peripheral_info[type][peripheral];
+ atomic_set(&fwd_info->opened, 0);
+ if (!fwd_info->inited)
+ return;
+
+ if (fwd_info->p_ops && fwd_info->p_ops->close)
+ fwd_info->p_ops->close(fwd_info->ctxt);
+
+ if (fwd_info->buf_1)
+ atomic_set(&fwd_info->buf_1->in_busy, 1);
+ /*
+ * Only Data channels have two buffers. Set both the buffers
+ * to busy on close.
+ */
+ if (fwd_info->buf_2)
+ atomic_set(&fwd_info->buf_2->in_busy, 1);
+}
+
+int diagfwd_channel_open(struct diagfwd_info *fwd_info)
+{
+ int i;
+
+ if (!fwd_info)
+ return -EIO;
+
+ if (!fwd_info->inited) {
+ pr_debug("diag: In %s, channel is not inited, p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ return -EINVAL;
+ }
+
+ if (fwd_info->ch_open) {
+ pr_debug("diag: In %s, channel is already open, p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ return 0;
+ }
+
+ fwd_info->ch_open = 1;
+ diagfwd_buffers_init(fwd_info);
+ diagfwd_write_buffers_init(fwd_info);
+ if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
+ fwd_info->c_ops->open(fwd_info);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (fwd_info->buf_ptr[i])
+ atomic_set(&fwd_info->buf_ptr[i]->in_busy, 0);
+ }
+ diagfwd_queue_read(fwd_info);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered opened\n",
+ fwd_info->peripheral, fwd_info->type);
+
+ if (atomic_read(&fwd_info->opened)) {
+ if (fwd_info->p_ops && fwd_info->p_ops->open)
+ fwd_info->p_ops->open(fwd_info->ctxt);
+ }
+
+ return 0;
+}
+
+int diagfwd_channel_close(struct diagfwd_info *fwd_info)
+{
+ int i;
+
+ if (!fwd_info)
+ return -EIO;
+
+ fwd_info->ch_open = 0;
+ if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
+ fwd_info->c_ops->close(fwd_info);
+
+ if (fwd_info->buf_1 && fwd_info->buf_1->data)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ if (fwd_info->buf_2 && fwd_info->buf_2->data)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (fwd_info->buf_ptr[i])
+ atomic_set(&fwd_info->buf_ptr[i]->in_busy, 1);
+ }
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
+ fwd_info->peripheral, fwd_info->type);
+
+ return 0;
+}
+
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, uint32_t len)
+{
+ if (!fwd_info) {
+ diag_ws_release();
+ return -EIO;
+ }
+
+ /*
+ * Diag peripheral layers should send len as 0 if there is any error
+ * in reading data from the transport. Use this information to reset the
+ * in_busy flags. No need to queue read in this case.
+ */
+ if (len == 0) {
+ diagfwd_reset_buffers(fwd_info, buf);
+ diag_ws_release();
+ return 0;
+ }
+
+ if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->read_done)
+ fwd_info->c_ops->read_done(fwd_info, buf, len);
+ fwd_info->read_bytes += len;
+
+ return 0;
+}
+
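
A sketch of the len == 0 convention documented in the comment above; a transport's (hypothetical) completion path reports zero length on error, so the core only resets the in_busy flags and does not queue another read:

    static void example_read_complete(struct diagfwd_info *fwd,
    				  unsigned char *buf, int bytes, int err)
    {
    	/* len == 0 signals a transport error to the core */
    	diagfwd_channel_read_done(fwd, buf, err ? 0 : (uint32_t)bytes);
    }
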
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
+{
+ struct diagfwd_info *fwd_info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+ return;
+
+ fwd_info = &peripheral_info[type][peripheral];
+ if (ctxt == 1 && fwd_info->buf_1)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ else if (ctxt == 2 && fwd_info->buf_2)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ else
+ pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
+
+ diagfwd_queue_read(fwd_info);
+}
+
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr)
+{
+ int found = 0;
+ int index = 0;
+ unsigned long flags;
+
+ if (!fwd_info || !ptr)
+ return found;
+ spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+ for (index = 0; index < NUM_WRITE_BUFFERS; index++) {
+ if (fwd_info->buf_ptr[index]->data == ptr) {
+ atomic_set(&fwd_info->buf_ptr[index]->in_busy, 0);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+ return found;
+}
+
+void diagfwd_channel_read(struct diagfwd_info *fwd_info)
+{
+ int err = 0;
+ uint32_t read_len = 0;
+ unsigned char *read_buf = NULL;
+ struct diagfwd_buf_t *temp_buf = NULL;
+
+ if (!fwd_info) {
+ diag_ws_release();
+ return;
+ }
+
+ if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+ pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type,
+ fwd_info->inited, atomic_read(&fwd_info->opened),
+ fwd_info->ch_open);
+ diag_ws_release();
+ return;
+ }
+
+ if (fwd_info->buf_1 && !atomic_read(&fwd_info->buf_1->in_busy)) {
+ temp_buf = fwd_info->buf_1;
+ atomic_set(&temp_buf->in_busy, 1);
+ if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+ (fwd_info->type == TYPE_DATA ||
+ fwd_info->type == TYPE_CMD)) {
+ read_buf = fwd_info->buf_1->data_raw;
+ read_len = fwd_info->buf_1->len_raw;
+ } else {
+ read_buf = fwd_info->buf_1->data;
+ read_len = fwd_info->buf_1->len;
+ }
+ } else if (fwd_info->buf_2 && !atomic_read(&fwd_info->buf_2->in_busy)) {
+ temp_buf = fwd_info->buf_2;
+ atomic_set(&temp_buf->in_busy, 1);
+ if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+ (fwd_info->type == TYPE_DATA ||
+ fwd_info->type == TYPE_CMD)) {
+ read_buf = fwd_info->buf_2->data_raw;
+ read_len = fwd_info->buf_2->len_raw;
+ } else {
+ read_buf = fwd_info->buf_2->data;
+ read_len = fwd_info->buf_2->len;
+ }
+ } else {
+ pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ }
+
+ if (!read_buf) {
+ diag_ws_release();
+ return;
+ }
+
+ if (!(fwd_info->p_ops && fwd_info->p_ops->read && fwd_info->ctxt))
+ goto fail_return;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "issued a read p: %d t: %d buf: %pK\n",
+ fwd_info->peripheral, fwd_info->type, read_buf);
+ err = fwd_info->p_ops->read(fwd_info->ctxt, read_buf, read_len);
+ if (err)
+ goto fail_return;
+
+ return;
+
+fail_return:
+ diag_ws_release();
+ atomic_set(&temp_buf->in_busy, 0);
+}
+
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info)
+{
+ if (!fwd_info)
+ return;
+
+ if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+ pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d ch_open: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type,
+ fwd_info->inited, atomic_read(&fwd_info->opened),
+ fwd_info->ch_open);
+ return;
+ }
+
+ /*
+ * Don't queue a read on the data and command channels before receiving
+ * the feature mask from the peripheral. We won't know which buffer to
+ * use - HDLC or non HDLC buffer for reading.
+ */
+ if ((!driver->feature[fwd_info->peripheral].rcvd_feature_mask) &&
+ (fwd_info->type != TYPE_CNTL)) {
+ return;
+ }
+
+ if (fwd_info->p_ops && fwd_info->p_ops->queue_read && fwd_info->ctxt)
+ fwd_info->p_ops->queue_read(fwd_info->ctxt);
+}
+
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
+{
+ unsigned long flags;
+
+ if (!fwd_info)
+ return;
+
+ if (!fwd_info->inited) {
+ pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ return;
+ }
+
+ spin_lock_irqsave(&fwd_info->buf_lock, flags);
+ if (!fwd_info->buf_1) {
+ fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_ATOMIC);
+ if (!fwd_info->buf_1)
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_1);
+ }
+ if (!fwd_info->buf_1->data) {
+ fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_ATOMIC);
+ if (!fwd_info->buf_1->data)
+ goto err;
+ fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_1->data);
+ fwd_info->buf_1->ctxt = SET_BUF_CTXT(fwd_info->peripheral,
+ fwd_info->type, 1);
+ }
+
+ if (fwd_info->type == TYPE_DATA) {
+ if (!fwd_info->buf_2) {
+ fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_ATOMIC);
+ if (!fwd_info->buf_2)
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_2);
+ }
+
+ if (!fwd_info->buf_2->data) {
+ fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_ATOMIC);
+ if (!fwd_info->buf_2->data)
+ goto err;
+ fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_2->data);
+ fwd_info->buf_2->ctxt = SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 2);
+ }
+
+ if (driver->supports_apps_hdlc_encoding) {
+ /* In support of hdlc encoding */
+ if (!fwd_info->buf_1->data_raw) {
+ fwd_info->buf_1->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_ATOMIC);
+ if (!fwd_info->buf_1->data_raw)
+ goto err;
+ fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_1->data_raw);
+ }
+ if (!fwd_info->buf_2->data_raw) {
+ fwd_info->buf_2->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_ATOMIC);
+ if (!fwd_info->buf_2->data_raw)
+ goto err;
+ fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_2->data_raw);
+ }
+ }
+ }
+
+ if (fwd_info->type == TYPE_CMD && driver->supports_apps_hdlc_encoding) {
+ /* In support of hdlc encoding */
+ if (!fwd_info->buf_1->data_raw) {
+ fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_ATOMIC);
+ if (!fwd_info->buf_1->data_raw)
+ goto err;
+ fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_1->data_raw);
+ }
+ }
+
+ spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+ return;
+
+err:
+ spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+ diagfwd_buffers_exit(fwd_info);
+}
+
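
The ctxt stamped on each buffer above is what diag_mux_write() later echoes back, letting diagfwd_write_done() identify which buffer (1 or 2) of which peripheral/type completed. The real SET_BUF_CTXT/GET_BUF_NUM macros are defined elsewhere in the driver; a minimal sketch, assuming byte-wide fields:

    /* Hypothetical layout: peripheral | type | buffer number */
    #define EX_SET_BUF_CTXT(p, t, n)	(((p) << 16) | ((t) << 8) | (n))
    #define EX_GET_BUF_NUM(c)		((c) & 0xff)
    #define EX_GET_BUF_TYPE(c)		(((c) >> 8) & 0xff)
    #define EX_GET_BUF_PERIPHERAL(c)	(((c) >> 16) & 0xff)
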
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
+{
+ unsigned long flags;
+
+ if (!fwd_info)
+ return;
+
+ spin_lock_irqsave(&fwd_info->buf_lock, flags);
+ if (fwd_info->buf_1) {
+ kfree(fwd_info->buf_1->data);
+ fwd_info->buf_1->data = NULL;
+ kfree(fwd_info->buf_1->data_raw);
+ fwd_info->buf_1->data_raw = NULL;
+ kfree(fwd_info->buf_1);
+ fwd_info->buf_1 = NULL;
+ }
+ if (fwd_info->buf_2) {
+ kfree(fwd_info->buf_2->data);
+ fwd_info->buf_2->data = NULL;
+ kfree(fwd_info->buf_2->data_raw);
+ fwd_info->buf_2->data_raw = NULL;
+ kfree(fwd_info->buf_2);
+ fwd_info->buf_2 = NULL;
+ }
+ spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+}
+
+static void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
+{
+ unsigned long flags;
+ int i;
+
+ if (!fwd_info)
+ return;
+
+ if (!fwd_info->inited) {
+ pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+ __func__, fwd_info->peripheral, fwd_info->type);
+ return;
+ }
+
+ spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (!fwd_info->buf_ptr[i])
+ fwd_info->buf_ptr[i] =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_ATOMIC);
+ if (!fwd_info->buf_ptr[i])
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_ptr[i]);
+ if (!fwd_info->buf_ptr[i]->data) {
+ fwd_info->buf_ptr[i]->data = kzalloc(PERIPHERAL_BUF_SZ,
+ GFP_ATOMIC);
+ if (!fwd_info->buf_ptr[i]->data)
+ goto err;
+ fwd_info->buf_ptr[i]->len = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_ptr[i]->data);
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+ return;
+
+err:
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+ pr_err("diag:unable to allocate write buffers\n");
+ diagfwd_write_buffers_exit(fwd_info);
+
+}
+
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info)
+{
+ unsigned long flags;
+ int i;
+
+ if (!fwd_info)
+ return;
+
+ spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+ for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+ if (fwd_info->buf_ptr[i]) {
+ kfree(fwd_info->buf_ptr[i]->data);
+ fwd_info->buf_ptr[i]->data = NULL;
+ kfree(fwd_info->buf_ptr[i]);
+ fwd_info->buf_ptr[i] = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+}
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
new file mode 100644
index 0000000..ed4bd76
--- /dev/null
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -0,0 +1,117 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_PERIPHERAL_H
+#define DIAGFWD_PERIPHERAL_H
+
+#define PERIPHERAL_BUF_SZ 16384
+#define MAX_PERIPHERAL_BUF_SZ 32768
+#define MAX_PERIPHERAL_HDLC_BUF_SZ 65539
+
+#define TRANSPORT_UNKNOWN -1
+#define TRANSPORT_SOCKET 0
+#define TRANSPORT_GLINK 1
+#define NUM_TRANSPORT 2
+#define NUM_WRITE_BUFFERS 2
+#define PERIPHERAL_MASK(x) \
+ ((x == PERIPHERAL_MODEM) ? DIAG_CON_MPSS : \
+ ((x == PERIPHERAL_LPASS) ? DIAG_CON_LPASS : \
+ ((x == PERIPHERAL_WCNSS) ? DIAG_CON_WCNSS : \
+ ((x == PERIPHERAL_SENSORS) ? DIAG_CON_SENSORS : \
+ ((x == PERIPHERAL_WDSP) ? DIAG_CON_WDSP : 0)))))
+
+#define PERIPHERAL_STRING(x) \
+ ((x == PERIPHERAL_MODEM) ? "MODEM" : \
+ ((x == PERIPHERAL_LPASS) ? "LPASS" : \
+ ((x == PERIPHERAL_WCNSS) ? "WCNSS" : \
+ ((x == PERIPHERAL_SENSORS) ? "SENSORS" : \
+ ((x == PERIPHERAL_WDSP) ? "WDSP" : "UNKNOWN"))))) \
+
+struct diagfwd_buf_t {
+ unsigned char *data;
+ unsigned char *data_raw;
+ uint32_t len;
+ uint32_t len_raw;
+ atomic_t in_busy;
+ int ctxt;
+};
+
+struct diag_channel_ops {
+ void (*open)(struct diagfwd_info *fwd_info);
+ void (*close)(struct diagfwd_info *fwd_info);
+ void (*read_done)(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
+};
+
+struct diag_peripheral_ops {
+ void (*open)(void *ctxt);
+ void (*close)(void *ctxt);
+ int (*write)(void *ctxt, unsigned char *buf, int len);
+ int (*read)(void *ctxt, unsigned char *buf, int len);
+ void (*queue_read)(void *ctxt);
+};
+
+struct diagfwd_info {
+ uint8_t peripheral;
+ uint8_t type;
+ uint8_t transport;
+ uint8_t inited;
+ uint8_t ch_open;
+ atomic_t opened;
+ unsigned long read_bytes;
+ unsigned long write_bytes;
+ spinlock_t buf_lock;
+ spinlock_t write_buf_lock;
+ struct mutex data_mutex;
+ void *ctxt;
+ struct diagfwd_buf_t *buf_1;
+ struct diagfwd_buf_t *buf_2;
+ struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS];
+ struct diag_peripheral_ops *p_ops;
+ struct diag_channel_ops *c_ops;
+};
+
+extern struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+int diagfwd_peripheral_init(void);
+void diagfwd_peripheral_exit(void);
+
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral);
+
+void diagfwd_open(uint8_t peripheral, uint8_t type);
+void diagfwd_early_open(uint8_t peripheral);
+
+void diagfwd_late_open(struct diagfwd_info *fwd_info);
+void diagfwd_close(uint8_t peripheral, uint8_t type);
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+ void *ctxt, struct diag_peripheral_ops *ops,
+ struct diagfwd_info **fwd_ctxt);
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+ struct diag_peripheral_ops *ops,
+ struct diagfwd_info **fwd_ctxt);
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt);
+
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len);
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt);
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info);
+
+/*
+ * The following functions are called by the channels
+ */
+int diagfwd_channel_open(struct diagfwd_info *fwd_info);
+int diagfwd_channel_close(struct diagfwd_info *fwd_info);
+void diagfwd_channel_read(struct diagfwd_info *fwd_info);
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, uint32_t len);
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_smux.c b/drivers/char/diag/diagfwd_smux.c
new file mode 100644
index 0000000..33f91d1
--- /dev/null
+++ b/drivers/char/diag/diagfwd_smux.c
@@ -0,0 +1,331 @@
+/* Copyright (c) 2012, 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/termios.h>
+#include <linux/slab.h>
+#include <linux/diagchar.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/usbdiag.h>
+
+#include "diagchar.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_smux.h"
+
+struct diag_smux_info diag_smux[NUM_SMUX_DEV] = {
+ {
+ .id = SMUX_1,
+ .lcid = SMUX_USB_DIAG_0,
+ .dev_id = DIAGFWD_SMUX,
+ .name = "SMUX_1",
+ .read_buf = NULL,
+ .read_len = 0,
+ .in_busy = 0,
+ .enabled = 0,
+ .opened = 0,
+ },
+};
+
+static void diag_smux_event(void *priv, int event_type, const void *metadata)
+{
+ int len = 0;
+ int id = (int)(uintptr_t)priv;
+ unsigned char *rx_buf = NULL;
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return;
+
+ ch = &diag_smux[id];
+ if (metadata) {
+ len = ((struct smux_meta_read *)metadata)->len;
+ rx_buf = ((struct smux_meta_read *)metadata)->buffer;
+ }
+
+ switch (event_type) {
+ case SMUX_CONNECTED:
+ pr_info("diag: SMUX_CONNECTED received, ch: %d\n", ch->id);
+ ch->opened = 1;
+ ch->in_busy = 0;
+ break;
+ case SMUX_DISCONNECTED:
+ ch->opened = 0;
+ msm_smux_close(ch->lcid);
+ pr_info("diag: SMUX_DISCONNECTED received, ch: %d\n", ch->id);
+ break;
+ case SMUX_WRITE_DONE:
+ pr_debug("diag: SMUX Write done, ch: %d\n", ch->id);
+ diag_remote_dev_write_done(ch->dev_id, rx_buf, len, ch->id);
+ break;
+ case SMUX_WRITE_FAIL:
+ pr_info("diag: SMUX Write Failed, ch: %d\n", ch->id);
+ break;
+ case SMUX_READ_FAIL:
+ pr_info("diag: SMUX Read Failed, ch: %d\n", ch->id);
+ break;
+ case SMUX_READ_DONE:
+ ch->read_buf = rx_buf;
+ ch->read_len = len;
+ ch->in_busy = 1;
+ diag_remote_dev_read_done(ch->dev_id, ch->read_buf,
+ ch->read_len);
+ break;
+ }
+}
+
+static int diag_smux_init_ch(struct diag_smux_info *ch)
+{
+ if (!ch)
+ return -EINVAL;
+
+ if (!ch->enabled) {
+ pr_debug("diag: SMUX channel is not enabled id: %d\n", ch->id);
+ return -ENODEV;
+ }
+
+ if (ch->inited) {
+ pr_debug("diag: SMUX channel %d is already initialize\n",
+ ch->id);
+ return 0;
+ }
+
+ ch->read_buf = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+ if (!ch->read_buf)
+ return -ENOMEM;
+
+ ch->inited = 1;
+
+ return 0;
+}
+
+static int smux_get_rx_buffer(void *priv, void **pkt_priv, void **buf,
+ int size)
+{
+ int id = (int)(uintptr_t)priv;
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ ch = &diag_smux[id];
+
+ if (ch->in_busy) {
+ pr_debug("diag: read buffer for SMUX is BUSY\n");
+ return -EAGAIN;
+ }
+
+ *pkt_priv = (void *)0x1234;
+ *buf = ch->read_buf;
+ ch->in_busy = 1;
+ return 0;
+}
+
+static int smux_open(int id)
+{
+ int err = 0;
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ ch = &diag_smux[id];
+ if (ch->opened) {
+ pr_debug("diag: SMUX channel %d is already connected\n",
+ ch->id);
+ return 0;
+ }
+
+ err = diag_smux_init_ch(ch);
+ if (err) {
+ pr_err("diag: Unable to initialize SMUX channel %d, err: %d\n",
+ ch->id, err);
+ return err;
+ }
+
+ err = msm_smux_open(ch->lcid, (void *)(uintptr_t)ch->id,
+ diag_smux_event,
+ smux_get_rx_buffer);
+ if (err) {
+ pr_err("diag: failed to open SMUX ch %d, err: %d\n",
+ ch->id, err);
+ return err;
+ }
+ msm_smux_tiocm_set(ch->lcid, TIOCM_DTR, 0);
+ ch->opened = 1;
+ pr_info("diag: SMUX ch %d is connected\n", ch->id);
+ return 0;
+}
+
+static int smux_close(int id)
+{
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ ch = &diag_smux[id];
+ if (!ch->enabled) {
+ pr_debug("diag: SMUX channel is not enabled id: %d\n", ch->id);
+ return -ENODEV;
+ }
+
+ msm_smux_close(ch->lcid);
+ ch->opened = 0;
+ ch->in_busy = 1;
+ kfree(ch->read_buf);
+ ch->read_buf = NULL;
+ return 0;
+}
+
+static int smux_queue_read(int id)
+{
+ return 0;
+}
+
+static int smux_write(int id, unsigned char *buf, int len, int ctxt)
+{
+ struct diag_smux_info *ch = NULL;
+
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ ch = &diag_smux[id];
+ return msm_smux_write(ch->lcid, NULL, buf, len);
+}
+
+static int smux_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+ if (id < 0 || id >= NUM_SMUX_DEV)
+ return -EINVAL;
+
+ diag_smux[id].in_busy = 0;
+ return 0;
+}
+
+static int diagfwd_smux_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: suspending...\n");
+ return 0;
+}
+
+static int diagfwd_smux_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: resuming...\n");
+ return 0;
+}
+
+static const struct dev_pm_ops diagfwd_smux_dev_pm_ops = {
+ .runtime_suspend = diagfwd_smux_runtime_suspend,
+ .runtime_resume = diagfwd_smux_runtime_resume,
+};
+
+static int diagfwd_smux_probe(struct platform_device *pdev)
+{
+ if (!pdev)
+ return -EINVAL;
+
+ pr_debug("diag: SMUX probe called, pdev->id: %d\n", pdev->id);
+ if (pdev->id < 0 || pdev->id >= NUM_SMUX_DEV) {
+ pr_err("diag: No support for SMUX device %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ diag_smux[pdev->id].enabled = 1;
+ return smux_open(pdev->id);
+}
+
+static int diagfwd_smux_remove(struct platform_device *pdev)
+{
+ if (!pdev)
+ return -EINVAL;
+
+ pr_debug("diag: SMUX probe called, pdev->id: %d\n", pdev->id);
+ if (pdev->id < 0 || pdev->id >= NUM_SMUX_DEV) {
+ pr_err("diag: No support for SMUX device %d\n", pdev->id);
+ return -EINVAL;
+ }
+ if (!diag_smux[pdev->id].enabled) {
+ pr_err("diag: SMUX channel %d is not enabled\n",
+ diag_smux[pdev->id].id);
+ return -ENODEV;
+ }
+ return smux_close(pdev->id);
+}
+
+static struct platform_driver msm_diagfwd_smux_driver = {
+ .probe = diagfwd_smux_probe,
+ .remove = diagfwd_smux_remove,
+ .driver = {
+ .name = "SMUX_DIAG",
+ .owner = THIS_MODULE,
+ .pm = &diagfwd_smux_dev_pm_ops,
+ },
+};
+
+static struct diag_remote_dev_ops diag_smux_fwd_ops = {
+ .open = smux_open,
+ .close = smux_close,
+ .queue_read = smux_queue_read,
+ .write = smux_write,
+ .fwd_complete = smux_fwd_complete,
+};
+
+int diag_smux_init(void)
+{
+ int i;
+ int err = 0;
+ struct diag_smux_info *ch = NULL;
+ char wq_name[DIAG_SMUX_NAME_SZ + 11];
+
+ for (i = 0; i < NUM_SMUX_DEV; i++) {
+ ch = &diag_smux[i];
+ strlcpy(wq_name, "DIAG_SMUX_", 11);
+ strlcat(wq_name, ch->name, sizeof(ch->name));
+ ch->smux_wq = create_singlethread_workqueue(wq_name);
+ if (!ch->smux_wq) {
+ err = -ENOMEM;
+ goto fail;
+ }
+ err = diagfwd_bridge_register(ch->dev_id, ch->id,
+ &diag_smux_fwd_ops);
+ if (err) {
+ pr_err("diag: Unable to register SMUX ch %d with bridge\n",
+ ch->id);
+ goto fail;
+ }
+ }
+
+ err = platform_driver_register(&msm_diagfwd_smux_driver);
+ if (err) {
+ pr_err("diag: Unable to register SMUX device, err: %d\n", err);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ diag_smux_exit();
+ return err;
+}
+
+void diag_smux_exit(void)
+{
+ int i;
+ struct diag_smux_info *ch = NULL;
+
+ for (i = 0; i < NUM_SMUX_DEV; i++) {
+ ch = &diag_smux[i];
+ kfree(ch->read_buf);
+ ch->read_buf = NULL;
+ ch->enabled = 0;
+ ch->opened = 0;
+ ch->read_len = 0;
+ }
+ platform_driver_unregister(&msm_diagfwd_smux_driver);
+}
diff --git a/drivers/char/diag/diagfwd_smux.h b/drivers/char/diag/diagfwd_smux.h
new file mode 100644
index 0000000..f2514a2
--- /dev/null
+++ b/drivers/char/diag/diagfwd_smux.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2012,2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SMUX_H
+#define DIAGFWD_SMUX_H
+
+#include <linux/smux.h>
+
+#define SMUX_1 0
+#define NUM_SMUX_DEV 1
+
+#define DIAG_SMUX_NAME_SZ 24
+
+struct diag_smux_info {
+ int id;
+ int lcid;
+ int dev_id;
+ char name[DIAG_SMUX_NAME_SZ];
+ unsigned char *read_buf;
+ int read_len;
+ int in_busy;
+ int enabled;
+ int inited;
+ int opened;
+ struct work_struct read_work;
+ struct workqueue_struct *smux_wq;
+};
+
+extern struct diag_smux_info diag_smux[NUM_SMUX_DEV];
+
+int diag_smux_init(void);
+void diag_smux_exit(void);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
new file mode 100644
index 0000000..c82c918
--- /dev/null
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -0,0 +1,1107 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/msm_ipc.h>
+#include <linux/socket.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <asm/current.h>
+#include <net/sock.h>
+#include <linux/ipc_router.h>
+#include <linux/notifier.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_socket.h"
+#include "diag_ipc_logging.h"
+
+#define DIAG_SVC_ID 0x1001
+
+#define MODEM_INST_BASE 0
+#define LPASS_INST_BASE 64
+#define WCNSS_INST_BASE 128
+#define SENSORS_INST_BASE 192
+#define WDSP_INST_BASE 256
+
+#define INST_ID_CNTL 0
+#define INST_ID_CMD 1
+#define INST_ID_DATA 2
+#define INST_ID_DCI_CMD 3
+#define INST_ID_DCI 4
+
+struct diag_cntl_socket_info *cntl_socket;
+
+struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DATA,
+ .name = "MODEM_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DATA,
+ .name = "LPASS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DATA,
+ .name = "WCNSS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DATA,
+ .name = "SENSORS_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DATA,
+ .name = "DIAG_DATA"
+ }
+};
+
+struct diag_socket_info socket_cntl[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CNTL,
+ .name = "MODEM_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CNTL,
+ .name = "LPASS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CNTL,
+ .name = "WCNSS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CNTL,
+ .name = "SENSORS_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CNTL,
+ .name = "DIAG_CTRL"
+ }
+};
+
+struct diag_socket_info socket_dci[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI,
+ .name = "MODEM_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI,
+ .name = "LPASS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI,
+ .name = "WCNSS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI,
+ .name = "SENSORS_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI,
+ .name = "DIAG_DCI_DATA"
+ }
+};
+
+struct diag_socket_info socket_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_CMD,
+ .name = "MODEM_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_CMD,
+ .name = "LPASS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_CMD,
+ .name = "WCNSS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_CMD,
+ .name = "SENSORS_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_CMD,
+ .name = "DIAG_CMD"
+ }
+};
+
+struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
+ {
+ .peripheral = PERIPHERAL_MODEM,
+ .type = TYPE_DCI_CMD,
+ .name = "MODEM_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_LPASS,
+ .type = TYPE_DCI_CMD,
+ .name = "LPASS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WCNSS,
+ .type = TYPE_DCI_CMD,
+ .name = "WCNSS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_SENSORS,
+ .type = TYPE_DCI_CMD,
+ .name = "SENSORS_DCI_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_WDSP,
+ .type = TYPE_DCI_CMD,
+ .name = "DIAG_DCI_CMD"
+ }
+};
+
+static void diag_state_open_socket(void *ctxt);
+static void diag_state_close_socket(void *ctxt);
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len);
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_socket_queue_read(void *ctxt);
+static void socket_init_work_fn(struct work_struct *work);
+static int socket_ready_notify(struct notifier_block *nb,
+ unsigned long action, void *data);
+
+static struct diag_peripheral_ops socket_ops = {
+ .open = diag_state_open_socket,
+ .close = diag_state_close_socket,
+ .write = diag_socket_write,
+ .read = diag_socket_read,
+ .queue_read = diag_socket_queue_read
+};
+
+static struct notifier_block socket_notify = {
+ .notifier_call = socket_ready_notify,
+};
+
+static void diag_state_open_socket(void *ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt)
+ return;
+
+ info = (struct diag_socket_info *)(ctxt);
+ atomic_set(&info->diag_state, 1);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 1", info->name);
+}
+
+static void diag_state_close_socket(void *ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt)
+ return;
+
+ info = (struct diag_socket_info *)(ctxt);
+ atomic_set(&info->diag_state, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s setting diag state to 0", info->name);
+ wake_up_interruptible(&info->read_wait_q);
+ flush_workqueue(info->wq);
+}
+
+static void socket_data_ready(struct sock *sk_ptr)
+{
+ unsigned long flags;
+ struct diag_socket_info *info = NULL;
+
+ if (!sk_ptr) {
+ pr_err_ratelimited("diag: In %s, invalid sk_ptr", __func__);
+ return;
+ }
+
+ info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
+ if (!info) {
+ pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->data_ready++;
+ spin_unlock_irqrestore(&info->lock, flags);
+ diag_ws_on_notify();
+
+ /*
+ * Initialize read buffers for the servers. The servers must read data
+ * first to get the address of its clients.
+ */
+ if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
+ diagfwd_buffers_init(info->fwd_ctxt);
+
+ queue_work(info->wq, &(info->read_work));
+ wake_up_interruptible(&info->read_wait_q);
+}
+
+static void cntl_socket_data_ready(struct sock *sk_ptr)
+{
+ if (!sk_ptr || !cntl_socket) {
+ pr_err_ratelimited("diag: In %s, invalid ptrs. sk_ptr: %pK cntl_socket: %pK\n",
+ __func__, sk_ptr, cntl_socket);
+ return;
+ }
+
+ atomic_inc(&cntl_socket->data_ready);
+ wake_up_interruptible(&cntl_socket->read_wait_q);
+ queue_work(cntl_socket->wq, &(cntl_socket->read_work));
+}
+
+static void socket_flow_cntl(struct sock *sk_ptr)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!sk_ptr)
+ return;
+
+ info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
+ if (!info) {
+ pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
+ return;
+ }
+
+ atomic_inc(&info->flow_cnt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s flow controlled\n", info->name);
+ pr_debug("diag: In %s, channel %s flow controlled\n",
+ __func__, info->name);
+}
+
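+/*
+ * Ask the IPC router for the server advertising this channel's
+ * service/instance pair and cache its node/port address in
+ * remote_addr, which later addresses every kernel_sendmsg() in
+ * diag_socket_write().
+ */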
+static int lookup_server(struct diag_socket_info *info)
+{
+ int ret = 0;
+ struct server_lookup_args *args = NULL;
+ struct sockaddr_msm_ipc *srv_addr = NULL;
+
+ if (!info)
+ return -EINVAL;
+
+ args = kzalloc((sizeof(struct server_lookup_args) +
+ sizeof(struct msm_ipc_server_info)), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+ kmemleak_not_leak(args);
+
+ args->lookup_mask = 0xFFFFFFFF;
+ args->port_name.service = info->svc_id;
+ args->port_name.instance = info->ins_id;
+ args->num_entries_in_array = 1;
+ args->num_entries_found = 0;
+
+ ret = kernel_sock_ioctl(info->hdl, IPC_ROUTER_IOCTL_LOOKUP_SERVER,
+ (unsigned long)args);
+ if (ret < 0) {
+ pr_err("diag: In %s, cannot find service for %s\n", __func__,
+ info->name);
+ kfree(args);
+ return -EFAULT;
+ }
+
+ srv_addr = &info->remote_addr;
+ srv_addr->family = AF_MSM_IPC;
+ srv_addr->address.addrtype = MSM_IPC_ADDR_ID;
+ srv_addr->address.addr.port_addr.node_id = args->srv_info[0].node_id;
+ srv_addr->address.addr.port_addr.port_id = args->srv_info[0].port_id;
+ ret = args->num_entries_found;
+ kfree(args);
+ if (ret < 1)
+ return -EIO;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s found server node: %d port: %d",
+ info->name, srv_addr->address.addr.port_addr.node_id,
+ srv_addr->address.addr.port_addr.port_id);
+ return 0;
+}
+
+static void __socket_open_channel(struct diag_socket_info *info)
+{
+ if (!info)
+ return;
+
+ if (!info->inited) {
+ pr_debug("diag: In %s, socket %s is not initialized\n",
+ __func__, info->name);
+ return;
+ }
+
+ if (atomic_read(&info->opened)) {
+ pr_debug("diag: In %s, socket %s already opened\n",
+ __func__, info->name);
+ return;
+ }
+
+ atomic_set(&info->opened, 1);
+ diagfwd_channel_open(info->fwd_ctxt);
+}
+
+static void socket_open_client(struct diag_socket_info *info)
+{
+ int ret = 0;
+
+ if (!info || info->port_type != PORT_TYPE_CLIENT)
+ return;
+
+ ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &info->hdl);
+ if (ret < 0 || !info->hdl) {
+ pr_err("diag: In %s, socket not initialized for %s\n", __func__,
+ info->name);
+ return;
+ }
+
+ write_lock_bh(&info->hdl->sk->sk_callback_lock);
+ info->hdl->sk->sk_user_data = (void *)(info);
+ info->hdl->sk->sk_data_ready = socket_data_ready;
+ info->hdl->sk->sk_write_space = socket_flow_cntl;
+ write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+ ret = lookup_server(info);
+ if (ret) {
+ pr_err("diag: In %s, failed to lookup server, ret: %d\n",
+ __func__, ret);
+ return;
+ }
+ __socket_open_channel(info);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static void socket_open_server(struct diag_socket_info *info)
+{
+ int ret = 0;
+ struct sockaddr_msm_ipc srv_addr = { 0 };
+
+ if (!info)
+ return;
+
+ ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &info->hdl);
+ if (ret < 0 || !info->hdl) {
+ pr_err("diag: In %s, socket not initialized for %s\n", __func__,
+ info->name);
+ return;
+ }
+
+ write_lock_bh(&info->hdl->sk->sk_callback_lock);
+ info->hdl->sk->sk_user_data = (void *)(info);
+ info->hdl->sk->sk_data_ready = socket_data_ready;
+ info->hdl->sk->sk_write_space = socket_flow_cntl;
+ write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+
+ srv_addr.family = AF_MSM_IPC;
+ srv_addr.address.addrtype = MSM_IPC_ADDR_NAME;
+ srv_addr.address.addr.port_name.service = info->svc_id;
+ srv_addr.address.addr.port_name.instance = info->ins_id;
+
+ ret = kernel_bind(info->hdl, (struct sockaddr *)&srv_addr,
+ sizeof(srv_addr));
+ if (ret) {
+ pr_err("diag: In %s, failed to bind, ch: %s, svc_id: %d ins_id: %d, err: %d\n",
+ __func__, info->name, info->svc_id, info->ins_id, ret);
+ return;
+ }
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened server svc: %d ins: %d",
+ info->name, info->svc_id, info->ins_id);
+}
+
+static void socket_init_work_fn(struct work_struct *work)
+{
+ struct diag_socket_info *info = container_of(work,
+ struct diag_socket_info,
+ init_work);
+ if (!info)
+ return;
+
+ if (!info->inited) {
+ pr_debug("diag: In %s, socket %s is not initialized\n",
+ __func__, info->name);
+ return;
+ }
+
+ switch (info->port_type) {
+ case PORT_TYPE_SERVER:
+ socket_open_server(info);
+ break;
+ case PORT_TYPE_CLIENT:
+ socket_open_client(info);
+ break;
+ default:
+ pr_err("diag: In %s, unknown type %d\n", __func__,
+ info->port_type);
+ break;
+ }
+}
+
+static void __socket_close_channel(struct diag_socket_info *info)
+{
+ if (!info || !info->hdl)
+ return;
+
+ if (!atomic_read(&info->opened))
+ return;
+
+ memset(&info->remote_addr, 0, sizeof(struct sockaddr_msm_ipc));
+ diagfwd_channel_close(info->fwd_ctxt);
+
+ atomic_set(&info->opened, 0);
+
+ /* Don't close the server. Server should always remain open */
+ if (info->port_type != PORT_TYPE_SERVER) {
+ write_lock_bh(&info->hdl->sk->sk_callback_lock);
+ info->hdl->sk->sk_user_data = NULL;
+ info->hdl->sk->sk_data_ready = NULL;
+ write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+ sock_release(info->hdl);
+ info->hdl = NULL;
+ wake_up_interruptible(&info->read_wait_q);
+ }
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static void socket_close_channel(struct diag_socket_info *info)
+{
+ if (!info)
+ return;
+
+ __socket_close_channel(info);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static int cntl_socket_process_msg_server(uint32_t cmd, uint32_t svc_id,
+ uint32_t ins_id)
+{
+ uint8_t peripheral;
+ uint8_t found = 0;
+ struct diag_socket_info *info = NULL;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ info = &socket_cmd[peripheral];
+ if ((svc_id == info->svc_id) &&
+ (ins_id == info->ins_id)) {
+ found = 1;
+ break;
+ }
+
+ info = &socket_dci_cmd[peripheral];
+ if ((svc_id == info->svc_id) &&
+ (ins_id == info->ins_id)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EIO;
+
+ switch (cmd) {
+ case CNTL_CMD_NEW_SERVER:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received new server\n",
+ info->name);
+ diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+ info->type, (void *)info, &socket_ops,
+ &info->fwd_ctxt);
+ queue_work(info->wq, &(info->init_work));
+ break;
+ case CNTL_CMD_REMOVE_SERVER:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove server\n",
+ info->name);
+ socket_close_channel(info);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cntl_socket_process_msg_client(uint32_t cmd, uint32_t node_id,
+ uint32_t port_id)
+{
+ uint8_t peripheral;
+ uint8_t found = 0;
+ struct diag_socket_info *info = NULL;
+ struct msm_ipc_port_addr remote_port = {0};
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ info = &socket_data[peripheral];
+ remote_port = info->remote_addr.address.addr.port_addr;
+ if ((remote_port.node_id == node_id) &&
+ (remote_port.port_id == port_id)) {
+ found = 1;
+ break;
+ }
+
+ info = &socket_cntl[peripheral];
+ remote_port = info->remote_addr.address.addr.port_addr;
+ if ((remote_port.node_id == node_id) &&
+ (remote_port.port_id == port_id)) {
+ found = 1;
+ break;
+ }
+
+ info = &socket_dci[peripheral];
+ remote_port = info->remote_addr.address.addr.port_addr;
+ if ((remote_port.node_id == node_id) &&
+ (remote_port.port_id == port_id)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EIO;
+
+ switch (cmd) {
+ case CNTL_CMD_REMOVE_CLIENT:
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove client\n",
+ info->name);
+ socket_close_channel(info);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void cntl_socket_read_work_fn(struct work_struct *work)
+{
+ union cntl_port_msg msg;
+ int ret = 0;
+ struct kvec iov = { 0 };
+ struct msghdr read_msg = { 0 };
+
+ if (!cntl_socket)
+ return;
+
+ ret = wait_event_interruptible(cntl_socket->read_wait_q,
+ (atomic_read(&cntl_socket->data_ready) > 0));
+ if (ret)
+ return;
+
+ do {
+ iov.iov_base = &msg;
+ iov.iov_len = sizeof(msg);
+ read_msg.msg_name = NULL;
+ read_msg.msg_namelen = 0;
+ ret = kernel_recvmsg(cntl_socket->hdl, &read_msg, &iov, 1,
+ sizeof(msg), MSG_DONTWAIT);
+ if (ret < 0) {
+ pr_debug("diag: In %s, Error recving data %d\n",
+ __func__, ret);
+ break;
+ }
+
+ atomic_dec(&cntl_socket->data_ready);
+
+ switch (msg.srv.cmd) {
+ case CNTL_CMD_NEW_SERVER:
+ case CNTL_CMD_REMOVE_SERVER:
+ cntl_socket_process_msg_server(msg.srv.cmd,
+ msg.srv.service,
+ msg.srv.instance);
+ break;
+ case CNTL_CMD_REMOVE_CLIENT:
+ cntl_socket_process_msg_client(msg.cli.cmd,
+ msg.cli.node_id,
+ msg.cli.port_id);
+ break;
+ }
+ } while (atomic_read(&cntl_socket->data_ready) > 0);
+}
+
+static void socket_read_work_fn(struct work_struct *work)
+{
+ struct diag_socket_info *info = container_of(work,
+ struct diag_socket_info,
+ read_work);
+
+ if (!info)
+ return;
+
+ diagfwd_channel_read(info->fwd_ctxt);
+}
+
+static void diag_socket_queue_read(void *ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt)
+ return;
+
+ info = (struct diag_socket_info *)ctxt;
+ if (info->hdl && info->wq)
+ queue_work(info->wq, &(info->read_work));
+}
+
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt || !fwd_ctxt)
+ return;
+
+ info = (struct diag_socket_info *)ctxt;
+ info->fwd_ctxt = fwd_ctxt;
+}
+
+int diag_socket_check_state(void *ctxt)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt)
+ return 0;
+
+ info = (struct diag_socket_info *)ctxt;
+ return (int)(atomic_read(&info->diag_state));
+}
+
+static void __diag_socket_init(struct diag_socket_info *info)
+{
+ uint16_t ins_base = 0;
+ uint16_t ins_offset = 0;
+ char wq_name[DIAG_SOCKET_NAME_SZ + sizeof("DIAG_SOCKET_")];
+
+ if (!info)
+ return;
+
+ init_waitqueue_head(&info->wait_q);
+ info->inited = 0;
+ atomic_set(&info->opened, 0);
+ atomic_set(&info->diag_state, 0);
+ info->pkt_len = 0;
+ info->pkt_read = 0;
+ info->hdl = NULL;
+ info->fwd_ctxt = NULL;
+ info->data_ready = 0;
+ atomic_set(&info->flow_cnt, 0);
+ spin_lock_init(&info->lock);
+ strlcpy(wq_name, "DIAG_SOCKET_", sizeof(wq_name));
+ strlcat(wq_name, info->name, sizeof(wq_name));
+ init_waitqueue_head(&info->read_wait_q);
+ info->wq = create_singlethread_workqueue(wq_name);
+ if (!info->wq) {
+ pr_err("diag: In %s, unable to create workqueue for socket channel %s\n",
+ __func__, info->name);
+ return;
+ }
+ INIT_WORK(&(info->init_work), socket_init_work_fn);
+ INIT_WORK(&(info->read_work), socket_read_work_fn);
+
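+ /*
+ * The IPC router instance ID is a per-peripheral base plus a
+ * per-channel-type offset, e.g. the modem control channel is
+ * MODEM_INST_BASE + INST_ID_CNTL.
+ */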
+ switch (info->peripheral) {
+ case PERIPHERAL_MODEM:
+ ins_base = MODEM_INST_BASE;
+ break;
+ case PERIPHERAL_LPASS:
+ ins_base = LPASS_INST_BASE;
+ break;
+ case PERIPHERAL_WCNSS:
+ ins_base = WCNSS_INST_BASE;
+ break;
+ case PERIPHERAL_SENSORS:
+ ins_base = SENSORS_INST_BASE;
+ break;
+ case PERIPHERAL_WDSP:
+ ins_base = WDSP_INST_BASE;
+ break;
+ }
+
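+ /*
+ * DATA, CNTL and DCI channels are bound locally as servers; CMD and
+ * DCI_CMD channels act as clients that look up the matching server
+ * on the peripheral side (see socket_open_client()).
+ */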
+ switch (info->type) {
+ case TYPE_DATA:
+ ins_offset = INST_ID_DATA;
+ info->port_type = PORT_TYPE_SERVER;
+ break;
+ case TYPE_CNTL:
+ ins_offset = INST_ID_CNTL;
+ info->port_type = PORT_TYPE_SERVER;
+ break;
+ case TYPE_DCI:
+ ins_offset = INST_ID_DCI;
+ info->port_type = PORT_TYPE_SERVER;
+ break;
+ case TYPE_CMD:
+ ins_offset = INST_ID_CMD;
+ info->port_type = PORT_TYPE_CLIENT;
+ break;
+ case TYPE_DCI_CMD:
+ ins_offset = INST_ID_DCI_CMD;
+ info->port_type = PORT_TYPE_CLIENT;
+ break;
+ }
+
+ info->svc_id = DIAG_SVC_ID;
+ info->ins_id = ins_base + ins_offset;
+ info->inited = 1;
+}
+
+static void cntl_socket_init_work_fn(struct work_struct *work)
+{
+ int ret = 0;
+
+ if (!cntl_socket)
+ return;
+
+ ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &cntl_socket->hdl);
+ if (ret < 0 || !cntl_socket->hdl) {
+ pr_err("diag: In %s, cntl socket is not initialized, ret: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ write_lock_bh(&cntl_socket->hdl->sk->sk_callback_lock);
+ cntl_socket->hdl->sk->sk_user_data = (void *)cntl_socket;
+ cntl_socket->hdl->sk->sk_data_ready = cntl_socket_data_ready;
+ write_unlock_bh(&cntl_socket->hdl->sk->sk_callback_lock);
+
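+ /*
+ * Binding as a control port subscribes this socket to the router's
+ * NEW_SERVER/REMOVE_SERVER/REMOVE_CLIENT notifications, which are
+ * decoded in cntl_socket_read_work_fn().
+ */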
+ ret = kernel_sock_ioctl(cntl_socket->hdl,
+ IPC_ROUTER_IOCTL_BIND_CONTROL_PORT, 0);
+ if (ret < 0) {
+ pr_err("diag: In %s Could not bind as control port, ret: %d\n",
+ __func__, ret);
+ }
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Initialized control sockets");
+}
+
+static int __diag_cntl_socket_init(void)
+{
+ cntl_socket = kzalloc(sizeof(struct diag_cntl_socket_info), GFP_KERNEL);
+ if (!cntl_socket)
+ return -ENOMEM;
+
+ cntl_socket->svc_id = DIAG_SVC_ID;
+ cntl_socket->ins_id = 1;
+ atomic_set(&cntl_socket->data_ready, 0);
+ init_waitqueue_head(&cntl_socket->read_wait_q);
+ cntl_socket->wq = create_singlethread_workqueue("DIAG_CNTL_SOCKET");
+ if (!cntl_socket->wq) {
+ kfree(cntl_socket);
+ cntl_socket = NULL;
+ return -ENOMEM;
+ }
+ INIT_WORK(&(cntl_socket->read_work), cntl_socket_read_work_fn);
+ INIT_WORK(&(cntl_socket->init_work), cntl_socket_init_work_fn);
+
+ return 0;
+}
+
+int diag_socket_init(void)
+{
+ int err = 0;
+ int peripheral = 0;
+ struct diag_socket_info *info = NULL;
+
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ info = &socket_cntl[peripheral];
+ __diag_socket_init(&socket_cntl[peripheral]);
+
+ diagfwd_cntl_register(TRANSPORT_SOCKET, peripheral,
+ (void *)info, &socket_ops, &(info->fwd_ctxt));
+
+ __diag_socket_init(&socket_data[peripheral]);
+ __diag_socket_init(&socket_cmd[peripheral]);
+ __diag_socket_init(&socket_dci[peripheral]);
+ __diag_socket_init(&socket_dci_cmd[peripheral]);
+ }
+
+ err = __diag_cntl_socket_init();
+ if (err) {
+ pr_err("diag: Unable to open control sockets, err: %d\n", err);
+ goto fail;
+ }
+
+ register_ipcrtr_af_init_notifier(&socket_notify);
+fail:
+ return err;
+}
+
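+/*
+ * Notifier callback from the IPC router core: socket creation is
+ * deferred until IPCRTR_AF_INIT signals that the AF_MSM_IPC address
+ * family is usable, at which point the server channels and the control
+ * socket are opened from their workqueues.
+ */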
+static int socket_ready_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ uint8_t peripheral;
+ struct diag_socket_info *info = NULL;
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "received notification from IPCR");
+
+ if (action != IPCRTR_AF_INIT) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "action not recognized by diag %lu\n", action);
+ return 0;
+ }
+
+ /* Initialize only the servers */
+ for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ info = &socket_cntl[peripheral];
+ queue_work(info->wq, &(info->init_work));
+ info = &socket_data[peripheral];
+ queue_work(info->wq, &(info->init_work));
+ info = &socket_dci[peripheral];
+ queue_work(info->wq, &(info->init_work));
+ }
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Initialized all servers");
+
+ queue_work(cntl_socket->wq, &(cntl_socket->init_work));
+
+ return 0;
+}
+
+int diag_socket_init_peripheral(uint8_t peripheral)
+{
+ struct diag_socket_info *info = NULL;
+
+ if (peripheral >= NUM_PERIPHERALS)
+ return -EINVAL;
+
+ info = &socket_data[peripheral];
+ diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+ info->type, (void *)info, &socket_ops,
+ &info->fwd_ctxt);
+
+ info = &socket_dci[peripheral];
+ diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+ info->type, (void *)info, &socket_ops,
+ &info->fwd_ctxt);
+ return 0;
+}
+
+static void __diag_socket_exit(struct diag_socket_info *info)
+{
+ if (!info)
+ return;
+
+ diagfwd_deregister(info->peripheral, info->type, (void *)info);
+ info->fwd_ctxt = NULL;
+ info->hdl = NULL;
+ if (info->wq)
+ destroy_workqueue(info->wq);
+}
+
+void diag_socket_early_exit(void)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++)
+ __diag_socket_exit(&socket_cntl[i]);
+}
+
+void diag_socket_exit(void)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_PERIPHERALS; i++) {
+ __diag_socket_exit(&socket_data[i]);
+ __diag_socket_exit(&socket_cmd[i]);
+ __diag_socket_exit(&socket_dci[i]);
+ __diag_socket_exit(&socket_dci_cmd[i]);
+ }
+}
+
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+ int err = 0;
+ int pkt_len = 0;
+ int read_len = 0;
+ int bytes_remaining = 0;
+ int total_recd = 0;
+ int loop_count = 0;
+ uint8_t buf_full = 0;
+ unsigned char *temp = NULL;
+ struct kvec iov = {0};
+ struct msghdr read_msg = {0};
+ struct sockaddr_msm_ipc src_addr = {0};
+ struct diag_socket_info *info = NULL;
+ unsigned long flags;
+
+ info = (struct diag_socket_info *)(ctxt);
+ if (!info)
+ return -ENODEV;
+
+ if (!buf || buf_len <= 0)
+ return -EINVAL;
+
+ temp = buf;
+ bytes_remaining = buf_len;
+
+ err = wait_event_interruptible(info->read_wait_q,
+ (info->data_ready > 0) || (!info->hdl) ||
+ (atomic_read(&info->diag_state) == 0));
+ if (err) {
+ mutex_lock(&driver->diagfwd_channel_mutex);
+ diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+ mutex_unlock(&driver->diagfwd_channel_mutex);
+ return -ERESTARTSYS;
+ }
+
+ /*
+ * There is no need to continue reading over peripheral in this case.
+ * Release the wake source hold earlier.
+ */
+ if (atomic_read(&info->diag_state) == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "%s closing read thread. diag state is closed\n",
+ info->name);
+ mutex_lock(&driver->diagfwd_channel_mutex);
+ diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+ mutex_unlock(&driver->diagfwd_channel_mutex);
+ return 0;
+ }
+
+ if (!info->hdl) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s closing read thread\n",
+ info->name);
+ goto fail;
+ }
+
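+ /*
+ * Peek at the pending packet first to learn its length without
+ * consuming it; if it will not fit in the remaining buffer, leave it
+ * queued, flag buf_full and let the requeued read work pick it up.
+ */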
+ do {
+ loop_count++;
+ iov.iov_base = temp;
+ iov.iov_len = bytes_remaining;
+ read_msg.msg_name = &src_addr;
+ read_msg.msg_namelen = sizeof(src_addr);
+
+ pkt_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1, 0,
+ MSG_PEEK);
+ if (pkt_len <= 0)
+ break;
+
+ if (pkt_len > bytes_remaining) {
+ buf_full = 1;
+ break;
+ }
+
+ spin_lock_irqsave(&info->lock, flags);
+ info->data_ready--;
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
+ pkt_len, 0);
+ if (read_len <= 0)
+ goto fail;
+
+ if (!atomic_read(&info->opened) &&
+ info->port_type == PORT_TYPE_SERVER) {
+ /*
+ * This is the first packet from the client. Copy its
+ * address to the connection object. Consider this
+ * channel open for communication.
+ */
+ memcpy(&info->remote_addr, &src_addr, sizeof(src_addr));
+ if (info->ins_id == INST_ID_DCI)
+ atomic_set(&info->opened, 1);
+ else
+ __socket_open_channel(info);
+ }
+
+ temp += read_len;
+ total_recd += read_len;
+ bytes_remaining -= read_len;
+ } while (info->data_ready > 0);
+
+ if (buf_full || (info->type == TYPE_DATA && pkt_len))
+ err = queue_work(info->wq, &(info->read_work));
+
+ if (total_recd > 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
+ info->name, total_recd);
+ mutex_lock(&driver->diagfwd_channel_mutex);
+ err = diagfwd_channel_read_done(info->fwd_ctxt,
+ buf, total_recd);
+ mutex_unlock(&driver->diagfwd_channel_mutex);
+ if (err)
+ goto fail;
+ } else {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
+ info->name, total_recd);
+ goto fail;
+ }
+
+ diag_socket_queue_read(info);
+ return 0;
+
+fail:
+ mutex_lock(&driver->diagfwd_channel_mutex);
+ diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+ mutex_unlock(&driver->diagfwd_channel_mutex);
+ return -EIO;
+}
+
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len)
+{
+ int err = 0;
+ int write_len = 0;
+ struct kvec iov = {0};
+ struct msghdr write_msg = {0};
+ struct diag_socket_info *info = NULL;
+
+ if (!ctxt || !buf || len <= 0)
+ return -EIO;
+
+ info = (struct diag_socket_info *)(ctxt);
+ if (!atomic_read(&info->opened) || !info->hdl)
+ return -ENODEV;
+
+ iov.iov_base = buf;
+ iov.iov_len = len;
+ write_msg.msg_name = &info->remote_addr;
+ write_msg.msg_namelen = sizeof(info->remote_addr);
+ write_msg.msg_flags |= MSG_DONTWAIT;
+ write_len = kernel_sendmsg(info->hdl, &write_msg, &iov, 1, len);
+ if (write_len < 0) {
+ err = write_len;
+ /*
+ * -EAGAIN means that the number of packets in flight is at
+ * max capacity and the peripheral hasn't read the data.
+ */
+ if (err != -EAGAIN) {
+ pr_err_ratelimited("diag: In %s, error sending data, err: %d, ch: %s\n",
+ __func__, err, info->name);
+ }
+ } else if (write_len != len) {
+ err = write_len;
+ pr_err_ratelimited("diag: In %s, wrote partial packet to %s, len: %d, wrote: %d\n",
+ __func__, info->name, len, write_len);
+ }
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to socket, len: %d\n",
+ info->name, write_len);
+
+ return err;
+}
+
diff --git a/drivers/char/diag/diagfwd_socket.h b/drivers/char/diag/diagfwd_socket.h
new file mode 100644
index 0000000..a2b922a
--- /dev/null
+++ b/drivers/char/diag/diagfwd_socket.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SOCKET_H
+#define DIAGFWD_SOCKET_H
+
+#include <linux/socket.h>
+#include <linux/msm_ipc.h>
+
+#define DIAG_SOCKET_NAME_SZ 24
+
+#define DIAG_SOCK_MODEM_SVC_ID 64
+#define DIAG_SOCK_MODEM_INS_ID 3
+
+#define PORT_TYPE_SERVER 0
+#define PORT_TYPE_CLIENT 1
+
+#define CNTL_CMD_NEW_SERVER 4
+#define CNTL_CMD_REMOVE_SERVER 5
+#define CNTL_CMD_REMOVE_CLIENT 6
+
+struct diag_socket_info {
+ uint8_t peripheral;
+ uint8_t type;
+ uint8_t port_type;
+ uint8_t inited;
+ atomic_t opened;
+ atomic_t diag_state;
+ uint32_t pkt_len;
+ uint32_t pkt_read;
+ uint32_t svc_id;
+ uint32_t ins_id;
+ uint32_t data_ready;
+ atomic_t flow_cnt;
+ char name[DIAG_SOCKET_NAME_SZ];
+ spinlock_t lock;
+ wait_queue_head_t wait_q;
+ struct sockaddr_msm_ipc remote_addr;
+ struct socket *hdl;
+ struct workqueue_struct *wq;
+ struct work_struct init_work;
+ struct work_struct read_work;
+ struct diagfwd_info *fwd_ctxt;
+ wait_queue_head_t read_wait_q;
+};
+
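+/*
+ * Control-port messages from the router share a leading cmd word; the
+ * value of cmd decides whether the srv or cli view of the union
+ * applies (see cntl_socket_read_work_fn()).
+ */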
+union cntl_port_msg {
+ struct {
+ uint32_t cmd;
+ uint32_t service;
+ uint32_t instance;
+ uint32_t node_id;
+ uint32_t port_id;
+ } srv;
+ struct {
+ uint32_t cmd;
+ uint32_t node_id;
+ uint32_t port_id;
+ } cli;
+};
+
+struct diag_cntl_socket_info {
+ uint32_t svc_id;
+ uint32_t ins_id;
+ atomic_t data_ready;
+ struct workqueue_struct *wq;
+ struct work_struct read_work;
+ struct work_struct init_work;
+ wait_queue_head_t read_wait_q;
+ struct socket *hdl;
+};
+
+extern struct diag_socket_info socket_data[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cntl[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cmd[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS];
+
+extern struct diag_cntl_socket_info *cntl_socket;
+
+int diag_socket_init(void);
+int diag_socket_init_peripheral(uint8_t peripheral);
+void diag_socket_exit(void);
+void diag_socket_early_exit(void);
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_socket_check_state(void *ctxt);
+#endif
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
new file mode 100644
index 0000000..ada645d
--- /dev/null
+++ b/drivers/char/diag/diagmem.c
@@ -0,0 +1,295 @@
+/* Copyright (c) 2008-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/ratelimit.h>
+#include <linux/atomic.h>
+
+#include "diagchar.h"
+#include "diagmem.h"
+
+struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS] = {
+ {
+ .id = POOL_TYPE_COPY,
+ .name = "POOL_COPY",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_HDLC,
+ .name = "POOL_HDLC",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_USER,
+ .name = "POOL_USER",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MUX_APPS,
+ .name = "POOL_MUX_APPS",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_DCI,
+ .name = "POOL_DCI",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+ {
+ .id = POOL_TYPE_MDM,
+ .name = "POOL_MDM",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM2,
+ .name = "POOL_MDM2",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM_DCI,
+ .name = "POOL_MDM_DCI",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM2_DCI,
+ .name = "POOL_MDM2_DCI",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM_MUX,
+ .name = "POOL_MDM_MUX",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM2_MUX,
+ .name = "POOL_MDM2_MUX",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM_DCI_WRITE,
+ .name = "POOL_MDM_DCI_WRITE",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_MDM2_DCI_WRITE,
+ .name = "POOL_MDM2_DCI_WRITE",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ },
+ {
+ .id = POOL_TYPE_QSC_MUX,
+ .name = "POOL_QSC_MUX",
+ .pool = NULL,
+ .itemsize = 0,
+ .poolsize = 0,
+ .count = 0
+ }
+#endif
+};
+
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize)
+{
+ if (pool_idx < 0 || pool_idx >= NUM_MEMORY_POOLS) {
+ pr_err("diag: Invalid pool index %d in %s\n", pool_idx,
+ __func__);
+ return;
+ }
+
+ diag_mempools[pool_idx].itemsize = itemsize;
+ diag_mempools[pool_idx].poolsize = poolsize;
+ pr_debug("diag: Mempool %s sizes: itemsize %d poolsize %d\n",
+ diag_mempools[pool_idx].name, diag_mempools[pool_idx].itemsize,
+ diag_mempools[pool_idx].poolsize);
+}
+
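+/*
+ * Allocate one item of at most mempool->itemsize bytes from the pool
+ * matching pool_type. Allocations are capped at poolsize outstanding
+ * items; buffers must be returned through diagmem_free() with the same
+ * pool type.
+ */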
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
+{
+ void *buf = NULL;
+ int i = 0;
+ unsigned long flags;
+ struct diag_mempool_t *mempool = NULL;
+
+ if (!driver)
+ return NULL;
+
+ for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+ mempool = &diag_mempools[i];
+ if (pool_type != mempool->id)
+ continue;
+ if (!mempool->pool) {
+ pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+ mempool->name);
+ break;
+ }
+ if (size == 0 || size > mempool->itemsize) {
+ pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n",
+ mempool->name, size);
+ break;
+ }
+ spin_lock_irqsave(&mempool->lock, flags);
+ if (mempool->count < mempool->poolsize) {
+ mempool->count++;
+ buf = mempool_alloc(mempool->pool, GFP_ATOMIC);
+ kmemleak_not_leak(buf);
+ }
+ spin_unlock_irqrestore(&mempool->lock, flags);
+ if (!buf) {
+ pr_debug_ratelimited("diag: Unable to allocate buffer from memory pool %s, size: %d/%d count: %d/%d\n",
+ mempool->name,
+ size, mempool->itemsize,
+ mempool->count,
+ mempool->poolsize);
+ }
+ break;
+ }
+
+ return buf;
+}
+
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
+{
+ int i = 0;
+ unsigned long flags;
+ struct diag_mempool_t *mempool = NULL;
+
+ if (!driver || !buf)
+ return;
+
+ for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+ mempool = &diag_mempools[i];
+ if (pool_type != mempool->id)
+ continue;
+ if (!mempool->pool) {
+ pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+ mempool->name);
+ break;
+ }
+ spin_lock_irqsave(&mempool->lock, flags);
+ if (mempool->count > 0) {
+ mempool_free(buf, mempool->pool);
+ mempool->count--;
+ } else {
+ pr_err_ratelimited("diag: Attempting to free items from %s mempool which is already empty\n",
+ mempool->name);
+ }
+ spin_unlock_irqrestore(&mempool->lock, flags);
+ break;
+ }
+}
+
+void diagmem_init(struct diagchar_dev *driver, int index)
+{
+ struct diag_mempool_t *mempool = NULL;
+
+ if (!driver)
+ return;
+
+ if (index < 0 || index >= NUM_MEMORY_POOLS) {
+ pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+ return;
+ }
+
+ mempool = &diag_mempools[index];
+ if (mempool->pool) {
+ pr_debug("diag: mempool %s is already initialized\n",
+ mempool->name);
+ return;
+ }
+ if (mempool->itemsize <= 0 || mempool->poolsize <= 0) {
+ pr_err("diag: Unable to initialize %s mempool, itemsize: %d poolsize: %d\n",
+ mempool->name, mempool->itemsize,
+ mempool->poolsize);
+ return;
+ }
+
+ mempool->pool = mempool_create_kmalloc_pool(mempool->poolsize,
+ mempool->itemsize);
+ if (!mempool->pool)
+ pr_err("diag: cannot allocate %s mempool\n", mempool->name);
+ else
+ kmemleak_not_leak(mempool->pool);
+
+ spin_lock_init(&mempool->lock);
+}
+
+void diagmem_exit(struct diagchar_dev *driver, int index)
+{
+ unsigned long flags;
+ struct diag_mempool_t *mempool = NULL;
+
+ if (!driver)
+ return;
+
+ if (index < 0 || index >= NUM_MEMORY_POOLS) {
+ pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+ return;
+ }
+
+ mempool = &diag_mempools[index];
+ spin_lock_irqsave(&mempool->lock, flags);
+ if (mempool->count == 0 && mempool->pool != NULL) {
+ mempool_destroy(mempool->pool);
+ mempool->pool = NULL;
+ } else {
+ pr_err("diag: Unable to destroy %s pool, count: %d\n",
+ mempool->name, mempool->count);
+ }
+ spin_unlock_irqrestore(&mempool->lock, flags);
+}
+
diff --git a/drivers/char/diag/diagmem.h b/drivers/char/diag/diagmem.h
new file mode 100644
index 0000000..d097a37
--- /dev/null
+++ b/drivers/char/diag/diagmem.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGMEM_H
+#define DIAGMEM_H
+#include "diagchar.h"
+
+#define POOL_TYPE_COPY 0
+#define POOL_TYPE_HDLC 1
+#define POOL_TYPE_USER 2
+#define POOL_TYPE_MUX_APPS 3
+#define POOL_TYPE_DCI 4
+#define POOL_TYPE_LOCAL_LAST 5
+
+#define POOL_TYPE_REMOTE_BASE POOL_TYPE_LOCAL_LAST
+#define POOL_TYPE_MDM POOL_TYPE_REMOTE_BASE
+#define POOL_TYPE_MDM2 (POOL_TYPE_REMOTE_BASE + 1)
+#define POOL_TYPE_MDM_DCI (POOL_TYPE_REMOTE_BASE + 2)
+#define POOL_TYPE_MDM2_DCI (POOL_TYPE_REMOTE_BASE + 3)
+#define POOL_TYPE_MDM_MUX (POOL_TYPE_REMOTE_BASE + 4)
+#define POOL_TYPE_MDM2_MUX (POOL_TYPE_REMOTE_BASE + 5)
+#define POOL_TYPE_MDM_DCI_WRITE (POOL_TYPE_REMOTE_BASE + 6)
+#define POOL_TYPE_MDM2_DCI_WRITE (POOL_TYPE_REMOTE_BASE + 7)
+#define POOL_TYPE_QSC_MUX (POOL_TYPE_REMOTE_BASE + 8)
+#define POOL_TYPE_REMOTE_LAST (POOL_TYPE_REMOTE_BASE + 9)
+
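+/*
+ * Remote (bridge) pool indices extend the local ones contiguously, so
+ * one diag_mempools[] array indexed by pool type covers both, e.g.
+ * POOL_TYPE_MDM == POOL_TYPE_LOCAL_LAST == 5.
+ */
+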
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MEMORY_POOLS POOL_TYPE_REMOTE_LAST
+#else
+#define NUM_MEMORY_POOLS POOL_TYPE_LOCAL_LAST
+#endif
+
+#define DIAG_MEMPOOL_NAME_SZ 24
+#define DIAG_MEMPOOL_GET_NAME(x) (diag_mempools[x].name)
+
+struct diag_mempool_t {
+ int id;
+ char name[DIAG_MEMPOOL_NAME_SZ];
+ mempool_t *pool;
+ unsigned int itemsize;
+ unsigned int poolsize;
+ int count;
+ spinlock_t lock;
+} __packed;
+
+extern struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS];
+
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize);
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type);
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type);
+void diagmem_init(struct diagchar_dev *driver, int type);
+void diagmem_exit(struct diagchar_dev *driver, int type);
+
+#endif
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
new file mode 100644
index 0000000..3b5c7bf
--- /dev/null
+++ b/include/linux/diagchar.h
@@ -0,0 +1,895 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_SHARED
+#define DIAGCHAR_SHARED
+
+#define MSG_MASKS_TYPE 0x00000001
+#define LOG_MASKS_TYPE 0x00000002
+#define EVENT_MASKS_TYPE 0x00000004
+#define PKT_TYPE 0x00000008
+#define DEINIT_TYPE 0x00000010
+#define USER_SPACE_DATA_TYPE 0x00000020
+#define DCI_DATA_TYPE 0x00000040
+#define USER_SPACE_RAW_DATA_TYPE 0x00000080
+#define DCI_LOG_MASKS_TYPE 0x00000100
+#define DCI_EVENT_MASKS_TYPE 0x00000200
+#define DCI_PKT_TYPE 0x00000400
+#define HDLC_SUPPORT_TYPE 0x00001000
+
+#define USB_MODE 1
+#define MEMORY_DEVICE_MODE 2
+#define NO_LOGGING_MODE 3
+#define UART_MODE 4
+#define SOCKET_MODE 5
+#define CALLBACK_MODE 6
+
+/* different values that go in for diag_data_type */
+#define DATA_TYPE_EVENT 0
+#define DATA_TYPE_F3 1
+#define DATA_TYPE_LOG 2
+#define DATA_TYPE_RESPONSE 3
+#define DATA_TYPE_DELAYED_RESPONSE 4
+#define DATA_TYPE_DCI_LOG 0x00000100
+#define DATA_TYPE_DCI_EVENT 0x00000200
+
+/* Different IOCTL values */
+#define DIAG_IOCTL_COMMAND_REG 0
+#define DIAG_IOCTL_COMMAND_DEREG 1
+#define DIAG_IOCTL_SWITCH_LOGGING 7
+#define DIAG_IOCTL_GET_DELAYED_RSP_ID 8
+#define DIAG_IOCTL_LSM_DEINIT 9
+#define DIAG_IOCTL_DCI_INIT 20
+#define DIAG_IOCTL_DCI_DEINIT 21
+#define DIAG_IOCTL_DCI_SUPPORT 22
+#define DIAG_IOCTL_DCI_REG 23
+#define DIAG_IOCTL_DCI_STREAM_INIT 24
+#define DIAG_IOCTL_DCI_HEALTH_STATS 25
+#define DIAG_IOCTL_DCI_LOG_STATUS 26
+#define DIAG_IOCTL_DCI_EVENT_STATUS 27
+#define DIAG_IOCTL_DCI_CLEAR_LOGS 28
+#define DIAG_IOCTL_DCI_CLEAR_EVENTS 29
+#define DIAG_IOCTL_REMOTE_DEV 32
+#define DIAG_IOCTL_VOTE_REAL_TIME 33
+#define DIAG_IOCTL_GET_REAL_TIME 34
+#define DIAG_IOCTL_PERIPHERAL_BUF_CONFIG 35
+#define DIAG_IOCTL_PERIPHERAL_BUF_DRAIN 36
+#define DIAG_IOCTL_REGISTER_CALLBACK 37
+#define DIAG_IOCTL_HDLC_TOGGLE 38
+
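+/*
+ * Userspace drives these through ioctl() on the diag char device
+ * (typically /dev/diag), e.g. DIAG_IOCTL_SWITCH_LOGGING with one of
+ * the *_MODE values above to steer traffic between USB and memory
+ * device logging.
+ */
+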
+/* PC Tools IDs */
+#define APQ8060_TOOLS_ID 4062
+#define AO8960_TOOLS_ID 4064
+#define APQ8064_TOOLS_ID 4072
+#define MSM8625_TOOLS_ID 4075
+#define MSM8930_TOOLS_ID 4076
+#define MSM8630_TOOLS_ID 4077
+#define MSM8230_TOOLS_ID 4078
+#define APQ8030_TOOLS_ID 4079
+#define MSM8627_TOOLS_ID 4080
+#define MSM8227_TOOLS_ID 4081
+#define MSM8974_TOOLS_ID 4083
+#define APQ8074_TOOLS_ID 4090
+#define MSM8916_TOOLS_ID 4094
+#define APQ8084_TOOLS_ID 4095
+#define MSM8994_TOOLS_ID 4097
+#define MSM8939_TOOLS_ID 4103
+#define APQ8026_TOOLS_ID 4104
+#define MSM8909_TOOLS_ID 4108
+#define MSM8992_TOOLS_ID 4111
+#define MSM8952_TOOLS_ID 4110
+#define MSM_8996_TOOLS_ID 4112
+
+#define MSG_MASK_0 (0x00000001)
+#define MSG_MASK_1 (0x00000002)
+#define MSG_MASK_2 (0x00000004)
+#define MSG_MASK_3 (0x00000008)
+#define MSG_MASK_4 (0x00000010)
+#define MSG_MASK_5 (0x00000020)
+#define MSG_MASK_6 (0x00000040)
+#define MSG_MASK_7 (0x00000080)
+#define MSG_MASK_8 (0x00000100)
+#define MSG_MASK_9 (0x00000200)
+#define MSG_MASK_10 (0x00000400)
+#define MSG_MASK_11 (0x00000800)
+#define MSG_MASK_12 (0x00001000)
+#define MSG_MASK_13 (0x00002000)
+#define MSG_MASK_14 (0x00004000)
+#define MSG_MASK_15 (0x00008000)
+#define MSG_MASK_16 (0x00010000)
+#define MSG_MASK_17 (0x00020000)
+#define MSG_MASK_18 (0x00040000)
+#define MSG_MASK_19 (0x00080000)
+#define MSG_MASK_20 (0x00100000)
+#define MSG_MASK_21 (0x00200000)
+#define MSG_MASK_22 (0x00400000)
+#define MSG_MASK_23 (0x00800000)
+#define MSG_MASK_24 (0x01000000)
+#define MSG_MASK_25 (0x02000000)
+#define MSG_MASK_26 (0x04000000)
+#define MSG_MASK_27 (0x08000000)
+#define MSG_MASK_28 (0x10000000)
+#define MSG_MASK_29 (0x20000000)
+#define MSG_MASK_30 (0x40000000)
+#define MSG_MASK_31 (0x80000000)
+
+/* These masks are to be used for support of all legacy messages in the sw.
+ * The user does not need to remember the names as they will be embedded in
+ * the appropriate macros.
+ */
+#define MSG_LEGACY_LOW MSG_MASK_0
+#define MSG_LEGACY_MED MSG_MASK_1
+#define MSG_LEGACY_HIGH MSG_MASK_2
+#define MSG_LEGACY_ERROR MSG_MASK_3
+#define MSG_LEGACY_FATAL MSG_MASK_4
+
+/* Legacy Message Priorities */
+#define MSG_LVL_FATAL (MSG_LEGACY_FATAL)
+#define MSG_LVL_ERROR (MSG_LEGACY_ERROR | MSG_LVL_FATAL)
+#define MSG_LVL_HIGH (MSG_LEGACY_HIGH | MSG_LVL_ERROR)
+#define MSG_LVL_MED (MSG_LEGACY_MED | MSG_LVL_HIGH)
+#define MSG_LVL_LOW (MSG_LEGACY_LOW | MSG_LVL_MED)
+
+#define MSG_LVL_NONE 0
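+
+/*
+ * The priority levels are cumulative: enabling a level also enables
+ * every more severe one, so MSG_LVL_MED expands to
+ * MSG_MASK_1 | MSG_MASK_2 | MSG_MASK_3 | MSG_MASK_4.
+ */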
+
+/* This needs to be modified manually now, when we add
+ * a new RANGE of SSIDs to the msg_mask_tbl.
+ */
+#define MSG_MASK_TBL_CNT 25
+#define APPS_EVENT_LAST_ID 0x0B14
+
+#define MSG_SSID_0 0
+#define MSG_SSID_0_LAST 118
+#define MSG_SSID_1 500
+#define MSG_SSID_1_LAST 506
+#define MSG_SSID_2 1000
+#define MSG_SSID_2_LAST 1007
+#define MSG_SSID_3 2000
+#define MSG_SSID_3_LAST 2008
+#define MSG_SSID_4 3000
+#define MSG_SSID_4_LAST 3014
+#define MSG_SSID_5 4000
+#define MSG_SSID_5_LAST 4010
+#define MSG_SSID_6 4500
+#define MSG_SSID_6_LAST 4573
+#define MSG_SSID_7 4600
+#define MSG_SSID_7_LAST 4615
+#define MSG_SSID_8 5000
+#define MSG_SSID_8_LAST 5032
+#define MSG_SSID_9 5500
+#define MSG_SSID_9_LAST 5516
+#define MSG_SSID_10 6000
+#define MSG_SSID_10_LAST 6081
+#define MSG_SSID_11 6500
+#define MSG_SSID_11_LAST 6521
+#define MSG_SSID_12 7000
+#define MSG_SSID_12_LAST 7003
+#define MSG_SSID_13 7100
+#define MSG_SSID_13_LAST 7111
+#define MSG_SSID_14 7200
+#define MSG_SSID_14_LAST 7201
+#define MSG_SSID_15 8000
+#define MSG_SSID_15_LAST 8000
+#define MSG_SSID_16 8500
+#define MSG_SSID_16_LAST 8529
+#define MSG_SSID_17 9000
+#define MSG_SSID_17_LAST 9008
+#define MSG_SSID_18 9500
+#define MSG_SSID_18_LAST 9510
+#define MSG_SSID_19 10200
+#define MSG_SSID_19_LAST 10210
+#define MSG_SSID_20 10251
+#define MSG_SSID_20_LAST 10255
+#define MSG_SSID_21 10300
+#define MSG_SSID_21_LAST 10300
+#define MSG_SSID_22 10350
+#define MSG_SSID_22_LAST 10377
+#define MSG_SSID_23 10400
+#define MSG_SSID_23_LAST 10415
+#define MSG_SSID_24 0xC000
+#define MSG_SSID_24_LAST 0xC063
+
+static const uint32_t msg_bld_masks_0[] = {
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_ERROR,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_HIGH,
+ MSG_LVL_ERROR,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_ERROR,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8,
+ MSG_LVL_LOW,
+ MSG_LVL_ERROR,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 |
+ MSG_MASK_11 | MSG_MASK_12 | MSG_MASK_13 | MSG_MASK_14 |
+ MSG_MASK_15 | MSG_MASK_16 | MSG_MASK_17 | MSG_MASK_18 |
+ MSG_MASK_19 | MSG_MASK_20 | MSG_MASK_21,
+ MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+ MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+ MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+ MSG_MASK_17,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_MED,
+ MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+ MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+ MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+ MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20 |
+ MSG_MASK_21 | MSG_MASK_22 | MSG_MASK_23 | MSG_MASK_24|
+ MSG_MASK_25,
+ MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+ MSG_MASK_9 | MSG_MASK_10,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8,
+ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+ MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+ MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+ MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_HIGH | MSG_MASK_21,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_MED,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR,
+ MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR,
+ MSG_LVL_MED | MSG_LVL_HIGH,
+ MSG_LVL_MED | MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_MED,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_1[] = {
+ MSG_LVL_MED,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_2[] = {
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED,
+ MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_3[] = {
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+ MSG_MASK_9 | MSG_MASK_10,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_4[] = {
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_5[] = {
+ MSG_LVL_HIGH,
+ MSG_LVL_MED,
+ MSG_LVL_HIGH,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 |
+ MSG_MASK_8 | MSG_MASK_9,
+ MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_6[] = {
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_7[] = {
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR | MSG_LVL_FATAL
+};
+
+static const uint32_t msg_bld_masks_8[] = {
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_9[] = {
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5,
+ MSG_LVL_MED | MSG_MASK_5
+};
+
+static const uint32_t msg_bld_masks_10[] = {
+ MSG_LVL_MED,
+ MSG_LVL_ERROR,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+ MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+ MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+ MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20 |
+ MSG_MASK_21 | MSG_MASK_22,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW|MSG_MASK_0 | MSG_MASK_1 | MSG_MASK_2 | MSG_MASK_3 |
+ MSG_MASK_4 | MSG_MASK_5 | MSG_MASK_6,
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_11[] = {
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+};
+
+static const uint32_t msg_bld_masks_12[] = {
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_13[] = {
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_14[] = {
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_15[] = {
+ MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_16[] = {
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL,
+ MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+ MSG_LVL_FATAL
+};
+
+static const uint32_t msg_bld_masks_17[] = {
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+ MSG_LVL_MED | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9,
+ MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+ MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+ MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+ MSG_MASK_17,
+ MSG_LVL_MED,
+ MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+ MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+ MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+ MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20 |
+ MSG_MASK_21 | MSG_MASK_22,
+ MSG_LVL_MED,
+ MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_18[] = {
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 |
+ MSG_MASK_12 | MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 |
+ MSG_MASK_16 | MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 |
+ MSG_MASK_20,
+ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+ MSG_LVL_LOW | MSG_MASK_5,
+ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+ MSG_MASK_9,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_19[] = {
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_20[] = {
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_21[] = {
+ MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_22[] = {
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_23[] = {
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW
+};
+
+/* LOG CODES */
+static const uint32_t log_code_last_tbl[] = {
+ 0x0, /* EQUIP ID 0 */
+ 0x1966, /* EQUIP ID 1 */
+ 0x0, /* EQUIP ID 2 */
+ 0x0, /* EQUIP ID 3 */
+ 0x4910, /* EQUIP ID 4 */
+ 0x5420, /* EQUIP ID 5 */
+ 0x0, /* EQUIP ID 6 */
+ 0x74FF, /* EQUIP ID 7 */
+ 0x0, /* EQUIP ID 8 */
+ 0x0, /* EQUIP ID 9 */
+ 0xA38A, /* EQUIP ID 10 */
+ 0xB201, /* EQUIP ID 11 */
+ 0x0, /* EQUIP ID 12 */
+ 0xD1FF, /* EQUIP ID 13 */
+ 0x0, /* EQUIP ID 14 */
+ 0x0, /* EQUIP ID 15 */
+};
+
+#define LOG_GET_ITEM_NUM(xx_code) ((xx_code) & 0x0FFF)
+#define LOG_GET_EQUIP_ID(xx_code) (((xx_code) & 0xF000) >> 12)
+#define LOG_ITEMS_TO_SIZE(num_items) (((num_items) + 7) / 8)
+#define LOG_SIZE_TO_ITEMS(size) ((8 * (size)) - 7)
+#define EVENT_COUNT_TO_BYTES(count) (((count) / 8) + 1)
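+
+/*
+ * Example: log code 0x1192 decodes to equip ID 1 and item number
+ * 0x192; LOG_ITEMS_TO_SIZE(8) == 1, i.e. eight log items pack into a
+ * single mask byte.
+ */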
+
+#endif