Merge "msm: mdss: synchronize recovery handler access"
diff --git a/Documentation/devicetree/bindings/power/qpnp-charger.txt b/Documentation/devicetree/bindings/power/qpnp-charger.txt
index 87ecc64..ff89757 100644
--- a/Documentation/devicetree/bindings/power/qpnp-charger.txt
+++ b/Documentation/devicetree/bindings/power/qpnp-charger.txt
@@ -110,6 +110,8 @@
 					DC and USB OVP FETs. Please note that this should only
 					be enabled in board designs with PM8941 which have DC_IN
 					and USB_IN connected via a short.
+ - qcom,ext-ovp-isns-enable-gpio	External OVP enable GPIO.
+ - qcom,ext-ovp-isns-r-ohm			External ISNS OVP resistance in ohms.
 
 Sub node required structure:
 - A qcom,chg node must be a child of an SPMI node that has specified
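For reference, the two new optional properties would typically be read by the charger driver at probe time with the standard OF helpers. The sketch below is illustrative only; the function name, error handling and defaults are assumptions, not taken from the actual qpnp-charger driver:

    #include <linux/of.h>
    #include <linux/of_gpio.h>
    #include <linux/gpio.h>

    /* Illustrative only: parse the two new optional properties. */
    static int example_parse_ext_ovp_isns(struct device_node *node,
                                          int *enable_gpio, u32 *r_ohm)
    {
            *enable_gpio = of_get_named_gpio(node,
                                             "qcom,ext-ovp-isns-enable-gpio", 0);
            if (!gpio_is_valid(*enable_gpio))
                    *enable_gpio = 0;       /* property absent or invalid */

            if (of_property_read_u32(node, "qcom,ext-ovp-isns-r-ohm", r_ohm))
                    *r_ohm = 0;             /* fall back to a driver default */

            return 0;
    }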
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index b4574aa..3f40def 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -48,6 +48,7 @@
 #include <asm/system.h>
 
 #include <mach/socinfo.h>
+#include <mach/msm_rtb.h>
 
 union gic_base {
 	void __iomem *common_base;
@@ -310,7 +311,7 @@
 
 	if (gic->need_access_lock)
 		raw_spin_lock(&irq_controller_lock);
-	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+	writel_relaxed_no_log(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 	if (gic->need_access_lock)
 		raw_spin_unlock(&irq_controller_lock);
 }
@@ -388,8 +389,8 @@
 	bit = 1 << (cpu_logical_map(cpu) + shift);
 
 	raw_spin_lock(&irq_controller_lock);
-	val = readl_relaxed(reg) & ~mask;
-	writel_relaxed(val | bit, reg);
+	val = readl_relaxed_no_log(reg) & ~mask;
+	writel_relaxed_no_log(val | bit, reg);
 	raw_spin_unlock(&irq_controller_lock);
 
 	return IRQ_SET_MASK_OK;
@@ -434,7 +435,7 @@
 	do {
 		if (gic->need_access_lock)
 			raw_spin_lock(&irq_controller_lock);
-		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+		irqstat = readl_relaxed_no_log(cpu_base + GIC_CPU_INTACK);
 		if (gic->need_access_lock)
 			raw_spin_unlock(&irq_controller_lock);
 		irqnr = irqstat & ~0x1c00;
@@ -442,16 +443,18 @@
 		if (likely(irqnr > 15 && irqnr < 1021)) {
 			irqnr = irq_find_mapping(gic->domain, irqnr);
 			handle_IRQ(irqnr, regs);
+			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
 			continue;
 		}
 		if (irqnr < 16) {
 			if (gic->need_access_lock)
 				raw_spin_lock(&irq_controller_lock);
-			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+			writel_relaxed_no_log(irqstat, cpu_base + GIC_CPU_EOI);
 			if (gic->need_access_lock)
 				raw_spin_unlock(&irq_controller_lock);
 #ifdef CONFIG_SMP
 			handle_IPI(irqnr, regs);
+			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
 #endif
 			continue;
 		}
@@ -713,19 +716,22 @@
 	if (!dist_base || !cpu_base)
 		return;
 
-	saved_cpu_ctrl = readl_relaxed(cpu_base + GIC_CPU_CTRL);
+	saved_cpu_ctrl = readl_relaxed_no_log(cpu_base + GIC_CPU_CTRL);
 
 	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
-		gic_data[gic_nr].saved_dist_pri[i] = readl_relaxed(dist_base +
+		gic_data[gic_nr].saved_dist_pri[i] = readl_relaxed_no_log(
+							dist_base +
 							GIC_DIST_PRI + i * 4);
 
 	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
-		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+		ptr[i] = readl_relaxed_no_log(dist_base +
+				GIC_DIST_ENABLE_SET + i * 4);
 
 	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
-		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+		ptr[i] = readl_relaxed_no_log(dist_base +
+				GIC_DIST_CONFIG + i * 4);
 
 }
 
@@ -747,18 +753,20 @@
 
 	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
 	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
-		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+		writel_relaxed_no_log(ptr[i], dist_base +
+			GIC_DIST_ENABLE_SET + i * 4);
 
 	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
 	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
-		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+		writel_relaxed_no_log(ptr[i], dist_base +
+			GIC_DIST_CONFIG + i * 4);
 
 	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
-		writel_relaxed(gic_data[gic_nr].saved_dist_pri[i],
+		writel_relaxed_no_log(gic_data[gic_nr].saved_dist_pri[i],
 			dist_base + GIC_DIST_PRI + i * 4);
 
-	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
-	writel_relaxed(saved_cpu_ctrl, cpu_base + GIC_CPU_CTRL);
+	writel_relaxed_no_log(0xf0, cpu_base + GIC_CPU_PRIMASK);
+	writel_relaxed_no_log(saved_cpu_ctrl, cpu_base + GIC_CPU_CTRL);
 }
 
 static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
@@ -993,7 +1001,7 @@
 	if (gic->need_access_lock)
 		raw_spin_lock_irqsave(&irq_controller_lock, flags);
 	/* this always happens on GIC0 */
-	writel_relaxed(sgir, gic_data_dist_base(gic) + GIC_DIST_SOFTINT);
+	writel_relaxed_no_log(sgir, gic_data_dist_base(gic) + GIC_DIST_SOFTINT);
 	if (gic->need_access_lock)
 		raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 	mb();
diff --git a/arch/arm/mach-msm/include/mach/msm_rtb.h b/arch/arm/mach-msm/include/mach/msm_rtb.h
index b33e8b6..c419a3d 100644
--- a/arch/arm/mach-msm/include/mach/msm_rtb.h
+++ b/arch/arm/mach-msm/include/mach/msm_rtb.h
@@ -27,6 +27,7 @@
 	LOGK_TIMESTAMP = 6,
 	LOGK_L2CPREAD = 7,
 	LOGK_L2CPWRITE = 8,
+	LOGK_IRQ = 9,
 };
 
 #define LOGTYPE_NOPC 0x80
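The gic.c hunks above switch the hot-path GIC register accesses to the MSM-specific *_no_log MMIO accessors, apparently so that the interrupt path itself does not flood the register trace buffer (RTB), and instead record one LOGK_IRQ entry per serviced interrupt via uncached_logk(). A hedged sketch of how a driver could use the new event type, assuming the no_log accessors and uncached_logk() are available on this MSM tree (they are not part of mainline):

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <mach/msm_rtb.h>

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            void __iomem *status_reg = dev_id;      /* illustrative mapping */
            u32 status;

            /* Read the status register without generating an RTB entry */
            status = readl_relaxed_no_log(status_reg);

            /* Record the serviced interrupt in the RTB instead */
            uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irq);

            return status ? IRQ_HANDLED : IRQ_NONE;
    }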
diff --git a/block/row-iosched.c b/block/row-iosched.c
index dfb46b4..9b39741 100644
--- a/block/row-iosched.c
+++ b/block/row-iosched.c
@@ -1,7 +1,7 @@
 /*
  * ROW (Read Over Write) I/O scheduler.
  *
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -331,10 +331,6 @@
 	struct row_queue *rqueue = RQ_ROWQ(rq);
 	s64 diff_ms;
 	bool queue_was_empty = list_empty(&rqueue->fifo);
-	unsigned long bv_page_flags = 0;
-
-	if (rq->bio && rq->bio->bi_io_vec && rq->bio->bi_io_vec->bv_page)
-		bv_page_flags = rq->bio->bi_io_vec->bv_page->flags;
 
 	list_add_tail(&rq->queuelist, &rqueue->fifo);
 	rd->nr_reqs[rq_data_dir(rq)]++;
@@ -367,9 +363,7 @@
 			rqueue->idle_data.begin_idling = false;
 			return;
 		}
-
-		if ((bv_page_flags & (1L << PG_readahead)) ||
-		    (diff_ms < rd->rd_idle_data.freq_ms)) {
+		if (diff_ms < rd->rd_idle_data.freq_ms) {
 			rqueue->idle_data.begin_idling = true;
 			row_log_rowq(rd, rqueue->prio, "Enable idling");
 		} else {
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 0edfdad..653e299 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -22,7 +22,10 @@
 #include <linux/platform_device.h>
 #include <linux/pm_wakeup.h>
 #include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/reboot.h>
 #include <asm/current.h>
+#include <mach/restart.h>
 #ifdef CONFIG_DIAG_OVER_USB
 #include <mach/usbdiag.h>
 #endif
@@ -33,6 +36,10 @@
 #include "diagfwd_cntl.h"
 #include "diag_dci.h"
 
+static struct timer_list dci_drain_timer;
+static int dci_timer_in_progress;
+static struct work_struct dci_data_drain_work;
+
 unsigned int dci_max_reg = 100;
 unsigned int dci_max_clients = 10;
 unsigned char dci_cumulative_log_mask[DCI_LOG_MASK_SIZE];
@@ -47,14 +54,15 @@
 /* Number of milliseconds anticipated to process the DCI data */
 #define DCI_WAKEUP_TIMEOUT 1
 
-#define DCI_CHK_CAPACITY(entry, new_data_len)				\
-((entry->data_len + new_data_len > entry->total_capacity) ? 1 : 0)	\
+#define DCI_CAN_ADD_BUF_TO_LIST(buf)					\
+	(buf && buf->data && !buf->in_busy && buf->data_len > 0)	\
 
 #ifdef CONFIG_DEBUG_FS
 struct diag_dci_data_info *dci_data_smd;
 struct mutex dci_stat_mutex;
 
-void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type)
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type,
+			      uint8_t peripheral)
 {
 	static int curr_dci_data_smd;
 	static unsigned long iteration;
@@ -67,6 +75,7 @@
 	temp_data += curr_dci_data_smd;
 	temp_data->iteration = iteration + 1;
 	temp_data->data_size = read_bytes;
+	temp_data->peripheral = peripheral;
 	temp_data->ch_type = ch_type;
 	diag_get_timestamp(temp_data->time_stamp);
 	curr_dci_data_smd++;
@@ -74,50 +83,393 @@
 	mutex_unlock(&dci_stat_mutex);
 }
 #else
-void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type) { }
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type,
+			      uint8_t peripheral) { }
 #endif
 
+static void dci_drain_data(unsigned long data)
+{
+	queue_work(driver->diag_dci_wq, &dci_data_drain_work);
+}
+
+static void dci_check_drain_timer(void)
+{
+	if (!dci_timer_in_progress) {
+		dci_timer_in_progress = 1;
+		mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(500));
+	}
+}
+
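dci_check_drain_timer() and dci_drain_data() above implement a simple debounce: the first caller arms a one-shot 500 ms timer, the timer callback defers the actual draining to a workqueue, and the work handler clears dci_timer_in_progress once it is done. A self-contained sketch of the same pattern using the 3.4-era timer API (the names here are placeholders, not the diag ones):

    #include <linux/init.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct timer_list drain_timer;
    static struct work_struct drain_work;
    static int drain_pending;

    static void drain_work_fn(struct work_struct *work)
    {
            /* ... push buffered data out to clients ... */
            drain_pending = 0;              /* allow the timer to be re-armed */
    }

    static void drain_timer_fn(unsigned long data)
    {
            schedule_work(&drain_work);     /* timer context: defer the real work */
    }

    static void kick_drain_timer(void)
    {
            if (!drain_pending) {
                    drain_pending = 1;
                    mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
            }
    }

    static int __init drain_example_init(void)
    {
            INIT_WORK(&drain_work, drain_work_fn);
            setup_timer(&drain_timer, drain_timer_fn, 0);
            return 0;
    }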
+static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
+{
+	if (!buffer || buffer->data)
+		return -EINVAL;
+
+	switch (type) {
+	case DCI_BUF_PRIMARY:
+		buffer->data = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+		if (!buffer->data)
+			return -ENOMEM;
+		buffer->capacity = IN_BUF_SIZE;
+		break;
+	case DCI_BUF_SECONDARY:
+		buffer->data = NULL;
+		buffer->capacity = IN_BUF_SIZE;
+		break;
+	case DCI_BUF_CMD:
+		buffer->data = kzalloc(PKT_SIZE, GFP_KERNEL);
+		if (!buffer->data)
+			return -ENOMEM;
+		buffer->capacity = PKT_SIZE;
+		break;
+	default:
+		pr_err("diag: In %s, unknown type %d", __func__, type);
+		return -EINVAL;
+	}
+
+	buffer->data_len = 0;
+	buffer->in_busy = 0;
+	buffer->buf_type = type;
+	mutex_init(&buffer->data_mutex);
+
+	return 0;
+}
+
+static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
+{
+	if (!buf)
+		return -EINVAL;
+
+	/* Return 1 if the buffer is not busy and can hold new data */
+	if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
+		return 1;
+
+	return 0;
+}
+
+static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
+				   struct diag_dci_buffer_t *buf)
+{
+	if (!buf || !client || !buf->data)
+		return;
+
+	if (buf->in_list || buf->data_len == 0)
+		return;
+
+	mutex_lock(&client->write_buf_mutex);
+	list_add_tail(&buf->buf_track, &client->list_write_buf);
+	mutex_lock(&buf->data_mutex);
+	buf->in_busy = 1;
+	buf->in_list = 1;
+	mutex_unlock(&buf->data_mutex);
+	mutex_unlock(&client->write_buf_mutex);
+}
+
+static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
+			       int data_source, int len)
+{
+	struct diag_dci_buffer_t *buf_primary = NULL;
+	struct diag_dci_buffer_t *buf_temp = NULL;
+	struct diag_dci_buffer_t *curr = NULL;
+
+	if (!client)
+		return -EINVAL;
+	if (len < 0 || len > IN_BUF_SIZE)
+		return -EINVAL;
+
+	curr = client->buffers[data_source].buf_curr;
+	buf_primary = client->buffers[data_source].buf_primary;
+
+	if (curr && diag_dci_check_buffer(curr, len) == 1)
+		return 0;
+
+	dci_add_buffer_to_list(client, curr);
+	client->buffers[data_source].buf_curr = NULL;
+
+	if (diag_dci_check_buffer(buf_primary, len) == 1) {
+		client->buffers[data_source].buf_curr = buf_primary;
+		return 0;
+	}
+
+	buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
+	if (!buf_temp)
+		return -EIO;
+
+	if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
+		buf_temp->data = diagmem_alloc(driver, driver->itemsize_dci,
+					       POOL_TYPE_DCI);
+		if (!buf_temp->data) {
+			kfree(buf_temp);
+			buf_temp = NULL;
+			return -ENOMEM;
+		}
+		client->buffers[data_source].buf_curr = buf_temp;
+		return 0;
+	}
+
+	kfree(buf_temp);
+	buf_temp = NULL;
+	return -EIO;
+}
+
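diag_dci_get_buffer() above selects a destination buffer in a fixed order: keep buf_curr while it has room, otherwise queue it for the client and fall back to the preallocated primary buffer, otherwise allocate a secondary buffer backed by the DCI memory pool. A user-space sketch of that fallback chain, with malloc standing in for the pool allocation:

    #include <stddef.h>
    #include <stdlib.h>

    struct buf { size_t len, cap; int busy; };

    /*
     * Fallback chain mirrored from diag_dci_get_buffer(): keep the current
     * buffer while the new data fits, else try the preallocated primary
     * buffer, else allocate a fresh secondary buffer (the kernel code draws
     * this one from the DCI memory pool; calloc stands in here).
     */
    static struct buf *pick_buffer(struct buf *curr, struct buf *primary,
                                   size_t need, size_t default_cap)
    {
            struct buf *fresh;

            if (curr && !curr->busy && curr->len + need < curr->cap)
                    return curr;
            if (primary && !primary->busy && primary->len + need < primary->cap)
                    return primary;

            fresh = calloc(1, sizeof(*fresh));
            if (fresh)
                    fresh->cap = default_cap;
            return fresh;
    }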
+void diag_dci_wakeup_clients()
+{
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+
+		/*
+		 * Don't wake up the client when there is no pending buffer to
+		 * write or when it is writing to user space
+		 */
+		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+			mutex_lock(&entry->write_buf_mutex);
+			entry->in_service = 1;
+			mutex_unlock(&entry->write_buf_mutex);
+			diag_update_sleeping_process(entry->client->tgid,
+						     DCI_DATA_TYPE);
+		}
+	}
+}
+
+void dci_data_drain_work_fn(struct work_struct *work)
+{
+	int i;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	struct diag_dci_buffer_t *buf_temp = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		for (i = 0; i < NUM_DCI_PROC; i++) {
+			proc_buf = &entry->buffers[i];
+
+			buf_temp = proc_buf->buf_primary;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+				dci_add_buffer_to_list(entry, buf_temp);
+
+			buf_temp = proc_buf->buf_cmd;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+				dci_add_buffer_to_list(entry, buf_temp);
+
+			buf_temp = proc_buf->buf_curr;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
+				dci_add_buffer_to_list(entry, buf_temp);
+				mutex_lock(&proc_buf->buf_mutex);
+				proc_buf->buf_curr = NULL;
+				mutex_unlock(&proc_buf->buf_mutex);
+			}
+		}
+		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+			mutex_lock(&entry->write_buf_mutex);
+			entry->in_service = 1;
+			mutex_unlock(&entry->write_buf_mutex);
+			diag_update_sleeping_process(entry->client->tgid,
+						     DCI_DATA_TYPE);
+		}
+	}
+	dci_timer_in_progress = 0;
+}
+
+/* Process the data read from apps userspace client */
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
+{
+	uint8_t cmd_code;
+
+	if (!buf) {
+		pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
+		return;
+	}
+
+	if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
+						&& data_type != DCI_PKT_TYPE) {
+		pr_err("diag: In %s, unsupported data_type: 0x%x\n",
+				__func__, (unsigned int)data_type);
+		return;
+	}
+
+	cmd_code = *(uint8_t *)buf;
+
+	switch (cmd_code) {
+	case LOG_CMD_CODE:
+		extract_dci_log(buf, recd_bytes, APPS_DATA);
+		break;
+	case EVENT_CMD_CODE:
+		extract_dci_events(buf, recd_bytes, APPS_DATA);
+		break;
+	case DCI_PKT_RSP_CODE:
+	case DCI_DELAYED_RSP_CODE:
+		extract_dci_pkt_rsp(buf, recd_bytes, APPS_DATA, NULL);
+		break;
+	default:
+		pr_err("diag: In %s, unsupported command code: 0x%x, not log or event\n",
+		       __func__, cmd_code);
+		return;
+
+	}
+
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+}
+
 /* Process the data read from the smd dci channel */
 int diag_process_smd_dci_read_data(struct diag_smd_info *smd_info, void *buf,
 								int recd_bytes)
 {
-	int read_bytes, dci_pkt_len, i;
+	int read_bytes, dci_pkt_len;
 	uint8_t recv_pkt_cmd_code;
 
-	diag_dci_smd_record_info(recd_bytes, (uint8_t)smd_info->type);
+	/*
+	 * Release wakeup source when there are no more clients to
+	 * process DCI data
+	 */
+	if (driver->num_dci_client == 0) {
+		diag_dci_try_deactivate_wakeup_source();
+		return 0;
+	}
+
+	diag_dci_smd_record_info(recd_bytes, (uint8_t)smd_info->type,
+				 (uint8_t)smd_info->peripheral);
 	/* Each SMD read can have multiple DCI packets */
 	read_bytes = 0;
 	while (read_bytes < recd_bytes) {
 		/* read actual length of dci pkt */
 		dci_pkt_len = *(uint16_t *)(buf+2);
+
+		/* Check that the length of the current packet fits within the
+		 * remaining bytes in the received buffer. This includes space
+		 * for the start byte (1), version byte (1), length bytes (2)
+		 * and the end byte (1).
+		 */
+		if ((dci_pkt_len+5) > (recd_bytes-read_bytes)) {
+			pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
+					__func__, recd_bytes, dci_pkt_len);
+			diag_dci_try_deactivate_wakeup_source();
+			return 0;
+		}
 		/* process one dci packet */
-		pr_debug("diag: bytes read = %d, single dci pkt len = %d\n",
-			read_bytes, dci_pkt_len);
+		pr_debug("diag: dci: peripheral = %d bytes read = %d, single dci pkt len = %d\n",
+			 smd_info->peripheral, read_bytes, dci_pkt_len);
 		/* print_hex_dump(KERN_DEBUG, "Single DCI packet :",
 		 DUMP_PREFIX_ADDRESS, 16, 1, buf, 5 + dci_pkt_len, 1); */
 		recv_pkt_cmd_code = *(uint8_t *)(buf+4);
-		if (recv_pkt_cmd_code == LOG_CMD_CODE)
-			extract_dci_log(buf+4);
-		else if (recv_pkt_cmd_code == EVENT_CMD_CODE)
-			extract_dci_events(buf+4);
-		else
-			extract_dci_pkt_rsp(smd_info, buf); /* pkt response */
+		if (recv_pkt_cmd_code == LOG_CMD_CODE) {
+			/* Don't include the 4 bytes for command code */
+			extract_dci_log(buf + 4, recd_bytes - 4,
+					smd_info->peripheral);
+		} else if (recv_pkt_cmd_code == EVENT_CMD_CODE) {
+			/* Don't include the 4 bytes for command code */
+			extract_dci_events(buf + 4, recd_bytes - 4,
+					   smd_info->peripheral);
+		} else
+			extract_dci_pkt_rsp(buf + 4, dci_pkt_len,
+					    smd_info->peripheral, smd_info);
 		read_bytes += 5 + dci_pkt_len;
 		buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
 	}
-	/* Release wakeup source when there are no more clients to
-	   process DCI data */
-	if (driver->num_dci_client == 0)
-		diag_dci_try_deactivate_wakeup_source(smd_info->ch);
 
 	/* wake up all sleeping DCI clients which have some data */
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		if (driver->dci_client_tbl[i].client &&
-			driver->dci_client_tbl[i].data_len) {
-			smd_info->in_busy_1 = 1;
-			diag_update_sleeping_process(
-				driver->dci_client_tbl[i].client->tgid,
-					 DCI_DATA_TYPE);
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+	diag_dci_try_deactivate_wakeup_source();
+	return 0;
+}
+
+static inline struct diag_dci_client_tbl *__diag_dci_get_client_entry(
+								int client_id)
+{
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client->tgid == client_id)
+			return entry;
+	}
+	return NULL;
+}
+
+static inline int __diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+							uint16_t log_code)
+{
+	uint16_t item_num;
+	uint8_t equip_id, *log_mask_ptr, byte_mask;
+	int byte_index, offset;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
+	}
+
+	equip_id = LOG_GET_EQUIP_ID(log_code);
+	item_num = LOG_GET_ITEM_NUM(log_code);
+	byte_index = item_num/8 + 2;
+	byte_mask = 0x01 << (item_num % 8);
+	offset = equip_id * 514;
+
+	if (offset + byte_index > DCI_LOG_MASK_SIZE) {
+		pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
+				__func__, offset, log_code, byte_index);
+		return 0;
+	}
+
+	log_mask_ptr = entry->dci_log_mask;
+	log_mask_ptr = log_mask_ptr + offset + byte_index;
+	return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+
+}
+
+static inline int __diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+							uint16_t event_id)
+{
+	uint8_t *event_mask_ptr, byte_mask;
+	int byte_index, bit_index;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
+	}
+
+	byte_index = event_id/8;
+	bit_index = event_id % 8;
+	byte_mask = 0x1 << bit_index;
+
+	if (byte_index > DCI_EVENT_MASK_SIZE) {
+		pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
+				__func__, event_id, byte_index);
+		return 0;
+	}
+
+	event_mask_ptr = entry->dci_event_mask;
+	event_mask_ptr = event_mask_ptr + byte_index;
+	return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+}
+
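__diag_dci_query_log_mask() treats dci_log_mask as sixteen 514-byte blocks, one per equipment ID, each holding 2 header bytes followed by a 512-byte bitmap, so a 16-bit log code resolves to byte equip_id * 514 + item / 8 + 2 and bit item % 8. The sketch below reproduces the arithmetic with the field extraction written out explicitly; it assumes LOG_GET_EQUIP_ID() yields the upper four bits and LOG_GET_ITEM_NUM() the lower twelve, which is the conventional diag split:

    #include <stdint.h>
    #include <stdio.h>

    /* Byte offset and bit mask of a log code inside a DCI log mask. */
    static void dci_log_mask_pos(uint16_t log_code, int *byte, uint8_t *mask)
    {
            uint8_t equip_id = (log_code >> 12) & 0xF;      /* upper 4 bits */
            uint16_t item = log_code & 0x0FFF;              /* lower 12 bits */

            *byte = equip_id * 514 + item / 8 + 2;  /* 2 header bytes per block */
            *mask = 1 << (item % 8);
    }

    int main(void)
    {
            int byte;
            uint8_t mask;

            dci_log_mask_pos(0x11EB, &byte, &mask); /* equip 1, item 0x1EB */
            printf("byte %d, mask 0x%02x\n", byte, mask);   /* byte 577, mask 0x08 */
            return 0;
    }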
+static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
+{
+	if (!header)
+		return -ENOMEM;
+
+	switch (header->cmd_code) {
+	case 0x7d: /* Msg Mask Configuration */
+	case 0x73: /* Log Mask Configuration */
+	case 0x81: /* Event Mask Configuration */
+	case 0x82: /* Event Mask Change */
+	case 0x60: /* Event Mask Toggle */
+		return 1;
+	}
+
+	if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
+		switch (header->subsys_cmd_code) {
+		case 0x60: /* Extended Event Mask Config */
+		case 0x61: /* Extended Msg Mask Config */
+		case 0x62: /* Extended Log Mask Config */
+		case 0x20C: /* Set current Preset ID */
+		case 0x20D: /* Get current Preset ID */
+			return 1;
 		}
 	}
 
@@ -204,96 +556,181 @@
 	return 0;
 }
 
-void extract_dci_pkt_rsp(struct diag_smd_info *smd_info, unsigned char *buf)
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+			 struct diag_smd_info *smd_info)
 {
-	int i = 0, cmd_code_len = 1;
-	int curr_client_pid = 0, write_len, *tag = NULL;
-	struct diag_dci_client_tbl *entry;
+	int tag, curr_client_pid = 0;
+	struct diag_dci_client_tbl *entry = NULL;
 	void *temp_buf = NULL;
-	uint8_t recv_pkt_cmd_code, delete_flag = 0;
+	uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
+	uint32_t rsp_len = 0;
+	struct diag_dci_buffer_t *rsp_buf = NULL;
 	struct dci_pkt_req_entry_t *req_entry = NULL;
-	recv_pkt_cmd_code = *(uint8_t *)(buf+4);
-	if (recv_pkt_cmd_code != DCI_PKT_RSP_CODE)
-		cmd_code_len = 4; /* delayed response */
-	write_len = (int)(*(uint16_t *)(buf+2)) - cmd_code_len;
-	if (write_len <= 0) {
-		pr_err("diag: Invalid length in %s, write_len: %d",
-					__func__, write_len);
+	unsigned char *temp = buf;
+
+	if (!buf) {
+		pr_err("diag: Invalid pointer in %s\n", __func__);
 		return;
 	}
-	pr_debug("diag: len = %d\n", write_len);
-	tag = (int *)(buf + (4 + cmd_code_len)); /* Retrieve the Tag field */
-	req_entry = diag_dci_get_request_entry(*tag);
+	dci_cmd_code = *(uint8_t *)(temp);
+	if (dci_cmd_code == DCI_PKT_RSP_CODE) {
+		cmd_code_len = sizeof(uint8_t);
+	} else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
+		cmd_code_len = sizeof(uint32_t);
+	} else {
+		pr_err("diag: In %s, invalid command code %d\n", __func__,
+								dci_cmd_code);
+		return;
+	}
+	temp += cmd_code_len;
+	tag = *(int *)temp;
+	temp += sizeof(int);
+
+	/*
+	 * The size of the response is the total length minus the length of
+	 * the command code and the tag (int).
+	 */
+	rsp_len = len - (cmd_code_len + sizeof(int));
+	/*
+	 * Check if the length embedded in the packet is correct.
+	 * Include the start (1), version (1), length (2) and the end
+	 * (1) bytes while checking. Total = 5 bytes
+	 */
+	if ((rsp_len == 0) || (rsp_len > (len - 5))) {
+		pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d",
+						__func__, len, rsp_len);
+		return;
+	}
+
+	req_entry = diag_dci_get_request_entry(tag);
 	if (!req_entry) {
-		pr_alert("diag: No matching PID for DCI data\n");
+		pr_err("diag: No matching PID for DCI data\n");
 		return;
 	}
-	*tag = req_entry->uid;
 	curr_client_pid = req_entry->pid;
 
 	/* Remove the headers and send only the response to this function */
-	delete_flag = diag_dci_remove_req_entry(buf + 8 + cmd_code_len,
-						write_len - 4,
-						req_entry);
+	delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
 	if (delete_flag < 0)
 		return;
 
-	/* Using PID of client process, find client buffer */
-	i = diag_dci_find_client_index(curr_client_pid);
-	if (i != DCI_CLIENT_INDEX_INVALID) {
-		/* copy pkt rsp in client buf */
-		entry = &(driver->dci_client_tbl[i]);
-		mutex_lock(&entry->data_mutex);
-		/*
-		 * Check if we can fit the data in the rsp buffer. The total
-		 * length of the rsp is the rsp length (write_len) +
-		 * DCI_PKT_RSP_TYPE header (int) + field for length (int) +
-		 * delete_flag (uint8_t)
-		 */
-		if (DCI_CHK_CAPACITY(entry, 9+write_len)) {
-			pr_alert("diag: create capacity for pkt rsp\n");
-			entry->total_capacity += 9+write_len;
-			temp_buf = krealloc(entry->dci_data,
-			entry->total_capacity, GFP_KERNEL);
-			if (!temp_buf) {
-				pr_err("diag: DCI realloc failed\n");
-				mutex_unlock(&entry->data_mutex);
-				return;
-			} else {
-				entry->dci_data = temp_buf;
-			}
-		}
-		*(int *)(entry->dci_data+entry->data_len) =
-					DCI_PKT_RSP_TYPE;
-		entry->data_len += 4;
-		*(int *)(entry->dci_data+entry->data_len)
-						= write_len;
-		entry->data_len += 4;
-		*(uint8_t *)(entry->dci_data + entry->data_len) = delete_flag;
-		entry->data_len += sizeof(uint8_t);
-		memcpy(entry->dci_data+entry->data_len,
-			buf+4+cmd_code_len, write_len);
-		entry->data_len += write_len;
-		mutex_unlock(&entry->data_mutex);
+	entry = __diag_dci_get_client_entry(curr_client_pid);
+	if (!entry) {
+		pr_err("diag: In %s, couldn't find entry\n", __func__);
+		return;
 	}
+
+	rsp_buf = entry->buffers[data_source].buf_cmd;
+
+	mutex_lock(&rsp_buf->data_mutex);
+	/*
+	 * Check if we can fit the data in the rsp buffer. The total length of
+	 * the rsp is the rsp length (write_len) + DCI_PKT_RSP_TYPE header (int)
+	 * + field for length (int) + delete_flag (uint8_t)
+	 */
+	if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
+		pr_alert("diag: create capacity for pkt rsp\n");
+		rsp_buf->capacity += 9 + rsp_len;
+		temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
+				    GFP_KERNEL);
+		if (!temp_buf) {
+			pr_err("diag: DCI realloc failed\n");
+			mutex_unlock(&rsp_buf->data_mutex);
+			return;
+		} else {
+			rsp_buf->data = temp_buf;
+		}
+	}
+
+	/* Fill in packet response header information */
+	*(int *)(rsp_buf->data + rsp_buf->data_len) = DCI_PKT_RSP_TYPE;
+	rsp_buf->data_len += sizeof(int);
+	/* Packet Length = Response Length + Length of uid field (int) */
+	*(int *)(rsp_buf->data + rsp_buf->data_len) = rsp_len + sizeof(int);
+	rsp_buf->data_len += sizeof(int);
+	*(uint8_t *)(rsp_buf->data + rsp_buf->data_len) = delete_flag;
+	rsp_buf->data_len += sizeof(uint8_t);
+	*(int *)(rsp_buf->data + rsp_buf->data_len) = req_entry->uid;
+	rsp_buf->data_len += sizeof(int);
+	memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
+	rsp_buf->data_len += rsp_len;
+	rsp_buf->data_source = data_source;
+	if (smd_info)
+		smd_info->in_busy_1 = 1;
+	mutex_unlock(&rsp_buf->data_mutex);
+
+
+	/*
+	 * Add directly to the list for writing responses to the
+	 * userspace as these shouldn't be buffered and shouldn't wait
+	 * for log and event buffers to be full
+	 */
+	dci_add_buffer_to_list(entry, rsp_buf);
 }
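Each response that extract_dci_pkt_rsp() copies into buf_cmd is prefixed with a small header: DCI_PKT_RSP_TYPE (int), a length field equal to the response length plus sizeof(int) to cover the uid that follows, the delete flag (uint8_t) and the original request uid (int), followed by the raw response bytes. The reader below is a sketch inferred from that copy sequence, not an authoritative wire format:

    #include <stdint.h>
    #include <string.h>

    struct dci_rsp_record {
            int type;               /* DCI_PKT_RSP_TYPE */
            int len;                /* response length + sizeof(int) for the uid */
            uint8_t delete_flag;    /* 1 if the request entry was removed */
            int uid;                /* uid supplied with the original request */
            /* 'len - sizeof(int)' response bytes follow */
    };

    /* Parse one record from a client read buffer; returns bytes consumed. */
    static int parse_dci_rsp(const unsigned char *buf,
                             struct dci_rsp_record *out,
                             const unsigned char **payload)
    {
            int used = 0;

            memcpy(&out->type, buf + used, sizeof(int));    used += sizeof(int);
            memcpy(&out->len, buf + used, sizeof(int));     used += sizeof(int);
            out->delete_flag = buf[used];                   used += 1;
            memcpy(&out->uid, buf + used, sizeof(int));     used += sizeof(int);

            *payload = buf + used;
            return used + out->len - (int)sizeof(int);
    }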
 
-void extract_dci_events(unsigned char *buf)
+static void copy_dci_event(unsigned char *buf, int len,
+			   struct diag_dci_client_tbl *client, int data_source)
+{
+	struct diag_dci_buffer_t *data_buffer = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	int err = 0, total_len = 0;
+
+	if (!buf || !client) {
+		pr_err("diag: Invalid pointers in %s", __func__);
+		return;
+	}
+
+	total_len = sizeof(int) + len;
+
+	proc_buf = &client->buffers[data_source];
+	mutex_lock(&proc_buf->buf_mutex);
+	mutex_lock(&proc_buf->health_mutex);
+	err = diag_dci_get_buffer(client, data_source, total_len);
+	if (err) {
+		if (err == -ENOMEM)
+			proc_buf->health.dropped_events++;
+		else
+			pr_err("diag: In %s, invalid packet\n", __func__);
+		mutex_unlock(&proc_buf->health_mutex);
+		mutex_unlock(&proc_buf->buf_mutex);
+		return;
+	}
+
+	data_buffer = proc_buf->buf_curr;
+
+	proc_buf->health.received_events++;
+	mutex_unlock(&proc_buf->health_mutex);
+	mutex_unlock(&proc_buf->buf_mutex);
+
+	mutex_lock(&data_buffer->data_mutex);
+	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
+	data_buffer->data_len += sizeof(int);
+	memcpy(data_buffer->data + data_buffer->data_len, buf, len);
+	data_buffer->data_len += len;
+	data_buffer->data_source = data_source;
+	mutex_unlock(&data_buffer->data_mutex);
+
+}
+
+void extract_dci_events(unsigned char *buf, int len, int data_source)
 {
 	uint16_t event_id, event_id_packet, length, temp_len;
-	uint8_t *event_mask_ptr, byte_mask, payload_len, payload_len_field;
-	uint8_t timestamp[8] = {0}, bit_index, timestamp_len;
-	uint8_t event_data[MAX_EVENT_SIZE];
-	unsigned int byte_index, total_event_len, i;
-	struct diag_dci_client_tbl *entry;
+	uint8_t payload_len, payload_len_field;
+	uint8_t timestamp[8], timestamp_len;
+	unsigned char event_data[MAX_EVENT_SIZE];
+	unsigned int total_event_len;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
 
 	length =  *(uint16_t *)(buf + 1); /* total length of event series */
 	if (length == 0) {
 		pr_err("diag: Incoming dci event length is invalid\n");
 		return;
 	}
-	temp_len = 0;
-	buf = buf + 3; /* start of event series */
+	/* Move directly to the start of the event series. 1 byte for
+	 * event code and 2 bytes for the length field.
+	 */
+	temp_len = 3;
 	while (temp_len < (length - 1)) {
 		event_id_packet = *(uint16_t *)(buf + temp_len);
 		event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
@@ -334,6 +771,22 @@
 			memcpy(event_data + 12, buf + temp_len + 2 +
 						timestamp_len, payload_len);
 		}
+
+		/* Before copying the data to userspace, check that we are
+		 * still within the buffer limit. This is an error case, so
+		 * don't count it towards the health statistics.
+		 *
+		 * Here, the offset of 2 bytes (uint16_t) is for the
+		 * event_id_packet length.
+		 */
+		temp_len += sizeof(uint16_t) + timestamp_len +
+						payload_len_field + payload_len;
+		if (temp_len > len) {
+			pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, temp_len);
+			return;
+		}
+
 		/* 2 bytes for the event id & timestamp len is hard coded to 8,
 		   as individual events have full timestamp */
 		*(uint16_t *)(event_data) = 10 +
@@ -343,108 +796,114 @@
 		/* 2 bytes for the event length field which is added to
 		   the event data */
 		total_event_len = 2 + 10 + payload_len_field + payload_len;
-		byte_index = event_id / 8;
-		bit_index = event_id % 8;
-		byte_mask = 0x1 << bit_index;
 		/* parse through event mask tbl of each client and check mask */
-		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-			if (driver->dci_client_tbl[i].client) {
-				entry = &(driver->dci_client_tbl[i]);
-				event_mask_ptr = entry->dci_event_mask +
-								 byte_index;
-				mutex_lock(&dci_health_mutex);
-				mutex_lock(&entry->data_mutex);
-				if (*event_mask_ptr & byte_mask) {
-					/* copy to client buffer */
-					if (DCI_CHK_CAPACITY(entry,
-							 4 + total_event_len)) {
-						pr_err("diag: DCI event drop\n");
-						driver->dci_client_tbl[i].
-							dropped_events++;
-						mutex_unlock(
-							&entry->data_mutex);
-						mutex_unlock(
-							&dci_health_mutex);
-						break;
-					}
-					driver->dci_client_tbl[i].
-							received_events++;
-					*(int *)(entry->dci_data+
-					entry->data_len) = DCI_EVENT_TYPE;
-					/* 4 bytes for DCI_EVENT_TYPE */
-					memcpy(entry->dci_data +
-						entry->data_len + 4, event_data
-						, total_event_len);
-					entry->data_len += 4 + total_event_len;
-				}
-				mutex_unlock(&entry->data_mutex);
-				mutex_unlock(&dci_health_mutex);
+		list_for_each_safe(start, temp, &driver->dci_client_list) {
+			entry = list_entry(start, struct diag_dci_client_tbl,
+									track);
+			if (__diag_dci_query_event_mask(entry, event_id)) {
+				/* copy to client buffer */
+				copy_dci_event(event_data, total_event_len,
+					       entry, data_source);
 			}
 		}
-		temp_len += 2 + timestamp_len + payload_len_field + payload_len;
 	}
 }
 
-void extract_dci_log(unsigned char *buf)
+static void copy_dci_log(unsigned char *buf, int len,
+			 struct diag_dci_client_tbl *client, int data_source)
 {
-	uint16_t log_code, item_num, log_length;
-	uint8_t equip_id, *log_mask_ptr, byte_mask;
-	unsigned int i, byte_index, byte_offset = 0;
-	struct diag_dci_client_tbl *entry;
+	uint16_t log_length = 0;
+	struct diag_dci_buffer_t *data_buffer = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	int err = 0, total_len = 0;
+
+	if (!buf || !client) {
+		pr_err("diag: Invalid pointers in %s", __func__);
+		return;
+	}
 
 	log_length = *(uint16_t *)(buf + 2);
-	log_code = *(uint16_t *)(buf + 6);
-	equip_id = LOG_GET_EQUIP_ID(log_code);
-	item_num = LOG_GET_ITEM_NUM(log_code);
-	byte_index = item_num/8 + 2;
-	byte_mask = 0x01 << (item_num % 8);
-
 	if (log_length > USHRT_MAX - 4) {
-		pr_err("diag: Integer overflow in %s, log_len:%d",
+		pr_err("diag: Integer overflow in %s, log_len: %d",
 				__func__, log_length);
 		return;
 	}
-	byte_offset = (equip_id * 514) + byte_index;
-	if (byte_offset >=  DCI_LOG_MASK_SIZE) {
-		pr_err("diag: Invalid byte_offset %d in dci log\n",
-							byte_offset);
+	total_len = sizeof(int) + log_length;
+
+	/* Check that we are within len. The check should include the
+	 * first 4 bytes for the log code (2) and the length bytes (2).
+	 */
+	if ((log_length + sizeof(uint16_t) + 2) > len) {
+		pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
+						__func__, log_length, len);
+		return;
+	}
+
+	proc_buf = &client->buffers[data_source];
+	mutex_lock(&proc_buf->buf_mutex);
+	mutex_lock(&proc_buf->health_mutex);
+	err = diag_dci_get_buffer(client, data_source, total_len);
+	if (err) {
+		if (err == -ENOMEM)
+			proc_buf->health.dropped_logs++;
+		else
+			pr_err("diag: In %s, invalid packet\n", __func__);
+		mutex_unlock(&proc_buf->health_mutex);
+		mutex_unlock(&proc_buf->buf_mutex);
+		return;
+	}
+
+	data_buffer = proc_buf->buf_curr;
+	proc_buf->health.received_logs++;
+	mutex_unlock(&proc_buf->health_mutex);
+	mutex_unlock(&proc_buf->buf_mutex);
+
+	mutex_lock(&data_buffer->data_mutex);
+	if (!data_buffer->data) {
+		mutex_unlock(&data_buffer->data_mutex);
+		return;
+	}
+
+	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
+	data_buffer->data_len += sizeof(int);
+	memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
+	       log_length);
+	data_buffer->data_len += log_length;
+	data_buffer->data_source = data_source;
+	mutex_unlock(&data_buffer->data_mutex);
+}
+
+void extract_dci_log(unsigned char *buf, int len, int data_source)
+{
+	uint16_t log_code, read_bytes = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	if (!buf) {
+		pr_err("diag: In %s buffer is NULL\n", __func__);
+		return;
+	}
+
+	/* The first six bytes of the incoming log packet contain the
+	 * command code (2), the length of the packet (2) and the length
+	 * of the log (2).
+	 */
+	log_code = *(uint16_t *)(buf + 6);
+	read_bytes += sizeof(uint16_t) + 6;
+	if (read_bytes > len) {
+		pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, read_bytes);
 		return;
 	}
 
 	/* parse through log mask table of each client and check mask */
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		if (driver->dci_client_tbl[i].client) {
-			entry = &(driver->dci_client_tbl[i]);
-			log_mask_ptr = entry->dci_log_mask;
-			if (!log_mask_ptr)
-				return;
-			log_mask_ptr = log_mask_ptr + byte_offset;
-			mutex_lock(&dci_health_mutex);
-			mutex_lock(&entry->data_mutex);
-			if (*log_mask_ptr & byte_mask) {
-				pr_debug("\t log code %x needed by client %d",
-					 log_code, entry->client->tgid);
-				/* copy to client buffer */
-				if (DCI_CHK_CAPACITY(entry,
-						 4 + *(uint16_t *)(buf + 2))) {
-						pr_err("diag: DCI log drop\n");
-						driver->dci_client_tbl[i].
-								dropped_logs++;
-						mutex_unlock(
-							&entry->data_mutex);
-						mutex_unlock(
-							&dci_health_mutex);
-						return;
-				}
-				driver->dci_client_tbl[i].received_logs++;
-				*(int *)(entry->dci_data+entry->data_len) =
-								DCI_LOG_TYPE;
-				memcpy(entry->dci_data + entry->data_len + 4,
-					    buf + 4, log_length);
-				entry->data_len += 4 + log_length;
-			}
-			mutex_unlock(&entry->data_mutex);
-			mutex_unlock(&dci_health_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (__diag_dci_query_log_mask(entry, log_code)) {
+			pr_debug("\t log code %x needed by client %d",
+				 log_code, entry->client->tgid);
+			/* copy to client buffer */
+			copy_dci_log(buf, len, entry, data_source);
 		}
 	}
 }
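extract_dci_log() and copy_dci_log() rely on a fixed prefix in each incoming log packet, command code (2 bytes), packet length (2) and log length (2), with the log record starting at offset 4 and the log code read at offset 6. A small bounds-checked parser for that prefix, written as a user-space sketch of the layout described in the comment above:

    #include <stdint.h>
    #include <string.h>

    /*
     * Assumed layout, per the comment in extract_dci_log(): command code
     * (2 bytes), packet length (2), log length (2); the log record starts
     * at offset 4 and carries the log code at offset 6 of the packet.
     */
    static int parse_dci_log_hdr(const unsigned char *buf, int len,
                                 uint16_t *log_len, uint16_t *log_code)
    {
            if (len < 8)    /* 6-byte prefix plus the 2-byte log code */
                    return -1;

            memcpy(log_len, buf + 2, sizeof(uint16_t));
            memcpy(log_code, buf + 6, sizeof(uint16_t));

            /* The log payload, starting at offset 4, must fit in the buffer */
            if (*log_len + 4 > len)
                    return -1;
            return 0;
    }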
@@ -459,14 +918,10 @@
 	uint8_t *client_log_mask_ptr;
 	uint8_t *log_mask_ptr;
 	int ret;
-	int index = smd_info->peripheral;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
 
-	/* Update the peripheral(s) with the dci log and event masks */
-
-	/* If the cntl channel is not up, we can't update logs and events */
-	if (!driver->smd_cntl[index].ch)
-		return;
-
+	/* Update apps and peripheral(s) with the dci log and event masks */
 	memset(dirty_bits, 0, 16 * sizeof(uint8_t));
 
 	/*
@@ -474,15 +929,13 @@
 	 * which log entries in the cumulative logs that need
 	 * to be updated on the peripheral.
 	 */
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		if (driver->dci_client_tbl[i].client) {
-			client_log_mask_ptr =
-				driver->dci_client_tbl[i].dci_log_mask;
-			for (j = 0; j < 16; j++) {
-				if (*(client_log_mask_ptr+1))
-					dirty_bits[j] = 1;
-				client_log_mask_ptr += 514;
-			}
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		client_log_mask_ptr = entry->dci_log_mask;
+		for (j = 0; j < 16; j++) {
+			if (*(client_log_mask_ptr+1))
+				dirty_bits[j] = 1;
+			client_log_mask_ptr += 514;
 		}
 	}
 
@@ -497,41 +950,48 @@
 	}
 	mutex_unlock(&dci_log_mask_mutex);
 
-	ret = diag_send_dci_log_mask(&driver->smd_cntl[index]);
+	/* Send updated mask to userspace clients */
+	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	/* Send updated log mask to peripherals */
+	ret = diag_send_dci_log_mask();
 
-	ret = diag_send_dci_event_mask(&driver->smd_cntl[index]);
+	/* Send updated event mask to userspace clients */
+	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	/* Send updated event mask to peripheral */
+	ret = diag_send_dci_event_mask();
 
 	smd_info->notify_context = 0;
 }
 
 void diag_dci_notify_client(int peripheral_mask, int data)
 {
-	int i, stat;
+	int stat;
 	struct siginfo info;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
 	memset(&info, 0, sizeof(struct siginfo));
 	info.si_code = SI_QUEUE;
 	info.si_int = (peripheral_mask | data);
 
 	/* Notify the DCI process that the peripheral DCI Channel is up */
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		if (!driver->dci_client_tbl[i].client)
-			continue;
-		if (driver->dci_client_tbl[i].list & peripheral_mask) {
-			info.si_signo = driver->dci_client_tbl[i].signal_type;
-			stat = send_sig_info(
-				driver->dci_client_tbl[i].signal_type,
-				&info, driver->dci_client_tbl[i].client);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.notification_list & peripheral_mask) {
+			info.si_signo = entry->client_info.signal_type;
+			stat = send_sig_info(entry->client_info.signal_type,
+					     &info, entry->client);
 			if (stat)
 				pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
-				info.si_int, stat);
+							info.si_int, stat);
 		}
-	} /* end of loop for all DCI clients */
+	}
 }
 
-static int diag_send_dci_pkt(struct diag_master_table entry, unsigned char *buf,
-					 int len, int tag)
+static int diag_send_dci_pkt(struct diag_master_table entry,
+			     unsigned char *buf, int len, int tag)
 {
-	int i, status = 0;
+	int i, status = DIAG_DCI_NO_ERROR;
 	unsigned int read_len = 0;
 
 	/* The first 4 bytes is the uid tag and the next four bytes is
@@ -565,48 +1025,289 @@
 		mutex_unlock(&driver->dci_mutex);
 		return -EIO;
 	}
-
-	for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
-		struct diag_smd_info *smd_info = driver->separate_cmdrsp[i] ?
-					&driver->smd_dci_cmd[i] :
-					&driver->smd_dci[i];
-		if (entry.client_id == smd_info->peripheral) {
-			if (smd_info->ch) {
-				mutex_lock(&smd_info->smd_ch_mutex);
-				smd_write(smd_info->ch,
-					driver->apps_dci_buf, len + 10);
-				mutex_unlock(&smd_info->smd_ch_mutex);
-				status = DIAG_DCI_NO_ERROR;
-			}
-			break;
-		}
+	/* This command is registered locally on the Apps */
+	if (entry.client_id == APPS_DATA) {
+		driver->dci_pkt_length = len + 10;
+		diag_update_pkt_buffer(driver->apps_dci_buf, DCI_PKT_TYPE);
+		diag_update_sleeping_process(entry.process_id, DCI_PKT_TYPE);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_ERROR;
 	}
 
-	if (status != DIAG_DCI_NO_ERROR) {
-		pr_alert("diag: check DCI channel\n");
+	for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
+		if (entry.client_id == i) {
+			status = 1;
+			break;
+		}
+
+	if (status) {
+		status = diag_dci_write_proc(entry.client_id,
+					     DIAG_DATA_TYPE,
+					     driver->apps_dci_buf,
+					     len + 10);
+	} else {
+		pr_err("diag: Cannot send packet to peripheral %d",
+		       entry.client_id);
 		status = DIAG_DCI_SEND_DATA_FAIL;
 	}
 	mutex_unlock(&driver->dci_mutex);
 	return status;
 }
 
+static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
+				     unsigned char *req_buf, int tag)
+{
+	uint8_t cmd_code, subsys_id, i, goto_download = 0;
+	uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
+	uint16_t ss_cmd_code;
+	uint32_t write_len = 0;
+	unsigned char *dest_buf = driver->apps_dci_buf;
+	unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
+	struct diag_dci_pkt_header_t dci_header;
+
+	if (!pkt_header || !req_buf || tag < 0)
+		return -EIO;
+
+	cmd_code = pkt_header->cmd_code;
+	subsys_id = pkt_header->subsys_id;
+	ss_cmd_code = pkt_header->subsys_cmd_code;
+
+	if (cmd_code == DIAG_CMD_DOWNLOAD) {
+		*payload_ptr = DIAG_CMD_DOWNLOAD;
+		write_len = sizeof(uint8_t);
+		goto_download = 1;
+		goto fill_buffer;
+	} else if (cmd_code == DIAG_CMD_VERSION) {
+		if (chk_polling_response()) {
+			for (i = 0; i < 55; i++, write_len++, payload_ptr++)
+				*(payload_ptr) = 0;
+			goto fill_buffer;
+		}
+	} else if (cmd_code == DIAG_CMD_EXT_BUILD) {
+		if (chk_polling_response()) {
+			*payload_ptr = DIAG_CMD_EXT_BUILD;
+			write_len = sizeof(uint8_t);
+			payload_ptr += sizeof(uint8_t);
+			for (i = 0; i < 8; i++, write_len++, payload_ptr++)
+				*(payload_ptr) = 0;
+			*(int *)(payload_ptr) = chk_config_get_id();
+			write_len += sizeof(int);
+			goto fill_buffer;
+		}
+	} else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
+		if (driver->log_on_demand_support) {
+			*payload_ptr = DIAG_CMD_LOG_ON_DMND;
+			write_len = sizeof(uint8_t);
+			payload_ptr += sizeof(uint8_t);
+			*(uint16_t *)(payload_ptr) = *(uint16_t *)(req_buf + 1);
+			write_len += sizeof(uint16_t);
+			payload_ptr += sizeof(uint16_t);
+			*payload_ptr = 0x1; /* Unknown */
+			write_len += sizeof(uint8_t);
+			goto fill_buffer;
+		}
+	} else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
+		return DIAG_DCI_TABLE_ERR;
+	}
+
+	if (subsys_id == DIAG_SS_DIAG) {
+		if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(uint32_t *)(payload_ptr + write_len) = PKT_SIZE;
+			write_len += sizeof(uint32_t);
+		} else if (ss_cmd_code == DIAG_DIAG_STM) {
+			write_len = diag_process_stm_cmd(req_buf, payload_ptr);
+		}
+	} else if (subsys_id == DIAG_SS_PARAMS) {
+		if (ss_cmd_code == DIAG_DIAG_POLL) {
+			if (chk_polling_response()) {
+				memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+				write_len = sizeof(struct diag_pkt_header_t);
+				payload_ptr += write_len;
+				for (i = 0; i < 12; i++, write_len++) {
+					*(payload_ptr) = 0;
+					payload_ptr++;
+				}
+			}
+		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(int *)(payload_ptr + write_len) = wrap_enabled;
+			write_len += sizeof(int);
+		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
+			wrap_enabled = true;
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(uint16_t *)(payload_ptr + write_len) = wrap_count;
+			write_len += sizeof(uint16_t);
+		}
+	}
+
+fill_buffer:
+	if (write_len > 0) {
+		/* Check if we are within the range of the buffer */
+		if (write_len + header_len > PKT_SIZE) {
+			pr_err("diag: In %s, invalid length %d\n", __func__,
+						write_len + header_len);
+			return -ENOMEM;
+		}
+		dci_header.start = CONTROL_CHAR;
+		dci_header.version = 1;
+		/*
+		 * Length of the rsp pkt = actual data len + pkt rsp code
+		 * (uint8_t) + tag (int)
+		 */
+		dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
+		dci_header.pkt_code = DCI_PKT_RSP_CODE;
+		dci_header.tag = tag;
+		driver->in_busy_dcipktdata = 1;
+		memcpy(dest_buf, &dci_header, header_len);
+		diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
+						dci_header.len);
+		driver->in_busy_dcipktdata = 0;
+
+		if (goto_download) {
+			/*
+			 * Sleep for some time so that the response reaches the
+			 * client. The value 5000 was chosen empirically as an
+			 * optimum time for the response to reach the client.
+			 */
+			usleep_range(5000, 5100);
+			/* call download API */
+			msm_set_restart_mode(RESTART_DLOAD);
+			pr_alert("diag: download mode set, Rebooting SoC..\n");
+			kernel_restart(NULL);
+		}
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	return DIAG_DCI_TABLE_ERR;
+}
+
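The fill_buffer path in diag_dci_process_apps_pkt() frames locally generated responses with a struct diag_dci_pkt_header_t carrying the start byte (CONTROL_CHAR), a version, a length covering the payload plus the packet code and tag, the DCI_PKT_RSP_CODE and the transaction tag. The declaration below only mirrors the field order inferred from that code; the real struct is declared in diag_dci.h and may differ in naming or packing:

    #include <stdint.h>

    /* Inferred layout, for illustration only. */
    struct example_dci_pkt_header {
            uint8_t start;          /* CONTROL_CHAR, start-of-frame marker */
            uint8_t version;        /* DCI framing version, 1 here */
            uint16_t len;           /* payload + pkt_code (1) + tag (4) */
            uint8_t pkt_code;       /* DCI_PKT_RSP_CODE for command responses */
            int tag;                /* transaction tag from the request entry */
    } __attribute__((packed));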
+static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
+{
+	int req_uid, ret = DIAG_DCI_TABLE_ERR, i;
+	struct diag_pkt_header_t *header = NULL;
+	unsigned char *temp = buf;
+	unsigned char *req_buf = NULL;
+	uint8_t retry_count = 0, max_retries = 3, found = 0;
+	uint32_t read_len = 0;
+	struct diag_master_table entry;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+
+	if (!buf)
+		return -EIO;
+
+	if (len < DCI_PKT_REQ_MIN_LEN || len > USER_SPACE_DATA) {
+		pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
+		return -EIO;
+	}
+
+	req_uid = *(int *)temp; /* UID of the request */
+	temp += sizeof(int);
+	req_buf = temp; /* Start of the Request */
+	header = (struct diag_pkt_header_t *)temp;
+	temp += sizeof(struct diag_pkt_header_t);
+	read_len = sizeof(int) + sizeof(struct diag_pkt_header_t);
+	if (read_len >= USER_SPACE_DATA) {
+		pr_err("diag: dci: Invalid length in %s\n", __func__);
+		return -EIO;
+	}
+
+	/* Check if the command is allowed on DCI */
+	if (diag_dci_filter_commands(header)) {
+		pr_debug("diag: command not supported %d %d %d",
+			 header->cmd_code, header->subsys_id,
+			 header->subsys_cmd_code);
+		return DIAG_DCI_SEND_DATA_FAIL;
+	}
+
+	/*
+	 * Previous packet is yet to be consumed by the client. Wait
+	 * till the buffer is free.
+	 */
+	while (retry_count < max_retries) {
+		retry_count++;
+		if (driver->in_busy_dcipktdata)
+			usleep_range(10000, 10100);
+		else
+			break;
+	}
+	/* The buffer is still busy */
+	if (driver->in_busy_dcipktdata) {
+		pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
+								__func__);
+		return -EAGAIN;
+	}
+
+	/* Register this new DCI packet */
+	req_entry = diag_register_dci_transaction(req_uid);
+	if (!req_entry) {
+		pr_alert("diag: registering new DCI transaction failed\n");
+		return DIAG_DCI_NO_REG;
+	}
+
+	/* Check if it is a dedicated Apps command */
+	ret = diag_dci_process_apps_pkt(header, req_buf, req_entry->tag);
+	if (ret == DIAG_DCI_NO_ERROR || ret < 0)
+		return ret;
+
+	/* Check the registration table for command entries */
+	for (i = 0; i < diag_max_reg && !found; i++) {
+		entry = driver->table[i];
+		if (entry.process_id == NO_PROCESS)
+			continue;
+		if (entry.cmd_code == header->cmd_code &&
+			    entry.subsys_id == header->subsys_id &&
+			    entry.cmd_code_lo <= header->subsys_cmd_code &&
+			    entry.cmd_code_hi >= header->subsys_cmd_code) {
+			ret = diag_send_dci_pkt(entry, buf, len,
+						req_entry->tag);
+			found = 1;
+		} else if (entry.cmd_code == 255 && header->cmd_code == 75) {
+			if (entry.subsys_id == header->subsys_id &&
+			    entry.cmd_code_lo <= header->subsys_cmd_code &&
+			    entry.cmd_code_hi >= header->subsys_cmd_code) {
+				ret = diag_send_dci_pkt(entry, buf, len,
+							req_entry->tag);
+				found = 1;
+			}
+		} else if (entry.cmd_code == 255 && entry.subsys_id == 255) {
+			if (entry.cmd_code_lo <= header->cmd_code &&
+			    entry.cmd_code_hi >= header->cmd_code) {
+				/*
+				 * If it is a Mode reset command, make sure it
+				 * is registered on the Apps processor.
+				 */
+				if (entry.cmd_code_lo == MODE_CMD &&
+				    entry.cmd_code_hi == MODE_CMD)
+					if (entry.client_id != APPS_DATA)
+						continue;
+					ret = diag_send_dci_pkt(entry, buf, len,
+								req_entry->tag);
+					found = 1;
+			}
+		}
+	}
+
+	return ret;
+}
+
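diag_process_dci_pkt_rsp() matches the request against driver->table using three cases: an exact (cmd_code, subsys_id, subsystem command range) match, a wildcard cmd_code of 255 for subsystem command 75, and a fully wildcarded entry matched on the command code range. The predicate below restates those cases in isolation (the MODE_CMD apps-only special case is left out):

    #include <stdint.h>
    #include <stdbool.h>

    struct reg_entry {
            int cmd_code;           /* 255 acts as a wildcard */
            int subsys_id;          /* 255 together with cmd_code 255 */
            uint16_t cmd_code_lo;
            uint16_t cmd_code_hi;
    };

    /* Restates the three match cases from diag_process_dci_pkt_rsp(). */
    static bool entry_matches(const struct reg_entry *e, uint8_t cmd_code,
                              uint8_t subsys_id, uint16_t ss_cmd)
    {
            if (e->cmd_code == cmd_code && e->subsys_id == subsys_id &&
                e->cmd_code_lo <= ss_cmd && e->cmd_code_hi >= ss_cmd)
                    return true;    /* exact subsystem command match */

            if (e->cmd_code == 255 && cmd_code == 75 &&
                e->subsys_id == subsys_id &&
                e->cmd_code_lo <= ss_cmd && e->cmd_code_hi >= ss_cmd)
                    return true;    /* wildcard command code, subsystem match */

            if (e->cmd_code == 255 && e->subsys_id == 255 &&
                e->cmd_code_lo <= cmd_code && e->cmd_code_hi >= cmd_code)
                    return true;    /* match on the command code range alone */

            return false;
    }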
 int diag_process_dci_transaction(unsigned char *buf, int len)
 {
 	unsigned char *temp = buf;
-	uint16_t subsys_cmd_code, log_code, item_num;
-	int subsys_id, cmd_code, ret = -1, found = 0;
-	struct diag_master_table entry;
-	int count, set_mask, num_codes, bit_index, event_id, offset = 0, i;
+	uint16_t log_code, item_num;
+	int ret = -1, found = 0;
+	int count, set_mask, num_codes, bit_index, event_id, offset = 0;
 	unsigned int byte_index, read_len = 0;
 	uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
 	uint8_t *event_mask_ptr;
-	struct dci_pkt_req_entry_t *req_entry = NULL;
-
-	if (!driver->smd_dci[MODEM_DATA].ch) {
-		pr_err("diag: DCI smd channel for peripheral %d not valid for dci updates\n",
-			driver->smd_dci[MODEM_DATA].peripheral);
-		return DIAG_DCI_SEND_DATA_FAIL;
-	}
+	struct diag_dci_client_tbl *dci_entry = NULL;
 
 	if (!temp) {
 		pr_err("diag: Invalid buffer in %s\n", __func__);
@@ -615,68 +1316,7 @@
 
 	/* This is Pkt request/response transaction */
 	if (*(int *)temp > 0) {
-		if (len < DCI_PKT_REQ_MIN_LEN || len > USER_SPACE_DATA) {
-			pr_err("diag: dci: Invalid length %d len in %s", len,
-								__func__);
-			return -EIO;
-		}
-		/* enter this UID into kernel table and return index */
-		req_entry = diag_register_dci_transaction(*(int *)temp);
-		if (!req_entry) {
-			pr_alert("diag: registering new DCI transaction failed\n");
-			return DIAG_DCI_NO_REG;
-		}
-		temp += sizeof(int);
-		/*
-		 * Check for registered peripheral and fwd pkt to
-		 * appropriate proc
-		 */
-		cmd_code = (int)(*(char *)temp);
-		temp++;
-		subsys_id = (int)(*(char *)temp);
-		temp++;
-		subsys_cmd_code = *(uint16_t *)temp;
-		temp += sizeof(uint16_t);
-		read_len += sizeof(int) + 2 + sizeof(uint16_t);
-		if (read_len >= USER_SPACE_DATA) {
-			pr_err("diag: dci: Invalid length in %s\n", __func__);
-			return -EIO;
-		}
-		pr_debug("diag: %d %d %d", cmd_code, subsys_id,
-			subsys_cmd_code);
-		for (i = 0; i < diag_max_reg; i++) {
-			entry = driver->table[i];
-			if (entry.process_id != NO_PROCESS) {
-				if (entry.cmd_code == cmd_code &&
-					entry.subsys_id == subsys_id &&
-					entry.cmd_code_lo <= subsys_cmd_code &&
-					entry.cmd_code_hi >= subsys_cmd_code) {
-					ret = diag_send_dci_pkt(entry, buf,
-								len,
-								req_entry->tag);
-				} else if (entry.cmd_code == 255
-					  && cmd_code == 75) {
-					if (entry.subsys_id == subsys_id &&
-						entry.cmd_code_lo <=
-						subsys_cmd_code &&
-						entry.cmd_code_hi >=
-						subsys_cmd_code) {
-						ret = diag_send_dci_pkt(entry,
-							buf, len,
-							req_entry->tag);
-					}
-				} else if (entry.cmd_code == 255 &&
-					entry.subsys_id == 255) {
-					if (entry.cmd_code_lo <= cmd_code &&
-						entry.cmd_code_hi >=
-							cmd_code) {
-						ret = diag_send_dci_pkt(entry,
-							buf, len,
-							req_entry->tag);
-					}
-				}
-			}
-		}
+		return diag_process_dci_pkt_rsp(buf, len);
 	} else if (*(int *)temp == DCI_LOG_TYPE) {
 		/* Minimum length of a log mask config is 12 + 2 bytes for
 		   atleast one log code to be set or reset */
@@ -684,12 +1324,13 @@
 			pr_err("diag: dci: Invalid length in %s\n", __func__);
 			return -EIO;
 		}
-		/* find client id and table */
-		i = diag_dci_find_client_index(current->tgid);
-		if (i == DCI_CLIENT_INDEX_INVALID) {
-			pr_err("diag: dci client not registered/found\n");
+		/* find client table entry */
+		dci_entry = diag_dci_get_client_entry();
+		if (!dci_entry) {
+			pr_err("diag: In %s, invalid client\n", __func__);
 			return ret;
 		}
+
 		/* Extract each log code and put in client table */
 		temp += sizeof(int);
 		read_len += sizeof(int);
@@ -706,7 +1347,7 @@
 			return -EIO;
 		}
 
-		head_log_mask_ptr = driver->dci_client_tbl[i].dci_log_mask;
+		head_log_mask_ptr = dci_entry->dci_log_mask;
 		if (!head_log_mask_ptr) {
 			pr_err("diag: dci: Invalid Log mask pointer in %s\n",
 								__func__);
@@ -768,8 +1409,10 @@
 			count++;
 			ret = DIAG_DCI_NO_ERROR;
 		}
+		/* send updated mask to userspace clients */
+		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
 		/* send updated mask to peripherals */
-		ret = diag_send_dci_log_mask(&driver->smd_cntl[MODEM_DATA]);
+		ret = diag_send_dci_log_mask();
 	} else if (*(int *)temp == DCI_EVENT_TYPE) {
 		/* Minimum length of a event mask config is 12 + 4 bytes for
 		  atleast one event id to be set or reset. */
@@ -777,10 +1420,10 @@
 			pr_err("diag: dci: Invalid length in %s\n", __func__);
 			return -EIO;
 		}
-		/* find client id and table */
-		i = diag_dci_find_client_index(current->tgid);
-		if (i == DCI_CLIENT_INDEX_INVALID) {
-			pr_err("diag: dci client not registered/found\n");
+		/* find client table entry */
+		dci_entry = diag_dci_get_client_entry();
+		if (!dci_entry) {
+			pr_err("diag: In %s, invalid client\n", __func__);
 			return ret;
 		}
 		/* Extract each log code and put in client table */
@@ -802,7 +1445,7 @@
 			return -EIO;
 		}
 
-		event_mask_ptr = driver->dci_client_tbl[i].dci_event_mask;
+		event_mask_ptr = dci_entry->dci_event_mask;
 		if (!event_mask_ptr) {
 			pr_err("diag: dci: Invalid event mask pointer in %s\n",
 								__func__);
@@ -839,58 +1482,35 @@
 			count++;
 			ret = DIAG_DCI_NO_ERROR;
 		}
+		/* send updated mask to userspace clients */
+		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
 		/* send updated mask to peripherals */
-		ret = diag_send_dci_event_mask(&driver->smd_cntl[MODEM_DATA]);
+		ret = diag_send_dci_event_mask();
 	} else {
 		pr_alert("diag: Incorrect DCI transaction\n");
 	}
 	return ret;
 }
 
-int diag_dci_find_client_index_health(int client_id)
+
+struct diag_dci_client_tbl *diag_dci_get_client_entry()
 {
-	int i, ret = DCI_CLIENT_INDEX_INVALID;
-
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		if (driver->dci_client_tbl[i].client != NULL) {
-			if (driver->dci_client_tbl[i].client_id ==
-					client_id) {
-				ret = i;
-				break;
-			}
-		}
-	}
-	return ret;
-}
-
-int diag_dci_find_client_index(int client_id)
-{
-	int i, ret = DCI_CLIENT_INDEX_INVALID;
-
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		if (driver->dci_client_tbl[i].client != NULL) {
-			if (driver->dci_client_tbl[i].client->tgid ==
-					client_id) {
-				ret = i;
-				break;
-			}
-		}
-	}
-	return ret;
+	return __diag_dci_get_client_entry(current->tgid);
 }
 
 void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask)
 {
-	int i;
 	uint8_t *event_mask_ptr;
 	uint8_t *update_ptr = dci_cumulative_event_mask;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
 	bool is_set = false;
 
 	mutex_lock(&dci_event_mask_mutex);
 	update_ptr += offset;
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		event_mask_ptr =
-			driver->dci_client_tbl[i].dci_event_mask;
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		event_mask_ptr = entry->dci_event_mask;
 		event_mask_ptr += offset;
 		if ((*event_mask_ptr & byte_mask) == byte_mask) {
 			is_set = true;
@@ -905,75 +1525,29 @@
 	mutex_unlock(&dci_event_mask_mutex);
 }
 
-void clear_client_dci_cumulative_event_mask(int client_index)
+void diag_dci_invalidate_cumulative_event_mask()
 {
-	int i, j;
-	uint8_t *update_ptr = dci_cumulative_event_mask;
-	uint8_t *event_mask_ptr, *client_event_mask_ptr, byte_mask = 0;
-	bool is_set = false;
-
-	event_mask_ptr =
-		(driver->dci_client_tbl[client_index].dci_event_mask);
+	int i = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	uint8_t *update_ptr, *event_mask_ptr;
+	update_ptr = dci_cumulative_event_mask;
 
 	mutex_lock(&dci_event_mask_mutex);
-	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
-		is_set = false;
-		/* Already cleared event masks need not to be considered */
-		if (*event_mask_ptr != 0) {
-			byte_mask = *event_mask_ptr;
-		} else {
-			update_ptr++;
-			event_mask_ptr++;
-			continue;
-		}
-		for (j = 0; j < MAX_DCI_CLIENTS; j++) {
-			/* continue searching for valid client */
-			if (driver->dci_client_tbl[j].client == NULL ||
-				client_index == j)
-				continue;
-			client_event_mask_ptr =
-				(driver->dci_client_tbl[j].dci_event_mask);
-			client_event_mask_ptr += i;
-			if (*client_event_mask_ptr & byte_mask) {
-				/*
-				* Break if another client has same
-				* event mask set
-				*/
-				if ((*client_event_mask_ptr &
-					byte_mask) == byte_mask) {
-					is_set = true;
-					break;
-				} else {
-					byte_mask =
-					(~(*client_event_mask_ptr) &
-					byte_mask);
-					is_set = false;
-				}
-			}
-		}
-		/*
-		* Clear only if this client has event mask set else
-		* don't update cumulative event mask ptr
-		*/
-		if (is_set == false)
-			*update_ptr &= ~byte_mask;
-
-		update_ptr++;
-		event_mask_ptr++;
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		event_mask_ptr = entry->dci_event_mask;
+		for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
+			*(update_ptr+i) |= *(event_mask_ptr+i);
 	}
-	event_mask_ptr =
-		(driver->dci_client_tbl[client_index].dci_event_mask);
-	memset(event_mask_ptr, 0, DCI_EVENT_MASK_SIZE);
 	mutex_unlock(&dci_event_mask_mutex);
 }
 
-
-int diag_send_dci_event_mask(struct diag_smd_info *smd_info)
+int diag_send_dci_event_mask()
 {
 	void *buf = driver->buf_event_mask_update;
 	int header_size = sizeof(struct diag_ctrl_event_mask);
-	int wr_size = -ENOMEM, retry_count = 0, timer;
-	int ret = DIAG_DCI_NO_ERROR, i;
+	int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
 
 	mutex_lock(&driver->diag_cntl_mutex);
 	/* send event mask update */
@@ -991,28 +1565,18 @@
 	}
 	memcpy(buf, driver->event_mask, header_size);
 	memcpy(buf+header_size, dci_cumulative_event_mask, DCI_EVENT_MASK_SIZE);
-	if (smd_info && smd_info->ch) {
-		while (retry_count < 3) {
-			mutex_lock(&smd_info->smd_ch_mutex);
-			wr_size = smd_write(smd_info->ch, buf,
-					 header_size + DCI_EVENT_MASK_SIZE);
-			mutex_unlock(&smd_info->smd_ch_mutex);
-			if (wr_size == -ENOMEM) {
-				retry_count++;
-				for (timer = 0; timer < 5; timer++)
-					udelay(2000);
-			} else {
-				break;
-			}
-		}
-		if (wr_size != header_size + DCI_EVENT_MASK_SIZE) {
-			pr_err("diag: error writing dci event mask %d, tried %d\n",
-				 wr_size, header_size + DCI_EVENT_MASK_SIZE);
+	for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
+		/*
+		 * Don't send to peripheral if its regular channel
+		 * is down. It may also mean that the peripheral doesn't
+		 * support DCI.
+		 */
+		if (!driver->smd_dci[i].ch)
+			continue;
+		err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
+					  header_size + DCI_EVENT_MASK_SIZE);
+		if (err != DIAG_DCI_NO_ERROR)
 			ret = DIAG_DCI_SEND_DATA_FAIL;
-		}
-	} else {
-		pr_err("diag: ch not valid for dci event mask update\n");
-		ret = DIAG_DCI_SEND_DATA_FAIL;
 	}
 	mutex_unlock(&driver->diag_cntl_mutex);
 
@@ -1026,6 +1590,8 @@
 	uint8_t *update_ptr = dci_cumulative_log_mask;
 	uint8_t *log_mask_ptr;
 	bool is_set = false;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
 
 	mutex_lock(&dci_log_mask_mutex);
 	*update_ptr = 0;
@@ -1037,9 +1603,9 @@
 	/* update the dirty bit */
 	*(update_ptr+1) = 1;
 	update_ptr = update_ptr + byte_index;
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		log_mask_ptr =
-			(driver->dci_client_tbl[i].dci_log_mask);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		log_mask_ptr = entry->dci_log_mask;
 		log_mask_ptr = log_mask_ptr + offset + byte_index;
 		if ((*log_mask_ptr & byte_mask) == byte_mask) {
 			is_set = true;
@@ -1055,103 +1621,35 @@
 	mutex_unlock(&dci_log_mask_mutex);
 }
 
-void clear_client_dci_cumulative_log_mask(int client_index)
+void diag_dci_invalidate_cumulative_log_mask()
 {
-	int i, j, k;
-	uint8_t *update_ptr = dci_cumulative_log_mask;
-	uint8_t *log_mask_ptr, *client_log_mask_ptr, byte_mask = 0;
-	bool is_set = false;
-
-	log_mask_ptr = driver->dci_client_tbl[client_index].dci_log_mask;
+	int i = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	uint8_t *update_ptr, *log_mask_ptr;
+	update_ptr = dci_cumulative_log_mask;
 
 	mutex_lock(&dci_log_mask_mutex);
-	*update_ptr = 0;
-	/* set the equipment IDs */
-	for (i = 0; i < 16; i++)
-		*(update_ptr + (i*514)) = i;
-
-	/* update cumulative log mask ptr*/
-	update_ptr += 2;
-	log_mask_ptr += 2;
-	for (i = 0; i < 16; i++) {
-		for (j = 0; j < 512; j++) {
-			is_set = false;
-			/*
-			* Already cleared log masks need
-			* not to be considered
-			*/
-			if (*log_mask_ptr != 0) {
-				byte_mask = *log_mask_ptr;
-			} else {
-				update_ptr++;
-				log_mask_ptr++;
-				continue;
-			}
-			for (k = 0; k < MAX_DCI_CLIENTS; k++) {
-				/* continue searching for valid client */
-				if (driver->dci_client_tbl[k].client == NULL ||
-					client_index == k)
-					continue;
-				client_log_mask_ptr =
-				 (driver->dci_client_tbl[k].dci_log_mask);
-				client_log_mask_ptr += (i*514) + 2 + j;
-				if (*client_log_mask_ptr & byte_mask) {
-					/*
-					* Break if another client has same
-					* log mask set
-					*/
-					if ((*client_log_mask_ptr &
-						byte_mask) == byte_mask) {
-						is_set = true;
-						break;
-					} else {
-						byte_mask =
-						 (~(*client_log_mask_ptr) &
-						 byte_mask);
-						is_set = false;
-					}
-				}
-			}
-			/*
-			* Clear only if this client has log mask set else
-			* don't update cumulative log mask ptr
-			*/
-			if (is_set == false) {
-				/*
-				* Update the dirty bit for the equipment
-				* whose mask is changing
-				*/
-				dci_cumulative_log_mask[1+(i*514)] = 1;
-				*update_ptr &= ~byte_mask;
-			}
-
-			update_ptr++;
-			log_mask_ptr++;
-		}
-		update_ptr += 2;
-		log_mask_ptr += 2;
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		log_mask_ptr = entry->dci_log_mask;
+		for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
+			*(update_ptr+i) |= *(log_mask_ptr+i);
 	}
-	log_mask_ptr = driver->dci_client_tbl[client_index].dci_log_mask;
-	memset(log_mask_ptr, 0, DCI_LOG_MASK_SIZE);
 	mutex_unlock(&dci_log_mask_mutex);
 }
 
-int diag_send_dci_log_mask(struct diag_smd_info *smd_info)
+int diag_send_dci_log_mask()
 {
 	void *buf = driver->buf_log_mask_update;
 	int header_size = sizeof(struct diag_ctrl_log_mask);
 	uint8_t *log_mask_ptr = dci_cumulative_log_mask;
-	int i, wr_size = -ENOMEM, retry_count = 0, timer;
-	int ret = DIAG_DCI_NO_ERROR;
-
-	if (!smd_info || !smd_info->ch) {
-		pr_err("diag: ch not valid for dci log mask update\n");
-		return DIAG_DCI_SEND_DATA_FAIL;
-	}
+	int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	int updated;
 
 	mutex_lock(&driver->diag_cntl_mutex);
 	for (i = 0; i < 16; i++) {
-		retry_count = 0;
+		updated = 1;
 		driver->log_mask->cmd_type = DIAG_CTRL_MSG_LOG_MASK;
 		driver->log_mask->num_items = 512;
 		driver->log_mask->data_len  = 11 + 512;
@@ -1162,31 +1660,26 @@
 		memcpy(buf, driver->log_mask, header_size);
 		memcpy(buf+header_size, log_mask_ptr+2, 512);
 		/* if dirty byte is set and channel is valid */
-		if (smd_info->ch && *(log_mask_ptr+1)) {
-			while (retry_count < 3) {
-				mutex_lock(&smd_info->smd_ch_mutex);
-				wr_size = smd_write(smd_info->ch, buf,
-							header_size + 512);
-				mutex_unlock(&smd_info->smd_ch_mutex);
-				if (wr_size == -ENOMEM) {
-					retry_count++;
-					for (timer = 0; timer < 5; timer++)
-						udelay(2000);
-				} else
-					break;
-			}
-			if (wr_size != header_size + 512) {
-				pr_err("diag: dci log mask update failed %d, tried %d for equip_id %d\n",
-					wr_size, header_size + 512,
-					driver->log_mask->equip_id);
-				ret = DIAG_DCI_SEND_DATA_FAIL;
+		for (j = 0; j < NUM_SMD_DCI_CHANNELS; j++) {
+			/*
+			 * Don't send to peripheral if its regular channel
+			 * is down. It may also mean that the peripheral
+			 * doesn't support DCI.
+			 */
+			if (!driver->smd_dci[j].ch)
+				continue;
 
-			} else {
-				*(log_mask_ptr+1) = 0; /* clear dirty byte */
-				pr_debug("diag: updated dci log equip ID %d\n",
-						 *log_mask_ptr);
+			if (!(*(log_mask_ptr+1)))
+				continue;
+			err = diag_dci_write_proc(j, DIAG_CNTL_TYPE, buf,
+				header_size + DCI_MAX_ITEMS_PER_LOG_CODE);
+			if (err != DIAG_DCI_NO_ERROR) {
+				updated = 0;
+				ret = DIAG_DCI_SEND_DATA_FAIL;
 			}
 		}
+		if (updated)
+			*(log_mask_ptr+1) = 0; /* clear dirty byte */
 		log_mask_ptr += 514;
 	}
 	mutex_unlock(&driver->diag_cntl_mutex);
@@ -1198,6 +1691,9 @@
 {
 	uint8_t i; int count = 0;
 
+	if (!tbl_buf)
+		return;
+
 	/* create hard coded table for log mask with 16 categories */
 	for (i = 0; i < 16; i++) {
 		*(uint8_t *)tbl_buf = i;
@@ -1220,17 +1716,15 @@
 
 	if (pdev->id == SMD_APPS_MODEM) {
 		index = MODEM_DATA;
-		err = smd_open("DIAG_2",
+		err = smd_named_open_on_edge("DIAG_2",
+			SMD_APPS_MODEM,
 			&driver->smd_dci[index].ch,
 			&driver->smd_dci[index],
 			diag_smd_notify);
 		driver->smd_dci[index].ch_save =
 			driver->smd_dci[index].ch;
-		driver->dci_device = &pdev->dev;
-		driver->dci_device->power.wakeup = wakeup_source_register
-							("DIAG_DCI_WS");
 		if (err)
-			pr_err("diag: In %s, cannot open DCI port, Id = %d, err: %d\n",
+			pr_err("diag: In %s, cannot open DCI Modem port, Id = %d, err: %d\n",
 				__func__, pdev->id, err);
 	}
 
@@ -1251,11 +1745,8 @@
 			diag_smd_notify);
 		driver->smd_dci_cmd[index].ch_save =
 			driver->smd_dci_cmd[index].ch;
-		driver->dci_cmd_device = &pdev->dev;
-		driver->dci_cmd_device->power.wakeup = wakeup_source_register
-							("DIAG_DCI_CMD_WS");
 		if (err)
-			pr_err("diag: In %s, cannot open DCI port, Id = %d, err: %d\n",
+			pr_err("diag: In %s, cannot open DCI Modem CMD port, Id = %d, err: %d\n",
 				__func__, pdev->id, err);
 	}
 
@@ -1305,8 +1796,6 @@
 	driver->dci_tag = 0;
 	driver->dci_client_id = 0;
 	driver->num_dci_client = 0;
-	driver->dci_device = NULL;
-	driver->dci_cmd_device = NULL;
 	mutex_init(&driver->dci_mutex);
 	mutex_init(&dci_log_mask_mutex);
 	mutex_init(&dci_event_mask_mutex);
@@ -1328,19 +1817,17 @@
 				goto err;
 		}
 	}
+
 	if (driver->apps_dci_buf == NULL) {
 		driver->apps_dci_buf = kzalloc(APPS_BUF_SIZE, GFP_KERNEL);
 		if (driver->apps_dci_buf == NULL)
 			goto err;
 	}
-	if (driver->dci_client_tbl == NULL) {
-		driver->dci_client_tbl = kzalloc(MAX_DCI_CLIENTS *
-			sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
-		if (driver->dci_client_tbl == NULL)
-			goto err;
-	}
-	driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
+	INIT_LIST_HEAD(&driver->dci_client_list);
 	INIT_LIST_HEAD(&driver->dci_req_list);
+
+	driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
+	INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);
 	success = platform_driver_register(&msm_diag_dci_driver);
 	if (success) {
 		pr_err("diag: Could not register DCI driver\n");
@@ -1353,10 +1840,10 @@
 			goto err;
 		}
 	}
+	setup_timer(&dci_drain_timer, dci_drain_data, 0);
 	return DIAG_DCI_NO_ERROR;
 err:
 	pr_err("diag: Could not initialize diag DCI buffers");
-	kfree(driver->dci_client_tbl);
 	kfree(driver->apps_dci_buf);
 	for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
 		diag_smd_destructor(&driver->smd_dci[i]);
@@ -1383,21 +1870,12 @@
 
 	platform_driver_unregister(&msm_diag_dci_driver);
 
-	if (driver->dci_client_tbl) {
-		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-			kfree(driver->dci_client_tbl[i].dci_data);
-			mutex_destroy(&driver->dci_client_tbl[i].data_mutex);
-		}
-	}
-
 	if (driver->supports_separate_cmdrsp) {
 		for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
 			diag_smd_destructor(&driver->smd_dci_cmd[i]);
 
 		platform_driver_unregister(&msm_diag_dci_cmd_driver);
 	}
-
-	kfree(driver->dci_client_tbl);
 	kfree(driver->apps_dci_buf);
 	mutex_destroy(&driver->dci_mutex);
 	mutex_destroy(&dci_log_mask_mutex);
@@ -1408,153 +1886,483 @@
 
 int diag_dci_clear_log_mask()
 {
-	int i, j, k, err = DIAG_DCI_NO_ERROR;
+	int j, k, err = DIAG_DCI_NO_ERROR;
 	uint8_t *log_mask_ptr, *update_ptr;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
 
-	i = diag_dci_find_client_index(current->tgid);
-	if (i == DCI_CLIENT_INDEX_INVALID)
+	entry = diag_dci_get_client_entry();
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
 		return DIAG_DCI_TABLE_ERR;
+	}
 
 	mutex_lock(&dci_log_mask_mutex);
-	create_dci_log_mask_tbl(
-			driver->dci_client_tbl[i].dci_log_mask);
-	memset(dci_cumulative_log_mask,
-				0x0, DCI_LOG_MASK_SIZE);
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
+	create_dci_log_mask_tbl(entry->dci_log_mask);
+	memset(dci_cumulative_log_mask, 0x0, DCI_LOG_MASK_SIZE);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
 		update_ptr = dci_cumulative_log_mask;
-		if (driver->dci_client_tbl[i].client) {
-			log_mask_ptr =
-				driver->dci_client_tbl[i].dci_log_mask;
-			for (j = 0; j < 16; j++) {
-				*update_ptr = j;
-				*(update_ptr + 1) = 1;
-				update_ptr += 2;
-				log_mask_ptr += 2;
-				for (k = 0; k < 513; k++) {
-					*update_ptr |= *log_mask_ptr;
-					update_ptr++;
-					log_mask_ptr++;
-				}
+		log_mask_ptr = entry->dci_log_mask;
+		for (j = 0; j < 16; j++) {
+			*update_ptr = j;
+			*(update_ptr + 1) = 1;
+			update_ptr += 2;
+			log_mask_ptr += 2;
+			for (k = 0; k < 513; k++) {
+				*update_ptr |= *log_mask_ptr;
+				update_ptr++;
+				log_mask_ptr++;
 			}
 		}
 	}
 	mutex_unlock(&dci_log_mask_mutex);
-	err = diag_send_dci_log_mask(&driver->smd_cntl[MODEM_DATA]);
+	/* send updated mask to userspace clients */
+	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	/* Send updated mask to peripherals */
+	err = diag_send_dci_log_mask();
 	return err;
 }
 
 int diag_dci_clear_event_mask()
 {
-	int i, j, err = DIAG_DCI_NO_ERROR;
+	int j, err = DIAG_DCI_NO_ERROR;
 	uint8_t *event_mask_ptr, *update_ptr;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
 
-	i = diag_dci_find_client_index(current->tgid);
-	if (i == DCI_CLIENT_INDEX_INVALID)
+	entry = diag_dci_get_client_entry();
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
 		return DIAG_DCI_TABLE_ERR;
+	}
 
 	mutex_lock(&dci_event_mask_mutex);
-	memset(driver->dci_client_tbl[i].dci_event_mask,
-			0x0, DCI_EVENT_MASK_SIZE);
-	memset(dci_cumulative_event_mask,
-			0x0, DCI_EVENT_MASK_SIZE);
+	memset(entry->dci_event_mask, 0x0, DCI_EVENT_MASK_SIZE);
+	memset(dci_cumulative_event_mask, 0x0, DCI_EVENT_MASK_SIZE);
 	update_ptr = dci_cumulative_event_mask;
-	for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-		event_mask_ptr =
-			driver->dci_client_tbl[i].dci_event_mask;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		event_mask_ptr = entry->dci_event_mask;
 		for (j = 0; j < DCI_EVENT_MASK_SIZE; j++)
 			*(update_ptr + j) |= *(event_mask_ptr + j);
 	}
 	mutex_unlock(&dci_event_mask_mutex);
-	err = diag_send_dci_event_mask(&driver->smd_cntl[MODEM_DATA]);
+	/* send updated mask to userspace clients */
+	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	/* Send updated mask to peripherals */
+	err = diag_send_dci_event_mask();
 	return err;
 }
 
 int diag_dci_query_log_mask(uint16_t log_code)
 {
-	uint16_t item_num;
-	uint8_t equip_id, *log_mask_ptr, byte_mask;
-	int i, byte_index, offset;
-
-	equip_id = LOG_GET_EQUIP_ID(log_code);
-	item_num = LOG_GET_ITEM_NUM(log_code);
-	byte_index = item_num/8 + 2;
-	byte_mask = 0x01 << (item_num % 8);
-	offset = equip_id * 514;
-
-	i = diag_dci_find_client_index(current->tgid);
-	if (i != DCI_CLIENT_INDEX_INVALID) {
-		log_mask_ptr = driver->dci_client_tbl[i].dci_log_mask;
-		log_mask_ptr = log_mask_ptr + offset + byte_index;
-		return ((*log_mask_ptr & byte_mask) == byte_mask) ?
-								1 : 0;
-	}
-	return 0;
+	return __diag_dci_query_log_mask(diag_dci_get_client_entry(),
+					 log_code);
 }
 
-
 int diag_dci_query_event_mask(uint16_t event_id)
 {
-	uint8_t *event_mask_ptr, byte_mask;
-	int i, byte_index, bit_index;
-	byte_index = event_id/8;
-	bit_index = event_id % 8;
-	byte_mask = 0x1 << bit_index;
-
-	i = diag_dci_find_client_index(current->tgid);
-	if (i != DCI_CLIENT_INDEX_INVALID) {
-		event_mask_ptr =
-		driver->dci_client_tbl[i].dci_event_mask;
-		event_mask_ptr = event_mask_ptr + byte_index;
-		if ((*event_mask_ptr & byte_mask) == byte_mask)
-			return 1;
-		else
-			return 0;
-	}
-	return 0;
+	return __diag_dci_query_event_mask(diag_dci_get_client_entry(),
+					   event_id);
 }
 
 uint8_t diag_dci_get_cumulative_real_time()
 {
-	uint8_t real_time = MODE_NONREALTIME, i;
-	for (i = 0; i < MAX_DCI_CLIENTS; i++)
-		if (driver->dci_client_tbl[i].client &&
-				driver->dci_client_tbl[i].real_time ==
-				MODE_REALTIME) {
+	uint8_t real_time = MODE_NONREALTIME;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->real_time == MODE_REALTIME) {
 			real_time = 1;
 			break;
 		}
+	}
 	return real_time;
 }
 
-int diag_dci_set_real_time(int client_id, uint8_t real_time)
+int diag_dci_set_real_time(uint8_t real_time)
 {
-	int i = DCI_CLIENT_INDEX_INVALID;
-	i = diag_dci_find_client_index(client_id);
-
-	if (i != DCI_CLIENT_INDEX_INVALID)
-		driver->dci_client_tbl[i].real_time = real_time;
-	return i;
-}
-
-void diag_dci_try_activate_wakeup_source(smd_channel_t *channel)
-{
-	spin_lock_irqsave(&ws_lock, ws_lock_flags);
-	if (channel == driver->smd_dci[MODEM_DATA].ch) {
-		pm_wakeup_event(driver->dci_device, DCI_WAKEUP_TIMEOUT);
-		pm_stay_awake(driver->dci_device);
-	} else if (channel == driver->smd_dci_cmd[MODEM_DATA].ch) {
-		pm_wakeup_event(driver->dci_cmd_device, DCI_WAKEUP_TIMEOUT);
-		pm_stay_awake(driver->dci_cmd_device);
+	struct diag_dci_client_tbl *entry = NULL;
+	entry = diag_dci_get_client_entry();
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
 	}
+	entry->real_time = real_time;
+	return 1;
+}
+
+void diag_dci_try_activate_wakeup_source()
+{
+	spin_lock_irqsave(&ws_lock, ws_lock_flags);
+	pm_wakeup_event(driver->diag_dev, DCI_WAKEUP_TIMEOUT);
+	pm_stay_awake(driver->diag_dev);
 	spin_unlock_irqrestore(&ws_lock, ws_lock_flags);
 }
 
-void diag_dci_try_deactivate_wakeup_source(smd_channel_t *channel)
+void diag_dci_try_deactivate_wakeup_source()
 {
 	spin_lock_irqsave(&ws_lock, ws_lock_flags);
-	if (channel == driver->smd_dci[MODEM_DATA].ch)
-		pm_relax(driver->dci_device);
-	else if (channel == driver->smd_dci_cmd[MODEM_DATA].ch)
-		pm_relax(driver->dci_cmd_device);
+	pm_relax(driver->diag_dev);
 	spin_unlock_irqrestore(&ws_lock, ws_lock_flags);
 }
+
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
+{
+	int i, err = 0;
+	struct diag_dci_client_tbl *new_entry = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+
+	if (!reg_entry)
+		return DIAG_DCI_NO_REG;
+
+	if (driver->dci_state == DIAG_DCI_NO_REG)
+		return DIAG_DCI_NO_REG;
+
+	if (driver->num_dci_client >= MAX_DCI_CLIENTS)
+		return DIAG_DCI_NO_REG;
+
+	new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
+	if (new_entry == NULL) {
+		pr_err("diag: unable to alloc memory\n");
+		return DIAG_DCI_NO_REG;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	if (!(driver->num_dci_client)) {
+		for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
+			driver->smd_dci[i].in_busy_1 = 0;
+		if (driver->supports_separate_cmdrsp)
+			for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
+				driver->smd_dci_cmd[i].in_busy_1 = 0;
+	}
+
+	new_entry->client = current;
+	new_entry->client_info.notification_list =
+				reg_entry->notification_list;
+	new_entry->client_info.signal_type =
+				reg_entry->signal_type;
+	new_entry->real_time = MODE_REALTIME;
+	new_entry->in_service = 0;
+	INIT_LIST_HEAD(&new_entry->list_write_buf);
+	mutex_init(&new_entry->write_buf_mutex);
+	new_entry->dci_log_mask =  kzalloc(DCI_LOG_MASK_SIZE, GFP_KERNEL);
+	if (!new_entry->dci_log_mask) {
+		pr_err("diag: Unable to create log mask for client, %d",
+							driver->dci_client_id);
+		goto fail_alloc;
+	}
+	create_dci_log_mask_tbl(new_entry->dci_log_mask);
+
+	new_entry->dci_event_mask =  kzalloc(DCI_EVENT_MASK_SIZE, GFP_KERNEL);
+	if (!new_entry->dci_event_mask) {
+		pr_err("diag: Unable to create event mask for client, %d",
+							driver->dci_client_id);
+		goto fail_alloc;
+	}
+	create_dci_event_mask_tbl(new_entry->dci_event_mask);
+
+	for (i = 0; i < NUM_DCI_PROC; i++) {
+		proc_buf = &new_entry->buffers[i];
+		if (!proc_buf)
+			goto fail_alloc;
+
+		mutex_init(&proc_buf->health_mutex);
+		mutex_init(&proc_buf->buf_mutex);
+		proc_buf->health.dropped_events = 0;
+		proc_buf->health.dropped_logs = 0;
+		proc_buf->health.received_events = 0;
+		proc_buf->health.received_logs = 0;
+		proc_buf->buf_primary = kzalloc(
+					sizeof(struct diag_dci_buffer_t),
+					GFP_KERNEL);
+		if (!proc_buf->buf_primary)
+			goto fail_alloc;
+		proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
+					    GFP_KERNEL);
+		if (!proc_buf->buf_cmd)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_primary,
+					   DCI_BUF_PRIMARY);
+		if (err)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
+		if (err)
+			goto fail_alloc;
+		proc_buf->buf_curr = proc_buf->buf_primary;
+	}
+
+	list_add_tail(&new_entry->track, &driver->dci_client_list);
+	driver->dci_client_id++;
+	new_entry->client_info.client_id = driver->dci_client_id;
+	reg_entry->client_id = driver->dci_client_id;
+	driver->num_dci_client++;
+	if (driver->num_dci_client == 1)
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP);
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	mutex_unlock(&driver->dci_mutex);
+
+	return driver->dci_client_id;
+
+fail_alloc:
+	if (new_entry) {
+		for (i = 0; i < NUM_DCI_PROC; i++) {
+			proc_buf = &new_entry->buffers[i];
+			mutex_destroy(&proc_buf->health_mutex);
+			if (proc_buf->buf_primary) {
+				mutex_destroy(
+					&proc_buf->buf_primary->data_mutex);
+				kfree(proc_buf->buf_primary->data);
+			}
+			kfree(proc_buf->buf_primary);
+			if (proc_buf->buf_cmd) {
+				mutex_destroy(&proc_buf->buf_cmd->data_mutex);
+				kfree(proc_buf->buf_cmd->data);
+			}
+			kfree(proc_buf->buf_cmd);
+		}
+		kfree(new_entry->dci_event_mask);
+		kfree(new_entry->dci_log_mask);
+	}
+	kfree(new_entry);
+	mutex_unlock(&driver->dci_mutex);
+	return DIAG_DCI_NO_REG;
+}
+
+int diag_dci_deinit_client()
+{
+	int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	struct diag_dci_client_tbl *entry = diag_dci_get_client_entry();
+	struct diag_dci_buffer_t *buf_entry, *temp;
+	struct list_head *start, *req_temp;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	struct diag_smd_info *smd_info = NULL;
+
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	mutex_lock(&driver->dci_mutex);
+	/*
+	 * Remove the entry from the list before freeing the buffers
+	 * to ensure that we don't have any invalid access.
+	 */
+	list_del(&entry->track);
+	driver->num_dci_client--;
+	/*
+	 * Clear the client's log and event masks, update the cumulative
+	 * masks and send the masks to peripherals
+	 */
+	kfree(entry->dci_log_mask);
+	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	diag_dci_invalidate_cumulative_log_mask();
+	ret = diag_send_dci_log_mask();
+	if (ret != DIAG_DCI_NO_ERROR) {
+		mutex_unlock(&driver->dci_mutex);
+		return ret;
+	}
+	kfree(entry->dci_event_mask);
+	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	diag_dci_invalidate_cumulative_event_mask();
+	ret = diag_send_dci_event_mask();
+	if (ret != DIAG_DCI_NO_ERROR) {
+		mutex_unlock(&driver->dci_mutex);
+		return ret;
+	}
+
+	list_for_each_safe(start, req_temp, &driver->dci_req_list) {
+		req_entry = list_entry(start, struct dci_pkt_req_entry_t,
+				       track);
+		if (req_entry->pid == current->tgid) {
+			list_del(&req_entry->track);
+			kfree(req_entry);
+		}
+	}
+
+	/* Clean up any buffer that is pending write */
+	mutex_lock(&entry->write_buf_mutex);
+	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+							buf_track) {
+		list_del(&buf_entry->buf_track);
+		if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			kfree(buf_entry);
+		} else if (buf_entry->buf_type == DCI_BUF_CMD) {
+			peripheral = buf_entry->data_source;
+			if (peripheral == APPS_DATA)
+				continue;
+			mutex_lock(&buf_entry->data_mutex);
+			smd_info = driver->separate_cmdrsp[peripheral] ?
+					&driver->smd_dci_cmd[peripheral] :
+					&driver->smd_dci[peripheral];
+			smd_info->in_busy_1 = 0;
+			mutex_unlock(&buf_entry->data_mutex);
+		}
+		diag_dci_try_deactivate_wakeup_source();
+	}
+	mutex_unlock(&entry->write_buf_mutex);
+
+	for (i = 0; i < NUM_DCI_PROC; i++) {
+		proc_buf = &entry->buffers[i];
+		buf_entry = proc_buf->buf_curr;
+		mutex_lock(&proc_buf->buf_mutex);
+		/* Clean up secondary buffer from mempool that is active */
+		if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			mutex_destroy(&buf_entry->data_mutex);
+			kfree(buf_entry);
+		}
+
+		mutex_lock(&proc_buf->buf_primary->data_mutex);
+		kfree(proc_buf->buf_primary->data);
+		mutex_unlock(&proc_buf->buf_primary->data_mutex);
+
+		mutex_lock(&proc_buf->buf_cmd->data_mutex);
+		kfree(proc_buf->buf_cmd->data);
+		mutex_unlock(&proc_buf->buf_cmd->data_mutex);
+
+		mutex_destroy(&proc_buf->health_mutex);
+		mutex_destroy(&proc_buf->buf_primary->data_mutex);
+		mutex_destroy(&proc_buf->buf_cmd->data_mutex);
+
+		kfree(proc_buf->buf_primary);
+		kfree(proc_buf->buf_cmd);
+		mutex_unlock(&proc_buf->buf_mutex);
+	}
+	mutex_destroy(&entry->write_buf_mutex);
+
+	kfree(entry);
+
+	if (driver->num_dci_client == 0) {
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN);
+	} else {
+		real_time = diag_dci_get_cumulative_real_time();
+		diag_update_real_time_vote(DIAG_PROC_DCI, real_time);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+
+	mutex_unlock(&driver->dci_mutex);
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+int diag_dci_write_proc(int peripheral, int pkt_type, char *buf, int len)
+{
+	struct diag_smd_info *smd_info = NULL;
+	int wr_size = 0, retry = 0, err = -EAGAIN, timer = 0, i;
+
+	if (!buf || (peripheral < 0 || peripheral >= NUM_SMD_DCI_CHANNELS)
+								|| len < 0) {
+		pr_err("diag: In %s, invalid data 0x%p, peripheral: %d, len: %d\n",
+				__func__, buf, peripheral, len);
+		return -EINVAL;
+	}
+
+	if (pkt_type == DIAG_DATA_TYPE) {
+		for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
+			if (peripheral == i)
+				smd_info = &driver->smd_dci_cmd[peripheral];
+		/*
+		 * This peripheral doesn't support separate channel for
+		 * command response.
+		 */
+		if (!smd_info)
+			smd_info = &driver->smd_dci[peripheral];
+	} else if (pkt_type == DIAG_CNTL_TYPE) {
+		smd_info = &driver->smd_cntl[peripheral];
+	} else {
+		pr_err("diag: Invalid DCI pkt type in %s", __func__);
+		return -EINVAL;
+	}
+
+	if (!smd_info || !smd_info->ch)
+		return -EINVAL;
+
+	while (retry < 3) {
+		mutex_lock(&smd_info->smd_ch_mutex);
+		wr_size = smd_write(smd_info->ch, buf, len);
+		if (wr_size == len) {
+			pr_debug("diag: successfully wrote pkt_type %d of len %d to %d in trial %d\n",
+					pkt_type, len, peripheral, (retry+1));
+			err = DIAG_DCI_NO_ERROR;
+			mutex_unlock(&smd_info->smd_ch_mutex);
+			break;
+		}
+		pr_debug("diag: cannot write pkt_type %d of len %d to %d in trial %d\n",
+					pkt_type, len, peripheral, (retry+1));
+		retry++;
+		mutex_unlock(&smd_info->smd_ch_mutex);
+
+		/*
+		 * Sleep for some time before retrying. The 2000 microsecond
+		 * delay was determined empirically to be the best value to use.
+		 */
+		for (timer = 0; timer < 5; timer++)
+			usleep(2000);
+	}
+	return err;
+}
+
+int diag_dci_copy_health_stats(struct diag_dci_health_stats *stats, int proc)
+{
+	struct diag_dci_client_tbl *entry = NULL;
+	struct diag_dci_health_t *health = NULL;
+	int i;
+
+	if (!stats)
+		return -EINVAL;
+
+	if (proc < ALL_PROC || proc > APPS_DATA)
+		return -EINVAL;
+
+	entry = diag_dci_get_client_entry();
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	stats->stats.dropped_logs = 0;
+	stats->stats.dropped_events = 0;
+	stats->stats.received_logs = 0;
+	stats->stats.received_events = 0;
+
+	if (proc != ALL_PROC) {
+		health = &entry->buffers[proc].health;
+		stats->stats.dropped_logs = health->dropped_logs;
+		stats->stats.dropped_events = health->dropped_events;
+		stats->stats.received_logs = health->received_logs;
+		stats->stats.received_events = health->received_events;
+		if (stats->reset_status) {
+			mutex_lock(&entry->buffers[proc].health_mutex);
+			health->dropped_logs = 0;
+			health->dropped_events = 0;
+			health->received_logs = 0;
+			health->received_events = 0;
+			mutex_unlock(&entry->buffers[proc].health_mutex);
+		}
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	for (i = 0; i < NUM_DCI_PROC; i++) {
+		health = &entry->buffers[i].health;
+		stats->stats.dropped_logs += health->dropped_logs;
+		stats->stats.dropped_events += health->dropped_events;
+		stats->stats.received_logs += health->received_logs;
+		stats->stats.received_events += health->received_events;
+		if (stats->reset_status) {
+			mutex_lock(&entry->buffers[i].health_mutex);
+			health->dropped_logs = 0;
+			health->dropped_events = 0;
+			health->received_logs = 0;
+			health->received_events = 0;
+			mutex_unlock(&entry->buffers[i].health_mutex);
+		}
+	}
+	return DIAG_DCI_NO_ERROR;
+}
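
diag_dci_get_client_entry() above delegates to __diag_dci_get_client_entry(),
whose body is outside the hunks shown here. A minimal sketch of what that
lookup presumably does, assuming it simply walks driver->dci_client_list and
matches on the registered task's tgid (hypothetical reconstruction, not part
of this patch):

struct diag_dci_client_tbl *__diag_dci_get_client_entry(int tgid)
{
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	/* Walk the list of registered DCI clients looking for this tgid */
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->client && entry->client->tgid == tgid)
			return entry;
	}
	return NULL;
}
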
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index 870b0f3..ccd1a71 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -28,6 +28,13 @@
 #define DCI_LOG_CON_MIN_LEN		14
 #define DCI_EVENT_CON_MIN_LEN		16
 
+#define DIAG_DATA_TYPE		1
+#define DIAG_CNTL_TYPE		2
+
+#define DCI_BUF_PRIMARY		1
+#define DCI_BUF_SECONDARY	2
+#define DCI_BUF_CMD		3
+
 #ifdef CONFIG_DEBUG_FS
 #define DIAG_DCI_DEBUG_CNT	100
 #define DIAG_DCI_DEBUG_LEN	100
@@ -46,6 +53,8 @@
 
 extern unsigned int dci_max_reg;
 extern unsigned int dci_max_clients;
+extern unsigned char dci_cumulative_log_mask[DCI_LOG_MASK_SIZE];
+extern unsigned char dci_cumulative_event_mask[DCI_EVENT_MASK_SIZE];
 extern struct mutex dci_health_mutex;
 
 struct dci_pkt_req_entry_t {
@@ -55,34 +64,63 @@
 	struct list_head track;
 } __packed;
 
-struct diag_dci_client_tbl {
+struct diag_dci_reg_tbl_t {
 	uint32_t client_id;
-	struct task_struct *client;
-	uint16_t list; /* bit mask */
+	uint16_t notification_list;
 	int signal_type;
-	unsigned char dci_log_mask[DCI_LOG_MASK_SIZE];
-	unsigned char dci_event_mask[DCI_EVENT_MASK_SIZE];
-	unsigned char *dci_data;
-	int data_len;
-	int total_capacity;
-	int dropped_logs;
-	int dropped_events;
-	int received_logs;
-	int received_events;
-	struct mutex data_mutex;
-	uint8_t real_time;
 };
 
-/* This is used for DCI health stats */
-struct diag_dci_health_stats {
-	int client_id;
+struct diag_dci_health_t {
 	int dropped_logs;
 	int dropped_events;
 	int received_logs;
 	int received_events;
+};
+
+struct diag_dci_buffer_t {
+	unsigned char *data;
+	unsigned int data_len;
+	struct mutex data_mutex;
+	uint8_t in_busy;
+	uint8_t buf_type;
+	int data_source;
+	int capacity;
+	uint8_t in_list;
+	struct list_head buf_track;
+};
+
+struct diag_dci_buf_peripheral_t {
+	struct diag_dci_buffer_t *buf_curr;
+	struct diag_dci_buffer_t *buf_primary;
+	struct diag_dci_buffer_t *buf_cmd;
+	struct diag_dci_health_t health;
+	struct mutex health_mutex;
+	struct mutex buf_mutex;
+};
+
+struct diag_dci_client_tbl {
+	struct diag_dci_reg_tbl_t client_info;
+	struct task_struct *client;
+	unsigned char *dci_log_mask;
+	unsigned char *dci_event_mask;
+	uint8_t real_time;
+	struct list_head track;
+	struct diag_dci_buf_peripheral_t buffers[NUM_DCI_PROC];
+	uint8_t in_service;
+	struct list_head list_write_buf;
+	struct mutex write_buf_mutex;
+};
+
+struct diag_dci_health_stats {
+	struct diag_dci_health_t stats;
 	int reset_status;
 };
 
+struct diag_dci_health_stats_proc {
+	struct diag_dci_health_stats *health;
+	int proc;
+};
+
 /* This is used for querying DCI Log
    or Event Mask */
 struct diag_log_event_stats {
@@ -90,6 +128,14 @@
 	int is_set;
 };
 
+struct diag_dci_pkt_header_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t len;
+	uint8_t pkt_code;
+	int tag;
+} __packed;
+
 enum {
 	DIAG_DCI_NO_ERROR = 1001,	/* No error */
 	DIAG_DCI_NO_REG,		/* Could not register */
@@ -106,6 +152,7 @@
 	unsigned long iteration;
 	int data_size;
 	char time_stamp[DIAG_TS_SIZE];
+	uint8_t peripheral;
 	uint8_t ch_type;
 };
 
@@ -115,36 +162,42 @@
 
 int diag_dci_init(void);
 void diag_dci_exit(void);
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry);
+int diag_dci_deinit_client(void);
 void diag_update_smd_dci_work_fn(struct work_struct *);
 void diag_dci_notify_client(int peripheral_mask, int data);
+void diag_dci_wakeup_clients(void);
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes);
 int diag_process_smd_dci_read_data(struct diag_smd_info *smd_info, void *buf,
 								int recd_bytes);
 int diag_process_dci_transaction(unsigned char *buf, int len);
-void extract_dci_pkt_rsp(struct diag_smd_info *smd_info, unsigned char *buf);
-
-int diag_dci_find_client_index_health(int client_id);
-int diag_dci_find_client_index(int client_id);
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+			 struct diag_smd_info *smd_info);
+struct diag_dci_client_tbl *diag_dci_get_client_entry(void);
 /* DCI Log streaming functions */
 void create_dci_log_mask_tbl(unsigned char *tbl_buf);
 void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
 						uint8_t byte_mask);
-void clear_client_dci_cumulative_log_mask(int client_index);
-int diag_send_dci_log_mask(struct diag_smd_info *smd_info);
-void extract_dci_log(unsigned char *buf);
+void diag_dci_invalidate_cumulative_log_mask(void);
+int diag_send_dci_log_mask(void);
+void extract_dci_log(unsigned char *buf, int len, int data_source);
 int diag_dci_clear_log_mask(void);
 int diag_dci_query_log_mask(uint16_t log_code);
 /* DCI event streaming functions */
 void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask);
-void clear_client_dci_cumulative_event_mask(int client_index);
-int diag_send_dci_event_mask(struct diag_smd_info *smd_info);
-void extract_dci_events(unsigned char *buf);
+void diag_dci_invalidate_cumulative_event_mask(void);
+int diag_send_dci_event_mask(void);
+void extract_dci_events(unsigned char *buf, int len, int data_source);
 void create_dci_event_mask_tbl(unsigned char *tbl_buf);
 int diag_dci_clear_event_mask(void);
 int diag_dci_query_event_mask(uint16_t event_id);
-void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type);
+void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type,
+			      uint8_t peripheral);
 uint8_t diag_dci_get_cumulative_real_time(void);
-int diag_dci_set_real_time(int client_id, uint8_t real_time);
+int diag_dci_set_real_time(uint8_t real_time);
+int diag_dci_copy_health_stats(struct diag_dci_health_stats *stats, int proc);
 /* Functions related to DCI wakeup sources */
-void diag_dci_try_activate_wakeup_source(smd_channel_t *channel);
-void diag_dci_try_deactivate_wakeup_source(smd_channel_t *channel);
+void diag_dci_try_activate_wakeup_source(void);
+void diag_dci_try_deactivate_wakeup_source(void);
+int diag_dci_write_proc(int peripheral, int pkt_type, char *buf, int len);
 #endif
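
With registration reduced to a notification list and a signal type,
DIAG_IOCTL_DCI_REG now takes a struct diag_dci_reg_tbl_t and returns the
allocated client id (also written back into the structure). A minimal
userspace sketch, assuming the usual /dev/diag node and that the ioctl code
and structure definition come from the exported diag headers; the field
values are illustrative only:

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ioctl.h>
/* DIAG_IOCTL_DCI_REG and struct diag_dci_reg_tbl_t are assumed to come
 * from the exported diag userspace headers; they are not redefined here. */

int main(void)
{
	struct diag_dci_reg_tbl_t reg = {
		.client_id = 0,			/* filled in by the driver */
		.notification_list = 0x1,	/* peripheral bit mask (illustrative) */
		.signal_type = SIGCONT,		/* signal used to notify this client */
	};
	int fd = open("/dev/diag", O_RDWR);
	int client_id;

	if (fd < 0)
		return 1;
	client_id = ioctl(fd, DIAG_IOCTL_DCI_REG, &reg);
	printf("registered DCI client id %d\n", client_id);
	/* Tear down with DIAG_IOCTL_DCI_DEINIT when the client exits */
	return 0;
}
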
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index 3a1c96b..4dd0845 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -233,38 +233,30 @@
 		bytes_in_buf += bytes_written;
 		bytes_remaining -= bytes_written;
 #endif
-		if (driver->dci_device) {
-			bytes_written = scnprintf(buf+bytes_in_buf,
-						  bytes_remaining,
-				"dci power active, relax: %lu, %lu\n",
-				driver->dci_device->power.wakeup->active_count,
-				driver->dci_device->power.wakeup->relax_count);
-			bytes_in_buf += bytes_written;
-			bytes_remaining -= bytes_written;
-		}
-		if (driver->dci_cmd_device) {
-			bytes_written = scnprintf(buf+bytes_in_buf,
-						  bytes_remaining,
-				"dci cmd power active, relax: %lu, %lu\n",
-				driver->dci_cmd_device->power.wakeup->
-						  active_count,
-				driver->dci_cmd_device->power.wakeup->
-						  relax_count);
-			bytes_in_buf += bytes_written;
-			bytes_remaining -= bytes_written;
-		}
+		bytes_written = scnprintf(buf+bytes_in_buf,
+					  bytes_remaining,
+					  "dci power: active, relax: %lu, %lu\n",
+					  driver->diag_dev->power.wakeup->
+						active_count,
+					  driver->diag_dev->power.wakeup->
+						relax_count);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+
 	}
 	temp_data += diag_dbgfs_dci_data_index;
 	for (i = diag_dbgfs_dci_data_index; i < DIAG_DCI_DEBUG_CNT; i++) {
 		if (temp_data->iteration != 0) {
 			bytes_written = scnprintf(
 				buf + bytes_in_buf, bytes_remaining,
-				"i %-10ld\t"
-				"s %-10d\t"
-				"c %-10d\t"
+				"i %-5ld\t"
+				"s %-5d\t"
+				"p %-5d\t"
+				"c %-5d\t"
 				"t %-15s\n",
 				temp_data->iteration,
 				temp_data->data_size,
+				temp_data->peripheral,
 				temp_data->ch_type,
 				temp_data->time_stamp);
 			bytes_in_buf += bytes_written;
@@ -446,7 +438,8 @@
 		"POOL_TYPE_COPY: [0x%p : 0x%p] count = %d\n"
 		"POOL_TYPE_HDLC: [0x%p : 0x%p] count = %d\n"
 		"POOL_TYPE_USER: [0x%p : 0x%p] count = %d\n"
-		"POOL_TYPE_WRITE_STRUCT: [0x%p : 0x%p] count = %d\n",
+		"POOL_TYPE_WRITE_STRUCT: [0x%p : 0x%p] count = %d\n"
+		"POOL_TYPE_DCI: [0x%p : 0x%p] count = %d\n",
 		driver->diagpool,
 		diag_pools_array[POOL_COPY_IDX],
 		driver->count,
@@ -458,7 +451,10 @@
 		driver->count_user_pool,
 		driver->diag_write_struct_pool,
 		diag_pools_array[POOL_WRITE_STRUCT_IDX],
-		driver->count_write_struct_pool);
+		driver->count_write_struct_pool,
+		driver->diag_dci_pool,
+		diag_pools_array[POOL_DCI_IDX],
+		driver->count_dci_pool);
 
 	for (i = 0; i < MAX_HSIC_CH; i++) {
 		if (!diag_hsic[i].hsic_inited)
@@ -506,7 +502,8 @@
 		"POOL_TYPE_COPY: [0x%p : 0x%p] count = %d\n"
 		"POOL_TYPE_HDLC: [0x%p : 0x%p] count = %d\n"
 		"POOL_TYPE_USER: [0x%p : 0x%p] count = %d\n"
-		"POOL_TYPE_WRITE_STRUCT: [0x%p : 0x%p] count = %d\n",
+		"POOL_TYPE_WRITE_STRUCT: [0x%p : 0x%p] count = %d\n"
+		"POOL_TYPE_DCI: [0x%p : 0x%p] count = %d\n",
 		driver->diagpool,
 		diag_pools_array[POOL_COPY_IDX],
 		driver->count,
@@ -518,7 +515,10 @@
 		driver->count_user_pool,
 		driver->diag_write_struct_pool,
 		diag_pools_array[POOL_WRITE_STRUCT_IDX],
-		driver->count_write_struct_pool);
+		driver->count_write_struct_pool,
+		driver->diag_dci_pool,
+		diag_pools_array[POOL_DCI_IDX],
+		driver->count_dci_pool);
 
 	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
 
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 95d90b3..eda745f 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,13 +17,13 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/mutex.h>
+#include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
 #include <linux/wakelock.h>
 #include <mach/msm_smd.h>
 #include <asm/atomic.h>
-#include <asm/mach-types.h>
 
 /* Size of the USB buffers used for read and write*/
 #define USB_MAX_OUT_BUF 4096
@@ -44,28 +44,32 @@
 #define POOL_TYPE_HSIC_WRITE	11
 #define POOL_TYPE_HSIC_2_WRITE	12
 #define POOL_TYPE_ALL		10
+#define POOL_TYPE_DCI		20
 
 #define POOL_COPY_IDX		0
 #define POOL_HDLC_IDX		1
 #define POOL_USER_IDX		2
 #define POOL_WRITE_STRUCT_IDX	3
-#define POOL_HSIC_IDX		4
-#define POOL_HSIC_2_IDX		5
-#define POOL_HSIC_3_IDX		6
-#define POOL_HSIC_4_IDX		7
-#define POOL_HSIC_WRITE_IDX	8
-#define POOL_HSIC_2_WRITE_IDX	9
-#define POOL_HSIC_3_WRITE_IDX	10
-#define POOL_HSIC_4_WRITE_IDX	11
+#define POOL_DCI_IDX		4
+#define POOL_BRIDGE_BASE	POOL_DCI_IDX
+#define POOL_HSIC_IDX		(POOL_BRIDGE_BASE + 1)
+#define POOL_HSIC_2_IDX		(POOL_BRIDGE_BASE + 2)
+#define POOL_HSIC_3_IDX		(POOL_BRIDGE_BASE + 3)
+#define POOL_HSIC_4_IDX		(POOL_BRIDGE_BASE + 4)
+#define POOL_HSIC_WRITE_IDX	(POOL_BRIDGE_BASE + 5)
+#define POOL_HSIC_2_WRITE_IDX	(POOL_BRIDGE_BASE + 6)
+#define POOL_HSIC_3_WRITE_IDX	(POOL_BRIDGE_BASE + 7)
+#define POOL_HSIC_4_WRITE_IDX	(POOL_BRIDGE_BASE + 8)
 
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
-#define NUM_MEMORY_POOLS	12
+#define NUM_MEMORY_POOLS	13
 #else
-#define NUM_MEMORY_POOLS	4
+#define NUM_MEMORY_POOLS	5
 #endif
 
 #define MAX_SSID_PER_RANGE	200
 
+#define ALL_PROC		-1
 #define MODEM_DATA		0
 #define LPASS_DATA		1
 #define WCNSS_DATA		2
@@ -107,10 +111,26 @@
 #define DIAG_STM_WCNSS	0x04
 #define DIAG_STM_APPS	0x08
 
-#define DIAG_DIAG_STM		0x214
-
 #define BAD_PARAM_RESPONSE_MESSAGE 20
 
+#define DIAG_CMD_VERSION	0
+#define DIAG_CMD_DOWNLOAD	0x3A
+#define DIAG_CMD_DIAG_SUBSYS	0x4B
+#define DIAG_CMD_LOG_ON_DMND	0x78
+#define DIAG_CMD_EXT_BUILD	0x7c
+
+#define DIAG_SS_DIAG		0x12
+#define DIAG_SS_PARAMS		0x32
+
+#define DIAG_DIAG_MAX_PKT_SZ	0x55
+#define DIAG_DIAG_STM		0x20E
+#define DIAG_DIAG_POLL		0x03
+#define DIAG_DEL_RSP_WRAP	0x04
+#define DIAG_DEL_RSP_WRAP_CNT	0x05
+
+#define MODE_CMD	41
+#define RESET_ID	2
+
 /*
  * The status bit masks when received in a signal handler are to be
  * used in conjunction with the peripheral list bit mask to determine the
@@ -129,6 +149,13 @@
 #define NUM_SMD_CMD_CHANNELS 1
 #define NUM_SMD_DCI_CMD_CHANNELS 1
 
+/*
+ * Number of processors that can support DCI: the SMD data peripherals
+ * plus the Apps processor. This does not imply that every peripheral
+ * actually supports the DCI feature.
+ */
+#define NUM_DCI_PROC	(NUM_SMD_DATA_CHANNELS + 1)
+
 #define SMD_DATA_TYPE 0
 #define SMD_CNTL_TYPE 1
 #define SMD_DCI_TYPE 2
@@ -165,6 +192,12 @@
 	QSC = 5,
 };
 
+struct diag_pkt_header_t {
+	uint8_t cmd_code;
+	uint8_t subsys_id;
+	uint16_t subsys_cmd_code;
+} __packed;
+
 struct diag_master_table {
 	uint16_t cmd_code;
 	uint16_t subsys_id;
@@ -286,6 +319,7 @@
 	char *name;
 	int dropped_count;
 	struct class *diagchar_class;
+	struct device *diag_dev;
 	int ref_count;
 	struct mutex diagchar_mutex;
 	wait_queue_head_t wait_q;
@@ -307,7 +341,7 @@
 	int peripheral_supports_stm[NUM_SMD_CONTROL_CHANNELS];
 	/* DCI related variables */
 	struct list_head dci_req_list;
-	struct diag_dci_client_tbl *dci_client_tbl;
+	struct list_head dci_client_list;
 	int dci_tag;
 	int dci_client_id;
 	struct mutex dci_mutex;
@@ -324,17 +358,21 @@
 	unsigned int poolsize_user;
 	unsigned int itemsize_write_struct;
 	unsigned int poolsize_write_struct;
+	unsigned int itemsize_dci;
+	unsigned int poolsize_dci;
 	unsigned int debug_flag;
 	/* State for the mempool for the char driver */
 	mempool_t *diagpool;
 	mempool_t *diag_hdlc_pool;
 	mempool_t *diag_user_pool;
 	mempool_t *diag_write_struct_pool;
+	mempool_t *diag_dci_pool;
 	spinlock_t diag_mem_lock;
 	int count;
 	int count_hdlc_pool;
 	int count_user_pool;
 	int count_write_struct_pool;
+	int count_dci_pool;
 	int used;
 	/* Buffers for masks */
 	struct mutex diag_cntl_mutex;
@@ -365,8 +403,6 @@
 	unsigned hdlc_count;
 	unsigned hdlc_escape;
 	int in_busy_pktdata;
-	struct device *dci_device;
-	struct device *dci_cmd_device;
 	/* Variables for non real time mode */
 	int real_time_mode;
 	int real_time_update_busy;
@@ -397,6 +433,9 @@
 	struct diag_master_table *table;
 	uint8_t *pkt_buf;
 	int pkt_length;
+	uint8_t *dci_pkt_buf; /* For Apps DCI packets */
+	uint32_t dci_pkt_length;
+	int in_busy_dcipktdata;
 	struct diag_request *usb_read_ptr;
 	struct diag_request *write_ptr_svc;
 	int logging_mode;
@@ -441,5 +480,6 @@
 
 void diag_get_timestamp(char *time_str);
 int diag_find_polling_reg(int i);
+void check_drain_timer(void);
 
 #endif
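
Since POOL_DCI_IDX is now slotted in ahead of the bridge pools, the HSIC
indices are derived from POOL_BRIDGE_BASE and NUM_MEMORY_POOLS grows by one
in both configurations. A purely illustrative compile-time check, which could
be dropped into an init path such as diag_dci_init(), keeps the index layout
and the pool count in agreement (not part of this patch):

	/* Highest pool index must be one less than the pool count */
#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
	BUILD_BUG_ON(POOL_HSIC_4_WRITE_IDX + 1 != NUM_MEMORY_POOLS);
#else
	BUILD_BUG_ON(POOL_DCI_IDX + 1 != NUM_MEMORY_POOLS);
#endif
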
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 93e932a..263715d 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -71,6 +71,9 @@
 /* for write structure buffer */
 static unsigned int itemsize_write_struct = 20; /*Size of item in the mempool */
 static unsigned int poolsize_write_struct = 10;/* Num of items in the mempool */
+/* For the dci memory pool */
+static unsigned int itemsize_dci = IN_BUF_SIZE; /*Size of item in the mempool */
+static unsigned int poolsize_dci = 10;  /*Number of items in the mempool */
 /* This is the max number of user-space clients supported at initialization*/
 static unsigned int max_clients = 15;
 static unsigned int threshold_client_limit = 30;
@@ -144,6 +147,16 @@
 	mutex_unlock(&driver->diagchar_mutex);
 }
 
+void check_drain_timer(void)
+{
+	int ret = 0;
+
+	if (!timer_in_progress) {
+		timer_in_progress = 1;
+		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
+	}
+}
+
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 void diag_clear_hsic_tbl(void)
 {
@@ -266,9 +279,7 @@
 	* This will specially help in case of ungraceful exit of any DCI client
 	* This call will remove any pending registrations of such client
 	*/
-	if (diag_dci_find_client_index(current->tgid) !=
-		 DCI_CLIENT_INDEX_INVALID)
-		diagchar_ioctl(NULL, DIAG_IOCTL_DCI_DEINIT, 0);
+	diag_dci_deinit_client();
 	/* If the exiting process is the socket process */
 	mutex_lock(&driver->diagchar_mutex);
 	if (driver->socket_process &&
@@ -303,11 +314,12 @@
 
 	mutex_lock(&driver->diagchar_mutex);
 	driver->ref_count--;
-	/* On Client exit, try to destroy all 4 pools */
+	/* On Client exit, try to destroy all 5 pools */
 	diagmem_exit(driver, POOL_TYPE_COPY);
 	diagmem_exit(driver, POOL_TYPE_HDLC);
 	diagmem_exit(driver, POOL_TYPE_USER);
 	diagmem_exit(driver, POOL_TYPE_WRITE_STRUCT);
+	diagmem_exit(driver, POOL_TYPE_DCI);
 	for (i = 0; i < driver->num_clients; i++) {
 		if (NULL != diagpriv_data && diagpriv_data->pid ==
 						driver->client_map[i].pid) {
@@ -552,6 +564,87 @@
 			    int *pnum_data) { return 0; }
 #endif
 
+static int diag_copy_dci(char __user *buf, size_t count,
+			struct diag_dci_client_tbl *entry, int *pret)
+{
+	int total_data_len = 0;
+	int ret = 0;
+	int exit_stat = 1;
+	struct diag_dci_buffer_t *buf_entry, *temp;
+	struct diag_smd_info *smd_info = NULL;
+
+	if (!buf || !entry || !pret)
+		return exit_stat;
+
+	ret = *pret;
+
+	/* reserve 4 bytes after the data_type for the total data length */
+	ret += 4;
+
+	mutex_lock(&entry->write_buf_mutex);
+	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+								buf_track) {
+		list_del(&buf_entry->buf_track);
+		mutex_lock(&buf_entry->data_mutex);
+		if ((buf_entry->data_len > 0) &&
+		    (buf_entry->in_busy) &&
+		    (buf_entry->data)) {
+			if (copy_to_user(buf+ret, (void *)buf_entry->data,
+					 buf_entry->data_len))
+				goto drop;
+			ret += buf_entry->data_len;
+			total_data_len += buf_entry->data_len;
+drop:
+			buf_entry->in_busy = 0;
+			buf_entry->data_len = 0;
+			buf_entry->in_list = 0;
+			if (buf_entry->buf_type == DCI_BUF_CMD) {
+				if (buf_entry->data_source == APPS_DATA) {
+					mutex_unlock(&buf_entry->data_mutex);
+					continue;
+				}
+				if (driver->separate_cmdrsp[
+						buf_entry->data_source]) {
+					smd_info = &driver->smd_dci_cmd[
+						buf_entry->data_source];
+				} else {
+					smd_info = &driver->smd_dci[
+						buf_entry->data_source];
+				}
+				smd_info->in_busy_1 = 0;
+				mutex_unlock(&buf_entry->data_mutex);
+				continue;
+			} else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+				diagmem_free(driver, buf_entry->data,
+					     POOL_TYPE_DCI);
+				buf_entry->data = NULL;
+				mutex_unlock(&buf_entry->data_mutex);
+				kfree(buf_entry);
+				continue;
+			}
+
+		}
+		mutex_unlock(&buf_entry->data_mutex);
+	}
+
+	if (total_data_len > 0) {
+		/* Copy the total data length */
+		COPY_USER_SPACE_OR_EXIT(buf+4, total_data_len, 4);
+		ret -= 4;
+	} else {
+		pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
+			__func__, total_data_len);
+	}
+
+	entry->in_service = 0;
+	mutex_unlock(&entry->write_buf_mutex);
+
+	exit_stat = 0;
+exit:
+	*pret = ret;
+
+	return exit_stat;
+}
+
 int diag_command_reg(unsigned long ioarg)
 {
 	int i = 0, success = -EINVAL, j;
@@ -877,13 +970,11 @@
 	int i, result = -EINVAL, interim_size = 0, client_id = 0, real_time = 0;
 	int retry_count = 0, timer = 0;
 	uint16_t support_list = 0, interim_rsp_id, remote_dev;
-	struct diag_dci_client_tbl *dci_params;
-	struct diag_dci_health_stats stats;
+	struct diag_dci_reg_tbl_t *dci_reg_params;
+	struct diag_dci_health_stats_proc stats;
 	struct diag_log_event_stats le_stats;
 	struct diagpkt_delay_params delay_params;
 	struct real_time_vote_t rt_vote;
-	struct list_head *start, *req_temp;
-	struct dci_pkt_req_entry_t *req_entry = NULL;
 
 	switch (iocmd) {
 	case DIAG_IOCTL_COMMAND_REG:
@@ -909,127 +1000,25 @@
 		}
 		break;
 	case DIAG_IOCTL_DCI_REG:
-		if (driver->dci_state == DIAG_DCI_NO_REG)
-			return DIAG_DCI_NO_REG;
-		if (driver->num_dci_client >= MAX_DCI_CLIENTS)
-			return DIAG_DCI_NO_REG;
-		dci_params = kzalloc(sizeof(struct diag_dci_client_tbl),
+		dci_reg_params = kzalloc(sizeof(struct diag_dci_reg_tbl_t),
 								 GFP_KERNEL);
-		if (dci_params == NULL) {
+		if (dci_reg_params == NULL) {
 			pr_err("diag: unable to alloc memory\n");
 			return -ENOMEM;
 		}
-		if (copy_from_user(dci_params, (void *)ioarg,
-				 sizeof(struct diag_dci_client_tbl))) {
-			kfree(dci_params);
+		if (copy_from_user(dci_reg_params, (void *)ioarg,
+				 sizeof(struct diag_dci_reg_tbl_t))) {
+			kfree(dci_reg_params);
 			return -EFAULT;
 		}
-		mutex_lock(&driver->dci_mutex);
-		if (!(driver->num_dci_client)) {
-			for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
-				driver->smd_dci[i].in_busy_1 = 0;
-			if (driver->supports_separate_cmdrsp)
-				for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
-					driver->smd_dci_cmd[i].in_busy_1 = 0;
-		}
-		driver->num_dci_client++;
-		if (driver->num_dci_client == 1)
-			diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP);
-		queue_work(driver->diag_real_time_wq,
-				 &driver->diag_real_time_work);
-		pr_debug("diag: In %s, id = %d\n",
-				__func__, driver->dci_client_id);
-		driver->dci_client_id++;
-		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-			if (driver->dci_client_tbl[i].client == NULL) {
-				driver->dci_client_tbl[i].client = current;
-				driver->dci_client_tbl[i].client_id =
-							driver->dci_client_id;
-				driver->dci_client_tbl[i].list =
-							 dci_params->list;
-				driver->dci_client_tbl[i].signal_type =
-					 dci_params->signal_type;
-				create_dci_log_mask_tbl(driver->
-					dci_client_tbl[i].dci_log_mask);
-				create_dci_event_mask_tbl(driver->
-					dci_client_tbl[i].dci_event_mask);
-				driver->dci_client_tbl[i].data_len = 0;
-				driver->dci_client_tbl[i].dci_data =
-					 kzalloc(IN_BUF_SIZE, GFP_KERNEL);
-				driver->dci_client_tbl[i].total_capacity =
-								 IN_BUF_SIZE;
-				driver->dci_client_tbl[i].dropped_logs = 0;
-				driver->dci_client_tbl[i].dropped_events = 0;
-				driver->dci_client_tbl[i].received_logs = 0;
-				driver->dci_client_tbl[i].received_events = 0;
-				driver->dci_client_tbl[i].real_time = 1;
-				mutex_init(&driver->dci_client_tbl[i].
-								data_mutex);
-				break;
-			}
-		}
-		kfree(dci_params);
-		mutex_unlock(&driver->dci_mutex);
-		result = driver->dci_client_id;
+		result = diag_dci_register_client(dci_reg_params);
+		kfree(dci_reg_params);
 		break;
 	case DIAG_IOCTL_DCI_DEINIT:
-		result = -EIO;
-		/* Delete this process from DCI table */
-		mutex_lock(&driver->dci_mutex);
-		i = diag_dci_find_client_index(current->tgid);
-		if (i == DCI_CLIENT_INDEX_INVALID) {
-			result = DIAG_DCI_NOT_SUPPORTED;
-		} else {
-			/* clear respective cumulative log masks */
-			clear_client_dci_cumulative_log_mask(i);
-			/* send updated log mask to peripherals */
-			result =
-			diag_send_dci_log_mask(&driver->smd_cntl[MODEM_DATA]);
-			if (result != DIAG_DCI_NO_ERROR) {
-				mutex_unlock(&driver->dci_mutex);
-				return result;
-			}
-			/* clear respective cumulative event masks */
-			clear_client_dci_cumulative_event_mask(i);
-			/* send updated event mask to peripherals */
-			result =
-			diag_send_dci_event_mask(
-				&driver->smd_cntl[MODEM_DATA]);
-			if (result != DIAG_DCI_NO_ERROR) {
-				mutex_unlock(&driver->dci_mutex);
-				return result;
-			}
-			result = i;
-			/* Delete this process from DCI table */
-			list_for_each_safe(start, req_temp,
-							&driver->dci_req_list) {
-				req_entry = list_entry(start,
-						struct dci_pkt_req_entry_t,
-						track);
-				if (req_entry->pid == current->tgid) {
-					list_del(&req_entry->track);
-					kfree(req_entry);
-				}
-			}
-			driver->dci_client_tbl[result].client = NULL;
-			kfree(driver->dci_client_tbl[result].dci_data);
-			driver->dci_client_tbl[result].dci_data = NULL;
-			mutex_destroy(&driver->dci_client_tbl[result].
-								data_mutex);
-			driver->num_dci_client--;
-			if (driver->num_dci_client == 0) {
-				diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN);
-			} else {
-				real_time = diag_dci_get_cumulative_real_time();
-				diag_update_real_time_vote(DIAG_PROC_DCI,
-								real_time);
-			}
-			queue_work(driver->diag_real_time_wq,
-				   &driver->diag_real_time_work);
-		}
-		mutex_unlock(&driver->dci_mutex);
+		result = diag_dci_deinit_client();
 		break;
 	case DIAG_IOCTL_DCI_SUPPORT:
+		support_list |= DIAG_CON_APSS;
 		for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
 			if (driver->smd_dci[i].ch)
 				support_list |=
@@ -1042,29 +1031,16 @@
 		break;
 	case DIAG_IOCTL_DCI_HEALTH_STATS:
 		if (copy_from_user(&stats, (void *)ioarg,
-				 sizeof(struct diag_dci_health_stats)))
+				 sizeof(struct diag_dci_health_stats_proc)))
 			return -EFAULT;
-		mutex_lock(&dci_health_mutex);
-		i = diag_dci_find_client_index_health(stats.client_id);
-		if (i != DCI_CLIENT_INDEX_INVALID) {
-			dci_params = &(driver->dci_client_tbl[i]);
-			stats.dropped_logs = dci_params->dropped_logs;
-			stats.dropped_events =
-					dci_params->dropped_events;
-			stats.received_logs =
-					dci_params->received_logs;
-			stats.received_events =
-					dci_params->received_events;
-			if (stats.reset_status) {
-				dci_params->dropped_logs = 0;
-				dci_params->dropped_events = 0;
-				dci_params->received_logs = 0;
-				dci_params->received_events = 0;
-			}
-		}
-		mutex_unlock(&dci_health_mutex);
+
+		result = diag_dci_copy_health_stats(stats.health,
+						    stats.proc);
+		if (result != DIAG_DCI_NO_ERROR)
+			break;
+
 		if (copy_to_user((void *)ioarg, &stats,
-				   sizeof(struct diag_dci_health_stats)))
+				   sizeof(struct diag_dci_health_stats_proc)))
 			return -EFAULT;
 		result = DIAG_DCI_NO_ERROR;
 		break;
@@ -1126,8 +1102,7 @@
 			return -EFAULT;
 		driver->real_time_update_busy++;
 		if (rt_vote.proc == DIAG_PROC_DCI) {
-			diag_dci_set_real_time(current->tgid,
-						rt_vote.real_time_vote);
+			diag_dci_set_real_time(rt_vote.real_time_vote);
 			real_time = diag_dci_get_cumulative_real_time();
 		} else {
 			real_time = rt_vote.real_time_vote;
@@ -1181,7 +1156,10 @@
 		pr_err("diag: Client PID not found in table");
 		return -EINVAL;
 	}
-
+	if (!buf) {
+		pr_err("diag: bad address from user side\n");
+		return -EFAULT;
+	}
 	wait_event_interruptible(driver->wait_q, driver->data_ready[index]);
 
 	mutex_lock(&driver->diagchar_mutex);
@@ -1389,32 +1367,55 @@
 		goto exit;
 	}
 
+	if (driver->data_ready[index] & DCI_PKT_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & DCI_PKT_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf),
+					driver->dci_pkt_length);
+		driver->data_ready[index] ^= DCI_PKT_TYPE;
+		driver->in_busy_dcipktdata = 0;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+8, *(dci_cumulative_event_mask),
+							DCI_EVENT_MASK_SIZE);
+		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
+		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+4, driver->num_dci_client, 4);
+		COPY_USER_SPACE_OR_EXIT(buf+8, *(dci_cumulative_log_mask),
+							DCI_LOG_MASK_SIZE);
+		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+		goto exit;
+	}
+
 	if (driver->data_ready[index] & DCI_DATA_TYPE) {
 		/* Copy the type of data being passed */
 		data_type = driver->data_ready[index] & DCI_DATA_TYPE;
-		COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
-		/* check the current client and copy its data */
-		for (i = 0; i < MAX_DCI_CLIENTS; i++) {
-			entry = &(driver->dci_client_tbl[i]);
-			if (entry && entry->client) {
-				if (current->tgid == entry->client->tgid) {
-					mutex_lock(&entry->data_mutex);
-					COPY_USER_SPACE_OR_EXIT(buf+4,
-							entry->data_len, 4);
-					COPY_USER_SPACE_OR_EXIT(buf+8,
-					*(entry->dci_data), entry->data_len);
-					entry->data_len = 0;
-					mutex_unlock(&entry->data_mutex);
-					break;
-				}
-			}
-		}
 		driver->data_ready[index] ^= DCI_DATA_TYPE;
+		/* check the current client and copy its data */
+		entry = diag_dci_get_client_entry();
+		if (entry) {
+			if (!entry->in_service)
+				goto exit;
+			COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
+			exit_stat = diag_copy_dci(buf, count, entry, &ret);
+			if (exit_stat == 1)
+				goto exit;
+		}
 		for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
-			driver->smd_dci[i].in_busy_1 = 0;
 			if (driver->smd_dci[i].ch) {
-				diag_dci_try_deactivate_wakeup_source(
-						driver->smd_dci[i].ch);
 				queue_work(driver->diag_dci_wq,
 				&(driver->smd_dci[i].diag_read_smd_work));
 			}
@@ -1423,10 +1424,7 @@
 			for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++) {
 				if (!driver->separate_cmdrsp[i])
 					continue;
-				driver->smd_dci_cmd[i].in_busy_1 = 0;
 				if (driver->smd_dci_cmd[i].ch) {
-					diag_dci_try_deactivate_wakeup_source(
-						driver->smd_dci_cmd[i].ch);
 					queue_work(driver->diag_dci_wq,
 						&(driver->smd_dci_cmd[i].
 							diag_read_smd_work));
@@ -1449,7 +1447,7 @@
 				size_t count, loff_t *ppos)
 {
 	int err, ret = 0, pkt_type, token_offset = 0;
-	int remote_proc = 0;
+	int remote_proc = 0, data_type;
 	uint8_t index;
 #ifdef DIAG_DEBUG
 	int length = 0, i;
@@ -1480,9 +1478,11 @@
 		return -EBADMSG;
 	}
 #ifdef CONFIG_DIAG_OVER_USB
-	if (((pkt_type != DCI_DATA_TYPE) && (driver->logging_mode == USB_MODE)
-				&& (!driver->usb_connected)) ||
-				(driver->logging_mode == NO_LOGGING_MODE)) {
+	if (driver->logging_mode == NO_LOGGING_MODE ||
+	    (!((pkt_type == DCI_DATA_TYPE) ||
+	       ((pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) == 0))
+		&& (driver->logging_mode == USB_MODE) &&
+		(!driver->usb_connected))) {
 		/*Drop the diag payload */
 		return -EIO;
 	}
@@ -1541,9 +1541,9 @@
 		}
 		/* The packet is for the remote processor */
 		if (payload_size <= MIN_SIZ_ALLOW) {
-				pr_err("diag: Integer underflow in %s, payload size: %d",
-							__func__, payload_size);
-		return -EBADMSG;
+			pr_err("diag: Integer underflow in %s, payload size: %d",
+					__func__, payload_size);
+			return -EBADMSG;
 		}
 		token_offset = 4;
 		payload_size -= 4;
@@ -1761,20 +1761,47 @@
 		return -EBADMSG;
 	}
 
-	mutex_lock(&driver->diagchar_mutex);
 	buf_copy = diagmem_alloc(driver, payload_size, POOL_TYPE_COPY);
 	if (!buf_copy) {
 		driver->dropped_count++;
-		mutex_unlock(&driver->diagchar_mutex);
 		return -ENOMEM;
 	}
 
 	err = copy_from_user(buf_copy, buf + 4, payload_size);
 	if (err) {
 		printk(KERN_INFO "diagchar : copy_from_user failed\n");
-		ret = -EFAULT;
-		goto fail_free_copy;
+		diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+		buf_copy = NULL;
+		return -EFAULT;
 	}
+
+	data_type = pkt_type &
+		    (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT | DCI_PKT_TYPE);
+	if (data_type) {
+		diag_process_apps_dci_read_data(data_type, buf_copy,
+						payload_size);
+		if (pkt_type & DATA_TYPE_DCI_LOG)
+			pkt_type ^= DATA_TYPE_DCI_LOG;
+		else if (pkt_type & DATA_TYPE_DCI_EVENT) {
+			pkt_type ^= DATA_TYPE_DCI_EVENT;
+		} else {
+			pkt_type ^= DCI_PKT_TYPE;
+			diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+			return 0;
+		}
+
+		/*
+		 * If the data is not headed for normal processing or the usb
+		 * is unplugged and we are in usb mode
+		 */
+		if ((pkt_type != DATA_TYPE_LOG && pkt_type != DATA_TYPE_EVENT)
+			|| ((driver->logging_mode == USB_MODE) &&
+			(!driver->usb_connected))) {
+			diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
+			return 0;
+		}
+	}
+
 	if (driver->stm_state[APPS_DATA] &&
 		(pkt_type >= DATA_TYPE_EVENT && pkt_type <= DATA_TYPE_LOG)) {
 		int stm_size = 0;
@@ -1809,6 +1836,7 @@
 			length++;
 	}
 #endif
+	mutex_lock(&driver->diagchar_mutex);
 	if (!buf_hdlc)
 		buf_hdlc = diagmem_alloc(driver, HDLC_OUT_BUF_SIZE,
 						 POOL_TYPE_HDLC);
@@ -1882,10 +1910,9 @@
 	diagmem_free(driver, buf_copy, POOL_TYPE_COPY);
 	buf_copy = NULL;
 	mutex_unlock(&driver->diagchar_mutex);
-	if (!timer_in_progress)	{
-		timer_in_progress = 1;
-		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(500));
-	}
+
+	check_drain_timer();
+
 	return 0;
 
 fail_free_hdlc:
@@ -2017,9 +2044,13 @@
 		return -1;
 	}
 
-	device_create(driver->diagchar_class, NULL, devno,
-				  (void *)driver, "diag");
+	driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
+					 (void *)driver, "diag");
 
+	if (IS_ERR(driver->diag_dev))
+		return -EIO;
+
+	driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
 	return 0;
 
 }
@@ -2110,12 +2141,15 @@
 		driver->poolsize_user = poolsize_user;
 		driver->itemsize_write_struct = itemsize_write_struct;
 		driver->poolsize_write_struct = poolsize_write_struct;
+		driver->itemsize_dci = itemsize_dci;
+		driver->poolsize_dci = poolsize_dci;
 		driver->num_clients = max_clients;
 		driver->logging_mode = USB_MODE;
 		driver->socket_process = NULL;
 		driver->callback_process = NULL;
 		driver->mask_check = 0;
 		driver->in_busy_pktdata = 0;
+		driver->in_busy_dcipktdata = 0;
 		mutex_init(&driver->diagchar_mutex);
 		init_waitqueue_head(&driver->wait_q);
 		init_waitqueue_head(&driver->smd_wait_q);
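
Editor's note: the diag char device created above registers a wakeup source ("DIAG_WS") on the new diag_dev, which the DCI activate/deactivate helpers in diagfwd.c can then hold while data is pending. Below is a minimal sketch of the wakeup-source API this is built on; my_ws, my_probe and my_handle_data are illustrative names, not symbols from this patch.

#include <linux/pm_wakeup.h>

static struct wakeup_source *my_ws;	/* illustrative handle */

static int my_probe(void)
{
	/* same call used above for "DIAG_WS"; returns NULL on failure */
	my_ws = wakeup_source_register("MY_WS");
	return my_ws ? 0 : -ENOMEM;
}

static void my_handle_data(void)
{
	__pm_stay_awake(my_ws);		/* hold off suspend while data is pending */
	/* ... drain the pending payload here ... */
	__pm_relax(my_ws);		/* allow suspend again */
}

static void my_remove(void)
{
	wakeup_source_unregister(my_ws);
}
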
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index f7e720f..eb8a75b 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -43,9 +43,6 @@
 #include "diag_masks.h"
 #include "diagfwd_bridge.h"
 
-#define MODE_CMD		41
-#define RESET_ID		2
-
 #define STM_CMD_VERSION_OFFSET	4
 #define STM_CMD_MASK_OFFSET	5
 #define STM_CMD_DATA_OFFSET	6
@@ -695,15 +692,14 @@
 
 	if (!buf && (smd_info->type == SMD_DCI_TYPE ||
 					smd_info->type == SMD_DCI_CMD_TYPE))
-		diag_dci_try_deactivate_wakeup_source(smd_info->ch);
+		diag_dci_try_deactivate_wakeup_source();
 
 	if (smd_info->ch && buf) {
-		pkt_len = smd_cur_packet_size(smd_info->ch);
 
+		pkt_len = smd_cur_packet_size(smd_info->ch);
 		if (pkt_len == 0 && (smd_info->type == SMD_DCI_TYPE ||
 					smd_info->type == SMD_DCI_CMD_TYPE))
-			diag_dci_try_deactivate_wakeup_source(smd_info->ch);
-
+			diag_dci_try_deactivate_wakeup_source();
 		if (pkt_len > buf_size)
 			resize_success = diag_smd_resize_buf(smd_info, &buf,
 							&buf_size, pkt_len);
@@ -811,7 +807,7 @@
 fail_return:
 	if (smd_info->type == SMD_DCI_TYPE ||
 					smd_info->type == SMD_DCI_CMD_TYPE)
-		diag_dci_try_deactivate_wakeup_source(smd_info->ch);
+		diag_dci_try_deactivate_wakeup_source();
 	return;
 }
 
@@ -1049,17 +1045,46 @@
     return err;
 }
 
-static void diag_update_pkt_buffer(unsigned char *buf)
+void diag_update_pkt_buffer(unsigned char *buf, int type)
 {
-	unsigned char *ptr = driver->pkt_buf;
+	unsigned char *ptr = NULL;
 	unsigned char *temp = buf;
+	unsigned int length;
+	int *in_busy = NULL;
 
+	if (!buf) {
+		pr_err("diag: Invalid buffer in %s\n", __func__);
+		return;
+	}
+
+	switch (type) {
+	case PKT_TYPE:
+		ptr = driver->pkt_buf;
+		length = driver->pkt_length;
+		in_busy = &driver->in_busy_pktdata;
+		break;
+	case DCI_PKT_TYPE:
+		ptr = driver->dci_pkt_buf;
+		length = driver->dci_pkt_length;
+		in_busy = &driver->in_busy_dcipktdata;
+		break;
+	default:
+		pr_err("diag: Invalid type %d in %s\n", type, __func__);
+		return;
+	}
+
+	if (!ptr || length == 0) {
+		pr_err("diag: Invalid ptr %p and length %u in %s\n",
+						ptr, length, __func__);
+		return;
+	}
 	mutex_lock(&driver->diagchar_mutex);
-	if (CHK_OVERFLOW(ptr, ptr, ptr + PKT_SIZE, driver->pkt_length)) {
-		memcpy(ptr, temp , driver->pkt_length);
-		driver->in_busy_pktdata = 1;
-	} else
+	if (CHK_OVERFLOW(ptr, ptr, ptr + PKT_SIZE, length)) {
+		memcpy(ptr, temp, length);
+		*in_busy = 1;
+	} else {
 		printk(KERN_CRIT " Not enough buffer space for PKT_RESP\n");
+	}
 	mutex_unlock(&driver->diagchar_mutex);
 }
 
@@ -1108,7 +1133,7 @@
 	if (entry.process_id != NON_APPS_PROC) {
 		/* If the message is to be sent to the apps process */
 		if (type != MODEM_DATA) {
-			diag_update_pkt_buffer(buf);
+			diag_update_pkt_buffer(buf, PKT_TYPE);
 			diag_update_sleeping_process(entry.process_id,
 							PKT_TYPE);
 		}
@@ -2191,7 +2216,7 @@
 	if (smd_info->type == SMD_DCI_TYPE ||
 					smd_info->type == SMD_DCI_CMD_TYPE) {
 		if (event == SMD_EVENT_DATA)
-			diag_dci_try_activate_wakeup_source(smd_info->ch);
+			diag_dci_try_activate_wakeup_source();
 		queue_work(driver->diag_dci_wq,
 				&(smd_info->diag_read_smd_work));
 	} else if (smd_info->type == SMD_DATA_TYPE) {
@@ -2659,6 +2684,12 @@
 			 GFP_KERNEL)) == NULL)
 		goto err;
 	kmemleak_not_leak(driver->pkt_buf);
+	if (driver->dci_pkt_buf == NULL) {
+		driver->dci_pkt_buf = kzalloc(PKT_SIZE, GFP_KERNEL);
+		if (!driver->dci_pkt_buf)
+			goto err;
+	}
+	kmemleak_not_leak(driver->dci_pkt_buf);
 	if (driver->apps_rsp_buf == NULL) {
 		driver->apps_rsp_buf = kzalloc(APPS_BUF_SIZE, GFP_KERNEL);
 		if (driver->apps_rsp_buf == NULL)
@@ -2709,6 +2740,7 @@
 	kfree(driver->data_ready);
 	kfree(driver->table);
 	kfree(driver->pkt_buf);
+	kfree(driver->dci_pkt_buf);
 	kfree(driver->usb_read_ptr);
 	kfree(driver->apps_rsp_buf);
 	kfree(driver->user_space_data_buf);
@@ -2749,6 +2781,7 @@
 	kfree(driver->data_ready);
 	kfree(driver->table);
 	kfree(driver->pkt_buf);
+	kfree(driver->dci_pkt_buf);
 	kfree(driver->usb_read_ptr);
 	kfree(driver->apps_rsp_buf);
 	kfree(driver->user_space_data_buf);
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 1a4601a..7b2ded3 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -54,6 +54,8 @@
 int diag_process_apps_pkt(unsigned char *buf, int len);
 void diag_reset_smd_data(int queue);
 int diag_apps_responds(void);
+void diag_update_pkt_buffer(unsigned char *buf, int type);
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
 /* State for diag forwarding */
 #ifdef CONFIG_DIAG_OVER_USB
 int diagfwd_connect(void);
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
index 4ceca4f..db01e9b 100644
--- a/drivers/char/diag/diagmem.c
+++ b/drivers/char/diag/diagmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,16 @@
 				driver->diag_write_struct_pool, GFP_ATOMIC);
 			}
 		}
+	} else if (pool_type == POOL_TYPE_DCI) {
+		if (driver->diag_dci_pool) {
+			if ((driver->count_dci_pool < driver->poolsize_dci) &&
+				(size <= driver->itemsize_dci)) {
+				atomic_add(1,
+					(atomic_t *)&driver->count_dci_pool);
+				buf = mempool_alloc(driver->diag_dci_pool,
+								GFP_ATOMIC);
+			}
+		}
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 	} else if (pool_type == POOL_TYPE_HSIC ||
 				pool_type == POOL_TYPE_HSIC_2) {
@@ -155,6 +165,16 @@
 			pr_err("diag: Unable to destroy STRUCT mempool");
 		}
 	}
+
+	if (driver->diag_dci_pool) {
+		if (driver->count_dci_pool == 0 && driver->ref_count == 0) {
+			mempool_destroy(driver->diag_dci_pool);
+			driver->diag_dci_pool = NULL;
+		} else if (driver->ref_count == 0 && pool_type ==
+							POOL_TYPE_ALL) {
+				pr_err("diag: Unable to destroy DCI mempool");
+		}
+	}
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 	for (index = 0; index < MAX_HSIC_CH; index++) {
 		if (diag_hsic[index].diag_hsic_pool &&
@@ -231,6 +251,15 @@
 		} else
 			pr_err("diag: Attempt to free up DIAG driver USB structure mempool which is already free %d ",
 					driver->count_write_struct_pool);
+	} else if (pool_type == POOL_TYPE_DCI) {
+		if (driver->diag_dci_pool != NULL &&
+			driver->count_dci_pool > 0) {
+				mempool_free(buf, driver->diag_dci_pool);
+				atomic_add(-1,
+					(atomic_t *)&driver->count_dci_pool);
+		} else
+			pr_err("diag: Attempt to free up DIAG driver DCI mempool which is already free %d ",
+					driver->count_dci_pool);
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
 	} else if (pool_type == POOL_TYPE_HSIC ||
 				pool_type == POOL_TYPE_HSIC_2) {
@@ -294,6 +323,12 @@
 						driver->diag_write_struct_pool;
 	}
 
+	if (driver->count_dci_pool == 0) {
+		driver->diag_dci_pool = mempool_create_kmalloc_pool(
+			driver->poolsize_dci, driver->itemsize_dci);
+		diag_pools_array[POOL_DCI_IDX] = driver->diag_dci_pool;
+	}
+
 	if (!driver->diagpool)
 		pr_err("diag: Cannot allocate diag mempool\n");
 
@@ -305,6 +340,10 @@
 
 	if (!driver->diag_write_struct_pool)
 		pr_err("diag: Cannot allocate diag USB struct mempool\n");
+
+	if (!driver->diag_dci_pool)
+		pr_err("diag: Cannot allocate diag DCI mempool\n");
+
 }
 
 #ifdef CONFIG_DIAGFWD_BRIDGE_CODE
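
Editor's note: the new DCI pool added above follows the same shape as the driver's existing copy/HDLC pools: a kmalloc-backed mempool created once, with allocation and free gated by a simple item count. A rough sketch of the underlying mempool API is below; my_pool, MY_ITEMS and MY_ITEMSIZE are assumptions for illustration, not identifiers from this patch.

#include <linux/mempool.h>
#include <linux/slab.h>

#define MY_ITEMS	4	/* illustrative pool depth */
#define MY_ITEMSIZE	512	/* illustrative item size in bytes */

static mempool_t *my_pool;

static int my_pool_init(void)
{
	/* pre-allocates MY_ITEMS kmalloc'd buffers of MY_ITEMSIZE bytes */
	my_pool = mempool_create_kmalloc_pool(MY_ITEMS, MY_ITEMSIZE);
	return my_pool ? 0 : -ENOMEM;
}

static void *my_pool_get(void)
{
	/* GFP_ATOMIC mirrors the driver's use from non-sleeping contexts */
	return my_pool ? mempool_alloc(my_pool, GFP_ATOMIC) : NULL;
}

static void my_pool_put(void *buf)
{
	if (my_pool && buf)
		mempool_free(buf, my_pool);
}

static void my_pool_exit(void)
{
	if (my_pool) {
		mempool_destroy(my_pool);
		my_pool = NULL;
	}
}
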
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 12fa799..d163317 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -461,6 +461,7 @@
 	struct kgsl_perfcounter_read_group __user *reads, unsigned int count)
 {
 	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct kgsl_device *device = &adreno_dev->dev;
 	struct adreno_perfcount_group *group;
 	struct kgsl_perfcounter_read_group *list = NULL;
 	unsigned int i, j;
@@ -488,6 +489,13 @@
 		goto done;
 	}
 
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+	ret = kgsl_active_count_get(device);
+	if (ret) {
+		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+		goto done;
+	}
+
 	/* list iterator */
 	for (j = 0; j < count; j++) {
 
@@ -496,7 +504,7 @@
 		/* Verify that the group ID is within range */
 		if (list[j].groupid >= counters->group_count) {
 			ret = -EINVAL;
-			goto done;
+			break;
 		}
 
 		group = &(counters->groups[list[j].groupid]);
@@ -512,11 +520,13 @@
 		}
 	}
 
+	kgsl_active_count_put(device);
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
 	/* write the data */
-	if (copy_to_user(reads, list,
-			sizeof(struct kgsl_perfcounter_read_group) *
-			count) != 0)
-		ret = -EFAULT;
+	if (ret == 0 && copy_to_user(reads, list,
+			sizeof(struct kgsl_perfcounter_read_group) * count))
+		ret = -EFAULT;
 
 done:
 	kfree(list);
@@ -592,8 +602,11 @@
 	unsigned int *max_counters)
 {
 	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct kgsl_device *device = &adreno_dev->dev;
 	struct adreno_perfcount_group *group;
-	unsigned int i;
+	unsigned int i, t;
+	int ret;
+	unsigned int *buf;
 
 	*max_counters = 0;
 
@@ -603,6 +616,8 @@
 	if (groupid >= counters->group_count)
 		return -EINVAL;
 
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
 	group = &(counters->groups[groupid]);
 	*max_counters = group->reg_count;
 
@@ -610,20 +625,28 @@
 	 * if NULL countable or *count of zero, return max reg_count in
 	 * *max_counters and return success
 	 */
-	if (countables == NULL || count == 0)
+	if (countables == NULL || count == 0) {
+		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 		return 0;
-
-	/*
-	 * Go through all available counters.  Write upto *count * countable
-	 * values.
-	 */
-	for (i = 0; i < group->reg_count && i < count; i++) {
-		if (copy_to_user(&countables[i], &(group->regs[i].countable),
-				sizeof(unsigned int)) != 0)
-			return -EFAULT;
 	}
 
-	return 0;
+	t = min_t(int, group->reg_count, count);
+
+	buf = kmalloc(t * sizeof(unsigned int), GFP_KERNEL);
+	if (buf == NULL) {
+		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < t; i++)
+		buf[i] = group->regs[i].countable;
+
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
+	ret = copy_to_user(countables, buf, sizeof(unsigned int) * t) ?
+			-EFAULT : 0;
+	kfree(buf);
+	return ret;
 }
 
 static inline void refcount_group(struct adreno_perfcount_group *group,
@@ -2832,6 +2855,8 @@
 				break;
 			}
 
+			kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
 			if (enable) {
 				device->pwrctrl.ctrl_flags = 0;
 				adreno_dev->fast_hang_detect = 1;
@@ -2851,6 +2876,7 @@
 				kgsl_pwrscale_disable(device);
 			}
 
+			kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 			status = 0;
 		}
 		break;
@@ -2869,10 +2895,13 @@
 
 			context = kgsl_context_get_owner(dev_priv,
 							constraint.context_id);
+
 			if (context == NULL)
 				break;
+
 			status = adreno_set_constraint(device, context,
 								&constraint);
+
 			kgsl_context_put(context);
 		}
 		break;
@@ -3354,24 +3383,28 @@
 	}
 	case IOCTL_KGSL_PERFCOUNTER_GET: {
 		struct kgsl_perfcounter_get *get = data;
+		kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
 		/*
 		 * adreno_perfcounter_get() is called by kernel clients
 		 * during start(), so it is not safe to take an
 		 * active count inside this function.
 		 */
 		result = kgsl_active_count_get(device);
-		if (result)
-			break;
-		result = adreno_perfcounter_get(adreno_dev, get->groupid,
-			get->countable, &get->offset, &get->offset_hi,
-			PERFCOUNTER_FLAG_NONE);
-		kgsl_active_count_put(device);
+		if (result == 0) {
+			result = adreno_perfcounter_get(adreno_dev,
+				get->groupid, get->countable, &get->offset,
+				&get->offset_hi, PERFCOUNTER_FLAG_NONE);
+			kgsl_active_count_put(device);
+		}
+		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 		break;
 	}
 	case IOCTL_KGSL_PERFCOUNTER_PUT: {
 		struct kgsl_perfcounter_put *put = data;
+		kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
 		result = adreno_perfcounter_put(adreno_dev, put->groupid,
 			put->countable, PERFCOUNTER_FLAG_NONE);
+		kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 		break;
 	}
 	case IOCTL_KGSL_PERFCOUNTER_QUERY: {
@@ -3383,12 +3416,8 @@
 	}
 	case IOCTL_KGSL_PERFCOUNTER_READ: {
 		struct kgsl_perfcounter_read *read = data;
-		result = kgsl_active_count_get(device);
-		if (result)
-			break;
 		result = adreno_perfcounter_read_group(adreno_dev,
 			read->reads, read->count);
-		kgsl_active_count_put(device);
 		break;
 	}
 	default:
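
Editor's note: the perfcounter changes above follow one rule: no copy_to_user()/copy_from_user() while the device mutex or an active count is held, so the data is staged in a temporary kernel buffer first and the user copy happens after unlocking. A hedged sketch of that staging pattern follows; my_lock, my_src and my_read_values are placeholders, not kgsl symbols.

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(my_lock);
static unsigned int my_src[16];		/* data normally guarded by my_lock */
static unsigned int my_count = 16;

static long my_read_values(unsigned int __user *uptr, unsigned int count)
{
	unsigned int *tmp;
	unsigned int n = min(count, my_count);
	long ret = 0;

	tmp = kmalloc(n * sizeof(unsigned int), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* snapshot under the lock; no user access while it is held */
	mutex_lock(&my_lock);
	memcpy(tmp, my_src, n * sizeof(unsigned int));
	mutex_unlock(&my_lock);

	/* the user copy may fault and sleep, so it happens outside the lock */
	if (copy_to_user(uptr, tmp, n * sizeof(unsigned int)))
		ret = -EFAULT;

	kfree(tmp);
	return ret;
}
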
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index 45075a5..7785f3b 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -680,6 +680,15 @@
 	if (adreno_is_a2xx(adreno_dev))
 		return -ENOSPC;
 
+	buf = kmalloc(len + 1, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, user_buf, len)) {
+		size = -EFAULT;
+		goto error_free;
+	}
+
 	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
 
 	if (adreno_profile_enabled(profile)) {
@@ -688,8 +697,10 @@
 	}
 
 	ret = kgsl_active_count_get(device);
-	if (ret)
-		return -EINVAL;
+	if (ret) {
+		size = ret;
+		goto error_unlock;
+	}
 
 	/*
 	 * When adding/removing assignments, ensure that the GPU is done with
@@ -697,19 +708,13 @@
 	 * GPU and avoid racey conditions.
 	 */
 	if (adreno_idle(device)) {
-		size = -EINVAL;
+		size = -ETIMEDOUT;
 		goto error_put;
 	}
 
 	/* clear all shared buffer results */
 	adreno_profile_process_results(device);
 
-	buf = kmalloc(len + 1, GFP_KERNEL);
-	if (!buf) {
-		size = -EINVAL;
-		goto error_put;
-	}
-
 	pbuf = buf;
 
 	/* clear the log buffer */
@@ -718,10 +723,6 @@
 		profile->log_tail = profile->log_buffer;
 	}
 
-	if (copy_from_user(buf, user_buf, len)) {
-		size = -EFAULT;
-		goto error_free;
-	}
 
 	/* for sanity and parsing, ensure it is null terminated */
 	buf[len] = '\0';
@@ -741,12 +742,12 @@
 
 	size = len;
 
-error_free:
-	kfree(buf);
 error_put:
 	kgsl_active_count_put(device);
 error_unlock:
 	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+error_free:
+	kfree(buf);
 	return size;
 }
 
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 462e7a5..5a33d9d 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1478,7 +1478,6 @@
 				      kgsl_readtimestamp(device, context,
 							KGSL_TIMESTAMP_RETIRED),
 				      result);
-
 	return result;
 }
 
@@ -1487,9 +1486,14 @@
 						void *data)
 {
 	struct kgsl_device_waittimestamp *param = data;
+	struct kgsl_device *device = dev_priv->device;
+	long result = -EINVAL;
 
-	return _device_waittimestamp(dev_priv, NULL,
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+	result = _device_waittimestamp(dev_priv, NULL,
 			param->timestamp, param->timeout);
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+	return result;
 }
 
 static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
@@ -1498,8 +1502,10 @@
 {
 	struct kgsl_device_waittimestamp_ctxtid *param = data;
 	struct kgsl_context *context;
+	struct kgsl_device *device = dev_priv->device;
 	long result = -EINVAL;
 
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
 	context = kgsl_context_get_owner(dev_priv, param->context_id);
 
 	if (context)
@@ -1507,6 +1513,7 @@
 			param->timestamp, param->timeout);
 
 	kgsl_context_put(context);
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 	return result;
 }
 
@@ -2296,9 +2303,14 @@
 						void *data)
 {
 	struct kgsl_cmdstream_readtimestamp *param = data;
+	struct kgsl_device *device = dev_priv->device;
+	long result = -EINVAL;
 
-	return _cmdstream_readtimestamp(dev_priv, NULL,
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+	result = _cmdstream_readtimestamp(dev_priv, NULL,
 			param->type, &param->timestamp);
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+	return result;
 }
 
 static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
@@ -2306,9 +2318,11 @@
 						void *data)
 {
 	struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
+	struct kgsl_device *device = dev_priv->device;
 	struct kgsl_context *context;
 	long result = -EINVAL;
 
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
 	context = kgsl_context_get_owner(dev_priv, param->context_id);
 
 	if (context)
@@ -2316,6 +2330,7 @@
 			param->type, &param->timestamp);
 
 	kgsl_context_put(context);
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 	return result;
 }
 
@@ -2367,9 +2382,14 @@
 						    void *data)
 {
 	struct kgsl_cmdstream_freememontimestamp *param = data;
+	struct kgsl_device *device = dev_priv->device;
+	long result = -EINVAL;
 
-	return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+	result = _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
 			NULL, param->timestamp, param->type);
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+	return result;
 }
 
 static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
@@ -2379,13 +2399,16 @@
 {
 	struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
 	struct kgsl_context *context;
+	struct kgsl_device *device = dev_priv->device;
 	long result = -EINVAL;
 
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
 	context = kgsl_context_get_owner(dev_priv, param->context_id);
 	if (context)
 		result = _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
 			context, param->timestamp, param->type);
 	kgsl_context_put(context);
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 	return result;
 }
 
@@ -2397,6 +2420,7 @@
 	struct kgsl_context *context = NULL;
 	struct kgsl_device *device = dev_priv->device;
 
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
 	context = device->ftbl->drawctxt_create(dev_priv, &param->flags);
 	if (IS_ERR(context)) {
 		result = PTR_ERR(context);
@@ -2405,6 +2429,7 @@
 	trace_kgsl_context_create(dev_priv->device, context, param->flags);
 	param->drawctxt_id = context->id;
 done:
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 	return result;
 }
 
@@ -2412,14 +2437,17 @@
 					unsigned int cmd, void *data)
 {
 	struct kgsl_drawctxt_destroy *param = data;
+	struct kgsl_device *device = dev_priv->device;
 	struct kgsl_context *context;
 	long result;
 
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
 	context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
 
 	result = kgsl_context_detach(context);
 
 	kgsl_context_put(context);
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
 	return result;
 }
 
@@ -3488,78 +3516,65 @@
 typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
 	unsigned int, void *);
 
-#define KGSL_IOCTL_FUNC(_cmd, _func, _flags) \
+#define KGSL_IOCTL_FUNC(_cmd, _func) \
 	[_IOC_NR((_cmd))] = \
-		{ .cmd = (_cmd), .func = (_func), .flags = (_flags) }
+		{ .cmd = (_cmd), .func = (_func) }
 
-#define KGSL_IOCTL_LOCK		BIT(0)
 
 static const struct {
 	unsigned int cmd;
 	kgsl_ioctl_func_t func;
-	unsigned int flags;
 } kgsl_ioctl_funcs[] = {
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
-			kgsl_ioctl_device_getproperty,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_device_getproperty),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
-			kgsl_ioctl_device_waittimestamp,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_device_waittimestamp),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
-			kgsl_ioctl_device_waittimestamp_ctxtid,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_device_waittimestamp_ctxtid),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
-			kgsl_ioctl_rb_issueibcmds, 0),
+			kgsl_ioctl_rb_issueibcmds),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_SUBMIT_COMMANDS,
-			kgsl_ioctl_submit_commands, 0),
+			kgsl_ioctl_submit_commands),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
-			kgsl_ioctl_cmdstream_readtimestamp,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_cmdstream_readtimestamp),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
-			kgsl_ioctl_cmdstream_readtimestamp_ctxtid,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_cmdstream_readtimestamp_ctxtid),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
-			kgsl_ioctl_cmdstream_freememontimestamp,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_cmdstream_freememontimestamp),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
-			kgsl_ioctl_cmdstream_freememontimestamp_ctxtid,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_cmdstream_freememontimestamp_ctxtid),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
-			kgsl_ioctl_drawctxt_create,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_drawctxt_create),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
-			kgsl_ioctl_drawctxt_destroy,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_drawctxt_destroy),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
-			kgsl_ioctl_map_user_mem, 0),
+			kgsl_ioctl_map_user_mem),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
-			kgsl_ioctl_map_user_mem, 0),
+			kgsl_ioctl_map_user_mem),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
-			kgsl_ioctl_sharedmem_free, 0),
+			kgsl_ioctl_sharedmem_free),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
-			kgsl_ioctl_sharedmem_flush_cache, 0),
+			kgsl_ioctl_sharedmem_flush_cache),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
-			kgsl_ioctl_gpumem_alloc, 0),
+			kgsl_ioctl_gpumem_alloc),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
-			kgsl_ioctl_cff_syncmem, 0),
+			kgsl_ioctl_cff_syncmem),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
-			kgsl_ioctl_cff_user_event, 0),
+			kgsl_ioctl_cff_user_event),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
-			kgsl_ioctl_timestamp_event,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_timestamp_event),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
-			kgsl_ioctl_device_setproperty,
-			KGSL_IOCTL_LOCK),
+			kgsl_ioctl_device_setproperty),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID,
-			kgsl_ioctl_gpumem_alloc_id, 0),
+			kgsl_ioctl_gpumem_alloc_id),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID,
-			kgsl_ioctl_gpumem_free_id, 0),
+			kgsl_ioctl_gpumem_free_id),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_GET_INFO,
-			kgsl_ioctl_gpumem_get_info, 0),
+			kgsl_ioctl_gpumem_get_info),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE,
-			kgsl_ioctl_gpumem_sync_cache, 0),
+			kgsl_ioctl_gpumem_sync_cache),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK,
-			kgsl_ioctl_gpumem_sync_cache_bulk, 0),
+			kgsl_ioctl_gpumem_sync_cache_bulk),
 };
 
 static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
@@ -3567,7 +3582,7 @@
 	struct kgsl_device_private *dev_priv = filep->private_data;
 	unsigned int nr;
 	kgsl_ioctl_func_t func;
-	int lock, ret;
+	int ret;
 	char ustack[64];
 	void *uptr = NULL;
 
@@ -3624,7 +3639,6 @@
 		}
 
 		func = kgsl_ioctl_funcs[nr].func;
-		lock = kgsl_ioctl_funcs[nr].flags & KGSL_IOCTL_LOCK;
 	} else {
 		func = dev_priv->device->ftbl->ioctl;
 		if (!func) {
@@ -3633,19 +3647,10 @@
 			ret = -ENOIOCTLCMD;
 			goto done;
 		}
-		lock = 1;
 	}
 
-	if (lock)
-		kgsl_mutex_lock(&dev_priv->device->mutex,
-			&dev_priv->device->mutex_owner);
-
 	ret = func(dev_priv, cmd, uptr);
 
-	if (lock)
-		kgsl_mutex_unlock(&dev_priv->device->mutex,
-			&dev_priv->device->mutex_owner);
-
 	/*
 	 * Still copy back on failure, but assume function took
 	 * all necessary precautions sanitizing the return values.
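
Editor's note: with the per-ioctl lock flag removed above, the kgsl dispatch table reduces to cmd/handler pairs indexed by _IOC_NR(), and each handler takes whatever locks it needs internally. A small sketch of that designated-initializer dispatch pattern is below; MY_IOCTL_FOO, my_foo and the other names are hypothetical, not kgsl symbols.

#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>

typedef long (*my_ioctl_func_t)(void *priv, unsigned int cmd, void *data);

#define MY_IOCTL_FOO	_IOW('x', 1, int)
#define MY_IOCTL_BAR	_IOR('x', 2, int)

static long my_foo(void *priv, unsigned int cmd, void *data) { return 0; }
static long my_bar(void *priv, unsigned int cmd, void *data) { return 0; }

#define MY_IOCTL_FUNC(_cmd, _func) \
	[_IOC_NR((_cmd))] = { .cmd = (_cmd), .func = (_func) }

static const struct {
	unsigned int cmd;
	my_ioctl_func_t func;
} my_ioctl_funcs[] = {
	MY_IOCTL_FUNC(MY_IOCTL_FOO, my_foo),
	MY_IOCTL_FUNC(MY_IOCTL_BAR, my_bar),
};

static long my_dispatch(void *priv, unsigned int cmd, void *data)
{
	unsigned int nr = _IOC_NR(cmd);

	if (nr >= ARRAY_SIZE(my_ioctl_funcs) || !my_ioctl_funcs[nr].func ||
	    my_ioctl_funcs[nr].cmd != cmd)
		return -ENOIOCTLCMD;

	/* each handler now takes whatever locks it needs internally */
	return my_ioctl_funcs[nr].func(priv, cmd, data);
}
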
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
old mode 100644
new mode 100755
index 65e607b..4591165
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -85,8 +85,17 @@
 	return status;
 }
 
-static void _kgsl_destroy_pagetable(struct kgsl_pagetable *pagetable)
+static void kgsl_destroy_pagetable(struct kref *kref)
 {
+	struct kgsl_pagetable *pagetable = container_of(kref,
+		struct kgsl_pagetable, refcount);
+
+	unsigned long flags;
+
+	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+	list_del(&pagetable->list);
+	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
 	pagetable_remove_sysfs_objects(pagetable);
 
 	kgsl_cleanup_pt(pagetable);
@@ -101,29 +110,6 @@
 	kfree(pagetable);
 }
 
-static void kgsl_destroy_pagetable(struct kref *kref)
-{
-	struct kgsl_pagetable *pagetable = container_of(kref,
-		struct kgsl_pagetable, refcount);
-	unsigned long flags;
-
-	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
-	list_del(&pagetable->list);
-	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-
-	_kgsl_destroy_pagetable(pagetable);
-}
-
-static void kgsl_destroy_pagetable_locked(struct kref *kref)
-{
-	struct kgsl_pagetable *pagetable = container_of(kref,
-		struct kgsl_pagetable, refcount);
-
-	list_del(&pagetable->list);
-
-	_kgsl_destroy_pagetable(pagetable);
-}
-
 static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
 {
 	if (pagetable)
@@ -138,12 +124,9 @@
 
 	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
 	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
-		if (kref_get_unless_zero(&pt->refcount)) {
-			if (pt->name == name) {
-				ret = pt;
-				break;
-			}
-			kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
+		if (name == pt->name && kref_get_unless_zero(&pt->refcount)) {
+			ret = pt;
+			break;
 		}
 	}
 
@@ -340,14 +323,9 @@
 		return KGSL_MMU_GLOBAL_PT;
 	spin_lock(&kgsl_driver.ptlock);
 	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
-		if (kref_get_unless_zero(&pt->refcount)) {
-			if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
-				ptid = (int) pt->name;
-				kref_put(&pt->refcount,
-					kgsl_destroy_pagetable_locked);
-				break;
-			}
-			kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
+		if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+			ptid = (int) pt->name;
+			break;
 		}
 	}
 	spin_unlock(&kgsl_driver.ptlock);
@@ -367,23 +345,16 @@
 		return 0;
 	spin_lock(&kgsl_driver.ptlock);
 	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
-		if (kref_get_unless_zero(&pt->refcount)) {
-			if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
-				if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
-					ret = 1;
-					kref_put(&pt->refcount,
-						kgsl_destroy_pagetable_locked);
-					break;
-				} else {
-					pt->fault_addr =
-						(addr & ~(PAGE_SIZE-1));
-					ret = 0;
-					kref_put(&pt->refcount,
-						kgsl_destroy_pagetable_locked);
-					break;
-				}
+		if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+			if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
+				ret = 1;
+				break;
+			} else {
+				pt->fault_addr =
+					(addr & ~(PAGE_SIZE-1));
+				ret = 0;
+				break;
 			}
-			kref_put(&pt->refcount, kgsl_destroy_pagetable_locked);
 		}
 	}
 	spin_unlock(&kgsl_driver.ptlock);
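
Editor's note: the pagetable lookup above now takes a reference with kref_get_unless_zero() only after the name matches, and the single release callback unlinks the entry from the global list itself, which is what removes the need for a separate *_locked destroy path. A minimal sketch of this kref-protected list lookup follows, assuming illustrative types and names (struct my_obj, my_list, my_lock).

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	struct kref refcount;
	struct list_head node;
	int name;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, refcount);
	unsigned long flags;

	/* last reference dropped: unlink under the same lock lookups use */
	spin_lock_irqsave(&my_lock, flags);
	list_del(&obj->node);
	spin_unlock_irqrestore(&my_lock, flags);

	kfree(obj);
}

static struct my_obj *my_obj_create(int name)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	unsigned long flags;

	if (!obj)
		return NULL;
	kref_init(&obj->refcount);	/* initial reference belongs to the creator */
	obj->name = name;
	spin_lock_irqsave(&my_lock, flags);
	list_add(&obj->node, &my_list);
	spin_unlock_irqrestore(&my_lock, flags);
	return obj;
}

static struct my_obj *my_obj_get(int name)
{
	struct my_obj *obj, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	list_for_each_entry(obj, &my_list, node) {
		/* only take a reference once the match is confirmed */
		if (obj->name == name && kref_get_unless_zero(&obj->refcount)) {
			found = obj;
			break;
		}
	}
	spin_unlock_irqrestore(&my_lock, flags);
	return found;
}

static void my_obj_put(struct my_obj *obj)
{
	if (obj)
		kref_put(&obj->refcount, my_obj_release);
}
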
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 0e694a7..cef052d 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -120,6 +120,8 @@
 	int ret = -EINVAL;
 	char fence_name[sizeof(fence->name)] = {};
 
+	priv.fence_fd = -1;
+
 	if (len != sizeof(priv))
 		return -EINVAL;
 
@@ -127,10 +129,12 @@
 	if (event == NULL)
 		return -ENOMEM;
 
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
 	context = kgsl_context_get_owner(owner, context_id);
 
 	if (context == NULL)
-		goto fail_pt;
+		goto unlock;
 
 	event->context = context;
 	event->timestamp = timestamp;
@@ -139,7 +143,7 @@
 	if (pt == NULL) {
 		KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n");
 		ret = -ENOMEM;
-		goto fail_pt;
+		goto unlock;
 	}
 	snprintf(fence_name, sizeof(fence_name),
 		"%s-pid-%d-ctx-%d-ts-%d",
@@ -153,42 +157,53 @@
 		kgsl_sync_pt_destroy(pt);
 		KGSL_DRV_ERR(device, "sync_fence_create failed\n");
 		ret = -ENOMEM;
-		goto fail_fence;
+		goto unlock;
 	}
 
 	priv.fence_fd = get_unused_fd_flags(0);
 	if (priv.fence_fd < 0) {
-		KGSL_DRV_ERR(device, "invalid fence fd\n");
-		ret = -EINVAL;
-		goto fail_fd;
+		KGSL_DRV_ERR(device, "Unable to get a file descriptor: %d\n",
+			priv.fence_fd);
+		ret = priv.fence_fd;
+		goto unlock;
 	}
 	sync_fence_install(fence, priv.fence_fd);
 
+	/* Unlock the mutex before copying to user */
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
 	if (copy_to_user(data, &priv, sizeof(priv))) {
 		ret = -EFAULT;
-		goto fail_copy_fd;
+		goto out;
 	}
 
 	/*
 	 * Hold the context ref-count for the event - it will get released in
 	 * the callback
 	 */
+
+	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
+
 	ret = kgsl_add_event(device, context_id, timestamp,
 			kgsl_fence_event_cb, event, owner);
+
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
 	if (ret)
-		goto fail_event;
+		goto out;
 
 	return 0;
 
-fail_event:
-fail_copy_fd:
-	/* clean up sync_fence_install */
-	put_unused_fd(priv.fence_fd);
-fail_fd:
-	/* clean up sync_fence_create */
-	sync_fence_put(fence);
-fail_fence:
-fail_pt:
+unlock:
+	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
+
+out:
+	if (priv.fence_fd >= 0)
+		put_unused_fd(priv.fence_fd);
+
+	if (fence)
+		sync_fence_put(fence);
+
 	kgsl_context_put(context);
 	kfree(event);
 	return ret;
diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c
index 87a4ab9..ef8f37c 100644
--- a/drivers/md/dm-req-crypt.c
+++ b/drivers/md/dm-req-crypt.c
@@ -23,6 +23,10 @@
 #include <linux/backing-dev.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
+#include <linux/device-mapper.h>
+#include <linux/printk.h>
+#include <linux/pft.h>
+
 #include <crypto/scatterwalk.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
@@ -31,9 +35,6 @@
 #include <crypto/algapi.h>
 #include <mach/qcrypto.h>
 
-#include <linux/device-mapper.h>
-
-
 #define DM_MSG_PREFIX "req-crypt"
 
 #define MAX_SG_LIST	1024
@@ -52,13 +53,17 @@
 	int err;
 };
 
-struct dm_dev *dev;
+#define FDE_KEY_ID	0
+#define PFE_KEY_ID	1
+
+static struct dm_dev *dev;
 static struct kmem_cache *_req_crypt_io_pool;
-sector_t start_sector_orig;
-struct workqueue_struct *req_crypt_queue;
-mempool_t *req_io_pool;
-mempool_t *req_page_pool;
-struct crypto_ablkcipher *tfm;
+static sector_t start_sector_orig;
+static struct workqueue_struct *req_crypt_queue;
+static mempool_t *req_io_pool;
+static mempool_t *req_page_pool;
+static bool is_fde_enabled;
+static struct crypto_ablkcipher *tfm;
 
 struct req_dm_crypt_io {
 	struct work_struct work;
@@ -66,12 +71,69 @@
 	int error;
 	atomic_t pending;
 	struct timespec start_time;
+	bool should_encrypt;
+	bool should_decrypt;
+	u32 key_id;
 };
 
 static void req_crypt_cipher_complete
 		(struct crypto_async_request *req, int err);
 
 
+static  bool req_crypt_should_encrypt(struct req_dm_crypt_io *req)
+{
+	int ret;
+	bool should_encrypt = false;
+	struct bio *bio = NULL;
+	u32 key_id = 0;
+	bool is_encrypted = false;
+	bool is_inplace = false;
+
+	if (!req || !req->cloned_request || !req->cloned_request->bio)
+		return false;
+
+	bio = req->cloned_request->bio;
+
+	ret = pft_get_key_index(bio, &key_id, &is_encrypted, &is_inplace);
+	/* req->key_id = key_id; @todo support more than 1 pfe key */
+	if ((ret == 0) && (is_encrypted || is_inplace)) {
+		should_encrypt = true;
+		req->key_id = PFE_KEY_ID;
+	} else if (is_fde_enabled) {
+		should_encrypt = true;
+		req->key_id = FDE_KEY_ID;
+	}
+
+	return should_encrypt;
+}
+
+static  bool req_crypt_should_deccrypt(struct req_dm_crypt_io *req)
+{
+	int ret;
+	bool should_deccrypt = false;
+	struct bio *bio = NULL;
+	u32 key_id = 0;
+	bool is_encrypted = false;
+	bool is_inplace = false;
+
+	if (!req || !req->cloned_request || !req->cloned_request->bio)
+		return false;
+
+	bio = req->cloned_request->bio;
+
+	ret = pft_get_key_index(bio, &key_id, &is_encrypted, &is_inplace);
+	/* req->key_id = key_id; @todo support more than 1 pfe key */
+	if ((ret == 0) && (is_encrypted && !is_inplace)) {
+		should_deccrypt = true;
+		req->key_id = PFE_KEY_ID;
+	} else if (is_fde_enabled) {
+		should_deccrypt = true;
+		req->key_id = FDE_KEY_ID;
+	}
+
+	return should_deccrypt;
+}
+
 static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
 {
 	atomic_inc(&io->pending);
@@ -196,6 +258,13 @@
 	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 					req_crypt_cipher_complete, &result);
 	init_completion(&result.completion);
+	err = qcrypto_cipher_set_device(req, io->key_id);
+	if (err != 0) {
+		DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
+				__func__, err);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
 	qcrypto_cipher_set_flag(req,
 		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
 	crypto_ablkcipher_clear_flags(tfm, ~0);
@@ -270,6 +339,26 @@
 }
 
 /*
+ * This callback is called by the worker queue to perform non-decrypt reads
+ * and use the dm function to complete the bios and requests.
+ */
+static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+	int error = 0;
+
+	if (!io || !io->cloned_request) {
+		DMERR("%s io is invalid\n", __func__);
+		BUG(); /* should not happen */
+	}
+
+	clone = io->cloned_request;
+
+	dm_end_request(clone, error);
+	mempool_free(io, req_io_pool);
+}
+
+/*
  * The callback that will be called by the worker queue to perform Encryption
  * for writes and submit the request using the elevelator.
  */
@@ -291,6 +380,7 @@
 	struct page *page = NULL;
 	u8 IV[AES_XTS_IV_LEN];
 	int remaining_size = 0;
+	int err = 0;
 
 	if (io) {
 		if (io->cloned_request) {
@@ -322,6 +412,13 @@
 				req_crypt_cipher_complete, &result);
 
 	init_completion(&result.completion);
+	err = qcrypto_cipher_set_device(req, io->key_id);
+	if (err != 0) {
+		DMERR("%s qcrypto_cipher_set_device failed with err %d\n",
+				__func__, err);
+		error = DM_REQ_CRYPT_ERROR;
+		goto ablkcipher_req_alloc_failure;
+	}
 	qcrypto_cipher_set_flag(req,
 		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
 	crypto_ablkcipher_clear_flags(tfm, ~0);
@@ -460,19 +557,44 @@
 	req_crypt_dec_pending_encrypt(io);
 }
 
+/*
+ * This callback is called by the worker queue to perform non-encrypted writes
+ * and submit the request using the elevator.
+ */
+static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io)
+{
+	struct request *clone = NULL;
+
+	if (!io || !io->cloned_request) {
+		DMERR("%s io is invalid\n", __func__);
+		BUG(); /* should not happen */
+	}
+
+	clone = io->cloned_request;
+	io->error = 0;
+	dm_dispatch_request(clone);
+}
+
 /* Queue callback function that will get triggered */
 static void req_cryptd_crypt(struct work_struct *work)
 {
 	struct req_dm_crypt_io *io =
 			container_of(work, struct req_dm_crypt_io, work);
 
-	if (rq_data_dir(io->cloned_request) == WRITE)
-		req_cryptd_crypt_write_convert(io);
-	else if (rq_data_dir(io->cloned_request) == READ)
-		req_cryptd_crypt_read_convert(io);
-	else
-		DMERR("%s received non-read/write request for Clone %u\n",
+	if (rq_data_dir(io->cloned_request) == WRITE) {
+		if (io->should_encrypt)
+			req_cryptd_crypt_write_convert(io);
+		else
+			req_cryptd_crypt_write_plain(io);
+	} else if (rq_data_dir(io->cloned_request) == READ) {
+		if (io->should_decrypt)
+			req_cryptd_crypt_read_convert(io);
+		else
+			req_cryptd_crypt_read_plain(io);
+	} else {
+		DMERR("%s received non-read/write request for Clone %u\n",
 				__func__, (unsigned int)io->cloned_request);
+	}
 }
 
 static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
@@ -537,7 +659,7 @@
 	bvec = NULL;
 	if (rq_data_dir(clone) == WRITE) {
 		rq_for_each_segment(bvec, clone, iter1) {
-			if (bvec->bv_offset == 0) {
+			if (req_io->should_encrypt && bvec->bv_offset == 0) {
 				mempool_free(bvec->bv_page, req_page_pool);
 				bvec->bv_page = NULL;
 			} else
@@ -565,7 +687,6 @@
  * For a read request no pre-processing is required the request
  * is returned to dm once mapping is done
  */
-
 static int req_crypt_map(struct dm_target *ti, struct request *clone,
 			 union map_info *map_context)
 {
@@ -594,6 +715,11 @@
 	map_context->ptr = req_io;
 	atomic_set(&req_io->pending, 0);
 
+	if (rq_data_dir(clone) == WRITE)
+		req_io->should_encrypt = req_crypt_should_encrypt(req_io);
+	if (rq_data_dir(clone) == READ)
+		req_io->should_decrypt = req_crypt_should_deccrypt(req_io);
+
 	/* Get the queue of the underlying original device */
 	clone->q = bdev_get_queue(dev->bdev);
 	clone->rq_disk = dev->bdev->bd_disk;
@@ -641,6 +767,8 @@
 
 static void req_crypt_dtr(struct dm_target *ti)
 {
+	DMDEBUG("dm-req-crypt Destructor.\n");
+
 	if (req_crypt_queue) {
 		destroy_workqueue(req_crypt_queue);
 		req_crypt_queue = NULL;
@@ -670,6 +798,8 @@
 	char dummy;
 	int err = DM_REQ_CRYPT_ERROR;
 
+	DMDEBUG("dm-req-crypt Constructor.\n");
+
 	if (argc < 5) {
 		DMERR(" %s Not enough args\n", __func__);
 		err = DM_REQ_CRYPT_ERROR;
@@ -696,13 +826,24 @@
 			goto ctr_exit;
 		}
 	} else {
-		DMERR(" %s Arg[4]invalid\n", __func__);
+		DMERR(" %s Arg[4] invalid\n", __func__);
 		err =  DM_REQ_CRYPT_ERROR;
 		goto ctr_exit;
 	}
 
 	start_sector_orig = tmpll;
 
+	if (argv[5]) {
+		if (!strcmp(argv[5], "fde_enabled"))
+			is_fde_enabled = true;
+		else
+			is_fde_enabled = false;
+	} else {
+		DMERR(" %s Arg[5] invalid, defaulting to FDE enabled.\n", __func__);
+		is_fde_enabled = true; /* backward compatible */
+	}
+	DMDEBUG("%s is_fde_enabled=%d\n", __func__, is_fde_enabled);
+
 	req_crypt_queue = alloc_workqueue("req_cryptd",
 					WQ_NON_REENTRANT |
 					WQ_HIGHPRI |
@@ -725,6 +866,7 @@
 	}
 
 	req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
+	BUG_ON(!req_io_pool);
 	if (!req_io_pool) {
 		DMERR("%s req_io_pool not allocated\n", __func__);
 		err =  DM_REQ_CRYPT_ERROR;
@@ -791,6 +933,8 @@
 		kmem_cache_destroy(_req_crypt_io_pool);
 	}
 
+	DMINFO("dm-req-crypt successfully initialized.\n");
+
 	return r;
 }
 
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 69c5190..e8702e4 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -355,6 +355,7 @@
 	struct msm_vfe_src_info src_info[VFE_SRC_MAX];
 	uint16_t stream_handle_cnt;
 	unsigned long event_mask;
+	uint32_t burst_len;
 };
 
 struct msm_vfe_stats_hardware_info {
@@ -397,6 +398,7 @@
 	uint16_t stream_handle_cnt;
 	atomic_t stats_update;
 	uint32_t stats_mask;
+	uint32_t stats_burst_len;
 };
 
 struct msm_vfe_tasklet_queue_cmd {
@@ -480,8 +482,7 @@
 	struct list_head tasklet_q;
 	struct tasklet_struct vfe_tasklet;
 	struct msm_vfe_tasklet_queue_cmd
-		tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
-
+	tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
 	uint32_t soc_hw_version;
 	uint32_t vfe_hw_version;
 	struct msm_vfe_hardware_info *hw_info;
@@ -498,6 +499,7 @@
 	void __iomem *p_avtimer_lsw;
 	uint8_t ignore_error;
 	struct msm_isp_statistics *stats;
+	uint32_t vfe_ub_size;
 };
 
 #endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index e817680..353b55f 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -36,10 +36,10 @@
 #define VFE40_8x26_VERSION 0x20000013
 #define VFE40_8x26V2_VERSION 0x20010014
 
-#define VFE40_BURST_LEN 1
-#define VFE40_STATS_BURST_LEN 1
-#define VFE40_UB_SIZE 1536
-#define VFE40_EQUAL_SLICE_UB 228
+
+/* STATS_SIZE (BE + BG + BF + RS + CS + IHIST + BHIST) = 392 */
+#define VFE40_STATS_SIZE 392
+
 #define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
 #define VFE40_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
 #define VFE40_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
@@ -971,6 +971,11 @@
 	uint8_t plane_idx)
 {
 	uint32_t val;
+
+	struct msm_vfe_axi_shared_data *axi_data =
+		&vfe_dev->axi_data;
+	uint32_t burst_len = axi_data->burst_len;
+
 	uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
 
 	if (!stream_info->frame_based) {
@@ -992,7 +997,7 @@
 				plane_idx].output_stride) << 16 |
 			(stream_info->plane_cfg[
 				plane_idx].output_height - 1) << 4 |
-			VFE40_BURST_LEN;
+			burst_len;
 		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
 	} else {
 		msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
@@ -1002,7 +1007,7 @@
 				plane_idx].output_width) << 16 |
 			(stream_info->plane_cfg[
 				plane_idx].output_height - 1) << 4 |
-			VFE40_BURST_LEN;
+			burst_len;
 		msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
 	}
 
@@ -1117,6 +1122,7 @@
 	uint8_t num_used_wms = 0;
 	uint32_t prop_size = 0;
 	uint32_t wm_ub_size;
+	uint32_t axi_wm_ub;
 
 	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
 		if (axi_data->free_wm[i] > 0) {
@@ -1124,7 +1130,9 @@
 			total_image_size += axi_data->wm_image_size[i];
 		}
 	}
-	prop_size = MSM_ISP40_TOTAL_WM_UB -
+	axi_wm_ub = vfe_dev->vfe_ub_size - VFE40_STATS_SIZE;
+
+	prop_size = axi_wm_ub -
 		axi_data->hw_info->min_wm_ub * num_used_wms;
 	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
 		if (axi_data->free_wm[i]) {
@@ -1149,10 +1157,14 @@
 	int i;
 	uint32_t ub_offset = 0;
 	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+	uint32_t axi_equal_slice_ub =
+		(vfe_dev->vfe_ub_size - VFE40_STATS_SIZE)/
+			(axi_data->hw_info->num_wm - 1);
+
 	for (i = 0; i < axi_data->hw_info->num_wm; i++) {
-		msm_camera_io_w(ub_offset << 16 | (VFE40_EQUAL_SLICE_UB - 1),
+		msm_camera_io_w(ub_offset << 16 | (axi_equal_slice_ub - 1),
 			vfe_dev->vfe_base + VFE40_WM_BASE(i) + 0x10);
-		ub_offset += VFE40_EQUAL_SLICE_UB;
+		ub_offset += axi_equal_slice_ub;
 	}
 }
 
@@ -1334,7 +1346,11 @@
 static void msm_vfe40_stats_cfg_ub(struct vfe_device *vfe_dev)
 {
 	int i;
-	uint32_t ub_offset = VFE40_UB_SIZE;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	uint32_t ub_offset = vfe_dev->vfe_ub_size;
+	uint32_t stats_burst_len = stats_data->stats_burst_len;
+
+
 	uint32_t ub_size[VFE40_NUM_STATS_TYPE] = {
 		64, /*MSM_ISP_STATS_BE*/
 		128, /*MSM_ISP_STATS_BG*/
@@ -1348,7 +1364,7 @@
 
 	for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
 		ub_offset -= ub_size[i];
-		msm_camera_io_w(VFE40_STATS_BURST_LEN << 30 |
+		msm_camera_io_w(stats_burst_len << 30 |
 			ub_offset << 16 | (ub_size[i] - 1),
 			vfe_dev->vfe_base + VFE40_STATS_BASE(i) + 0xC);
 	}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 4c3a3d5..d11ea68 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -524,7 +524,9 @@
 			stream_info->format_factor / ISP_Q2;
 	} else {
 		int rdi = SRC_TO_INTF(stream_info->stream_src);
-		stream_info->bandwidth = axi_data->src_info[rdi].pixel_clock;
+		if (rdi < VFE_SRC_MAX)
+			stream_info->bandwidth =
+				axi_data->src_info[rdi].pixel_clock;
 	}
 }
 
@@ -534,6 +536,7 @@
 	uint32_t io_format = 0;
 	struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
 	struct msm_vfe_axi_stream *stream_info;
+	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
 
 	rc = msm_isp_axi_create_stream(
 		&vfe_dev->axi_data, stream_cfg_cmd);
@@ -581,6 +584,8 @@
 
 	msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
 	stream_info->vt_enable = stream_cfg_cmd->vt_enable;
+	axi_data->burst_len = stream_cfg_cmd->burst_len;
+
 	if (stream_info->vt_enable) {
 		vfe_dev->vt_enable = stream_info->vt_enable;
 	#ifdef CONFIG_MSM_AVTIMER
@@ -853,8 +858,11 @@
 	struct msm_isp_event_data buf_event;
 	struct timeval *time_stamp;
 	uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
-	uint32_t frame_id = vfe_dev->axi_data.
-		src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+	uint32_t src_intf = SRC_TO_INTF(stream_info->stream_src);
+	uint32_t frame_id = 0;
+	if (src_intf < VFE_SRC_MAX) {
+		frame_id = vfe_dev->axi_data.src_info[src_intf].frame_id;
+	}
 
 	if (buf && ts) {
 		if (vfe_dev->vt_enable) {
@@ -1196,7 +1204,7 @@
 			enum msm_isp_camif_update_state camif_update)
 {
 	int i, rc = 0;
-	uint8_t src_state, wait_for_complete = 0;
+	uint8_t src_state = 0, wait_for_complete = 0;
 	uint32_t wm_reload_mask = 0x0;
 	struct msm_vfe_axi_stream *stream_info;
 	struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
@@ -1212,8 +1220,9 @@
 		}
 		stream_info = &axi_data->stream_info[
 			HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
-		src_state = axi_data->src_info[
-			SRC_TO_INTF(stream_info->stream_src)].active;
+		if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+			src_state = axi_data->src_info[
+				SRC_TO_INTF(stream_info->stream_src)].active;
 
 		msm_isp_calculate_bandwidth(axi_data, stream_info);
 		msm_isp_reset_framedrop(vfe_dev, stream_info);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index 6bd7585..d4c86a5 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -490,6 +490,9 @@
 {
 	int rc = 0;
 	struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+	struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+	stats_data->stats_burst_len = stream_cfg_cmd->stats_burst_len;
+
 	if (vfe_dev->stats_data.num_active_stream == 0)
 		vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
 
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index ebdca89..a81c7bb 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -701,6 +701,10 @@
 		}
 		break;
 	}
+	case SET_WM_UB_SIZE: {
+		vfe_dev->vfe_ub_size = *cfg_data;
+		break;
+	}
 	}
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
index 407b81f..2f943a4 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
@@ -347,8 +347,12 @@
 		JPEG_DBG("%s:%d]", __func__, __LINE__);
 	}
 #endif
+	if (pgmn_dev->jpeg_bus_client) {
+		msm_bus_scale_client_update_request(
+			pgmn_dev->jpeg_bus_client, 0);
+		msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
+	}
 
-	msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
 	msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
 	pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 0);
 	JPEG_DBG("%s:%d] clock disbale done", __func__, __LINE__);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
index bf66442..22498a4 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -1102,7 +1102,7 @@
 {
 	struct cci_device *new_cci_dev;
 	int rc = 0;
-	pr_err("%s: pdev %p device id = %d\n", __func__, pdev, pdev->id);
+	CDBG("%s: pdev %p device id = %d\n", __func__, pdev, pdev->id);
 	new_cci_dev = kzalloc(sizeof(struct cci_device), GFP_KERNEL);
 	if (!new_cci_dev) {
 		CDBG("%s: no enough memory\n", __func__);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c b/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c
index 7649a40..059780d 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c
@@ -131,12 +131,6 @@
 		e_ctrl->cal_data.mapdata,
 		cdata->cfg.read_data.num_bytes);
 
-	/* should only be called once.  free kernel resource */
-	if (!rc) {
-		kfree(e_ctrl->cal_data.mapdata);
-		kfree(e_ctrl->cal_data.map);
-		memset(&e_ctrl->cal_data, 0, sizeof(e_ctrl->cal_data));
-	}
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
index cc301ae..c321a68 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
@@ -1068,11 +1068,6 @@
 	return 0;
 power_up_failed:
 	pr_err("%s:%d failed\n", __func__, __LINE__);
-	if (device_type == MSM_CAMERA_PLATFORM_DEVICE) {
-		sensor_i2c_client->i2c_func_tbl->i2c_util(
-			sensor_i2c_client, MSM_CCI_RELEASE);
-	}
-
 	for (index--; index >= 0; index--) {
 		CDBG("%s index %d\n", __func__, index);
 		power_setting = &ctrl->power_setting[index];
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c
index c50a623..c848287 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c
@@ -50,7 +50,7 @@
 	int rc;
 	int tm = 10000;
 	if (s_init->module_init_status == 1) {
-		pr_err("msm_cam_get_module_init_status -2\n");
+		CDBG("msm_cam_get_module_init_status -2\n");
 		return 0;
 	}
 	rc = wait_event_interruptible_timeout(s_init->state_wait,
@@ -137,7 +137,7 @@
 		return -ENOMEM;
 	}
 
-	pr_err("MSM_SENSOR_INIT_MODULE %p", NULL);
+	CDBG("MSM_SENSOR_INIT_MODULE %p", NULL);
 
 	/* Initialize mutex */
 	mutex_init(&s_init->imutex);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
old mode 100644
new mode 100755
index 071f698..9f0dac4
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -3467,13 +3467,13 @@
 				capability->height.min);
 			rc = -ENOTSUPP;
 		}
-
-		if (!rc) {
-			rc = call_hfi_op(hdev, capability_check,
-					inst->fmts[OUTPUT_PORT]->fourcc,
-					inst->prop.width[CAPTURE_PORT],
-					&capability->width.max,
-					&capability->height.max);
+		if (!rc && (inst->prop.width[CAPTURE_PORT] >
+			capability->width.max)) {
+			dprintk(VIDC_ERR,
+				"Unsupported width = %u supported max width = %u\n",
+				inst->prop.width[CAPTURE_PORT],
+				capability->width.max);
+			rc = -ENOTSUPP;
 		}
 		if (!rc && (inst->prop.height[CAPTURE_PORT]
 			* inst->prop.width[CAPTURE_PORT] >
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
old mode 100644
new mode 100755
index 475683c..0ac6fc4
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -20,7 +20,6 @@
 int msm_fw_debug = 0x18;
 int msm_fw_debug_mode = 0x1;
 int msm_fw_low_power_mode = 0x1;
-int msm_vp8_low_tier = 0x1;
 int msm_vidc_hw_rsp_timeout = 1000;
 
 struct debug_buffer {
@@ -184,11 +183,6 @@
 		dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
 		goto failed_create_dir;
 	}
-	if (!debugfs_create_u32("vp8_low_tier", S_IRUGO | S_IWUSR,
-			parent, &msm_vp8_low_tier)) {
-		dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
-		goto failed_create_dir;
-	}
 	if (!debugfs_create_u32("debug_output", S_IRUGO | S_IWUSR,
 			parent, &msm_vidc_debug_out)) {
 		dprintk(VIDC_ERR, "debugfs_create_file: fail\n");
diff --git a/drivers/media/platform/msm/vidc/q6_hfi.c b/drivers/media/platform/msm/vidc/q6_hfi.c
old mode 100644
new mode 100755
index bbba29a..486d740
--- a/drivers/media/platform/msm/vidc/q6_hfi.c
+++ b/drivers/media/platform/msm/vidc/q6_hfi.c
@@ -1318,24 +1318,6 @@
 	return rc;
 }
 
-int q6_hfi_capability_check(u32 fourcc, u32 width,
-				u32 *max_width, u32 *max_height)
-{
-	int rc = 0;
-	if (!max_width || !max_height) {
-		dprintk(VIDC_ERR, "%s - invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	if (width > *max_width) {
-		dprintk(VIDC_ERR,
-			"Unsupported width = %u supported max width = %u\n",
-			width, *max_width);
-		rc = -ENOTSUPP;
-	}
-	return rc;
-}
-
 static void q6_hfi_unload_fw(void *hfi_device_data)
 {
 	struct q6_hfi_device *device = hfi_device_data;
@@ -1390,7 +1372,6 @@
 	hdev->unset_ocmem = q6_hfi_unset_ocmem;
 	hdev->iommu_get_domain_partition = q6_hfi_iommu_get_domain_partition;
 	hdev->load_fw = q6_hfi_load_fw;
-	hdev->capability_check = q6_hfi_capability_check;
 	hdev->unload_fw = q6_hfi_unload_fw;
 	hdev->get_stride_scanline = q6_hfi_get_stride_scanline;
 }
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
old mode 100644
new mode 100755
index 30ee45d..448fe3b
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3747,29 +3747,6 @@
 	return rc;
 }
 
-int venus_hfi_capability_check(u32 fourcc, u32 width,
-				u32 *max_width, u32 *max_height)
-{
-	int rc = 0;
-	if (!max_width || !max_height) {
-		dprintk(VIDC_ERR, "%s - invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	if (msm_vp8_low_tier && fourcc == V4L2_PIX_FMT_VP8) {
-		*max_width = DEFAULT_WIDTH;
-		*max_height = DEFAULT_HEIGHT;
-	}
-
-	if (width > *max_width) {
-		dprintk(VIDC_ERR,
-			"Unsupported width = %u supported max width = %u\n",
-			width, *max_width);
-		rc = -ENOTSUPP;
-	}
-	return rc;
-}
-
 static void *venus_hfi_add_device(u32 device_id,
 			struct msm_vidc_platform_resources *res,
 			hfi_cmd_response_callback callback)
@@ -3932,7 +3909,6 @@
 	hdev->get_info = venus_hfi_get_info;
 	hdev->get_stride_scanline = venus_hfi_get_stride_scanline;
 	hdev->get_core_capabilities = venus_hfi_get_core_capabilities;
-	hdev->capability_check = venus_hfi_capability_check;
 	hdev->power_enable = venus_hfi_power_enable;
 }
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
old mode 100644
new mode 100755
index 62507a1..38c5bdb
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -1168,8 +1168,6 @@
 	int (*get_info) (void *dev, enum dev_info info);
 	int (*get_stride_scanline)(int color_fmt, int width,
 		int height,	int *stride, int *scanlines);
-	int (*capability_check)(u32 fourcc, u32 width,
-			u32 *max_width, u32 *max_height);
 	int (*session_clean)(void *sess);
 	int (*get_core_capabilities)(void);
 	int (*power_enable)(void *dev);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 23edc8a..1a11541 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -2947,9 +2947,10 @@
 	struct qseecom_command_scm_resp resp;
 	int ret;
 
-	if (usage != QSEOS_KM_USAGE_DISK_ENCRYPTION) {
-		pr_err("Error:: unsupported usage %d\n", usage);
-		return -EFAULT;
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+			pr_err("Error:: unsupported usage %d\n", usage);
+			return -EFAULT;
 	}
 
 	__qseecom_enable_clk(CLK_QSEE);
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index 2dfa404..1ef9886 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -2210,6 +2210,7 @@
 	struct tspp_mem_buffer *buffer;
 	struct tspp_channel *channel;
 	struct tspp_device *pdev;
+	unsigned long flags;
 
 	if (channel_id >= TSPP_NUM_CHANNELS) {
 		pr_err("tspp: channel id out of range");
@@ -2220,9 +2221,13 @@
 		pr_err("tspp_get: can't find device %i", dev);
 		return NULL;
 	}
+
+	spin_lock_irqsave(&pdev->spinlock, flags);
+
 	channel = &pdev->channels[channel_id];
 
 	if (!channel->read) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
 		pr_warn("tspp: no buffer to get on channel %i!",
 			channel->id);
 		return NULL;
@@ -2230,8 +2235,10 @@
 
 	buffer = channel->read;
 	/* see if we have any buffers ready to read */
-	if (buffer->state != TSPP_BUF_STATE_DATA)
-		return 0;
+	if (buffer->state != TSPP_BUF_STATE_DATA) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
+		return NULL;
+	}
 
 	if (buffer->state == TSPP_BUF_STATE_DATA) {
 		/* mark the buffer as busy */
@@ -2241,6 +2248,8 @@
 		channel->read = channel->read->next;
 	}
 
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
 	return &buffer->desc;
 }
 EXPORT_SYMBOL(tspp_get_buffer);
@@ -2261,6 +2270,7 @@
 	struct tspp_mem_buffer *buffer;
 	struct tspp_channel *channel;
 	struct tspp_device *pdev;
+	unsigned long flags;
 
 	if (channel_id >= TSPP_NUM_CHANNELS) {
 		pr_err("tspp: channel id out of range");
@@ -2271,6 +2281,9 @@
 		pr_err("tspp: can't find device %i", dev);
 		return -ENODEV;
 	}
+
+	spin_lock_irqsave(&pdev->spinlock, flags);
+
 	channel = &pdev->channels[channel_id];
 
 	if (descriptor_id > channel->buffer_count)
@@ -2288,12 +2301,14 @@
 	channel->locked = channel->locked->next;
 
 	if (!found) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
 		pr_err("tspp: cant find desc %i", descriptor_id);
 		return -EINVAL;
 	}
 
 	/* make sure the buffer is in the expected state */
 	if (buffer->state != TSPP_BUF_STATE_LOCKED) {
+		spin_unlock_irqrestore(&pdev->spinlock, flags);
 		pr_err("tspp: buffer %i not locked", descriptor_id);
 		return -EINVAL;
 	}
@@ -2302,6 +2317,9 @@
 
 	if (tspp_queue_buffer(channel, buffer))
 		pr_warn("tspp: can't requeue buffer");
+
+	spin_unlock_irqrestore(&pdev->spinlock, flags);
+
 	return 0;
 }
 EXPORT_SYMBOL(tspp_release_buffer);
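
The tspp_get_buffer()/tspp_release_buffer() changes above all follow one locking rule: once pdev->spinlock is taken with spin_lock_irqsave(), every early-return path has to drop it with spin_unlock_irqrestore() before returning. A minimal sketch of that shape, using placeholder types and names rather than the real tspp structures (illustrative only, not part of the patch):

#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;
	void *ready_item;
};

/* Take the ready item, or return NULL; the lock is dropped on every path. */
static void *demo_take_item(struct demo_dev *dev)
{
	unsigned long flags;
	void *item;

	spin_lock_irqsave(&dev->lock, flags);
	item = dev->ready_item;
	if (!item) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return NULL;
	}
	dev->ready_item = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);
	return item;
}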
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 36bf643..da62cb0 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3129,6 +3129,19 @@
 		return 1;
 
 	ret = host->bus_ops->alive(host);
+
+	/*
+	 * Card detect status and alive check may be out of sync if card is
+	 * removed slowly, when card detect switch changes while card/slot
+	 * pads are still contacted in hardware (refer to "SD Card Mechanical
+	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
+	 * detect work 200ms later for this case.
+	 */
+	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
+		mmc_detect_change(host, msecs_to_jiffies(200));
+		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
+	}
+
 	if (ret) {
 		mmc_card_set_removed(host->card);
 		pr_debug("%s: card remove detected\n", mmc_hostname(host));
@@ -3229,8 +3242,12 @@
 	 */
 	mmc_bus_put(host);
 
-	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
+	if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
+		mmc_claim_host(host);
+		mmc_power_off(host);
+		mmc_release_host(host);
 		goto out;
+	}
 
 	mmc_rpm_hold(host, &host->class_dev);
 	mmc_claim_host(host);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 277aef5..ba07348 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1567,6 +1567,7 @@
 					MMC_SEND_TUNING_BLOCK_HS200 :
 					MMC_SEND_TUNING_BLOCK;
 				host->mrq = NULL;
+				host->flags &= ~SDHCI_NEEDS_RETUNING;
 				spin_unlock_irqrestore(&host->lock, flags);
 				sdhci_execute_tuning(mmc, tuning_opcode);
 				spin_lock_irqsave(&host->lock, flags);
@@ -2561,6 +2562,7 @@
 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 {
 	u16 auto_cmd_status;
+	u32 command;
 	BUG_ON(intmask == 0);
 
 	if (!host->cmd) {
@@ -2592,8 +2594,13 @@
 	}
 
 	if (host->cmd->error) {
-		if (host->cmd->error == -EILSEQ)
-			host->flags |= SDHCI_NEEDS_RETUNING;
+		command = SDHCI_GET_CMD(sdhci_readw(host,
+						    SDHCI_COMMAND));
+		if (host->cmd->error == -EILSEQ &&
+		    (command != MMC_SEND_TUNING_BLOCK_HS400) &&
+		    (command != MMC_SEND_TUNING_BLOCK_HS200) &&
+		    (command != MMC_SEND_TUNING_BLOCK))
+				host->flags |= SDHCI_NEEDS_RETUNING;
 		tasklet_schedule(&host->finish_tasklet);
 		return;
 	}
diff --git a/drivers/platform/msm/pft.c b/drivers/platform/msm/pft.c
index 20e7249..513aefb 100644
--- a/drivers/platform/msm/pft.c
+++ b/drivers/platform/msm/pft.c
@@ -162,6 +162,8 @@
 /* Device Driver State */
 static struct pft_device *pft_dev;
 
+static struct inode *pft_bio_get_inode(struct bio *bio);
+
 /**
  * pft_is_ready() - driver is initialized and ready.
  *
@@ -267,10 +269,11 @@
 	int ret = -ENOENT;
 	struct pft_file_info *tmp = NULL;
 	struct list_head *pos = NULL;
+	struct list_head *next = NULL;
 	bool found = false;
 
 	mutex_lock(&pft_dev->lock);
-	list_for_each(pos, &pft_dev->open_file_list) {
+	list_for_each_safe(pos, next, &pft_dev->open_file_list) {
 		tmp = list_entry(pos, struct pft_file_info, list);
 		if (filp == tmp->file) {
 			found = true;
@@ -394,6 +397,9 @@
 {
 	struct inode_security_struct *isec = inode->i_security;
 
+	if (isec == NULL)
+		return false;
+
 	return ((isec->tag & PFT_TAG_MAGIC_MASK) == PFT_TAG_MAGIC) ?
 		true : false;
 }
@@ -568,27 +574,6 @@
 }
 
 /**
- * pft_is_encrypted_inode() - is the file encrypted.
- * @inode: inode of file to check.
- *
- * Return: true if the file is encrypted, false otherwise.
- */
-static bool pft_is_encrypted_inode(struct inode *inode)
-{
-	u32 tag;
-
-	if (!pft_is_ready())
-		return false;
-
-	if (!pft_is_xattr_supported(inode))
-		return false;
-
-	tag = pft_get_inode_tag(inode);
-
-	return pft_is_file_encrypted(tag);
-}
-
-/**
  * pft_is_inplace_inode() - is this the inode of file for
  * in-place encryption.
  * @inode: inode of file to check.
@@ -641,10 +626,11 @@
  *
  * Return: 0 on successe, negative value on failure.
  */
-int pft_get_key_index(struct inode *inode, u32 *key_index,
+int pft_get_key_index(struct bio *bio, u32 *key_index,
 		      bool *is_encrypted, bool *is_inplace)
 {
 	u32 tag = 0;
+	struct inode *inode = NULL;
 
 	if (!pft_is_ready())
 		return -ENODEV;
@@ -652,7 +638,7 @@
 	if (!selinux_is_enabled())
 		return -ENODEV;
 
-	if (!inode)
+	if (!bio)
 		return -EPERM;
 
 	if (!is_encrypted) {
@@ -668,6 +654,10 @@
 		return -EPERM;
 	}
 
+	inode = pft_bio_get_inode(bio);
+	if (!inode)
+		return -EINVAL;
+
 	if (!pft_is_tag_valid(inode)) {
 		pr_debug("file %s, Tag not valid\n", inode_to_filename(inode));
 		return -EINVAL;
@@ -703,8 +693,27 @@
  */
 static struct inode *pft_bio_get_inode(struct bio *bio)
 {
-	if (!bio || !bio->bi_io_vec || !bio->bi_io_vec->bv_page ||
-	    !bio->bi_io_vec->bv_page->mapping)
+	if (!bio)
+		return NULL;
+	if (!bio->bi_io_vec)
+		return NULL;
+	if (!bio->bi_io_vec->bv_page)
+		return NULL;
+
+	if (PageAnon(bio->bi_io_vec->bv_page)) {
+		struct inode *inode;
+
+		/* Using direct-io (O_DIRECT) without page cache */
+		inode = dio_bio_get_inode(bio);
+		pr_debug("inode on direct-io, inode = 0x%x.\n", (int) inode);
+
+		return inode;
+	}
+
+	if (!bio->bi_io_vec->bv_page->mapping)
+		return NULL;
+
+	if (!bio->bi_io_vec->bv_page->mapping->host)
 		return NULL;
 
 	return bio->bi_io_vec->bv_page->mapping->host;
@@ -733,12 +742,12 @@
 	if (!pft_is_ready())
 		return true;
 
-	ret = pft_get_key_index(pft_bio_get_inode(bio1), &key_index1,
+	ret = pft_get_key_index(bio1, &key_index1,
 				&is_encrypted1, &is_inplace);
 	if (ret)
 		is_encrypted1 = false;
 
-	ret = pft_get_key_index(pft_bio_get_inode(bio2), &key_index2,
+	ret = pft_get_key_index(bio2, &key_index2,
 				&is_encrypted2, &is_inplace);
 	if (ret)
 		is_encrypted2 = false;
@@ -881,92 +890,6 @@
 EXPORT_SYMBOL(pft_inode_mknod);
 
 /**
- * pft_inode_symlink() - symlink file hook (callback)
- * @dir:	directory inode pointer
- * @dentry:	file dentry pointer
- * @name:	Old file name
- *
- * Allow only enterprise app to create symlink to enterprise
- * file.
- * Call path:
- * vfs_symlink()->security_inode_symlink()->selinux_inode_symlink()
- *
- * Return: 0 on allowed operation, negative value otherwise.
- */
-int pft_inode_symlink(struct inode *dir, struct dentry *dentry,
-		      const char *name)
-{
-	struct inode *inode;
-
-	if (!dir) {
-		pr_err("dir is NULL.\n");
-		return 0;
-	}
-	if (!dentry) {
-		pr_err("dentry is NULL.\n");
-		return 0;
-	}
-	if (!name) {
-		pr_err("name is NULL.\n");
-		return 0;
-	}
-
-	pr_debug("symlink for file [%s] dir [%s] dentry [%s] started! ....\n",
-		 name, inode_to_filename(dir), dentry->d_iname);
-	inode = dentry->d_inode;
-
-	if (!dentry->d_inode) {
-		pr_debug("d_inode is NULL.\n");
-		return 0;
-	}
-
-	if (!pft_is_ready())
-		return 0;
-
-	/* do nothing for non-encrypted files */
-	if (!pft_is_encrypted_inode(inode))
-		return 0;
-
-	/*
-	 * Only PFM allowed to access in-place-encryption-file
-	 * during in-place-encryption process
-	 */
-	if (pft_is_inplace_inode(inode)) {
-		pr_err("symlink for in-place-encryption file %s by pid %d is blocked.\n",
-			 inode_to_filename(inode), current_pid());
-		return -EACCES;
-	}
-
-	switch (pft_dev->state) {
-	case PFT_STATE_DEACTIVATED:
-	case PFT_STATE_KEY_REMOVED:
-	case PFT_STATE_DEACTIVATING:
-	case PFT_STATE_REMOVING_KEY:
-		/* Block any access for encrypted files when key not loaded */
-		pr_debug("key not loaded. uid (%u) can not access file %s\n",
-			 current_uid(), inode_to_filename(inode));
-		return -EACCES;
-	case PFT_STATE_KEY_LOADED:
-		 /* Only registered apps may access encrypted files. */
-		if (!pft_is_current_process_registered()) {
-			pr_err("unregistered app uid %u pid %u is trying to access encrypted file %s\n",
-			       current_uid(), current_pid(), name);
-			return -EACCES;
-		}
-		break;
-	default:
-		BUG(); /* State is set by "set state" command */
-		break;
-	}
-
-	pr_debug("symlink for file %s ok.\n", name);
-
-	return 0;
-
-}
-EXPORT_SYMBOL(pft_inode_symlink);
-
-/**
  * pft_inode_rename() - file rename hook.
  * @inode:	directory inode
  * @dentry:	file dentry
@@ -1025,12 +948,17 @@
  */
 int pft_file_open(struct file *filp, const struct cred *cred)
 {
+	int ret;
+
 	if (!filp || !filp->f_path.dentry)
 		return 0;
 
 	if (!pft_is_ready())
 		return 0;
 
+	if (filp->f_flags & O_DIRECT)
+		pr_debug("file %s using O_DIRECT.\n", file_to_filename(filp));
+
 	/* do nothing for non-encrypted files */
 	if (!pft_is_encrypted_file(filp->f_dentry))
 		return 0;
@@ -1063,7 +991,12 @@
 			return -EACCES;
 		}
 
-		pft_add_file(filp);
+		ret = pft_add_file(filp);
+		if (ret) {
+			pr_err("failed to add file %s to the list.\n",
+			       file_to_filename(filp));
+			return -EFAULT;
+		}
 		break;
 	default:
 		BUG(); /* State is set by "set state" command */
@@ -1230,7 +1163,7 @@
 	if (pft_is_inplace_inode(inode)) {
 		pr_err("block delete in-place-encryption file %s by uid [%d] pid [%d], while encryption in progress.\n",
 		       inode_to_filename(inode), current_uid(), current_pid());
-		return -EACCES;
+		return -EBUSY;
 	}
 
 	if (!pft_is_current_process_registered()) {
@@ -1562,7 +1495,7 @@
  */
 static int pft_handle_command(void *buf, int buf_size)
 {
-	size_t ret = 0;
+	int ret = 0;
 	struct pft_command *command = NULL;
 
 	/* opcode field is the minimum length of command */
@@ -1620,7 +1553,7 @@
 static int pft_device_release(struct inode *inode, struct file *file)
 {
 	mutex_lock(&pft_dev->lock);
-	if (0 < pft_dev->open_count)
+	if (pft_dev->open_count > 0)
 		pft_dev->open_count--;
 	pft_dev->pfm_pid = UINT_MAX;
 	mutex_unlock(&pft_dev->lock);
@@ -1769,15 +1702,32 @@
 
 }
 
+static void  __exit pft_free_open_files_list(void)
+{
+	struct pft_file_info *tmp = NULL;
+	struct list_head *pos = NULL;
+	struct list_head *next = NULL;
+
+	mutex_lock(&pft_dev->lock);
+	list_for_each_safe(pos, next, &pft_dev->open_file_list) {
+		tmp = list_entry(pos, struct pft_file_info, list);
+		list_del(&tmp->list);
+		kfree(tmp);
+	}
+	mutex_unlock(&pft_dev->lock);
+}
+
 static void __exit pft_exit(void)
 {
 	if (pft_dev == NULL)
 		return;
 
 	pft_unregister_chrdev();
+	pft_free_open_files_list();
 
 	kfree(pft_dev->uid_table);
 	kfree(pft_dev);
+	pft_dev = NULL;
 }
 
 static int __init pft_init(void)
@@ -1790,13 +1740,14 @@
 		pr_err("No memory for device structr\n");
 		return -ENOMEM;
 	}
+	pft_dev = dev;
 
 	dev->state = PFT_STATE_DEACTIVATED;
+	dev->pfm_pid = UINT_MAX;
+
 	INIT_LIST_HEAD(&dev->open_file_list);
 	mutex_init(&dev->lock);
 
-	pft_dev = dev;
-
 	ret = pft_register_chardev();
 	if (ret) {
 		pr_err("create character device failed.\n");
@@ -1810,6 +1761,7 @@
 fail:
 	pr_err("Failed to init driver.\n");
 	kfree(dev);
+	pft_dev = NULL;
 
 	return -ENODEV;
 }
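
pft_get_key_index() now takes the bio itself because pages submitted through O_DIRECT are anonymous and have no page-cache mapping, so the inode has to be recovered from the bio rather than from bv_page->mapping->host. The lookup order implemented by pft_bio_get_inode() above, condensed into a stand-alone sketch (illustrative only; it relies on the dio_bio_get_inode() helper added to fs/direct-io.c later in this patch):

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/mm.h>

static struct inode *sketch_bio_to_inode(struct bio *bio)
{
	if (!bio || !bio->bi_io_vec || !bio->bi_io_vec->bv_page)
		return NULL;

	/* O_DIRECT pages are anonymous; the inode was stashed on the bio */
	if (PageAnon(bio->bi_io_vec->bv_page))
		return dio_bio_get_inode(bio);

	/* page-cache pages: walk back through the owning address_space */
	if (!bio->bi_io_vec->bv_page->mapping)
		return NULL;
	return bio->bi_io_vec->bv_page->mapping->host;
}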
diff --git a/drivers/power/qpnp-charger.c b/drivers/power/qpnp-charger.c
index d5c753f..94d48e6 100644
--- a/drivers/power/qpnp-charger.c
+++ b/drivers/power/qpnp-charger.c
@@ -32,6 +32,9 @@
 #include <linux/qpnp-revid.h>
 #include <linux/android_alarm.h>
 #include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/qpnp/pin.h>
 
 /* Interrupt offsets */
 #define INT_RT_STS(base)			(base + 0x10)
@@ -95,6 +98,8 @@
 #define CHGR_BUCK_PSTG_CTRL			0x73
 #define CHGR_BUCK_COMPARATOR_OVRIDE_1		0xEB
 #define CHGR_BUCK_COMPARATOR_OVRIDE_3		0xED
+#define CHG_OVR0				0xED
+#define CHG_TRICKLE_CLAMP			0xE3
 #define CHGR_BUCK_BCK_VBAT_REG_MODE		0x74
 #define MISC_REVISION2				0x01
 #define USB_OVP_CTL				0x42
@@ -388,6 +393,9 @@
 	struct work_struct		reduce_power_stage_work;
 	bool				power_stage_workaround_running;
 	bool				power_stage_workaround_enable;
+	bool				ext_ovp_ic_gpio_enabled;
+	unsigned int			ext_ovp_isns_gpio;
+	unsigned int			usb_trim_default;
 };
 
 static void
@@ -430,6 +438,81 @@
 	USBIN_OVP,
 };
 
+static int ext_ovp_isns_present;
+module_param(ext_ovp_isns_present, int, 0444);
+static int ext_ovp_isns_r;
+module_param(ext_ovp_isns_r, int, 0444);
+
+static bool ext_ovp_isns_online;
+static long ext_ovp_isns_ua;
+#define MAX_CURRENT_LENGTH_9A	10
+#define ISNS_CURRENT_RATIO	2500
+static int ext_ovp_isns_read(char *buffer, const struct kernel_param *kp)
+{
+	int rc;
+	struct qpnp_vadc_result results;
+	struct power_supply *batt_psy = power_supply_get_by_name("battery");
+	struct qpnp_chg_chip *chip = container_of(batt_psy,
+				struct qpnp_chg_chip, batt_psy);
+
+	if (!ext_ovp_isns_present)
+		return 0;
+
+	rc = qpnp_vadc_read(chip->vadc_dev, P_MUX7_1_1, &results);
+	if (rc) {
+		pr_err("Unable to read vbat rc=%d\n", rc);
+		return 0;
+	}
+
+	pr_debug("voltage %lld uV, current: %d mA\n", results.physical,
+			((int) results.physical /
+			 (ext_ovp_isns_r / ISNS_CURRENT_RATIO)));
+
+	return snprintf(buffer, MAX_CURRENT_LENGTH_9A, "%d\n",
+			((int)results.physical /
+			 (ext_ovp_isns_r / ISNS_CURRENT_RATIO)));
+}
+
+static int ext_ovp_isns_enable(const char *val, const struct kernel_param *kp)
+{
+	int rc;
+	struct power_supply *batt_psy = power_supply_get_by_name("battery");
+	struct qpnp_chg_chip *chip = container_of(batt_psy,
+				struct qpnp_chg_chip, batt_psy);
+
+	rc = param_set_bool(val, kp);
+	if (rc) {
+		pr_err("Unable to set gpio en: %d\n", rc);
+		return rc;
+	}
+
+	if (*(bool *)kp->arg) {
+		gpio_direction_output(
+						chip->ext_ovp_isns_gpio, 1);
+		chip->ext_ovp_ic_gpio_enabled = 1;
+		pr_debug("enabled GPIO\n");
+	} else {
+		gpio_direction_output(
+						chip->ext_ovp_isns_gpio, 0);
+		chip->ext_ovp_ic_gpio_enabled = 0;
+		pr_debug("disabled GPIO\n");
+	}
+
+	return rc;
+}
+
+static struct kernel_param_ops ext_ovp_isns_ops = {
+	.get = ext_ovp_isns_read,
+};
+module_param_cb(ext_ovp_isns_ua, &ext_ovp_isns_ops, &ext_ovp_isns_ua, 0644);
+
+static struct kernel_param_ops ext_ovp_en_ops = {
+	.set = ext_ovp_isns_enable,
+	.get = param_get_bool,
+};
+module_param_cb(ext_ovp_isns_online, &ext_ovp_en_ops,
+		&ext_ovp_isns_online, 0664);
+
 static inline int
 get_bpd(const char *name)
 {
@@ -886,6 +969,7 @@
 	return rc;
 }
 
+#define IOVP_USB_WALL_TRSH_MA   150
 static int
 qpnp_chg_iusbmax_set(struct qpnp_chg_chip *chip, int mA)
 {
@@ -1319,7 +1403,13 @@
 	if ((qpnp_chg_is_usb_chg_plugged_in(chip)
 			|| qpnp_chg_is_dc_chg_plugged_in(chip))
 			&& (usb_sts & CHG_GONE_IRQ)) {
+		if (ext_ovp_isns_present) {
+			pr_debug("EXT OVP IC ISNS disabled due to ARB WA\n");
+			gpio_direction_output(chip->ext_ovp_isns_gpio, 0);
+		}
+
 		qpnp_chg_charge_en(chip, 0);
+
 		qpnp_chg_force_run_on_batt(chip, 1);
 		schedule_delayed_work(&chip->arb_stop_work,
 			msecs_to_jiffies(ARB_STOP_WORK_MS));
@@ -1652,6 +1742,7 @@
 
 			qpnp_chg_usb_suspend_enable(chip, 0);
 			qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
+			qpnp_chg_iusb_trim_set(chip, chip->usb_trim_default);
 			chip->prev_usb_max_ma = -EINVAL;
 			chip->aicl_settled = false;
 		} else {
@@ -1814,7 +1905,8 @@
 		chip->dc_present = dc_present;
 		if (qpnp_chg_is_otg_en_set(chip))
 			qpnp_chg_force_run_on_batt(chip, !dc_present ? 1 : 0);
-		if (!dc_present && !qpnp_chg_is_usb_chg_plugged_in(chip)) {
+		if (!dc_present && (!qpnp_chg_is_usb_chg_plugged_in(chip) ||
+					qpnp_chg_is_otg_en_set(chip))) {
 			chip->delta_vddmax_mv = 0;
 			qpnp_chg_set_appropriate_vddmax(chip);
 			chip->chg_done = false;
@@ -1983,6 +2075,13 @@
 			}
 			if (chip->parallel_ovp_mode)
 				switch_parallel_ovp_mode(chip, 1);
+
+			if (ext_ovp_isns_present &&
+					chip->ext_ovp_ic_gpio_enabled) {
+				pr_debug("EXT OVP IC ISNS enabled\n");
+				gpio_direction_output(
+						chip->ext_ovp_isns_gpio, 1);
+			}
 		} else {
 			if (chip->parallel_ovp_mode)
 				switch_parallel_ovp_mode(chip, 0);
@@ -2211,7 +2310,7 @@
 static int ext_ovp_present;
 module_param(ext_ovp_present, int, 0444);
 
-#define OVP_USB_WALL_TRSH_MA	200
+#define OVP_USB_WALL_TRSH_MA   200
 static int
 qpnp_power_get_property_mains(struct power_supply *psy,
 				  enum power_supply_property psp,
@@ -2594,12 +2693,18 @@
 					if (!qpnp_is_dc_higher_prio(chip))
 						qpnp_chg_idcmax_set(chip,
 							QPNP_CHG_I_MAX_MIN_100);
-					if (!ext_ovp_present)
-						qpnp_chg_iusbmax_set(chip,
-							USB_WALL_THRESHOLD_MA);
-					else
+					if (unlikely(ext_ovp_present)) {
 						qpnp_chg_iusbmax_set(chip,
 							OVP_USB_WALL_TRSH_MA);
+					} else if (unlikely(
+							ext_ovp_isns_present)) {
+						qpnp_chg_iusb_trim_set(chip, 0);
+						qpnp_chg_iusbmax_set(chip,
+							IOVP_USB_WALL_TRSH_MA);
+					} else {
+						qpnp_chg_iusbmax_set(chip,
+							USB_WALL_THRESHOLD_MA);
+					}
 			} else {
 				qpnp_chg_iusbmax_set(chip, ret.intval / 1000);
 			}
@@ -3141,6 +3246,12 @@
 
 	if (qpnp_chg_is_usb_chg_plugged_in(chip) &&
 			(chip->flags & BOOST_FLASH_WA)) {
+
+		if (ext_ovp_isns_present && chip->ext_ovp_ic_gpio_enabled) {
+			pr_debug("EXT OVP IC ISNS disabled\n");
+			gpio_direction_output(chip->ext_ovp_isns_gpio, 0);
+		}
+
 		qpnp_chg_usb_suspend_enable(chip, 1);
 
 		rc = qpnp_chg_masked_write(chip,
@@ -3258,6 +3369,11 @@
 		qpnp_chg_usb_suspend_enable(chip, 0);
 	}
 
+	if (ext_ovp_isns_present && chip->ext_ovp_ic_gpio_enabled) {
+		pr_debug("EXT OVP IC ISNS enabled\n");
+		gpio_direction_output(chip->ext_ovp_isns_gpio, 1);
+	}
+
 	return rc;
 }
 
@@ -4470,6 +4586,25 @@
 			CHGR_IBAT_TERM_CHGR,
 			0xFF, 0x08, 1);
 
+		/* HACK: trkl stuck workaround */
+
+		rc = qpnp_chg_masked_write(chip,
+			chip->chgr_base + SEC_ACCESS,
+			0xFF,
+			0xA5, 1);
+
+		rc = qpnp_chg_masked_write(chip, chip->chgr_base +
+			CHG_OVR0,
+			0xFF, 0x00, 1);
+
+		rc = qpnp_chg_masked_write(chip,
+			chip->chgr_base + SEC_ACCESS,
+			0xFF,
+			0xA5, 1);
+
+		rc = qpnp_chg_masked_write(chip, chip->chgr_base +
+			CHG_TRICKLE_CLAMP,
+			0xFF, 0x00, 1);
 		break;
 	case SMBB_BUCK_SUBTYPE:
 	case SMBBP_BUCK_SUBTYPE:
@@ -4797,6 +4932,17 @@
 	ext_ovp_present = of_property_read_bool(chip->spmi->dev.of_node,
 					"qcom,ext-ovp-present");
 
+	/* Check if external IOVP part is configured */
+	chip->ext_ovp_isns_gpio = of_get_named_gpio(chip->spmi->dev.of_node,
+					"qcom,ext-ovp-isns-enable-gpio", 0);
+	if (gpio_is_valid(chip->ext_ovp_isns_gpio)) {
+		ext_ovp_isns_present = true;
+		rc = of_property_read_u32(chip->spmi->dev.of_node,
+				"qcom,ext-ovp-isns-r-ohm", &ext_ovp_isns_r);
+		if (rc)
+			return rc;
+	}
+
 	/* Get the charging-disabled property */
 	chip->charging_disabled = of_property_read_bool(chip->spmi->dev.of_node,
 					"qcom,charging-disabled");
@@ -4902,6 +5048,9 @@
 	if (rc)
 		return rc;
 
+	if (ext_ovp_isns_present)
+		chip->ext_ovp_ic_gpio_enabled = 0;
+
 	/*
 	 * Check if bat_if is set in DT and make sure VADC is present
 	 * Also try loading the battery data profile if bat_if exists
@@ -5183,6 +5332,7 @@
 		goto unregister_dc_psy;
 	}
 
+	chip->usb_trim_default = qpnp_chg_iusb_trim_get(chip);
 	qpnp_chg_charge_en(chip, !chip->charging_disabled);
 	qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
 	qpnp_chg_set_appropriate_vddmax(chip);
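
The ext_ovp_isns_ua and ext_ovp_isns_online controls above are exposed through module_param_cb() with custom kernel_param_ops, rather than plain module_param(), so that a read can trigger a VADC measurement and a write can toggle the OVP GPIO. A self-contained sketch of that pattern with placeholder names (illustrative only, not the driver's symbols):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_enabled;

static int demo_set(const char *val, const struct kernel_param *kp)
{
	int rc = param_set_bool(val, kp);	/* parses "0"/"1", "y"/"n" */

	if (!rc)
		pr_debug("demo_enabled set to %d\n", *(bool *)kp->arg);
	return rc;
}

static struct kernel_param_ops demo_ops = {
	.set = demo_set,
	.get = param_get_bool,
};
module_param_cb(demo_enabled, &demo_ops, &demo_enabled, 0644);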
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index e3284d5..81640a0 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -33,7 +33,9 @@
 /* PMIC Arbiter configuration registers */
 #define PMIC_ARB_VERSION		0x0000
 #define PMIC_ARB_INT_EN			0x0004
-
+#define PMIC_ARB_PROTOCOL_IRQ_STATUS	(0x700 + 0x820)
+#define PMIC_ARB_GENI_CTRL		0x0024
+#define PMIC_ARB_GENI_STATUS	0x0028
 /* PMIC Arbiter channel registers */
 #define PMIC_ARB_CMD(N)			(0x0800 + (0x80 * (N)))
 #define PMIC_ARB_CONFIG(N)		(0x0804 + (0x80 * (N)))
@@ -125,6 +127,7 @@
 	u8			max_apid;
 	u16			periph_id_map[PMIC_ARB_MAX_PERIPHS];
 	u32			mapping_table[SPMI_MAPPING_TABLE_LEN];
+	u32			prev_prtcl_irq_stat;
 };
 
 static struct spmi_pmic_arb_dev *the_pmic_arb;
@@ -143,6 +146,37 @@
 	writel_relaxed(val, dev->base + offset);
 }
 
+static void pmic_arb_save_stat_before_txn(struct spmi_pmic_arb_dev *dev)
+{
+	dev->prev_prtcl_irq_stat =
+		readl_relaxed(dev->cnfg + PMIC_ARB_PROTOCOL_IRQ_STATUS);
+}
+
+static int pmic_arb_diagnosis(struct spmi_pmic_arb_dev *dev, u32 status)
+{
+	if (status & PMIC_ARB_STATUS_DENIED) {
+		dev_err(dev->dev,
+		    "wait_for_done: transaction denied by SPMI master (0x%x)\n",
+		    status);
+		return -EPERM;
+	}
+
+	if (status & PMIC_ARB_STATUS_FAILURE) {
+		dev_err(dev->dev,
+		    "wait_for_done: transaction failed (0x%x)\n", status);
+		return -EIO;
+	}
+
+	if (status & PMIC_ARB_STATUS_DROPPED) {
+		dev_err(dev->dev,
+		    "wait_for_done: transaction dropped pmic-arb busy (0x%x)\n",
+		    status);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
 static int pmic_arb_wait_for_done(struct spmi_pmic_arb_dev *dev)
 {
 	u32 status = 0;
@@ -152,34 +186,13 @@
 	while (timeout--) {
 		status = pmic_arb_read(dev, offset);
 
-		if (status & PMIC_ARB_STATUS_DONE) {
-			if (status & PMIC_ARB_STATUS_DENIED) {
-				dev_err(dev->dev,
-					"%s: transaction denied (0x%x)\n",
-					__func__, status);
-				return -EPERM;
-			}
+		if (status & PMIC_ARB_STATUS_DONE)
+			return pmic_arb_diagnosis(dev, status);
 
-			if (status & PMIC_ARB_STATUS_FAILURE) {
-				dev_err(dev->dev,
-					"%s: transaction failed (0x%x)\n",
-					__func__, status);
-				return -EIO;
-			}
-
-			if (status & PMIC_ARB_STATUS_DROPPED) {
-				dev_err(dev->dev,
-					"%s: transaction dropped (0x%x)\n",
-					__func__, status);
-				return -EIO;
-			}
-
-			return 0;
-		}
 		udelay(1);
 	}
 
-	dev_err(dev->dev, "%s: timeout, status 0x%x\n", __func__, status);
+	dev_err(dev->dev, "wait_for_done:: timeout, status 0x%x\n", status);
 	return -ETIMEDOUT;
 }
 
@@ -209,6 +222,29 @@
 	pmic_arb_write(dev, reg, data);
 }
 
+static void pmic_arb_dbg_err_dump(struct spmi_pmic_arb_dev *pmic_arb, int ret,
+		const char *msg, u8 opc, u8 sid, u16 addr, u8 bc, u8 *buf)
+{
+	u32 irq_stat  = readl_relaxed(pmic_arb->cnfg
+				+ PMIC_ARB_PROTOCOL_IRQ_STATUS);
+	u32 geni_stat = readl_relaxed(pmic_arb->cnfg + PMIC_ARB_GENI_STATUS);
+	u32 geni_ctrl = readl_relaxed(pmic_arb->cnfg + PMIC_ARB_GENI_CTRL);
+
+	bc += 1; /* actual byte count */
+
+	if (buf)
+		dev_err(pmic_arb->dev,
+		"error:%d on data %s  opcode:0x%x sid:%d addr:0x%x bc:%d buf:%*phC\n",
+			ret, msg, opc, sid, addr, bc, bc, buf);
+	else
+		dev_err(pmic_arb->dev,
+		"error:%d on non-data-cmd opcode:0x%x sid:%d\n",
+			ret, opc, sid);
+	dev_err(pmic_arb->dev,
+		"PROTOCOL_IRQ_STATUS before:0x%x after:0x%x GENI_STATUS:0x%x GENI_CTRL:0x%x\n",
+		irq_stat, pmic_arb->prev_prtcl_irq_stat, geni_stat, geni_ctrl);
+}
+
 /* Non-data command */
 static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
 {
@@ -228,10 +264,13 @@
 	cmd = (opc << 27) | ((sid & 0xf) << 20);
 
 	spin_lock_irqsave(&pmic_arb->lock, flags);
+	pmic_arb_save_stat_before_txn(pmic_arb);
 	pmic_arb_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
 	rc = pmic_arb_wait_for_done(pmic_arb);
 	spin_unlock_irqrestore(&pmic_arb->lock, flags);
 
+	if (rc)
+		pmic_arb_dbg_err_dump(pmic_arb, rc, "cmd", opc, sid, 0, 0, 0);
 	return rc;
 }
 
@@ -249,7 +288,8 @@
 					, PMIC_ARB_MAX_TRANS_BYTES, bc+1);
 		return  -EINVAL;
 	}
-	pr_debug("op:0x%x sid:%d bc:%d addr:0x%x\n", opc, sid, bc, addr);
+	dev_dbg(pmic_arb->dev, "client-rd op:0x%x sid:%d addr:0x%x bc:%d\n",
+							opc, sid, addr, bc + 1);
 
 	/* Check the opcode */
 	if (opc >= 0x60 && opc <= 0x7F)
@@ -264,6 +304,7 @@
 	cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
 
 	spin_lock_irqsave(&pmic_arb->lock, flags);
+	pmic_arb_save_stat_before_txn(pmic_arb);
 	pmic_arb_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
 	rc = pmic_arb_wait_for_done(pmic_arb);
 	if (rc)
@@ -279,6 +320,9 @@
 
 done:
 	spin_unlock_irqrestore(&pmic_arb->lock, flags);
+	if (rc)
+		pmic_arb_dbg_err_dump(pmic_arb, rc, "read", opc, sid, addr, bc,
+									buf);
 	return rc;
 }
 
@@ -296,7 +340,8 @@
 					, PMIC_ARB_MAX_TRANS_BYTES, bc+1);
 		return  -EINVAL;
 	}
-	pr_debug("op:0x%x sid:%d bc:%d addr:0x%x\n", opc, sid, bc, addr);
+	dev_dbg(pmic_arb->dev, "client-wr op:0x%x sid:%d addr:0x%x bc:%d\n",
+							opc, sid, addr, bc + 1);
 
 	/* Check the opcode */
 	if (opc >= 0x40 && opc <= 0x5F)
@@ -314,6 +359,7 @@
 
 	/* Write data to FIFOs */
 	spin_lock_irqsave(&pmic_arb->lock, flags);
+	pmic_arb_save_stat_before_txn(pmic_arb);
 	pa_write_data(pmic_arb, buf, PMIC_ARB_WDATA0(pmic_arb->channel)
 							, min_t(u8, bc, 3));
 	if (bc > 3)
@@ -325,6 +371,10 @@
 	rc = pmic_arb_wait_for_done(pmic_arb);
 	spin_unlock_irqrestore(&pmic_arb->lock, flags);
 
+	if (rc)
+		pmic_arb_dbg_err_dump(pmic_arb, rc, "write", opc, sid, addr, bc,
+									buf);
+
 	return rc;
 }
 
@@ -501,7 +551,9 @@
 	int i;
 
 	if (!is_apid_valid(pmic_arb, apid)) {
-		dev_err(pmic_arb->dev, "unknown peripheral id 0x%x\n", ppid);
+		dev_err(pmic_arb->dev,
+		"periph_interrupt(apid:0x%x sid:0x%x pid:0x%x) unknown peripheral\n",
+			apid, sid, pid);
 		/* return IRQ_NONE; */
 	}
 
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index b7bf74e..159121f 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2847,9 +2847,15 @@
 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
 
+static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
 static struct vm_operations_struct binder_vm_ops = {
 	.open = binder_vma_open,
 	.close = binder_vma_close,
+	.fault = binder_vm_fault,
 };
 
 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 6078ef1..a6c60ce 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -3301,32 +3301,31 @@
 	if (is_blsp_uart(msm_uport)) {
 		core_resource = platform_get_resource_byname(pdev,
 					IORESOURCE_MEM, "core_mem");
-		bam_resource = platform_get_resource_byname(pdev,
-					IORESOURCE_MEM, "bam_mem");
-		core_irqres = platform_get_irq_byname(pdev, "core_irq");
-		bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
-		wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
-
 		if (!core_resource) {
 			MSM_HS_ERR("Invalid core HSUART Resources.\n");
 			return -ENXIO;
 		}
-
+		bam_resource = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "bam_mem");
 		if (!bam_resource) {
 			MSM_HS_ERR("Invalid BAM HSUART Resources.\n");
 			return -ENXIO;
 		}
-
-		if (!core_irqres) {
+		core_irqres = platform_get_irq_byname(pdev, "core_irq");
+		if (core_irqres < 0) {
 			MSM_HS_ERR("Invalid core irqres Resources.\n");
 			return -ENXIO;
 		}
-		if (!bam_irqres) {
+		bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
+		if (bam_irqres < 0) {
 			MSM_HS_ERR("Invalid bam irqres Resources.\n");
 			return -ENXIO;
 		}
-		if (!wakeup_irqres)
+		wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
+		if (wakeup_irqres < 0) {
+			wakeup_irqres = -1;
 			MSM_HS_DBG("Wakeup irq not specified.\n");
+		}
 
 		uport->mapbase = core_resource->start;
 
@@ -3390,11 +3389,6 @@
 		msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
 		msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
 
-		if (unlikely(msm_uport->wakeup.irq < 0)) {
-			ret = -ENXIO;
-			goto deregister_bus_client;
-		}
-
 		if (is_blsp_uart(msm_uport)) {
 			msm_uport->bam_tx_ep_pipe_index =
 					pdata->bam_tx_ep_pipe_index;
diff --git a/drivers/video/msm/mdss/dsi_status_6g.c b/drivers/video/msm/mdss/dsi_status_6g.c
index 4f7e4da..ead7f2c 100644
--- a/drivers/video/msm/mdss/dsi_status_6g.c
+++ b/drivers/video/msm/mdss/dsi_status_6g.c
@@ -65,6 +65,7 @@
 		return;
 	}
 
+	mutex_lock(&ctrl_pdata->mutex);
 	if (ctl->shared_lock)
 		mutex_lock(ctl->shared_lock);
 	mutex_lock(&mdp5_data->ov_lock);
@@ -73,7 +74,8 @@
 		mutex_unlock(&mdp5_data->ov_lock);
 		if (ctl->shared_lock)
 			mutex_unlock(ctl->shared_lock);
-		pr_err("%s: DSI turning off, avoiding BTA status check\n",
+		mutex_unlock(&ctrl_pdata->mutex);
+		pr_err("%s: DSI turning off, avoiding panel status check\n",
 							__func__);
 		return;
 	}
@@ -100,6 +102,7 @@
 	mutex_unlock(&mdp5_data->ov_lock);
 	if (ctl->shared_lock)
 		mutex_unlock(ctl->shared_lock);
+	mutex_unlock(&ctrl_pdata->mutex);
 
 	if ((pstatus_data->mfd->panel_power_on)) {
 		if (ret > 0) {
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index afa0bc1..b1c0343 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -198,6 +198,7 @@
 	int handoff_pending;
 	struct mdss_prefill_data prefill_data;
 	bool ulps;
+	int iommu_ref_cnt;
 };
 extern struct mdss_data_type *mdss_res;
 
@@ -220,7 +221,8 @@
 void mdss_enable_irq(struct mdss_hw *hw);
 void mdss_disable_irq(struct mdss_hw *hw);
 void mdss_disable_irq_nosync(struct mdss_hw *hw);
-int mdss_bus_bandwidth_ctrl(int enable);
+void mdss_bus_bandwidth_ctrl(int enable);
+int mdss_iommu_ctrl(int enable);
 
 static inline struct ion_client *mdss_get_ionclient(void)
 {
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index fcb8f50..8fa04cd 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -316,6 +316,7 @@
 	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
 				panel_data);
 
+	mutex_lock(&ctrl_pdata->mutex);
 	panel_info = &ctrl_pdata->panel_data.panel_info;
 	pr_debug("%s+: ctrl=%p ndx=%d\n", __func__,
 				ctrl_pdata, ctrl_pdata->ndx);
@@ -333,6 +334,7 @@
 
 	ret = mdss_dsi_panel_power_on(pdata, 0);
 	if (ret) {
+		mutex_unlock(&ctrl_pdata->mutex);
 		pr_err("%s: Panel power off failed\n", __func__);
 		return ret;
 	}
@@ -342,6 +344,7 @@
 	    && (panel_info->new_fps != panel_info->mipi.frame_rate))
 		panel_info->mipi.frame_rate = panel_info->new_fps;
 
+	mutex_unlock(&ctrl_pdata->mutex);
 	pr_debug("%s-:\n", __func__);
 
 	return ret;
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index 8e9f760..267a687 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -1031,7 +1031,7 @@
 
 
 	if (is_mdss_iommu_attached()) {
-		int ret = msm_iommu_map_contig_buffer(tp->dmap,
+		ret = msm_iommu_map_contig_buffer(tp->dmap,
 					mdss_get_iommu_domain(domain), 0,
 					size, SZ_4K, 0, &(addr));
 		if (IS_ERR_VALUE(ret)) {
@@ -1240,7 +1240,7 @@
 {
 	struct dcs_cmd_req *req;
 	int ret = -EINVAL;
-
+	int rc = 0;
 	mutex_lock(&ctrl->cmd_mutex);
 	req = mdss_dsi_cmdlist_get(ctrl);
 
@@ -1263,20 +1263,21 @@
 	 * also, axi bus bandwidth need since dsi controller will
 	 * fetch dcs commands from axi bus
 	 */
-	ret = mdss_bus_bandwidth_ctrl(1);
-	if (ret) {
-		pr_err("bus bandwidth request failed ret=%d\n", ret);
-		goto need_lock;
-	}
-
+	mdss_bus_bandwidth_ctrl(1);
 	pr_debug("%s:  from_mdp=%d pid=%d\n", __func__, from_mdp, current->pid);
 	mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
 
+	rc = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("IOMMU attach failed\n");
+		mutex_unlock(&ctrl->cmd_mutex);
+		return rc;
+	}
 	if (req->flags & CMD_REQ_RX)
 		ret = mdss_dsi_cmdlist_rx(ctrl, req);
 	else
 		ret = mdss_dsi_cmdlist_tx(ctrl, req);
-
+	mdss_iommu_ctrl(0);
 	mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
 	mdss_bus_bandwidth_ctrl(0);
 
@@ -1381,8 +1382,10 @@
 		if (todo & DSI_EV_MDP_FIFO_UNDERFLOW) {
 			mutex_lock(&ctrl->mutex);
 			if (ctrl->recovery) {
+				mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
 				mdss_dsi_sw_reset_restore(ctrl);
 				ctrl->recovery->fxn(ctrl->recovery->data);
+				mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
 			}
 			mutex_unlock(&ctrl->mutex);
 		}
@@ -1398,7 +1401,9 @@
 			spin_unlock_irqrestore(&ctrl->mdp_lock, flag);
 
 			/* enable dsi error interrupt */
+			mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 1);
 			mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 1);
+			mdss_dsi_clk_ctrl(ctrl, DSI_ALL_CLKS, 0);
 		}
 
 	}
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index c14f936..da2ae5f 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -454,6 +454,7 @@
 
 	mfd->ext_ad_ctrl = -1;
 	mfd->bl_level = 0;
+	mfd->bl_level_prev_scaled = 0;
 	mfd->bl_scale = 1024;
 	mfd->bl_min_lvl = 30;
 	mfd->fb_imgType = MDP_RGBA_8888;
@@ -782,6 +783,7 @@
 	pdata = dev_get_platdata(&mfd->pdev->dev);
 
 	if ((pdata) && (pdata->set_backlight)) {
+		mfd->bl_level_prev_scaled = mfd->bl_level_scaled;
 		if (!IS_CALIB_MODE_BL(mfd))
 			mdss_fb_scale_bl(mfd, &temp);
 		/*
@@ -792,13 +794,13 @@
 		 * as well as setting bl_level to bkl_lvl even though the
 		 * backlight has been set to the scaled value.
 		 */
-		if (mfd->bl_level_old == temp) {
+		if (mfd->bl_level_scaled == temp) {
 			mfd->bl_level = bkl_lvl;
 			return;
 		}
 		pdata->set_backlight(pdata, temp);
 		mfd->bl_level = bkl_lvl;
-		mfd->bl_level_old = temp;
+		mfd->bl_level_scaled = temp;
 
 		if (mfd->mdp.update_ad_input) {
 			update_ad_input = mfd->mdp.update_ad_input;
@@ -821,7 +823,7 @@
 		if ((pdata) && (pdata->set_backlight)) {
 			mfd->bl_level = mfd->unset_bl_level;
 			pdata->set_backlight(pdata, mfd->bl_level);
-			mfd->bl_level_old = mfd->unset_bl_level;
+			mfd->bl_level_scaled = mfd->unset_bl_level;
 			mfd->bl_updated = 1;
 		}
 	}
@@ -858,6 +860,13 @@
 				schedule_delayed_work(&mfd->idle_notify_work,
 					msecs_to_jiffies(mfd->idle_time));
 		}
+
+		mutex_lock(&mfd->bl_lock);
+		if (!mfd->bl_updated) {
+			mfd->bl_updated = 1;
+			mdss_fb_set_backlight(mfd, mfd->bl_level_prev_scaled);
+		}
+		mutex_unlock(&mfd->bl_lock);
 		break;
 
 	case FB_BLANK_VSYNC_SUSPEND:
@@ -879,8 +888,9 @@
 
 			mfd->op_enable = false;
 			curr_pwr_state = mfd->panel_power_on;
-			mfd->panel_power_on = false;
 			mutex_lock(&mfd->bl_lock);
+			mdss_fb_set_backlight(mfd, 0);
+			mfd->panel_power_on = false;
 			mfd->bl_updated = 0;
 			mutex_unlock(&mfd->bl_lock);
 
@@ -1687,7 +1697,11 @@
 	u32 wait_for_finish = disp_commit->wait_for_finish;
 	int ret = 0;
 
-	if (!mfd || (!mfd->op_enable) || (!mfd->panel_power_on))
+	if (!mfd || (!mfd->op_enable))
+		return -EPERM;
+
+	if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
+				(mfd->panel.type == MIPI_CMD_PANEL)))
 		return -EPERM;
 
 	if (var->xoffset > (info->var.xres_virtual - info->var.xres))
@@ -1739,7 +1753,11 @@
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 
-	if ((!mfd->op_enable) || (!mfd->panel_power_on))
+	if (!mfd->op_enable)
+		return -EPERM;
+
+	if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
+				(mfd->panel.type == MIPI_CMD_PANEL)))
 		return -EPERM;
 
 	if (var->xoffset > (info->var.xres_virtual - info->var.xres))
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 3416b9e..ce0a7f9 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -189,7 +189,8 @@
 	u32 bl_min_lvl;
 	u32 unset_bl_level;
 	u32 bl_updated;
-	u32 bl_level_old;
+	u32 bl_level_scaled;
+	u32 bl_level_prev_scaled;
 	struct mutex bl_lock;
 	struct mutex lock;
 
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index bf151f4..097d568 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -620,6 +620,37 @@
 	return clk_rate;
 }
 
+int mdss_iommu_ctrl(int enable)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int rc = 0;
+
+	mutex_lock(&mdp_iommu_lock);
+	pr_debug("%pS: enable %d mdata->iommu_ref_cnt %d\n",
+		__builtin_return_address(0), enable, mdata->iommu_ref_cnt);
+
+	if (enable) {
+
+		if (mdata->iommu_ref_cnt == 0)
+			rc = mdss_iommu_attach(mdata);
+		mdata->iommu_ref_cnt++;
+	} else {
+		if (mdata->iommu_ref_cnt) {
+			mdata->iommu_ref_cnt--;
+			if (mdata->iommu_ref_cnt == 0)
+				rc = mdss_iommu_dettach(mdata);
+		} else {
+			pr_err("unbalanced iommu ref\n");
+		}
+	}
+	mutex_unlock(&mdp_iommu_lock);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+	else
+		return mdata->iommu_ref_cnt;
+}
+
 /**
  * mdss_bus_bandwidth_ctrl() -- place bus bandwidth request
  * @enable:	value of enable or disable
@@ -629,27 +660,16 @@
  * Bus bandwidth is required by mdp.For dsi, it only requires to send
  * dcs coammnd. It returns error if bandwidth request fails.
  */
-int mdss_bus_bandwidth_ctrl(int enable)
+void mdss_bus_bandwidth_ctrl(int enable)
 {
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 	static int bus_bw_cnt;
 	int changed = 0;
-	int rc = 0;
 
 	mutex_lock(&bus_bw_lock);
 	if (enable) {
-		if (bus_bw_cnt == 0) {
+		if (bus_bw_cnt == 0)
 			changed++;
-			if (!mdata->handoff_pending) {
-				rc = mdss_iommu_attach(mdata);
-				if (rc) {
-					pr_err("iommu attach failed rc=%d\n",
-									rc);
-					goto end;
-				}
-			}
-		}
-
 		bus_bw_cnt++;
 	} else {
 		if (bus_bw_cnt) {
@@ -668,7 +688,6 @@
 		if (!enable) {
 			msm_bus_scale_client_update_request(
 				mdata->bus_hdl, 0);
-			mdss_iommu_dettach(mdata);
 			pm_runtime_put(&mdata->pdev->dev);
 		} else {
 			pm_runtime_get_sync(&mdata->pdev->dev);
@@ -677,9 +696,7 @@
 		}
 	}
 
-end:
 	mutex_unlock(&bus_bw_lock);
-	return rc;
 }
 EXPORT_SYMBOL(mdss_bus_bandwidth_ctrl);
 
@@ -687,7 +704,7 @@
 {
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 	static int mdp_clk_cnt;
-	int changed = 0, rc;
+	int changed = 0;
 
 	mutex_lock(&mdp_clk_lock);
 	if (enable) {
@@ -720,9 +737,7 @@
 		if (mdata->vsync_ena)
 			mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
 
-		rc = mdss_bus_bandwidth_ctrl(enable);
-		if (rc)
-			pr_err("bus bandwidth control failed rc=%d", rc);
+		mdss_bus_bandwidth_ctrl(enable);
 
 		if (!enable)
 			pm_runtime_put(&mdata->pdev->dev);
@@ -809,7 +824,6 @@
 	struct mdss_iommu_map_type *iomap;
 	int i, rc = 0;
 
-	mutex_lock(&mdp_iommu_lock);
 	MDSS_XLOG(mdata->iommu_attached);
 
 	if (mdata->iommu_attached) {
@@ -840,8 +854,6 @@
 
 	mdata->iommu_attached = true;
 end:
-	mutex_unlock(&mdp_iommu_lock);
-
 	return rc;
 }
 
@@ -851,12 +863,10 @@
 	struct mdss_iommu_map_type *iomap;
 	int i;
 
-	mutex_lock(&mdp_iommu_lock);
 	MDSS_XLOG(mdata->iommu_attached);
 
 	if (!mdata->iommu_attached) {
 		pr_debug("mdp iommu already dettached\n");
-		mutex_unlock(&mdp_iommu_lock);
 		return 0;
 	}
 
@@ -873,7 +883,6 @@
 	}
 
 	mdata->iommu_attached = false;
-	mutex_unlock(&mdp_iommu_lock);
 
 	return 0;
 }
@@ -2560,7 +2569,6 @@
 		mdata->fs_ena = true;
 	} else {
 		pr_debug("Disable MDP FS\n");
-		mdss_iommu_dettach(mdata);
 		if (mdata->fs_ena) {
 			regulator_disable(mdata->fs);
 			if (!mdata->ulps) {
@@ -2591,20 +2599,20 @@
 	pr_debug("called on=%d\n", on);
 	if (on) {
 		pm_runtime_get_sync(dev);
-		rc = mdss_iommu_attach(mdata);
-		if (rc) {
+		rc = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE(rc)) {
 			pr_err("mdss iommu attach failed rc=%d\n", rc);
-			goto end;
+			return rc;
 		}
 		mdss_hw_init(mdata);
 		mdata->ulps = false;
+		mdss_iommu_ctrl(0);
 	} else {
 		mdata->ulps = true;
 		pm_runtime_put_sync(dev);
 	}
 
-end:
-	return rc;
+	return 0;
 }
 
 static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
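
mdss_iommu_ctrl() above turns IOMMU attach/detach into a reference-counted vote: the first enable attaches, the last disable detaches, and every enable has to be paired with a disable. The call sites added through the rest of this patch follow the shape below (illustrative sketch only; it assumes mdss.h is included for the prototype):

static int demo_do_mdp_work(void)
{
	int rc;

	rc = mdss_iommu_ctrl(1);	/* vote: attaches on the first user */
	if (IS_ERR_VALUE(rc))
		return rc;		/* vote not taken, nothing to undo */

	/* ... map buffers / program the MDP while the IOMMU is attached ... */

	mdss_iommu_ctrl(0);		/* unvote: detaches when the count hits 0 */
	return 0;
}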
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
index ce94fcf..e619e6b 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -189,6 +189,7 @@
 {
 	unsigned long flags;
 	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int rc;
 
 	if (!ctx->panel_on)
 		return;
@@ -201,6 +202,10 @@
 		if (cancel_delayed_work_sync(&ctx->ulps_work))
 			pr_debug("deleted pending ulps work\n");
 
+		rc = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE(rc))
+			pr_err("IOMMU attach failed\n");
+
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 
 		if (ctx->ulps) {
@@ -243,6 +248,7 @@
 		mdss_mdp_hist_intr_setup(&mdata->hist_intr, MDSS_IRQ_SUSPEND);
 		mdss_mdp_ctl_intf_event
 			(ctx->ctl, MDSS_EVENT_PANEL_CLK_CTRL, (void *)0);
+		mdss_iommu_ctrl(0);
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 		if (ctx->panel_on)
 			schedule_delayed_work(&ctx->ulps_work, ULPS_ENTER_TIME);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index eff708c..e5bba31 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -333,6 +333,8 @@
 				frame_rate = 24;
 			msleep((1000/frame_rate) + 1);
 		}
+
+		mdss_iommu_ctrl(0);
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 		ctx->timegen_en = false;
 
@@ -717,6 +719,12 @@
 
 		pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
 
+		rc = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("IOMMU attach failed\n");
+			return rc;
+		}
+
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 
 		mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num);
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index 02e7b75..7bf03eb 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -442,6 +442,7 @@
 		rc = 0;
 	}
 
+	mdss_iommu_ctrl(0);
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); /* clock off */
 
 	ctx->comp_cnt--;
@@ -501,6 +502,11 @@
 	INIT_COMPLETION(ctx->wb_comp);
 	mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
 
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("IOMMU attach failed\n");
+		return ret;
+	}
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
 	wmb();
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 52c220f..2141659 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -707,9 +707,11 @@
 	if ((num_planes <= 0) || (num_planes > MAX_PLANES))
 		return -EINVAL;
 
-	rc = mdss_bus_bandwidth_ctrl(1);
-	if (rc)
+	rc = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("IOMMU attach failed\n");
 		goto end;
+	}
 
 	memset(data, 0, sizeof(*data));
 	for (i = 0; i < num_planes; i++) {
@@ -724,31 +726,29 @@
 			break;
 		}
 	}
-	mdss_bus_bandwidth_ctrl(0);
 
+	mdss_iommu_ctrl(0);
 	data->num_planes = i;
-
 end:
 	return rc;
 }
 
 int mdss_mdp_overlay_free_buf(struct mdss_mdp_data *data)
 {
-	int i;
-	int rc;
+	int i, rc;
 
-	rc = mdss_bus_bandwidth_ctrl(1);
-	if (rc)
-		goto end;
+	rc = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("IOMMU attach failed\n");
+		return rc;
+	}
 
 	for (i = 0; i < data->num_planes && data->p[i].len; i++)
 		mdss_mdp_put_img(&data->p[i]);
-	mdss_bus_bandwidth_ctrl(0);
 
+	mdss_iommu_ctrl(0);
 	data->num_planes = 0;
-
-end:
-	return rc;
+	return 0;
 }
 
 /**
@@ -919,13 +919,6 @@
 
 		if (!mdp5_data->mdata->batfet)
 			mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
-		if (!mfd->panel_info->cont_splash_enabled) {
-			rc = mdss_iommu_attach(mdp5_data->mdata);
-			if (rc) {
-				pr_err("mdss iommu attach failed rc=%d\n", rc);
-				goto end;
-			}
-		}
 		mdss_mdp_release_splash_pipe(mfd);
 		return 0;
 	}
@@ -948,13 +941,14 @@
 	 */
 	if (!is_mdss_iommu_attached()) {
 		if (!mfd->panel_info->cont_splash_enabled) {
-			rc = mdss_iommu_attach(mdss_res);
-			if (rc) {
-				pr_err("mdss iommu attach failed rc=%d\n", rc);
+			rc = mdss_iommu_ctrl(1);
+			if (IS_ERR_VALUE(rc)) {
+				pr_err("iommu attach failed rc=%d\n", rc);
 				goto pm_error;
 			}
+			mdss_hw_init(mdss_res);
+			mdss_iommu_ctrl(0);
 		}
-		mdss_hw_init(mdss_res);
 	}
 
 	rc = mdss_mdp_ctl_start(ctl, false);
@@ -1385,14 +1379,6 @@
 	flags = (pipe->flags & MDP_SECURE_OVERLAY_SESSION);
 	flags |= (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION);
 
-	if (!mfd->panel_info->cont_splash_enabled) {
-		ret = mdss_iommu_attach(mdata);
-		if (ret) {
-			pr_err("mdss iommu attach failed ret=%d\n", ret);
-			goto end;
-		}
-	}
-
 	src_data = &pipe->back_buf;
 	if (src_data->num_planes) {
 		pr_warn("dropped buffer pnum=%d play=%d addr=0x%x\n",
@@ -1405,7 +1391,6 @@
 		pr_err("src_data pmem error\n");
 	}
 
-end:
 	mdss_mdp_pipe_unmap(pipe);
 
 	return ret;
@@ -1605,12 +1590,20 @@
 	if (mutex_lock_interruptible(&mdp5_data->ov_lock))
 		return;
 
-	if (!mfd->panel_power_on) {
+	if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
+				(mfd->panel.type == MIPI_CMD_PANEL))) {
 		mutex_unlock(&mdp5_data->ov_lock);
 		return;
 	}
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("IOMMU attach failed\n");
+		goto pan_display_error;
+	}
+
 	bpp = fbi->var.bits_per_pixel / 8;
 	offset = fbi->var.xoffset * bpp +
 		 fbi->var.yoffset * fbi->fix.line_length;
@@ -1677,10 +1670,12 @@
 	    (fbi->var.activate & FB_ACTIVATE_FORCE))
 		mfd->mdp.kickoff_fnc(mfd, NULL);
 
+	mdss_iommu_ctrl(0);
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 	return;
 
 pan_display_error:
+	mdss_iommu_ctrl(0);
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 	mutex_unlock(&mdp5_data->ov_lock);
 }
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index 6953469..ff8c6b8 100755
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -692,7 +692,6 @@
 			    struct msmfb_overlay_data *req)
 {
 	struct mdss_mdp_rotator_session *rot;
-	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	int ret;
 	u32 flgs;
 
@@ -712,9 +711,6 @@
 		goto dst_buf_fail;
 	}
 
-	if (!mfd->panel_info->cont_splash_enabled)
-		mdss_iommu_attach(mdp5_data->mdata);
-
 	mdss_mdp_overlay_free_buf(&rot->src_buf);
 	ret = mdss_mdp_overlay_get_buf(mfd, &rot->src_buf, &req->data, 1, flgs);
 	if (ret) {
diff --git a/drivers/video/msm/mdss/mdss_mdp_splash_logo.c b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
index 838f58f..829806d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
+++ b/drivers/video/msm/mdss/mdss_mdp_splash_logo.c
@@ -109,7 +109,7 @@
 {
 	struct iommu_domain *domain;
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
-	int rc;
+	int rc, ret;
 
 	/*
 	 * iommu dynamic attach for following conditions.
@@ -139,9 +139,9 @@
 	if (rc) {
 		pr_debug("iommu memory mapping failed rc=%d\n", rc);
 	} else {
-		rc = mdss_iommu_attach(mdss_res);
-		if (rc) {
-			pr_debug("mdss iommu attach failed\n");
+		ret = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("mdss iommu attach failed\n");
 			iommu_unmap(domain, mdp5_data->splash_mem_addr,
 						mdp5_data->splash_mem_size);
 		} else {
@@ -167,6 +167,8 @@
 
 		iommu_unmap(domain, mdp5_data->splash_mem_addr,
 						mdp5_data->splash_mem_size);
+		mdss_iommu_ctrl(0);
+
 		mfd->splash_info.iommu_dynamic_attached = false;
 	}
 }
@@ -246,12 +248,6 @@
 	}
 
 	mdss_mdp_footswitch_ctrl_splash(0);
-	if (!is_mdss_iommu_attached()) {
-		rc = mdss_iommu_attach(mdss_res);
-		if (rc)
-			pr_err("iommu attach failed rc=%d\n", rc);
-	}
-
 end:
 	return rc;
 }
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index 0e59a26..6086e28 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -427,11 +427,20 @@
 	buf = &node->buf_data.p[0];
 	if (wb->is_secure)
 		buf->flags |= MDP_SECURE_OVERLAY_SESSION;
+
+	ret = mdss_iommu_ctrl(1);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("IOMMU attach failed\n");
+		goto register_fail;
+	}
 	ret = mdss_mdp_get_img(data, buf);
 	if (IS_ERR_VALUE(ret)) {
 		pr_err("error getting buffer info\n");
+		mdss_iommu_ctrl(0);
 		goto register_fail;
 	}
+	mdss_iommu_ctrl(0);
+
 	memcpy(&node->buf_info, data, sizeof(*data));
 
 	ret = mdss_mdp_wb_register_node(wb, node);
@@ -471,7 +480,6 @@
 {
 	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
 	struct mdss_mdp_wb_data *node = NULL;
-	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	int ret = 0;
 
 	if (!wb) {
@@ -481,14 +489,6 @@
 
 	pr_debug("fb%d queue\n", wb->fb_ndx);
 
-	if (!mfd->panel_info->cont_splash_enabled) {
-		ret  = mdss_iommu_attach(mdp5_data->mdata);
-		if (ret) {
-			pr_err("mdss iommu attach failed rc=%d", ret);
-			return ret;
-		}
-	}
-
 	mutex_lock(&wb->lock);
 	if (local)
 		node = get_local_node(wb, data);
@@ -741,7 +741,13 @@
 		}
 		break;
 	case MSMFB_WRITEBACK_TERMINATE:
+		ret = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("IOMMU attach failed\n");
+			return ret;
+		}
 		ret = mdss_mdp_wb_terminate(mfd);
+		mdss_iommu_ctrl(0);
 		break;
 	case MSMFB_WRITEBACK_SET_MIRRORING_HINT:
 		if (!copy_from_user(&hint, arg, sizeof(hint))) {
@@ -854,19 +860,25 @@
 EXPORT_SYMBOL(msm_fb_writeback_set_secure);
 
 /**
- * msm_fb_writeback_iommu_ref() - Power ON/OFF mdp clock
- * @enable - true/false to Power ON/OFF mdp clock
+ * msm_fb_writeback_iommu_ref() - Add/Remove vote on MDSS IOMMU being attached.
+ * @enable - true adds vote on MDSS IOMMU, false removes the vote.
  *
- * Call to enable mdp clock at start of mdp_mmap/mdp_munmap API and
- * to disable mdp clock at end of these API's to ensure iommu is in
- * proper state while driver map/un-map any buffers.
+ * Call to vote for the MDSS IOMMU to be attached, so that buffers are
+ * properly mapped to the IOMMU context bank.
  */
 int msm_fb_writeback_iommu_ref(struct fb_info *info, int enable)
 {
-	if (enable)
-		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-	else
-		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	int ret;
+
+	if (enable) {
+		ret = mdss_iommu_ctrl(1);
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("IOMMU attach failed\n");
+			return ret;
+		}
+	} else {
+		mdss_iommu_ctrl(0);
+	}
 
 	return 0;
 }
diff --git a/fs/bio.c b/fs/bio.c
index 84da885..ee796c4 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -458,6 +458,7 @@
 	bio->bi_vcnt = bio_src->bi_vcnt;
 	bio->bi_size = bio_src->bi_size;
 	bio->bi_idx = bio_src->bi_idx;
+	bio->bi_dio_inode = bio_src->bi_dio_inode;
 }
 EXPORT_SYMBOL(__bio_clone);
 
diff --git a/fs/direct-io.c b/fs/direct-io.c
index f4aadd1..be21245 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -423,6 +423,8 @@
 	if (dio->is_async && dio->rw == READ)
 		bio_set_pages_dirty(bio);
 
+	bio->bi_dio_inode = dio->inode;
+
 	if (sdio->submit_io)
 		sdio->submit_io(dio->rw, bio, dio->inode,
 			       sdio->logical_offset_in_bio);
@@ -434,6 +436,19 @@
 	sdio->logical_offset_in_bio = 0;
 }
 
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+	struct inode *inode = NULL;
+
+	if (bio == NULL)
+		return NULL;
+
+	inode = bio->bi_dio_inode;
+
+	return inode;
+}
+EXPORT_SYMBOL(dio_bio_get_inode);
+
 /*
  * Release any resources in case of a failure
  */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 3f62354..97210b7 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -70,6 +70,13 @@
 	struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+	/*
+	 * When using direct-io (O_DIRECT), we can't get the inode from a bio
+	 * by walking bio->bi_io_vec->bv_page->mapping->host
+	 * since the page is anon.
+	 */
+	struct inode		*bi_dio_inode;
+
 	bio_destructor_t	*bi_destructor;	/* destructor */
 
 	/*
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index f78d418..1e15415 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,14 +13,18 @@
 #ifndef DIAGCHAR_SHARED
 #define DIAGCHAR_SHARED
 
-#define MSG_MASKS_TYPE			1
-#define LOG_MASKS_TYPE			2
-#define EVENT_MASKS_TYPE		4
-#define PKT_TYPE			8
-#define DEINIT_TYPE			16
-#define USER_SPACE_DATA_TYPE		32
-#define DCI_DATA_TYPE			64
-#define CALLBACK_DATA_TYPE		128
+#define MSG_MASKS_TYPE		0x00000001
+#define LOG_MASKS_TYPE		0x00000002
+#define EVENT_MASKS_TYPE	0x00000004
+#define PKT_TYPE		0x00000008
+#define DEINIT_TYPE		0x00000010
+#define USER_SPACE_DATA_TYPE	0x00000020
+#define DCI_DATA_TYPE		0x00000040
+#define CALLBACK_DATA_TYPE	0x00000080
+#define DCI_LOG_MASKS_TYPE	0x00000100
+#define DCI_EVENT_MASKS_TYPE	0x00000200
+#define DCI_PKT_TYPE		0x00000400
+
 #define USB_MODE			1
 #define MEMORY_DEVICE_MODE		2
 #define NO_LOGGING_MODE			3
@@ -32,6 +36,8 @@
 #define DATA_TYPE_F3            	1
 #define DATA_TYPE_LOG           	2
 #define DATA_TYPE_RESPONSE      	3
+#define DATA_TYPE_DCI_LOG		0x00000100
+#define DATA_TYPE_DCI_EVENT		0x00000200
 
 /* Different IOCTL values */
 #define DIAG_IOCTL_COMMAND_REG  	0
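
The data-type defines above are rewritten as explicit single-bit hex masks, with new DCI types added, making it clear that one type word can carry several flags at once. A hedged illustration of testing them as flags, using only the defines from this header and pr_debug():

/* Sketch: the hex defines above are single-bit flags, so a type word can
 * carry several of them and each is tested with a bitwise AND.
 */
static void example_dispatch(int data_type)
{
	if (data_type & DCI_LOG_MASKS_TYPE)
		pr_debug("payload carries DCI log masks\n");
	if (data_type & USER_SPACE_DATA_TYPE)
		pr_debug("payload carries user space data\n");
}
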
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 093f0b8..6181c69 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2436,6 +2436,7 @@
 void dio_end_io(struct bio *bio, int error);
 void inode_dio_wait(struct inode *inode);
 void inode_dio_done(struct inode *inode);
+struct inode *dio_bio_get_inode(struct bio *bio);
 
 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	struct block_device *bdev, const struct iovec *iov, loff_t offset,
diff --git a/include/linux/pft.h b/include/linux/pft.h
index 36e9612..2ec92da 100644
--- a/include/linux/pft.h
+++ b/include/linux/pft.h
@@ -15,11 +15,12 @@
 
 #include <linux/types.h>
 #include <linux/fs.h>
+#include <linux/bio.h>
 
 #ifdef CONFIG_PFT
 
 /* dm-req-crypt API */
-int pft_get_key_index(struct inode *inode, u32 *key_index,
+int pft_get_key_index(struct bio *bio, u32 *key_index,
 		      bool *is_encrypted, bool *is_inplace);
 
 /* block layer API */
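
Because O_DIRECT pages are anonymous, the new bi_dio_inode field and dio_bio_get_inode() (fs/direct-io.c above) let a block-layer consumer recover the issuing inode, and the pft_get_key_index() prototype change here suggests the pft/dm-req-crypt path as one such consumer. A hedged sketch of the lookup step only; everything beyond dio_bio_get_inode() is intentionally left out:

/* Sketch: map an O_DIRECT bio back to the inode that issued it. Only
 * dio_bio_get_inode() comes from this patch.
 */
static int example_bio_to_inode(struct bio *bio, struct inode **inode)
{
	*inode = dio_bio_get_inode(bio);
	if (!*inode)
		return -EINVAL;	/* NULL bio or not a direct-I/O bio */

	return 0;
}
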
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index e8e932e..b581de8 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -382,6 +382,7 @@
  * -EPERM if the SPMI transaction is denied due to permission issues.
  * -EIO if the SPMI transaction fails (parity errors, etc).
  * -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable.
  */
 extern int spmi_command_reset(struct spmi_controller *ctrl, u8 sid);
 
@@ -397,6 +398,7 @@
  * -EPERM if the SPMI transaction is denied due to permission issues.
  * -EIO if the SPMI transaction fails (parity errors, etc).
  * -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable.
  */
 extern int spmi_command_sleep(struct spmi_controller *ctrl, u8 sid);
 
@@ -413,6 +415,7 @@
  * -EPERM if the SPMI transaction is denied due to permission issues.
  * -EIO if the SPMI transaction fails (parity errors, etc).
  * -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable.
  */
 extern int spmi_command_wakeup(struct spmi_controller *ctrl, u8 sid);
 
@@ -428,6 +431,7 @@
  * -EPERM if the SPMI transaction is denied due to permission issues.
  * -EIO if the SPMI transaction fails (parity errors, etc).
  * -ETIMEDOUT if the SPMI transaction times out.
+ * -EAGAIN if the SPMI transaction is temporarily unavailable.
  */
 extern int spmi_command_shutdown(struct spmi_controller *ctrl, u8 sid);
 
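
The kernel-doc additions above document -EAGAIN as a new, transient failure code for the SPMI command helpers. A minimal sketch of a caller that retries a bounded number of times on that code; the retry count and back-off delay are illustrative and not part of this patch:

/* Sketch: retry an SPMI sleep command while the transaction is reported as
 * temporarily unavailable. The bound of three attempts is arbitrary.
 */
static int example_spmi_sleep(struct spmi_controller *ctrl, u8 sid)
{
	int ret, tries;

	for (tries = 0; tries < 3; tries++) {
		ret = spmi_command_sleep(ctrl, sid);
		if (ret != -EAGAIN)
			break;
		usleep_range(100, 200);	/* brief back-off before retrying */
	}

	return ret;
}
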
diff --git a/include/media/msmb_isp.h b/include/media/msmb_isp.h
index 30e7d06..e627977 100644
--- a/include/media/msmb_isp.h
+++ b/include/media/msmb_isp.h
@@ -154,6 +154,7 @@
 	uint8_t buf_divert; /* if TRUE no vb2 buf done. */
 	/*Return values*/
 	uint32_t axi_stream_handle;
+	uint32_t burst_len;
 };
 
 struct msm_vfe_axi_stream_release_cmd {
@@ -225,6 +226,7 @@
 	uint8_t num_streams;
 	uint32_t stream_handle[MSM_ISP_STATS_MAX];
 	uint8_t enable;
+	uint32_t stats_burst_len;
 };
 
 enum msm_vfe_reg_cfg_type {
@@ -242,6 +244,7 @@
 	GET_MAX_CLK_RATE,
 	VFE_HW_UPDATE_LOCK,
 	VFE_HW_UPDATE_UNLOCK,
+	SET_WM_UB_SIZE,
 };
 
 struct msm_vfe_cfg_cmd2 {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index cf82dbd..4ff640a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1963,8 +1963,8 @@
 
 	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
 			     NL80211_CMD_NEW_KEY);
-	if (IS_ERR(hdr))
-		return PTR_ERR(hdr);
+	if (!hdr)
+		goto nla_put_failure;
 
 	cookie.msg = msg;
 	cookie.idx = key_idx;
@@ -4245,6 +4245,10 @@
 				err = -EINVAL;
 				goto out_free;
 			}
+
+			if (!wiphy->bands[band])
+				continue;
+
 			err = ieee80211_get_ratemask(wiphy->bands[band],
 						     nla_data(attr),
 						     nla_len(attr),
@@ -5409,6 +5413,9 @@
 					   NL80211_CMD_TESTMODE);
 		struct nlattr *tmdata;
 
+		if (!hdr)
+			break;
+
 		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) {
 			genlmsg_cancel(skb, hdr);
 			break;
@@ -5817,9 +5824,8 @@
 
 	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
 			     NL80211_CMD_REMAIN_ON_CHANNEL);
-
-	if (IS_ERR(hdr)) {
-		err = PTR_ERR(hdr);
+	if (!hdr) {
+		err = -ENOBUFS;
 		goto free_msg;
 	}
 
@@ -6100,9 +6106,8 @@
 
 		hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
 				     NL80211_CMD_FRAME);
-
-		if (IS_ERR(hdr)) {
-			err = PTR_ERR(hdr);
+		if (!hdr) {
+			err = -ENOBUFS;
 			goto free_msg;
 		}
 	}
@@ -6662,9 +6667,8 @@
 
 	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
 			     NL80211_CMD_PROBE_CLIENT);
-
-	if (IS_ERR(hdr)) {
-		err = PTR_ERR(hdr);
+	if (!hdr) {
+		err = -ENOBUFS;
 		goto free_msg;
 	}
 
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 87547ca..94c06df 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -65,11 +65,45 @@
 	return is_all_idle;
 }
 
+
+static bool cfg80211_is_all_countryie_ignore(void)
+{
+	struct cfg80211_registered_device *rdev;
+	struct wireless_dev *wdev;
+	bool is_all_countryie_ignore = true;
+
+	mutex_lock(&cfg80211_mutex);
+
+	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+		cfg80211_lock_rdev(rdev);
+		list_for_each_entry(wdev, &rdev->netdev_list, list) {
+			wdev_lock(wdev);
+			if (!(wdev->wiphy->country_ie_pref &
+				NL80211_COUNTRY_IE_IGNORE_CORE)) {
+				is_all_countryie_ignore = false;
+				wdev_unlock(wdev);
+				cfg80211_unlock_rdev(rdev);
+				goto out;
+			}
+			wdev_unlock(wdev);
+		}
+		cfg80211_unlock_rdev(rdev);
+	}
+out:
+	mutex_unlock(&cfg80211_mutex);
+
+	return is_all_countryie_ignore;
+}
+
+
 static void disconnect_work(struct work_struct *work)
 {
 	if (!cfg80211_is_all_idle())
 		return;
 
+	if (cfg80211_is_all_countryie_ignore())
+		return;
+
 	regulatory_hint_disconnect();
 }
 
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index 38d7901..314c4f9 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -4139,6 +4139,12 @@
 
 	core = dev_get_drvdata(codec->dev->parent);
 
+	if (core == NULL) {
+		dev_err(codec->dev, "%s: core is null\n",
+				__func__);
+		return -EINVAL;
+	}
+
 	dev_dbg(codec->dev, "%s: event called! codec name %s\n",
 		__func__, w->codec->name);
 	dev_dbg(codec->dev, "%s: num_dai %d stream name %s event %d\n",
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 4c5d327..5602dd1 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -2555,9 +2555,10 @@
 						 WCD9XXX_CLSH_STATE_LO,
 						 WCD9XXX_CLSH_REQ_ENABLE,
 						 WCD9XXX_CLSH_EVENT_POST_PA);
-		pr_debug("%s: sleeping 3 ms after %s PA turn on\n",
+		pr_debug("%s: sleeping 5 ms after %s PA turn on\n",
 				__func__, w->name);
-		usleep_range(3000, 3000);
+		/* Wait for CnP time after PA enable */
+		usleep_range(5000, 5100);
 		break;
 	case SND_SOC_DAPM_POST_PMD:
 		wcd9xxx_clsh_fsm(codec, &taiko->clsh_d,
@@ -2565,6 +2566,10 @@
 						 WCD9XXX_CLSH_REQ_DISABLE,
 						 WCD9XXX_CLSH_EVENT_POST_PA);
 		snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x00);
+		pr_debug("%s: sleeping 5 ms after %s PA turn off\n",
+				__func__, w->name);
+		/* Wait for CnP time after PA disable */
+		usleep_range(5000, 5100);
 		break;
 	}
 	return 0;
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index fce1940..f6702c5 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -129,6 +129,8 @@
 	uint32_t stream_available;
 	uint32_t next_stream;
 
+	uint64_t marker_timestamp;
+
 	struct msm_compr_gapless_state gapless_state;
 
 	atomic_t start;
@@ -1064,6 +1066,7 @@
 		prtd->app_pointer  = 0;
 		prtd->bytes_received = 0;
 		prtd->bytes_sent = 0;
+		prtd->marker_timestamp = 0;
 
 		atomic_set(&prtd->xrun, 0);
 		spin_unlock_irqrestore(&prtd->lock, flags);
@@ -1196,6 +1199,8 @@
 			prtd->first_buffer = 1;
 			prtd->last_buffer = 0;
 			prtd->gapless_state.gapless_transition = 1;
+			prtd->marker_timestamp = 0;
+
 			/*
 			Don't reset these as these vars map to
 			total_bytes_transferred and total_bytes_available
@@ -1251,23 +1256,23 @@
 			q6asm_stream_cmd_nowait(ac, CMD_PAUSE, ac->stream_id);
 			prtd->cmd_ack = 0;
 			spin_unlock_irqrestore(&prtd->lock, flags);
-			pr_debug("%s:issue CMD_FLUSH ac->stream_id %d",
-					      __func__, ac->stream_id);
-			q6asm_stream_cmd(ac, CMD_FLUSH, ac->stream_id);
-			wait_event_timeout(prtd->flush_wait,
-					   prtd->cmd_ack, 1 * HZ / 4);
 
+			/*
+			 * Cache this time as last known time
+			 */
+			q6asm_get_session_time(prtd->audio_client,
+					       &prtd->marker_timestamp);
 			spin_lock_irqsave(&prtd->lock, flags);
 			/*
-			Don't reset these as these vars map to
-			total_bytes_transferred and total_bytes_available
-			directly, only total_bytes_transferred will be updated
-			in the next avail() ioctl
-			prtd->copied_total = 0;
-			prtd->bytes_received = 0;
-			do not reset prtd->bytes_sent as well as the same
-			session is used for gapless playback
-			*/
+			 * Don't reset these as these vars map to
+			 * total_bytes_transferred and total_bytes_available.
+			 * Just total_bytes_transferred will be updated
+			 * in the next avail() ioctl.
+			 * prtd->copied_total = 0;
+			 * prtd->bytes_received = 0;
+			 * do not reset prtd->bytes_sent as well as the same
+			 * session is used for gapless playback
+			 */
 			prtd->byte_offset = 0;
 
 			prtd->app_pointer  = 0;
@@ -1275,8 +1280,15 @@
 			prtd->last_buffer = 0;
 			atomic_set(&prtd->drain, 0);
 			atomic_set(&prtd->xrun, 1);
-			q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
 			spin_unlock_irqrestore(&prtd->lock, flags);
+
+			pr_debug("%s:issue CMD_FLUSH ac->stream_id %d",
+					      __func__, ac->stream_id);
+			q6asm_stream_cmd(ac, CMD_FLUSH, ac->stream_id);
+			wait_event_timeout(prtd->flush_wait,
+					   prtd->cmd_ack, 1 * HZ / 4);
+
+			q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
 		}
 		prtd->cmd_interrupt = 0;
 		break;
@@ -1404,6 +1416,8 @@
 				__func__, timestamp);
 			return -EAGAIN;
 		}
+	} else {
+		timestamp = prtd->marker_timestamp;
 	}
 
 	/* DSP returns timestamp in usec */
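
The compress-offload change above caches the DSP session time in marker_timestamp right before CMD_FLUSH is issued, and the timestamp query later falls back to that cached value when a fresh DSP reading is not available (for example during a gapless transition). A condensed sketch of that fallback, assuming only the fields and the q6asm_get_session_time() call visible in this patch; the struct name msm_compr_audio is assumed:

/* Sketch: remember the last known DSP position before flushing, and report
 * it whenever a fresh DSP timestamp cannot be used.
 */
static void example_cache_position(struct msm_compr_audio *prtd)
{
	q6asm_get_session_time(prtd->audio_client, &prtd->marker_timestamp);
}

static uint64_t example_report_position(struct msm_compr_audio *prtd,
					bool fresh_time_valid,
					uint64_t dsp_timestamp)
{
	return fresh_time_valid ? dsp_timestamp : prtd->marker_timestamp;
}
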
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 4893990..1553d1c 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -3971,50 +3971,36 @@
 	{"AUDIO_REF_EC_UL1 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
 	{"AUDIO_REF_EC_UL1 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
 	{"AUDIO_REF_EC_UL1 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
-	{"AUDIO_REF_EC_UL1 MUX", "I2S_RX" , "PRI_I2S_TX"},
-	{"AUDIO_REF_EC_UL1 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
 
 	{"AUDIO_REF_EC_UL2 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
 	{"AUDIO_REF_EC_UL2 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
 	{"AUDIO_REF_EC_UL2 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
 	{"AUDIO_REF_EC_UL2 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
-	{"AUDIO_REF_EC_UL2 MUX", "I2S_RX" , "PRI_I2S_TX"},
-	{"AUDIO_REF_EC_UL2 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
 
 	{"AUDIO_REF_EC_UL4 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
 	{"AUDIO_REF_EC_UL4 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
 	{"AUDIO_REF_EC_UL4 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
 	{"AUDIO_REF_EC_UL4 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
-	{"AUDIO_REF_EC_UL4 MUX", "I2S_RX" , "PRI_I2S_TX"},
-	{"AUDIO_REF_EC_UL4 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
 
 	{"AUDIO_REF_EC_UL5 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
 	{"AUDIO_REF_EC_UL5 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
 	{"AUDIO_REF_EC_UL5 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
 	{"AUDIO_REF_EC_UL5 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
-	{"AUDIO_REF_EC_UL5 MUX", "I2S_RX" , "PRI_I2S_TX"},
-	{"AUDIO_REF_EC_UL5 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
 
 	{"AUDIO_REF_EC_UL6 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
 	{"AUDIO_REF_EC_UL6 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
 	{"AUDIO_REF_EC_UL6 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
 	{"AUDIO_REF_EC_UL6 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
-	{"AUDIO_REF_EC_UL6 MUX", "I2S_RX" , "PRI_I2S_TX"},
-	{"AUDIO_REF_EC_UL6 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
 
 	{"AUDIO_REF_EC_UL8 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
 	{"AUDIO_REF_EC_UL8 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
 	{"AUDIO_REF_EC_UL8 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
 	{"AUDIO_REF_EC_UL8 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
-	{"AUDIO_REF_EC_UL8 MUX", "I2S_RX" , "PRI_I2S_TX"},
-	{"AUDIO_REF_EC_UL8 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
 
 	{"AUDIO_REF_EC_UL9 MUX", "PRI_MI2S_TX" , "PRI_MI2S_TX"},
 	{"AUDIO_REF_EC_UL9 MUX", "SEC_MI2S_TX" , "SEC_MI2S_TX"},
 	{"AUDIO_REF_EC_UL9 MUX", "TERT_MI2S_TX" , "TERT_MI2S_TX"},
 	{"AUDIO_REF_EC_UL9 MUX", "QUAT_MI2S_TX" , "QUAT_MI2S_TX"},
-	{"AUDIO_REF_EC_UL9 MUX", "I2S_RX" , "PRI_I2S_TX"},
-	{"AUDIO_REF_EC_UL9 MUX", "SLIM_RX" , "SLIMBUS_0_TX"},
 
 	{"MM_UL1", NULL, "AUDIO_REF_EC_UL1 MUX"},
 	{"MM_UL2", NULL, "AUDIO_REF_EC_UL2 MUX"},