Merge "msm: video: wfd: Move domain specific mappings into subdevices"
diff --git a/Documentation/devicetree/bindings/cache/msm_cache_dump.txt b/Documentation/devicetree/bindings/cache/msm_cache_dump.txt
new file mode 100644
index 0000000..b7a68b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/cache/msm_cache_dump.txt
@@ -0,0 +1,24 @@
+* Qualcomm Krait L1 / L2 cache dumping device
+This is a virtual device that dumps the contents of L1 and L2 caches in the
+event of a kernel panic or a watchdog trigger.
+
+Required properties:
+- compatible:			Should be "qcom,cache_dump"
+- qcom,l1-dump-size:		Amount of memory needed for L1 dump(s), in bytes
+- qcom,l2-dump-size:		Amount of memory needed for L2 dump, in bytes
+- qcom,memory-reservation-size: Total memory to reserve for the dump buffers, in bytes
+- qcom,memory-reservation-type: Type of memory to be reserved
+
+Optional properties:
+- qcom,use-imem-dump-offset: If specified, store cache dump buffer addresses
+  in IMEM rather than using the memory dump reservation table.
+
+Example:
+	qcom,cache_dump {
+		compatible = "qcom,cache_dump";
+		qcom,l1-dump-size = <0x100000>;
+		qcom,l2-dump-size = <0x400000>;
+		qcom,memory-reservation-type = "EBI1";
+		qcom,memory-reservation-size = <0x500000>;
+	};
+
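+Example (with the optional IMEM dump offset; values are illustrative only):
+	qcom,cache_dump {
+		compatible = "qcom,cache_dump";
+		qcom,l1-dump-size = <0x100000>;
+		qcom,l2-dump-size = <0x400000>;
+		qcom,memory-reservation-type = "EBI1";
+		qcom,memory-reservation-size = <0x500000>;
+		qcom,use-imem-dump-offset;
+	};
+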
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index fe60c83..ffd4518 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -860,6 +860,14 @@
 		interrupt-names = "l1_irq", "l2_irq";
 	};
 
+	qcom,cache_dump {
+		compatible = "qcom,cache_dump";
+		qcom,l1-dump-size = <0x100000>;
+		qcom,l2-dump-size = <0x500000>;
+		qcom,memory-reservation-type = "EBI1";
+		qcom,memory-reservation-size = <0x600000>; /* 6M EBI1 buffer */
+	};
+
 	tsens@fc4a8000 {
 		compatible = "qcom,msm-tsens";
 		reg = <0xfc4a8000 0x2000>,
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 6ddb462..f6b1332 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -5330,6 +5330,7 @@
 	CLK_LOOKUP("cam_clk",		cam0_clk.c,	"4-0020"),
 	CLK_LOOKUP("cam_clk",		cam1_clk.c,	"4-0048"),
 	CLK_LOOKUP("cam_clk",		cam1_clk.c,	"4-006c"),
+	CLK_LOOKUP("cam_clk",		cam2_clk.c,		""),
 	CLK_LOOKUP("csi_src_clk",	csi0_src_clk.c,		"msm_csid.0"),
 	CLK_LOOKUP("csi_src_clk",	csi1_src_clk.c,		"msm_csid.1"),
 	CLK_LOOKUP("csi_src_clk",	csi2_src_clk.c,		"msm_csid.2"),
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index b65bd1f..acf577e 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -365,25 +365,25 @@
 static struct msm_iommu_dev gfx3d_iommu = {
 	.name = "gfx3d",
 	.ncb = 3,
-	.ttbr_split = 2,
+	.ttbr_split = 1,
 };
 
 static struct msm_iommu_dev gfx3d1_iommu = {
 	.name = "gfx3d1",
 	.ncb = 3,
-	.ttbr_split = 2,
+	.ttbr_split = 1,
 };
 
 static struct msm_iommu_dev gfx2d0_iommu = {
 	.name = "gfx2d0",
 	.ncb = 2,
-	.ttbr_split = 2,
+	.ttbr_split = 1,
 };
 
 static struct msm_iommu_dev gfx2d1_iommu = {
 	.name = "gfx2d1",
 	.ncb = 2,
-	.ttbr_split = 2,
+	.ttbr_split = 1,
 };
 
 static struct msm_iommu_dev vcap_iommu = {
diff --git a/arch/arm/mach-msm/include/mach/msm_memory_dump.h b/arch/arm/mach-msm/include/mach/msm_memory_dump.h
index 5e686bf..729a077 100644
--- a/arch/arm/mach-msm/include/mach/msm_memory_dump.h
+++ b/arch/arm/mach-msm/include/mach/msm_memory_dump.h
@@ -17,7 +17,8 @@
 
 enum dump_client_type {
 	MSM_CPU_CTXT = 0,
-	MSM_CACHE,
+	MSM_L1_CACHE,
+	MSM_L2_CACHE,
 	MSM_OCMEM,
 	MSM_TMC_ETFETB,
 	MSM_ETM0_REG,
diff --git a/arch/arm/mach-msm/msm_cache_dump.c b/arch/arm/mach-msm/msm_cache_dump.c
index 9759d5a..8b4978f 100644
--- a/arch/arm/mach-msm/msm_cache_dump.c
+++ b/arch/arm/mach-msm/msm_cache_dump.c
@@ -21,10 +21,13 @@
 #include <linux/pm.h>
 #include <linux/memory_alloc.h>
 #include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include <mach/scm.h>
 #include <mach/msm_cache_dump.h>
 #include <mach/memory.h>
 #include <mach/msm_iomap.h>
+#include <mach/msm_memory_dump.h>
 
 #define L2_DUMP_OFFSET 0x14
 
@@ -37,6 +40,7 @@
  */
 static struct l1_cache_dump *l1_dump;
 static struct l2_cache_dump *l2_dump;
+static int use_imem_dump_offset;
 
 static int msm_cache_dump_panic(struct notifier_block *this,
 				unsigned long event, void *ptr)
@@ -45,7 +49,8 @@
 	/*
 	 * Clear the bootloader magic so the dumps aren't overwritten
 	 */
-	__raw_writel(0, MSM_IMEM_BASE + L2_DUMP_OFFSET);
+	if (use_imem_dump_offset)
+		__raw_writel(0, MSM_IMEM_BASE + L2_DUMP_OFFSET);
 
 	scm_call_atomic1(L1C_SERVICE_ID, CACHE_BUFFER_DUMP_COMMAND_ID, 2);
 	scm_call_atomic1(L1C_SERVICE_ID, CACHE_BUFFER_DUMP_COMMAND_ID, 1);
@@ -65,14 +70,38 @@
 static int msm_cache_dump_probe(struct platform_device *pdev)
 {
 	struct msm_cache_dump_platform_data *d = pdev->dev.platform_data;
+	struct msm_client_dump l1_dump_entry, l2_dump_entry;
 	int ret;
 	struct {
 		unsigned long buf;
 		unsigned long size;
 	} l1_cache_data;
 	void *temp;
-	unsigned long total_size = d->l1_size + d->l2_size;
+	u32 l1_size, l2_size;
+	unsigned long total_size;
 
+	if (pdev->dev.of_node) {
+		ret = of_property_read_u32(pdev->dev.of_node,
+					   "qcom,l1-dump-size", &l1_size);
+		if (ret)
+			return ret;
+
+		ret = of_property_read_u32(pdev->dev.of_node,
+					   "qcom,l2-dump-size", &l2_size);
+		if (ret)
+			return ret;
+
+		use_imem_dump_offset = of_property_read_bool(pdev->dev.of_node,
+						   "qcom,use-imem-dump-offset");
+	} else {
+		l1_size = d->l1_size;
+		l2_size = d->l2_size;
+
+		/* Non-DT targets always use the IMEM dump offset */
+		use_imem_dump_offset = 1;
+	}
+
+	total_size = l1_size + l2_size;
 	msm_cache_dump_addr = allocate_contiguous_ebi_nomap(total_size, SZ_4K);
 
 	if (!msm_cache_dump_addr) {
@@ -86,7 +115,7 @@
 	iounmap(temp);
 
 	l1_cache_data.buf = msm_cache_dump_addr;
-	l1_cache_data.size = d->l1_size;
+	l1_cache_data.size = l1_size;
 
 	ret = scm_call(L1C_SERVICE_ID, L1C_BUFFER_SET_COMMAND_ID,
 			&l1_cache_data, sizeof(l1_cache_data), NULL, 0);
@@ -96,10 +125,11 @@
 			__func__, ret);
 
 	l1_dump = (struct l1_cache_dump *)msm_cache_dump_addr;
+	l2_dump = (struct l2_cache_dump *)(msm_cache_dump_addr + l1_size);
 
 #if defined(CONFIG_MSM_CACHE_DUMP_ON_PANIC)
-	l1_cache_data.buf = msm_cache_dump_addr + d->l1_size;
-	l1_cache_data.size = d->l2_size;
+	l1_cache_data.buf = msm_cache_dump_addr + l1_size;
+	l1_cache_data.size = l2_size;
 
 	ret = scm_call(L1C_SERVICE_ID, L2C_BUFFER_SET_COMMAND_ID,
 			&l1_cache_data, sizeof(l1_cache_data), NULL, 0);
@@ -108,11 +138,27 @@
 		pr_err("%s: could not register L2 buffer ret = %d.\n",
 			__func__, ret);
 #endif
-	__raw_writel(msm_cache_dump_addr + d->l1_size,
+
+	if (use_imem_dump_offset)
+		__raw_writel(msm_cache_dump_addr + l1_size,
 			MSM_IMEM_BASE + L2_DUMP_OFFSET);
+	else {
+		l1_dump_entry.id = MSM_L1_CACHE;
+		l1_dump_entry.start_addr = msm_cache_dump_addr;
+		l1_dump_entry.end_addr = l1_dump_entry.start_addr + l1_size - 1;
 
+		l2_dump_entry.id = MSM_L2_CACHE;
+		l2_dump_entry.start_addr = msm_cache_dump_addr + l1_size;
+		l2_dump_entry.end_addr = l2_dump_entry.start_addr + l2_size - 1;
 
-	l2_dump = (struct l2_cache_dump *)(msm_cache_dump_addr + d->l1_size);
+		ret = msm_dump_table_register(&l1_dump_entry);
+		if (ret)
+			pr_err("Could not register L1 dump area: %d\n", ret);
+
+		ret = msm_dump_table_register(&l2_dump_entry);
+		if (ret)
+			pr_err("Could not register L2 dump area: %d\n", ret);
+	}
 
 	atomic_notifier_chain_register(&panic_notifier_list,
 						&msm_cache_dump_blk);
@@ -126,11 +172,18 @@
 	return 0;
 }
 
+static struct of_device_id cache_dump_match_table[] = {
+	{	.compatible = "qcom,cache_dump",	},
+	{}
+};
+EXPORT_COMPAT("qcom,cache_dump");
+
 static struct platform_driver msm_cache_dump_driver = {
 	.remove		= __devexit_p(msm_cache_dump_remove),
 	.driver         = {
 		.name = "msm_cache_dump",
-		.owner = THIS_MODULE
+		.owner = THIS_MODULE,
+		.of_match_table = cache_dump_match_table,
 	},
 };
 
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index a190342..a9d1ed8 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -289,9 +289,10 @@
 
 	if (!handle)
 		return;
-	for (i = 0; i < handle->write_idx; i++)
+	for (i = 0; i < handle->num_elements; i++)
 		kfree(handle->kvp[i].value);
 	kfree(handle->kvp);
+	kfree(handle->buf);
 	kfree(handle);
 }
 EXPORT_SYMBOL(msm_rpm_free_request);
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index b8fca38..7bf928f 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2706,62 +2706,100 @@
 	return val;
 }
 
+struct a3xx_vbif_data {
+	unsigned int reg;
+	unsigned int val;
+};
+
+/* VBIF registers start after 0x3000, so 0x0 marks the end of each list */
+static struct a3xx_vbif_data a305_vbif[] = {
+	/* Set up 16 deep read/write request queues */
+	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
+	{ A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
+	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
+	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
+	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+	{ A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
+	{ A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
+	/* Enable WR-REQ */
+	{ A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
+	/* Set up round robin arbitration between both AXI ports */
+	{ A3XX_VBIF_ARB_CTL, 0x00000030 },
+	/* Set up AOOO */
+	{ A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
+	{ A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
+	{0, 0},
+};
+
+static struct a3xx_vbif_data a320_vbif[] = {
+	/* Set up 16 deep read/write request queues */
+	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
+	{ A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
+	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
+	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
+	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+	{ A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
+	{ A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
+	/* Enable WR-REQ */
+	{ A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
+	/* Set up round robin arbitration between both AXI ports */
+	{ A3XX_VBIF_ARB_CTL, 0x00000030 },
+	/* Set up AOOO */
+	{ A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
+	{ A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
+	/* Enable 1K sort */
+	{ A3XX_VBIF_ABIT_SORT, 0x000000FF },
+	{ A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
+	{0, 0},
+};
+
+static struct a3xx_vbif_data a330_vbif[] = {
+	/* Set up 16 deep read/write request queues */
+	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818 },
+	{ A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818 },
+	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818 },
+	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818 },
+	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+	{ A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818 },
+	{ A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818 },
+	/* Enable WR-REQ */
+	{ A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003F },
+	/* Set up round robin arbitration between both AXI ports */
+	{ A3XX_VBIF_ARB_CTL, 0x00000030 },
+	/* Set up VBIF_ROUND_ROBIN_QOS_ARB */
+	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001 },
+	/* Set up AOOO */
+	{ A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000FFFF },
+	{ A3XX_VBIF_OUT_AXI_AOOO, 0xFFFFFFFF },
+	/* Enable 1K sort */
+	{ A3XX_VBIF_ABIT_SORT, 0x1FFFF },
+	{ A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
+	/* Disable VBIF clock gating to allow AXI to run at a
+	 * higher frequency than the GPU.
+	 */
+	{ A3XX_VBIF_CLKON, 1 },
+	{0, 0},
+};
+
 static void a3xx_start(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
+	struct a3xx_vbif_data *vbif = NULL;
 
-	/* Set up 16 deep read/write request queues */
-	if (adreno_is_a330(adreno_dev)) {
-		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
-		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
-		adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
-		adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
-		adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
-		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
-		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
-		/* Enable WR-REQ */
-		adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003F);
+	if (adreno_is_a305(adreno_dev))
+		vbif = a305_vbif;
+	else if (adreno_is_a320(adreno_dev))
+		vbif = a320_vbif;
+	else if (adreno_is_a330(adreno_dev))
+		vbif = a330_vbif;
 
-		/* Set up round robin arbitration between both AXI ports */
-		adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
-		/* Set up VBIF_ROUND_ROBIN_QOS_ARB */
-		adreno_regwrite(device, A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
+	BUG_ON(vbif == NULL);
 
-		/* Set up AOOO */
-		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000FFFF);
-		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0xFFFFFFFF);
-
-		/* Enable 1K sort */
-		adreno_regwrite(device, A3XX_VBIF_ABIT_SORT, 0x1FFFF);
-		adreno_regwrite(device, A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
-
-		/* Diable VBIF clock gating. This is to enable AXI running
-		 * higher frequency than GPU.
-		 */
-		adreno_regwrite(device, A3XX_VBIF_CLKON, 1);
-	} else {
-		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
-		adreno_regwrite(device, A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
-		adreno_regwrite(device, A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
-		adreno_regwrite(device, A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
-		adreno_regwrite(device, A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
-		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
-		adreno_regwrite(device, A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
-		/* Enable WR-REQ */
-		adreno_regwrite(device, A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF);
-
-		/* Set up round robin arbitration between both AXI ports */
-		adreno_regwrite(device, A3XX_VBIF_ARB_CTL, 0x00000030);
-		/* Set up AOOO */
-		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C);
-		adreno_regwrite(device, A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C);
+	while (vbif->reg != 0) {
+		adreno_regwrite(device, vbif->reg, vbif->val);
+		vbif++;
 	}
 
-	if (cpu_is_apq8064()) {
-		/* Enable 1K sort */
-		adreno_regwrite(device, A3XX_VBIF_ABIT_SORT, 0x000000FF);
-		adreno_regwrite(device, A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
-	}
 	/* Make all blocks contribute to the GPU BUSY perf counter */
 	adreno_regwrite(device, A3XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);
 
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index dee889d..0e1e100 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -320,7 +320,7 @@
 	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
 		return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
 	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
-		return SZ_2G;
+		return SZ_2G - KGSL_PAGETABLE_BASE;
 	else
 		return 0;
 }
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index b153b27..841e5e8 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -1336,34 +1336,41 @@
 msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
 		 unsigned int status)
 {
-	if (status & MCI_DATACRCFAIL) {
-		if (!(data->mrq->cmd->opcode == MMC_BUS_TEST_W
-			|| data->mrq->cmd->opcode == MMC_BUS_TEST_R
-			|| data->mrq->cmd->opcode ==
-				MMC_SEND_TUNING_BLOCK_HS200)) {
-			pr_err("%s: Data CRC error\n",
-			       mmc_hostname(host->mmc));
-			pr_err("%s: opcode 0x%.8x\n", __func__,
-			       data->mrq->cmd->opcode);
-			pr_err("%s: blksz %d, blocks %d\n", __func__,
-			       data->blksz, data->blocks);
-			data->error = -EILSEQ;
+	if ((status & MCI_DATACRCFAIL) || (status & MCI_DATATIMEOUT)) {
+		u32 opcode = data->mrq->cmd->opcode;
+
+		if (!((!host->tuning_in_progress && opcode == MMC_BUS_TEST_W)
+		    || (opcode == MMC_BUS_TEST_R) ||
+		    (host->tuning_in_progress &&
+		    (opcode == MMC_SEND_TUNING_BLOCK_HS200 ||
+		     opcode == MMC_SEND_TUNING_BLOCK)))) {
+			if (status & MCI_DATACRCFAIL) {
+				pr_err("%s: Data CRC error\n",
+				       mmc_hostname(host->mmc));
+				pr_err("%s: opcode 0x%.8x\n", __func__, opcode);
+				pr_err("%s: blksz %d, blocks %d\n", __func__,
+				       data->blksz, data->blocks);
+			} else {
+				pr_err("%s: CMD%d: Data timeout. DAT0 => %d\n",
+					 mmc_hostname(host->mmc), opcode,
+					 (readl_relaxed(host->base
+					 + MCI_TEST_INPUT) & 0x2) ? 1 : 0);
+				msmsdcc_dump_sdcc_state(host);
+			}
 		}
-	} else if (status & MCI_DATATIMEOUT) {
-		/* CRC is optional for the bus test commands, not all
+
+		/*
+		 * CRC is optional for the bus test commands, not all
 		 * cards respond back with CRC. However controller
 		 * waits for the CRC and times out. Hence ignore the
 		 * data timeouts during the Bustest.
 		 */
-		if (!(data->mrq->cmd->opcode == MMC_BUS_TEST_W
-			|| data->mrq->cmd->opcode == MMC_BUS_TEST_R)) {
-			pr_err("%s: CMD%d: Data timeout. DAT0 => %d\n",
-				 mmc_hostname(host->mmc),
-				 data->mrq->cmd->opcode,
-				 (readl_relaxed(host->base
-				 + MCI_TEST_INPUT) & 0x2) ? 1 : 0);
-			data->error = -ETIMEDOUT;
-			msmsdcc_dump_sdcc_state(host);
+		if (!((!host->tuning_in_progress && opcode == MMC_BUS_TEST_W)
+		    || (opcode == MMC_BUS_TEST_R))) {
+			if (status & MCI_DATACRCFAIL)
+				data->error = -EILSEQ;
+			else
+				data->error = -ETIMEDOUT;
 		}
 	} else if (status & MCI_RXOVERRUN) {
 		pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 45d51ce..288ed43 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -109,7 +109,7 @@
 /* This needs to be modified manually now, when we add
  a new RANGE of SSIDs to the msg_mask_tbl */
 #define MSG_MASK_TBL_CNT		24
-#define EVENT_LAST_ID			0x08AD
+#define EVENT_LAST_ID			0x08C5
 
 #define MSG_SSID_0			0
 #define MSG_SSID_0_LAST			93
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index ac4ec09..1da6aa2 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -242,18 +242,18 @@
  */
 struct adm_cmd_set_pp_params_v5 {
 	struct apr_hdr hdr;
-		u32                  data_payload_addr_lsw;
+	u32		payload_addr_lsw;
 	/* LSW of parameter data payload address.*/
-	u32                  data_payload_addr_msw;
+	u32		payload_addr_msw;
 	/* MSW of parameter data payload address.*/
 
-		u32                  mem_map_handle;
+	u32		mem_map_handle;
 /* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS
  * command */
 /* If mem_map_handle is zero implies the message is in
  * the payload */
 
-	u32                  data_payload_size;
+	u32		payload_size;
 /* Size in bytes of the variable payload accompanying this
  * message or
  * in shared memory. This is used for parsing the parameter
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index ac11f1a..a47e446 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -8379,8 +8379,9 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct tabla_priv *tabla = platform_get_drvdata(pdev);
-	dev_dbg(dev, "%s: system resume\n", __func__);
-	tabla->mbhc_last_resume = jiffies;
+	dev_dbg(dev, "%s: system resume tabla %p\n", __func__, tabla);
+	if (tabla)
+		tabla->mbhc_last_resume = jiffies;
 	return 0;
 }
 
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index e5837b2..62257b4 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -41,6 +41,14 @@
 	wait_queue_head_t wait[Q6_AFE_MAX_PORTS];
 };
 
+static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
+static struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];
+
+/* 0 - (MAX_AUDPROC_TYPES - 1):				audproc handles */
+/* MAX_AUDPROC_TYPES - (2 * MAX_AUDPROC_TYPES - 1):	audvol handles */
+atomic_t mem_map_handles[(2 * MAX_AUDPROC_TYPES)];
+atomic_t mem_map_index;
+
 static struct adm_ctl			this_adm;
 
 static int32_t adm_callback(struct apr_client_data *data, void *priv)
@@ -63,6 +71,18 @@
 			}
 			this_adm.apr = NULL;
 		}
+		pr_debug("Resetting calibration blocks\n");
+		for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
+			/* Device calibration */
+			mem_addr_audproc[i].cal_size = 0;
+			mem_addr_audproc[i].cal_kvaddr = 0;
+			mem_addr_audproc[i].cal_paddr = 0;
+
+			/* Volume calibration */
+			mem_addr_audvol[i].cal_size = 0;
+			mem_addr_audvol[i].cal_kvaddr = 0;
+			mem_addr_audvol[i].cal_paddr = 0;
+		}
 		return 0;
 	}
 
@@ -82,18 +102,23 @@
 			switch (payload[0]) {
 			case ADM_CMD_SET_PP_PARAMS_V5:
 				if (rtac_make_adm_callback(
-					payload, data->payload_size))
+					payload, data->payload_size)) {
 					pr_debug("%s: payload[0]: 0x%x\n",
 						__func__, payload[0]);
 					break;
+				}
 			case ADM_CMD_DEVICE_CLOSE_V5:
 			case ADM_CMD_SHARED_MEM_UNMAP_REGIONS:
-			case ADM_CMD_SHARED_MEM_MAP_REGIONS:
 			case ADM_CMD_MATRIX_MAP_ROUTINGS_V5:
-				pr_debug("ADM_CMD_MATRIX_MAP_ROUTINGS\n");
+				pr_debug("%s: Basic callback received, wake up.\n",
+					__func__);
 				atomic_set(&this_adm.copp_stat[index], 1);
 				wake_up(&this_adm.wait[index]);
 				break;
+			case ADM_CMD_SHARED_MEM_MAP_REGIONS:
+				/* Block until memory handle comes back */
+				/* via ADM_CMDRSP_SHARED_MEM_MAP_REGIONS */
+				break;
 			default:
 				pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
 								payload[0]);
@@ -125,6 +150,14 @@
 			rtac_make_adm_callback(payload,
 				data->payload_size);
 			break;
+		case ADM_CMDRSP_SHARED_MEM_MAP_REGIONS:
+			pr_debug("%s: ADM_CMDRSP_SHARED_MEM_MAP_REGIONS\n",
+				__func__);
+			atomic_set(&mem_map_handles[
+				atomic_read(&mem_map_index)], *payload);
+			atomic_set(&this_adm.copp_stat[0], 1);
+			wake_up(&this_adm.wait[index]);
+			break;
 		default:
 			pr_err("%s: Unknown cmd:0x%x\n", __func__,
 							data->opcode);
@@ -134,12 +167,143 @@
 	return 0;
 }
 
-/* TODO: send_adm_cal_block function to be defined
-	when calibration available for 8974 */
+static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal)
+{
+	s32				result = 0;
+	struct adm_cmd_set_pp_params_v5	adm_params;
+	int index = afe_get_port_index(port_id);
+	if (index < 0 || index >= AFE_MAX_PORTS) {
+		pr_err("%s: invalid port idx %d portid %d\n",
+				__func__, index, port_id);
+		return 0;
+	}
+
+	pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);
+
+	if (!aud_cal || aud_cal->cal_size == 0) {
+		pr_debug("%s: No ADM cal to send for port_id = %d!\n",
+			__func__, port_id);
+		result = -EINVAL;
+		goto done;
+	}
+
+	adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(20), APR_PKT_VER);
+	adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+		sizeof(adm_params));
+	adm_params.hdr.src_svc = APR_SVC_ADM;
+	adm_params.hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params.hdr.src_port = port_id;
+	adm_params.hdr.dest_svc = APR_SVC_ADM;
+	adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+	adm_params.hdr.token = port_id;
+	adm_params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	adm_params.payload_addr_lsw = aud_cal->cal_paddr;
+	adm_params.payload_addr_msw = 0;
+	adm_params.mem_map_handle = atomic_read(&mem_map_handles[
+					atomic_read(&mem_map_index)]);
+	adm_params.payload_size = aud_cal->cal_size;
+
+	atomic_set(&this_adm.copp_stat[index], 0);
+	pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
+		__func__, adm_params.payload_addr_lsw,
+		adm_params.payload_size);
+	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
+	if (result < 0) {
+		pr_err("%s: Set params failed port = %d payload = 0x%x\n",
+			__func__, port_id, aud_cal->cal_paddr);
+		result = -EINVAL;
+		goto done;
+	}
+	/* Wait for the callback */
+	result = wait_event_timeout(this_adm.wait[index],
+		atomic_read(&this_adm.copp_stat[index]),
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!result) {
+		pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
+			__func__, port_id, aud_cal->cal_paddr);
+		result = -EINVAL;
+		goto done;
+	}
+
+	result = 0;
+done:
+	return result;
+}
+
 static void send_adm_cal(int port_id, int path)
 {
-	/* function to be defined when calibration available for 8974 */
+	int			result = 0;
+	s32			acdb_path;
+	struct acdb_cal_block	aud_cal;
+	int			size = 4096;
 	pr_debug("%s\n", __func__);
+
+	/* Maps audio_dev_ctrl path definition to ACDB definition */
+	acdb_path = path - 1;
+
+	pr_debug("%s: Sending audproc cal\n", __func__);
+	get_audproc_cal(acdb_path, &aud_cal);
+
+	/* map & cache buffers used */
+	if (((mem_addr_audproc[acdb_path].cal_paddr != aud_cal.cal_paddr)  &&
+		(aud_cal.cal_size > 0)) ||
+		(aud_cal.cal_size > mem_addr_audproc[acdb_path].cal_size)) {
+
+		atomic_set(&mem_map_index, acdb_path);
+		if (mem_addr_audproc[acdb_path].cal_paddr != 0)
+			adm_memory_unmap_regions(port_id,
+				&mem_addr_audproc[acdb_path].cal_paddr,
+				&size, 1);
+
+		result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
+						0, &aud_cal.cal_size, 1);
+		if (result < 0)
+			pr_err("ADM audproc mmap did not work! path = %d, addr = 0x%x, size = %d\n",
+				acdb_path, aud_cal.cal_paddr,
+				aud_cal.cal_size);
+		else
+			mem_addr_audproc[acdb_path] = aud_cal;
+	}
+
+	if (!send_adm_cal_block(port_id, &aud_cal))
+		pr_debug("%s: Audproc cal sent for port id: %d, path %d\n",
+			__func__, port_id, acdb_path);
+	else
+		pr_debug("%s: Audproc cal not sent for port id: %d, path %d\n",
+			__func__, port_id, acdb_path);
+
+	pr_debug("%s: Sending audvol cal\n", __func__);
+	get_audvol_cal(acdb_path, &aud_cal);
+
+	/* map & cache buffers used */
+	if (((mem_addr_audvol[acdb_path].cal_paddr != aud_cal.cal_paddr)  &&
+		(aud_cal.cal_size > 0))  ||
+		(aud_cal.cal_size > mem_addr_audvol[acdb_path].cal_size)) {
+
+		atomic_set(&mem_map_index, (acdb_path + MAX_AUDPROC_TYPES));
+		if (mem_addr_audvol[acdb_path].cal_paddr != 0)
+			adm_memory_unmap_regions(port_id,
+				&mem_addr_audvol[acdb_path].cal_paddr,
+				&size, 1);
+
+		result = adm_memory_map_regions(port_id, &aud_cal.cal_paddr,
+						0, &aud_cal.cal_size, 1);
+		if (result < 0)
+			pr_err("ADM audvol mmap did not work! path = %d, addr = 0x%x, size = %d\n",
+				acdb_path, aud_cal.cal_paddr,
+				aud_cal.cal_size);
+		else
+			mem_addr_audvol[acdb_path] = aud_cal;
+	}
+
+	if (!send_adm_cal_block(port_id, &aud_cal))
+		pr_debug("%s: Audvol cal sent for port id: %d, path %d\n",
+			__func__, port_id, acdb_path);
+	else
+		pr_debug("%s: Audvol cal not sent for port id: %d, path %d\n",
+			__func__, port_id, acdb_path);
 }
 
 int adm_connect_afe_port(int mode, int session_id, int port_id)
@@ -572,7 +736,8 @@
 	unmap_regions.hdr.dest_port = 0;
 	unmap_regions.hdr.token = 0;
 	unmap_regions.hdr.opcode = ADM_CMD_SHARED_MEM_UNMAP_REGIONS;
-	unmap_regions.mem_map_handle = this_adm.mem_map_handle[index];
+	unmap_regions.mem_map_handle = atomic_read(&mem_map_handles[
+						atomic_read(&mem_map_index)]);
 	atomic_set(&this_adm.copp_stat[0], 0);
 	ret = apr_send_pkt(this_adm.apr, (uint32_t *) &unmap_regions);
 	if (ret < 0) {