Merge "ASoC: msm: qdsp6v2: Add support for VoWLAN"
diff --git a/arch/arm/boot/dts/msm8610.dtsi b/arch/arm/boot/dts/msm8610.dtsi
index f152ceb..90e8fd6 100644
--- a/arch/arm/boot/dts/msm8610.dtsi
+++ b/arch/arm/boot/dts/msm8610.dtsi
@@ -765,6 +765,11 @@
 			compatible = "qcom,msm-dai-q6-dev";
 			qcom,msm-dai-q6-dev-id = <32773>;
 		};
+
+		qcom,msm-dai-q6-incall-music-2-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <32770>;
+		};
 	};
 
 	qcom,msm-pcm-hostless {
diff --git a/arch/arm/boot/dts/msm8974pro-pm.dtsi b/arch/arm/boot/dts/msm8974pro-pm.dtsi
index aca8f20..f735b65 100644
--- a/arch/arm/boot/dts/msm8974pro-pm.dtsi
+++ b/arch/arm/boot/dts/msm8974pro-pm.dtsi
@@ -242,6 +242,7 @@
 			<0xff 109>,  /* ocmem_dm_nonsec_irq */
 			<0xff 126>,  /* bam_irq[0] */
 			<0xff 140>,  /* uart_dm_intr */
+			<0xff 146>,  /* uart_dm_intr: blsp2_uart_2_irq */
 			<0xff 155>,  /* sdcc_irq[0] */
 			<0xff 157>,  /* sdcc_irq[0] */
 			<0xff 159>,  /* sdcc_irq[0] */
diff --git a/arch/arm/configs/msm8610-perf_defconfig b/arch/arm/configs/msm8610-perf_defconfig
index c7abf42..1ba8527 100644
--- a/arch/arm/configs/msm8610-perf_defconfig
+++ b/arch/arm/configs/msm8610-perf_defconfig
@@ -412,6 +412,8 @@
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_ARC4=y
 CONFIG_CRYPTO_TWOFISH=y
+CONFIG_MOBICORE_SUPPORT=m
+CONFIG_MOBICORE_API=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC_CCITT=y
 CONFIG_PPP=y
diff --git a/arch/arm/configs/msm8610_defconfig b/arch/arm/configs/msm8610_defconfig
index fe84f96..458faac 100644
--- a/arch/arm/configs/msm8610_defconfig
+++ b/arch/arm/configs/msm8610_defconfig
@@ -485,3 +485,5 @@
 CONFIG_SENSORS_MMA8X5X=y
 CONFIG_SENSORS_CAPELLA_CM36283=y
 CONFIG_MSM_RDBG=m
+CONFIG_MOBICORE_SUPPORT=m
+CONFIG_MOBICORE_API=m
diff --git a/arch/arm/mach-msm/board-8226.c b/arch/arm/mach-msm/board-8226.c
index 1c1fbe3..43646cd 100644
--- a/arch/arm/mach-msm/board-8226.c
+++ b/arch/arm/mach-msm/board-8226.c
@@ -167,7 +167,7 @@
 	NULL
 };
 
-DT_MACHINE_START(MSM8226_DT, "Qualcomm MSM 8226 (Flattened Device Tree)")
+DT_MACHINE_START(MSM8226_DT, "Qualcomm MSM 8x26 / MSM 8x28 (Flattened Device Tree)")
 	.map_io = msm_map_msm8226_io,
 	.init_irq = msm_dt_init_irq,
 	.init_machine = msm8226_init,
diff --git a/arch/arm/mach-msm/board-8610.c b/arch/arm/mach-msm/board-8610.c
index d175bb4..cd9b82e 100644
--- a/arch/arm/mach-msm/board-8610.c
+++ b/arch/arm/mach-msm/board-8610.c
@@ -136,7 +136,7 @@
 	NULL
 };
 
-DT_MACHINE_START(MSM8610_DT, "Qualcomm MSM 8610 (Flattened Device Tree)")
+DT_MACHINE_START(MSM8610_DT, "Qualcomm MSM 8x10 / MSM 8x12 (Flattened Device Tree)")
 	.map_io = msm_map_msm8610_io,
 	.init_irq = msm_dt_init_irq,
 	.init_machine = msm8610_init,
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index 32d58d4..00aedb6 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -226,7 +226,6 @@
 int process_dump(int, struct ocmem_handle *, unsigned long);
 int ocmem_rdm_transfer(int, struct ocmem_map_list *,
 				unsigned long, int);
-int ocmem_clear(unsigned long, unsigned long);
 unsigned long process_quota(int);
 int ocmem_memory_off(int, unsigned long, unsigned long);
 int ocmem_memory_on(int, unsigned long, unsigned long);
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index 7128017..9857162 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -526,7 +526,7 @@
 		if (latency_us < pwr->latency_us)
 			continue;
 
-		if (next_event_us)
+		if (next_event_us) {
 			if (next_event_us < pwr->latency_us)
 				continue;
 
@@ -535,6 +535,7 @@
 				next_wakeup_us = next_event_us
 					- pwr->latency_us;
 			}
+		}
 
 		if (next_wakeup_us <= pwr->time_overhead_us)
 			continue;
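
The brace fix above matters because, without the braces, only the first nested check was guarded by if (next_event_us); the block that recomputes next_wakeup_us ran even when no next event was pending. A minimal, self-contained sketch of the corrected control flow (function, variable names and the exact guard condition are illustrative, not taken from the driver):

#include <stdint.h>

/*
 * With the braces in place, the next_event_us-based adjustment is only
 * applied when a next event is actually known; otherwise the value derived
 * from the requested sleep time is kept untouched.
 */
static uint32_t pick_next_wakeup(uint32_t sleep_us, uint32_t next_event_us,
				 uint32_t latency_us)
{
	uint32_t next_wakeup_us = sleep_us;

	if (next_event_us) {
		if (next_event_us < latency_us)
			return 0;	/* caller skips this low-power level */

		if (next_event_us < sleep_us)
			next_wakeup_us = next_event_us - latency_us;
	}

	return next_wakeup_us;
}
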
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
index 626c5e8..0ffc194 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_fabric.c
@@ -364,7 +364,7 @@
 {
 	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
 	void *sel_cdata;
-	long rounded_rate;
+	long rounded_rate, cur_rate;
 
 	sel_cdata = fabric->cdata[ctx];
 
@@ -379,16 +379,20 @@
 	}
 
 	/* Enable clocks before accessing QoS registers */
-	if (fabric->info.nodeclk[DUAL_CTX].clk)
+	if (fabric->info.nodeclk[DUAL_CTX].clk) {
 		if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
-			rounded_rate = clk_round_rate(fabric->
-				info.nodeclk[DUAL_CTX].clk, 1);
+			cur_rate = clk_get_rate(
+					fabric->info.nodeclk[DUAL_CTX].clk);
+			rounded_rate = clk_round_rate(
+					fabric->info.nodeclk[DUAL_CTX].clk,
+					cur_rate ? cur_rate : 1);
 		if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
 				rounded_rate))
 			MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
 				fabric->fabdev.id, rounded_rate);
 
 		clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
+		}
 	}
 
 	if (info->iface_clk.clk)
@@ -514,22 +518,26 @@
 	struct msm_bus_inode_info *info, uint64_t req_clk, uint64_t req_bw)
 {
 	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
-	long rounded_rate;
+	long rounded_rate, cur_rate;
 
 	if (fabdev->hw_algo.config_master == NULL)
 		return;
 
 	/* Enable clocks before accessing QoS registers */
-	if (fabric->info.nodeclk[DUAL_CTX].clk)
+	if (fabric->info.nodeclk[DUAL_CTX].clk) {
 		if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
-			rounded_rate = clk_round_rate(fabric->
-				info.nodeclk[DUAL_CTX].clk, 1);
+			cur_rate = clk_get_rate(
+					fabric->info.nodeclk[DUAL_CTX].clk);
+			rounded_rate = clk_round_rate(
+					fabric->info.nodeclk[DUAL_CTX].clk,
+					cur_rate ? cur_rate : 1);
 		if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
 				rounded_rate))
 			MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
 				fabric->fabdev.id, rounded_rate);
 
 		clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
+		}
 	}
 
 	if (info->iface_clk.clk)
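
The change above replaces the fixed 1 Hz argument to clk_round_rate() with the clock's current rate, so enabling the node clock for QoS programming no longer rounds an already-running clock down to its minimum. A condensed sketch of the resulting sequence (the helper name is illustrative; the real code operates on fabric->info.nodeclk[DUAL_CTX]):

#include <linux/clk.h>
#include <linux/printk.h>

/* Enable a node clock without disturbing an already-configured rate. */
static void enable_node_clk(struct clk *clk)
{
	long cur_rate = clk_get_rate(clk);
	long rounded_rate = clk_round_rate(clk, cur_rate ? cur_rate : 1);

	if (clk_set_rate(clk, rounded_rate))
		pr_err("node clk: failed to set rate %ld\n", rounded_rate);

	clk_prepare_enable(clk);
}
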
diff --git a/arch/arm/mach-msm/ocmem_core.c b/arch/arm/mach-msm/ocmem_core.c
index c186a5e..f753391 100644
--- a/arch/arm/mach-msm/ocmem_core.c
+++ b/arch/arm/mach-msm/ocmem_core.c
@@ -51,6 +51,7 @@
 static struct ocmem_hw_region *region_ctrl;
 static struct mutex region_ctrl_lock;
 static void *ocmem_base;
+static void *ocmem_vbase;
 
 #define OCMEM_V1_MACROS 8
 #define OCMEM_V1_MACRO_SZ (SZ_64K)
@@ -562,6 +563,13 @@
 	ocmem_write(0x0, ocmem_base + OC_GFX_MPU_END);
 }
 
+int ocmem_clear(unsigned long start, unsigned long size)
+{
+	memset((ocmem_vbase + start), 0x4D4D434F, size);
+	mb();
+	return 0;
+}
+
 static int do_lock(enum ocmem_client id, unsigned long offset,
 			unsigned long len, enum region_mode mode)
 {
@@ -1144,6 +1152,7 @@
 
 	pdata = platform_get_drvdata(pdev);
 	ocmem_base = pdata->reg_base;
+	ocmem_vbase = pdata->vbase;
 
 	rc = ocmem_enable_core_clock();
 
diff --git a/arch/arm/mach-msm/ocmem_rdm.c b/arch/arm/mach-msm/ocmem_rdm.c
index 4ff7212..9eac050 100644
--- a/arch/arm/mach-msm/ocmem_rdm.c
+++ b/arch/arm/mach-msm/ocmem_rdm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -149,38 +149,6 @@
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_MSM_OCMEM_NONSECURE
-int ocmem_clear(unsigned long start, unsigned long size)
-{
-	INIT_COMPLETION(dm_clear_event);
-	/* Clear DM Mask */
-	ocmem_write(DM_MASK_RESET, dm_base + DM_INTR_MASK);
-	/* Clear DM Interrupts */
-	ocmem_write(DM_INTR_RESET, dm_base + DM_INTR_CLR);
-	/* DM CLR offset */
-	ocmem_write(start, dm_base + DM_CLR_OFFSET);
-	/* DM CLR size */
-	ocmem_write(size, dm_base + DM_CLR_SIZE);
-	/* Wipe out memory as "OCMM" */
-	ocmem_write(0x4D4D434F, dm_base + DM_CLR_PATTERN);
-	/* The offset, size and pattern for clearing must be set
-	 * before triggering the clearing engine
-	 */
-	mb();
-	/* Trigger Data Clear */
-	ocmem_write(DM_CLR_ENABLE, dm_base + DM_CLR_TRIGGER);
-
-	wait_for_completion(&dm_clear_event);
-
-	return 0;
-}
-#else
-int ocmem_clear(unsigned long start, unsigned long size)
-{
-	return 0;
-}
-#endif
-
 /* Lock during transfers */
 int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
 			unsigned long start, int direction)
diff --git a/drivers/gud/Makefile b/drivers/gud/Makefile
index ef0e083..c415ad8 100644
--- a/drivers/gud/Makefile
+++ b/drivers/gud/Makefile
@@ -3,34 +3,35 @@
 #
 GUD_ROOT_FOLDER := drivers/gud
 # add our modules to kernel.
-obj-$(CONFIG_MOBICORE_API) += mckernelapi.o
-obj-$(CONFIG_MOBICORE_SUPPORT) += mcdrvmodule.o
+obj-$(CONFIG_MOBICORE_API) += mcKernelApi.o
+obj-$(CONFIG_MOBICORE_SUPPORT) += mcDrvModule.o
 
-mcdrvmodule-objs := mobicore_driver/logging.o \
-		mobicore_driver/ops.o \
-		mobicore_driver/mem.o \
-		mobicore_driver/api.o \
-		mobicore_driver/main.o \
-		mobicore_driver/pm.o
+mcDrvModule-objs := MobiCoreDriver/logging.o \
+		MobiCoreDriver/ops.o \
+		MobiCoreDriver/mem.o \
+		MobiCoreDriver/api.o \
+		MobiCoreDriver/pm.o \
+		MobiCoreDriver/main.o
 
-mckernelapi-objs := mobicore_kernelapi/main.o \
-		mobicore_kernelapi/clientlib.o \
-		mobicore_kernelapi/device.o \
-		mobicore_kernelapi/session.o \
-		mobicore_kernelapi/connection.o
+mcKernelApi-objs := MobiCoreKernelApi/main.o \
+		MobiCoreKernelApi/clientlib.o \
+		MobiCoreKernelApi/device.o \
+		MobiCoreKernelApi/session.o \
+		MobiCoreKernelApi/connection.o
 
 # Release mode by default
-ccflags-y := -DNDEBUG -include $(PWD)/$(GUD_ROOT_FOLDER)/mobicore_driver/build_tag.h
+ccflags-y := -DNDEBUG -I$(GUD_ROOT_FOLDER)
 ccflags-y += -Wno-declaration-after-statement
 
 ccflags-$(CONFIG_MOBICORE_DEBUG) += -DDEBUG
 ccflags-$(CONFIG_MOBICORE_VERBOSE) += -DDEBUG_VERBOSE
 
 # Choose one platform from the folder
-MOBICORE_PLATFORM := $(shell (ls -1 $(PWD)/$(GUD_ROOT_FOLDER)/mobicore_driver/platforms | tail -1) )
+MOBICORE_PLATFORM := $(shell (ls -1 $(PWD)/$(GUD_ROOT_FOLDER)/MobiCoreDriver/platforms | tail -1) )
 # Use the available platform folder
-ccflags-y += -I$(GUD_ROOT_FOLDER)/mobicore_driver/platforms/$(MOBICORE_PLATFORM)
-
-
-ccflags-y += -I$(GUD_ROOT_FOLDER)/mobicore_driver/public
-ccflags-y += -I$(GUD_ROOT_FOLDER)/mobicore_kernelapi/include
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/platforms/$(MOBICORE_PLATFORM)
+# MobiCore Driver includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreDriver/public
+# MobiCore KernelApi required includes
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreKernelApi/include
+ccflags-y += -I$(GUD_ROOT_FOLDER)/MobiCoreKernelApi/public
diff --git a/drivers/gud/MobiCoreDriver/Makefile b/drivers/gud/MobiCoreDriver/Makefile
new file mode 100644
index 0000000..c17f35e
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/Makefile
@@ -0,0 +1,26 @@
+#
+# this makefile is called from the kernel make system. Thus we basically
+# add things to "obj-m" here.
+
+ifeq ($(MODE),release)
+    ccflags-y = -O2 -DNDEBUG
+else
+    ccflags-y = -DDEBUG
+endif # DEBUG/RELEASE
+
+# CFLAGS from the build script
+ifdef MOBICORE_CFLAGS
+	ccflags-y += $(MOBICORE_CFLAGS)
+endif
+#EXTRA_CFLAGS+=-DDEBUG_VERBOSE
+
+ccflags-y += -I$(M) -Wall -D__$(PLATFORM)__
+# add our module to kernel.
+obj-m += mcDrvModule.o
+
+mcDrvModule-objs :=logging.o ops.o mem.o api.o pm.o main.o
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions \
+		Module.markers Module.symvers modules.order
+
diff --git a/drivers/gud/mobicore_driver/api.c b/drivers/gud/MobiCoreDriver/api.c
similarity index 80%
rename from drivers/gud/mobicore_driver/api.c
rename to drivers/gud/MobiCoreDriver/api.c
index b47383a0..e7fa8e2 100644
--- a/drivers/gud/mobicore_driver/api.c
+++ b/drivers/gud/MobiCoreDriver/api.c
@@ -14,23 +14,12 @@
 #include "mem.h"
 #include "debug.h"
 
-
-/*
- * Map a virtual memory buffer structure to Mobicore
- * @param instance
- * @param addr		address of the buffer(NB it must be kernel virtual!)
- * @param len		buffer length
- * @param handle	pointer to handle
- * @param phys_wsm_l2_table	pointer to physical L2 table(?)
- *
- * @return 0 if no error
- *
- */
 int mobicore_map_vmem(struct mc_instance *instance, void *addr,
-	uint32_t len, uint32_t *handle, uint32_t *phys)
+	uint32_t len, uint32_t *handle)
 {
-	return mc_register_wsm_l2(instance, (uint32_t)addr, len,
-		handle, phys);
+	phys_addr_t phys;
+	return mc_register_wsm_mmu(instance, addr, len,
+		handle, &phys);
 }
 EXPORT_SYMBOL(mobicore_map_vmem);
 
@@ -44,7 +33,7 @@
  */
 int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle)
 {
-	return mc_unregister_wsm_l2(instance, handle);
+	return mc_unregister_wsm_mmu(instance, handle);
 }
 EXPORT_SYMBOL(mobicore_unmap_vmem);
 
@@ -70,13 +59,11 @@
  * @param requested_size		size of the WSM
  * @param handle		pointer where the handle will be saved
  * @param virt_kernel_addr	pointer for the kernel virtual address
- * @param phys_addr		pointer for the physical address
  *
  * @return error code or 0 for success
  */
 int mobicore_allocate_wsm(struct mc_instance *instance,
-	unsigned long requested_size, uint32_t *handle, void **virt_kernel_addr,
-	void **phys_addr)
+	unsigned long requested_size, uint32_t *handle, void **virt_kernel_addr)
 {
 	struct mc_buffer *buffer = NULL;
 
@@ -85,7 +72,6 @@
 		return -EFAULT;
 
 	*handle = buffer->handle;
-	*phys_addr = buffer->phys;
 	*virt_kernel_addr = buffer->addr;
 	return 0;
 }
@@ -117,3 +103,14 @@
 }
 EXPORT_SYMBOL(mobicore_release);
 
+/*
+ * Test if mobicore can sleep
+ *
+ * @return true if mobicore can sleep, false if it can't sleep
+ */
+bool mobicore_sleep_ready(void)
+{
+	return mc_sleep_ready();
+}
+EXPORT_SYMBOL(mobicore_sleep_ready);
+
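
With the reworked kernel API above, in-kernel clients no longer receive physical addresses; they deal only in opaque handles. A minimal sketch of how a client maps and unmaps a kernel-virtual buffer through the updated interface (the helper is illustrative and assumes the declarations exported alongside this file; error handling is reduced to the minimum):

static int share_kernel_buffer(struct mc_instance *inst, void *buf,
			       uint32_t len)
{
	uint32_t handle;
	int ret;

	ret = mobicore_map_vmem(inst, buf, len, &handle);
	if (ret)
		return ret;

	/* ... hand the handle to the secure world, use the buffer ... */

	return mobicore_unmap_vmem(inst, handle);
}
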
diff --git a/drivers/gud/mobicore_driver/arm.h b/drivers/gud/MobiCoreDriver/arm.h
similarity index 100%
rename from drivers/gud/mobicore_driver/arm.h
rename to drivers/gud/MobiCoreDriver/arm.h
diff --git a/drivers/gud/MobiCoreDriver/build.sh b/drivers/gud/MobiCoreDriver/build.sh
new file mode 100644
index 0000000..db8410c
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/build.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#  source the setup script
+if [ -z $COMP_PATH_ROOT ]; then
+	echo "The build environment is not set!"
+	echo "Trying to source setupDrivers.sh automatically!"
+	source ../setupDrivers.sh || exit 1
+fi
+
+ROOT_PATH=$(dirname $(readlink -f $0))
+#  These folders need to be relative to the kernel dir or absolute!
+PLATFORM=EXYNOS_5410_STD
+CODE_INCLUDE=$(readlink -f $ROOT_PATH/Locals/Code)
+PLATFORM_INCLUDE="$CODE_INCLUDE/platforms/$PLATFORM"
+MOBICORE_DAEMON=$COMP_PATH_MobiCoreDriverLib/Public
+
+MOBICORE_CFLAGS="-I$MOBICORE_DRIVER/Public -I$MOBICORE_DAEMON -I$COMP_PATH_MobiCore/inc/Mci -I$COMP_PATH_MobiCore/inc -I${PLATFORM_INCLUDE}"
+
+# Clean first
+make -C $CODE_INCLUDE clean
+
+make -C $LINUX_PATH \
+	MODE=$MODE \
+	ARCH=arm \
+	CROSS_COMPILE=$CROSS_COMPILE \
+	M=$CODE_INCLUDE \
+	"MOBICORE_CFLAGS=$MOBICORE_CFLAGS" \
+	modules
diff --git a/drivers/gud/mobicore_driver/debug.h b/drivers/gud/MobiCoreDriver/debug.h
similarity index 89%
rename from drivers/gud/mobicore_driver/debug.h
rename to drivers/gud/MobiCoreDriver/debug.h
index 1f9a632..d29efef 100644
--- a/drivers/gud/mobicore_driver/debug.h
+++ b/drivers/gud/MobiCoreDriver/debug.h
@@ -15,7 +15,7 @@
 extern struct device *mcd;
 
 #define MCDRV_DBG_ERROR(dev, txt, ...) \
-	dev_err(dev, "MobiCore %s() ### ERROR: " txt, \
+	dev_err(dev, "MobiCore %s() ### ERROR: " txt "\n", \
 		__func__, \
 		##__VA_ARGS__)
 
@@ -32,12 +32,12 @@
 #endif
 
 #define MCDRV_DBG(dev, txt, ...) \
-	dev_info(dev, "MobiCore %s(): " txt, \
+	dev_info(dev, "MobiCore %s(): " txt "\n", \
 		 __func__, \
 		 ##__VA_ARGS__)
 
 #define MCDRV_DBG_WARN(dev, txt, ...) \
-	dev_warn(dev, "MobiCore %s() WARNING: " txt, \
+	dev_warn(dev, "MobiCore %s() WARNING: " txt "\n", \
 		 __func__, \
 		 ##__VA_ARGS__)
 
diff --git a/drivers/gud/mobicore_driver/fastcall.h b/drivers/gud/MobiCoreDriver/fastcall.h
similarity index 70%
rename from drivers/gud/mobicore_driver/fastcall.h
rename to drivers/gud/MobiCoreDriver/fastcall.h
index 1c90520..33538df 100644
--- a/drivers/gud/mobicore_driver/fastcall.h
+++ b/drivers/gud/MobiCoreDriver/fastcall.h
@@ -36,9 +36,10 @@
  */
 #define MC_FC_INIT		-1
 #define MC_FC_INFO		-2
-#define MC_FC_POWER		-3
-#define MC_FC_DUMP		-4
 #define MC_FC_NWD_TRACE		-31 /* Mem trace setup fastcall */
+#ifdef TBASE_CORE_SWITCHER
+#define MC_FC_SWITCH_CORE   0x84000005
+#endif
 
 
 /*
@@ -96,6 +97,23 @@
 	} as_out;
 };
 
+#ifdef TBASE_CORE_SWITCHER
+/* fast call switch Core parameters */
+union mc_fc_swich_core {
+	union fc_generic as_generic;
+	struct {
+		uint32_t cmd;
+		uint32_t core_id;
+		uint32_t rfu[2];
+	} as_in;
+	struct {
+		uint32_t resp;
+		uint32_t ret;
+		uint32_t state;
+		uint32_t ext_info;
+	} as_out;
+};
+#endif
 /*
  * _smc() - fast call to MobiCore
  *
@@ -104,23 +122,22 @@
 static inline long _smc(void *data)
 {
 	int ret = 0;
-	union fc_generic fc_generic;
 
 	if (data == NULL)
 		return -EPERM;
 
 #ifdef MC_SMC_FASTCALL
 	{
-		ret = smc_fastcall(data, sizeof(fc_generic));
+		ret = smc_fastcall(data, sizeof(union fc_generic));
 	}
 #else
-	memcpy(&fc_generic, data, sizeof(union fc_generic));
 	{
-		/* SVC expect values in r0-r3 */
-		register u32 reg0 __asm__("r0") = fc_generic.as_in.cmd;
-		register u32 reg1 __asm__("r1") = fc_generic.as_in.param[0];
-		register u32 reg2 __asm__("r2") = fc_generic.as_in.param[1];
-		register u32 reg3 __asm__("r3") = fc_generic.as_in.param[2];
+		union fc_generic *fc_generic = data;
+		/* SMC expects values in r0-r3 */
+		register u32 reg0 __asm__("r0") = fc_generic->as_in.cmd;
+		register u32 reg1 __asm__("r1") = fc_generic->as_in.param[0];
+		register u32 reg2 __asm__("r2") = fc_generic->as_in.param[1];
+		register u32 reg3 __asm__("r3") = fc_generic->as_in.param[2];
 
 		__asm__ volatile (
 #ifdef MC_ARCH_EXTENSION_SEC
@@ -131,13 +148,23 @@
 			"smc 0\n"
 			: "+r"(reg0), "+r"(reg1), "+r"(reg2), "+r"(reg3)
 		);
+#ifdef __ARM_VE_A9X4_QEMU__
+		/* Qemu does not return to the address following the SMC
+		   instruction, so we have to insert several nop instructions
+		   to work around this Qemu bug. */
+		__asm__ volatile (
+		    "nop\n"
+		    "nop\n"
+		    "nop\n"
+		    "nop"
+		 );
+#endif
 
 		/* set response */
-		fc_generic.as_out.resp     = reg0;
-		fc_generic.as_out.ret      = reg1;
-		fc_generic.as_out.param[0] = reg2;
-		fc_generic.as_out.param[1] = reg3;
-		memcpy(data, &fc_generic, sizeof(union fc_generic));
+		fc_generic->as_out.resp     = reg0;
+		fc_generic->as_out.ret      = reg1;
+		fc_generic->as_out.param[0] = reg2;
+		fc_generic->as_out.param[1] = reg3;
 	}
 #endif
 	return ret;
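
The rework above makes _smc() fill in the caller's union directly instead of going through a stack copy, so the results land in the passed buffer. A sketch of a caller using the generic union, mirroring the register mapping shown above (the wrapper name and parameters are illustrative):

#include <linux/string.h>

/* Issue a fastcall and return the secure-world return code. */
static uint32_t do_nwd_trace_fastcall(uint32_t buf_low, uint32_t buf_high,
				      uint32_t size)
{
	union fc_generic fc;

	memset(&fc, 0, sizeof(fc));
	fc.as_in.cmd = MC_FC_NWD_TRACE;
	fc.as_in.param[0] = buf_low;
	fc.as_in.param[1] = buf_high;
	fc.as_in.param[2] = size;

	_smc(&fc);	/* as_out fields are updated in place */

	return fc.as_out.ret;
}
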
diff --git a/drivers/gud/mobicore_driver/logging.c b/drivers/gud/MobiCoreDriver/logging.c
similarity index 95%
rename from drivers/gud/mobicore_driver/logging.c
rename to drivers/gud/MobiCoreDriver/logging.c
index 1f599f9..507c4ed 100644
--- a/drivers/gud/mobicore_driver/logging.c
+++ b/drivers/gud/MobiCoreDriver/logging.c
@@ -251,7 +251,7 @@
  */
 long mobicore_log_setup(void)
 {
-	unsigned long phys_log_buf;
+	phys_addr_t phys_log_buf;
 	union fc_generic fc_log;
 	struct sched_param param = { .sched_priority = 1 };
 
@@ -300,11 +300,12 @@
 
 	memset(&fc_log, 0, sizeof(fc_log));
 	fc_log.as_in.cmd = MC_FC_NWD_TRACE;
-	fc_log.as_in.param[0] = phys_log_buf;
-	fc_log.as_in.param[1] = log_size;
+	fc_log.as_in.param[0] = (uint32_t)phys_log_buf;
+	fc_log.as_in.param[1] = (uint32_t)(((uint64_t)phys_log_buf) >> 32);
+	fc_log.as_in.param[2] = log_size;
 
-	MCDRV_DBG(mcd, "fc_log virt=%p phys=%p ",
-		  log_buf, (void *)phys_log_buf);
+	MCDRV_DBG(mcd, "fc_log virt=%p phys=0x%llX",
+		  log_buf, (u64)phys_log_buf);
 	mc_fastcall(&fc_log);
 	MCDRV_DBG(mcd, "fc_log out ret=0x%08x", fc_log.as_out.ret);
 
@@ -319,7 +320,7 @@
 
 	set_task_state(log_thread, TASK_INTERRUPTIBLE);
 
-	MCDRV_DBG(mcd, "fc_log Logger version %u\n", log_buf->version);
+	MCDRV_DBG(mcd, "fc_log Logger version %u", log_buf->version);
 	return 0;
 
 err_stop_kthread:
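
Since phys_log_buf is now a phys_addr_t, the address may not fit in a single 32-bit fastcall parameter on LPAE configurations, so it is passed as a low/high pair in param[0]/param[1] and the buffer size moves to param[2]. The receiving side reassembles the address as in this sketch (illustrative; the secure-side code is not part of this patch):

#include <linux/types.h>

/* Rebuild a 64-bit physical address from two 32-bit fastcall parameters. */
static inline u64 fc_params_to_phys(u32 lo, u32 hi)
{
	return ((u64)hi << 32) | lo;
}
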
diff --git a/drivers/gud/mobicore_driver/logging.h b/drivers/gud/MobiCoreDriver/logging.h
similarity index 100%
rename from drivers/gud/mobicore_driver/logging.h
rename to drivers/gud/MobiCoreDriver/logging.h
diff --git a/drivers/gud/mobicore_driver/main.c b/drivers/gud/MobiCoreDriver/main.c
similarity index 81%
rename from drivers/gud/mobicore_driver/main.c
rename to drivers/gud/MobiCoreDriver/main.c
index 0451452..ed2928a 100644
--- a/drivers/gud/mobicore_driver/main.c
+++ b/drivers/gud/MobiCoreDriver/main.c
@@ -112,12 +112,12 @@
 	int i;
 	struct page *page = virt_to_page(addr);
 	for (i = 0; i < (1<<order); i++) {
-		MCDRV_DBG_VERBOSE(mcd, "free page at 0x%p\n", page);
-		ClearPageReserved(page);
+		MCDRV_DBG_VERBOSE(mcd, "free page at 0x%p", page);
+		clear_bit(PG_reserved, &page->flags);
 		page++;
 	}
 
-	MCDRV_DBG_VERBOSE(mcd, "freeing addr:%p, order:%x\n", addr, order);
+	MCDRV_DBG_VERBOSE(mcd, "freeing addr:%p, order:%x", addr, order);
 	free_pages((unsigned long)addr, order);
 }
 
@@ -131,8 +131,9 @@
 		return -EINVAL;
 
 	MCDRV_DBG_VERBOSE(mcd,
-			  "handle=%u phys_addr=0x%p, virt_addr=0x%p len=%u\n",
-		  buffer->handle, buffer->phys, buffer->addr, buffer->len);
+			  "handle=%u phys_addr=0x%llx, virt_addr=0x%p len=%u",
+		  buffer->handle, (u64)buffer->phys,
+		  buffer->addr, buffer->len);
 
 	if (!atomic_dec_and_test(&buffer->usage)) {
 		MCDRV_DBG_VERBOSE(mcd, "Could not free %u", buffer->handle);
@@ -147,7 +148,7 @@
 }
 
 static uint32_t mc_find_cont_wsm_addr(struct mc_instance *instance, void *uaddr,
-	uint32_t *addr, uint32_t len)
+	void **addr, uint32_t len)
 {
 	int ret = 0;
 	struct mc_buffer *buffer;
@@ -162,7 +163,7 @@
 	/* search for the given handle in the buffers list */
 	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
 		if (buffer->uaddr == uaddr && buffer->len == len) {
-			*addr = (uint32_t)buffer->addr;
+			*addr = buffer->addr;
 			goto found;
 		}
 	}
@@ -186,7 +187,7 @@
 	struct task_struct *peer = NULL;
 	bool ret = false;
 
-	MCDRV_DBG(mcd, "Finding wsm for fd = %d\n", fd);
+	MCDRV_DBG_VERBOSE(mcd, "Finding wsm for fd = %d", fd);
 	if (!instance)
 		return false;
 
@@ -197,7 +198,7 @@
 	s = __get_socket(fp);
 	if (s) {
 		peer = get_pid_task(s->sk_peer_pid, PIDTYPE_PID);
-		MCDRV_DBG(mcd, "Found pid for fd %d\n", peer->pid);
+		MCDRV_DBG_VERBOSE(mcd, "Found pid for fd %d", peer->pid);
 	}
 	if (peer) {
 		task_lock(peer);
@@ -209,11 +210,10 @@
 			if (!fp)
 				continue;
 			if (fp->private_data == instance) {
-				MCDRV_DBG(mcd, "Found owner!");
+				MCDRV_DBG_VERBOSE(mcd, "Found owner!");
 				ret = true;
 				goto out;
 			}
-
 		}
 	} else {
 		MCDRV_DBG(mcd, "Owner not found!");
@@ -230,7 +230,7 @@
 #endif
 }
 static uint32_t mc_find_cont_wsm(struct mc_instance *instance, uint32_t handle,
-	int32_t fd, uint32_t *phys, uint32_t *len)
+	int32_t fd, phys_addr_t *phys, uint32_t *len)
 {
 	int ret = 0;
 	struct mc_buffer *buffer;
@@ -239,7 +239,7 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
@@ -251,7 +251,7 @@
 	list_for_each_entry(buffer, &ctx.cont_bufs, list) {
 		if (buffer->handle == handle) {
 			if (mc_check_owner_fd(buffer->instance, fd)) {
-				*phys = (uint32_t)buffer->phys;
+				*phys = buffer->phys;
 				*len = buffer->len;
 				goto found;
 			} else {
@@ -326,7 +326,7 @@
 			/* Something is not right if we end up here, better not
 			 * clean the buffer so we just leak memory instead of
 			 * creating security issues */
-			MCDRV_DBG_ERROR(mcd, "Memory can't be unmapped\n");
+			MCDRV_DBG_ERROR(mcd, "Memory can't be unmapped");
 			return -EINVAL;
 		}
 	}
@@ -370,7 +370,7 @@
 {
 	struct mc_buffer *cbuffer = NULL;
 	void *addr = 0;
-	void *phys = 0;
+	phys_addr_t phys = 0;
 	unsigned int order;
 	unsigned long allocated_size;
 	int ret = 0;
@@ -379,13 +379,13 @@
 		return -EFAULT;
 
 	if (len == 0) {
-		MCDRV_DBG_WARN(mcd, "cannot allocate size 0\n");
+		MCDRV_DBG_WARN(mcd, "cannot allocate size 0");
 		return -ENOMEM;
 	}
 
 	order = get_order(len);
 	if (order > MAX_ORDER) {
-		MCDRV_DBG_WARN(mcd, "Buffer size too large\n");
+		MCDRV_DBG_WARN(mcd, "Buffer size too large");
 		return -ENOMEM;
 	}
 	allocated_size = (1 << order) * PAGE_SIZE;
@@ -398,23 +398,23 @@
 
 	if (cbuffer == NULL) {
 		MCDRV_DBG_WARN(mcd,
-			       "MMAP_WSM request: could not allocate buffer\n");
+			       "MMAP_WSM request: could not allocate buffer");
 		ret = -ENOMEM;
 		goto unlock_instance;
 	}
 	mutex_lock(&ctx.bufs_lock);
 
-	MCDRV_DBG_VERBOSE(mcd, "size %ld -> order %d --> %ld (2^n pages)\n",
+	MCDRV_DBG_VERBOSE(mcd, "size %ld -> order %d --> %ld (2^n pages)",
 			  len, order, allocated_size);
 
 	addr = (void *)__get_free_pages(GFP_USER | __GFP_ZERO, order);
 
 	if (addr == NULL) {
-		MCDRV_DBG_WARN(mcd, "get_free_pages failed\n");
+		MCDRV_DBG_WARN(mcd, "get_free_pages failed");
 		ret = -ENOMEM;
 		goto err;
 	}
-	phys = (void *)virt_to_phys(addr);
+	phys = virt_to_phys(addr);
 	cbuffer->handle = get_unique_id();
 	cbuffer->phys = phys;
 	cbuffer->addr = addr;
@@ -429,9 +429,11 @@
 	list_add(&cbuffer->list, &ctx.cont_bufs);
 
 	MCDRV_DBG_VERBOSE(mcd,
-			  "allocated phys=0x%p - 0x%p, size=%ld, kvirt=0x%p, h=%d\n",
-		  phys, (void *)((unsigned int)phys+allocated_size),
-		  allocated_size, addr, cbuffer->handle);
+			  "allocated phys=0x%llx - 0x%llx, size=%ld, kvirt=0x%p"
+			  ", h=%d",
+			  (u64)phys,
+			  (u64)(phys+allocated_size),
+			  allocated_size, addr, cbuffer->handle);
 	*buffer = cbuffer;
 	goto unlock;
 
@@ -457,7 +459,7 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
@@ -476,7 +478,7 @@
 	return ret;
 }
 
-void *get_mci_base_phys(unsigned int len)
+static phys_addr_t get_mci_base_phys(unsigned int len)
 {
 	if (ctx.mci_base.phys) {
 		return ctx.mci_base.phys;
@@ -487,45 +489,45 @@
 		ctx.mci_base.addr =
 			(void *)__get_free_pages(GFP_USER | __GFP_ZERO, order);
 		if (ctx.mci_base.addr == NULL) {
-			MCDRV_DBG_WARN(mcd, "get_free_pages failed\n");
+			MCDRV_DBG_WARN(mcd, "get_free_pages failed");
 			memset(&ctx.mci_base, 0, sizeof(ctx.mci_base));
-			return NULL;
+			return 0;
 		}
-		ctx.mci_base.phys = (void *)virt_to_phys(ctx.mci_base.addr);
+		ctx.mci_base.phys = virt_to_phys(ctx.mci_base.addr);
 		return ctx.mci_base.phys;
 	}
 }
 
 /*
- * Create a l2 table from a virtual memory buffer which can be vmalloc
+ * Create a MMU table from a virtual memory buffer which can be vmalloc
  * or user space virtual memory
  */
-int mc_register_wsm_l2(struct mc_instance *instance,
-	uint32_t buffer, uint32_t len,
-	uint32_t *handle, uint32_t *phys)
+int mc_register_wsm_mmu(struct mc_instance *instance,
+	void *buffer, uint32_t len,
+	uint32_t *handle, phys_addr_t *phys)
 {
 	int ret = 0;
-	struct mc_l2_table *table = NULL;
+	struct mc_mmu_table *table = NULL;
 	struct task_struct *task = current;
-	uint32_t kbuff = 0x0;
+	void *kbuff = NULL;
 
 	if (WARN(!instance, "No instance data available"))
 		return -EFAULT;
 
 	if (len == 0) {
-		MCDRV_DBG_ERROR(mcd, "len=0 is not supported!\n");
+		MCDRV_DBG_ERROR(mcd, "len=0 is not supported!");
 		return -EINVAL;
 	}
 
-	MCDRV_DBG_VERBOSE(mcd, "buffer: %p, len=%08x\n", (void *)buffer, len);
+	MCDRV_DBG_VERBOSE(mcd, "buffer: %p, len=%08x", buffer, len);
 
-	if (!mc_find_cont_wsm_addr(instance, (void *)buffer, &kbuff, len))
-		table = mc_alloc_l2_table(instance, NULL, (void *)kbuff, len);
+	if (!mc_find_cont_wsm_addr(instance, buffer, &kbuff, len))
+		table = mc_alloc_mmu_table(instance, NULL, kbuff, len);
 	else
-		table = mc_alloc_l2_table(instance, task, (void *)buffer, len);
+		table = mc_alloc_mmu_table(instance, task, buffer, len);
 
 	if (IS_ERR(table)) {
-		MCDRV_DBG_ERROR(mcd, "new_used_l2_table() failed\n");
+		MCDRV_DBG_ERROR(mcd, "mc_alloc_mmu_table() failed");
 		return -EINVAL;
 	}
 
@@ -533,19 +535,19 @@
 	*handle = table->handle;
 	/* WARNING: daemon shouldn't know this either, but live with it */
 	if (is_daemon(instance))
-		*phys = (uint32_t)table->phys;
+		*phys = table->phys;
 	else
 		*phys = 0;
 
-	MCDRV_DBG_VERBOSE(mcd, "handle: %d, phys=%p\n",
-			  *handle, (void *)*phys);
+	MCDRV_DBG_VERBOSE(mcd, "handle: %d, phys=0x%llX",
+			  *handle, (u64)(*phys));
 
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
 
 	return ret;
 }
 
-int mc_unregister_wsm_l2(struct mc_instance *instance, uint32_t handle)
+int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle)
 {
 	int ret = 0;
 
@@ -553,11 +555,11 @@
 		return -EFAULT;
 
 	/* free table (if no further locks exist) */
-	mc_free_l2_table(instance, handle);
+	mc_free_mmu_table(instance, handle);
 
 	return ret;
 }
-/* Lock the object from handle, it could be a WSM l2 table or a cont buffer! */
+/* Lock the object from handle, it could be a WSM MMU table or a cont buffer! */
 static int mc_lock_handle(struct mc_instance *instance, uint32_t handle)
 {
 	int ret = 0;
@@ -566,14 +568,14 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
 	mutex_lock(&instance->lock);
-	ret = mc_lock_l2_table(instance, handle);
+	ret = mc_lock_mmu_table(instance, handle);
 
-	/* Handle was not a l2 table but a cont buffer */
+	/* Handle was not a MMU table but a cont buffer */
 	if (ret == -EINVAL) {
 		/* Call the non locking variant! */
 		ret = __lock_buffer(instance, handle);
@@ -592,14 +594,14 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
 	mutex_lock(&instance->lock);
-	ret = mc_free_l2_table(instance, handle);
+	ret = mc_free_mmu_table(instance, handle);
 
-	/* Not a l2 table, then it must be a buffer */
+	/* Not a MMU table, then it must be a buffer */
 	if (ret == -EINVAL) {
 		/* Call the non locking variant! */
 		ret = __free_buffer(instance, handle, true);
@@ -609,35 +611,31 @@
 	return ret;
 }
 
-static uint32_t mc_find_wsm_l2(struct mc_instance *instance,
+static phys_addr_t mc_find_wsm_mmu(struct mc_instance *instance,
 	uint32_t handle, int32_t fd)
 {
-	uint32_t ret = 0;
-
 	if (WARN(!instance, "No instance data available"))
 		return 0;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return 0;
 	}
 
-	ret = mc_find_l2_table(handle, fd);
-
-	return ret;
+	return mc_find_mmu_table(handle, fd);
 }
 
-static int mc_clean_wsm_l2(struct mc_instance *instance)
+static int mc_clean_wsm_mmu(struct mc_instance *instance)
 {
 	if (WARN(!instance, "No instance data available"))
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
-	mc_clean_l2_tables();
+	mc_clean_mmu_tables();
 
 	return 0;
 }
@@ -646,19 +644,20 @@
 {
 	struct mc_instance *instance = get_instance(file);
 	unsigned long len = vmarea->vm_end - vmarea->vm_start;
-	void *paddr = (void *)(vmarea->vm_pgoff << PAGE_SHIFT);
+	phys_addr_t paddr = (vmarea->vm_pgoff << PAGE_SHIFT);
 	unsigned int pfn;
 	struct mc_buffer *buffer = 0;
 	int ret = 0;
 
-	MCDRV_DBG_VERBOSE(mcd, "enter (vma start=0x%p, size=%ld, mci=%p)\n",
-			  (void *)vmarea->vm_start, len, ctx.mci_base.phys);
+	MCDRV_DBG_VERBOSE(mcd, "enter (vma start=0x%p, size=%ld, mci=0x%llX)",
+			  (void *)vmarea->vm_start, len,
+			  (u64)ctx.mci_base.phys);
 
 	if (WARN(!instance, "No instance data available"))
 		return -EFAULT;
 
 	if (len == 0) {
-		MCDRV_DBG_ERROR(mcd, "cannot allocate size 0\n");
+		MCDRV_DBG_ERROR(mcd, "cannot allocate size 0");
 		return -ENOMEM;
 	}
 	if (paddr) {
@@ -722,7 +721,7 @@
 			vmarea->vm_page_prot);
 	}
 
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
 
 	return ret;
 }
@@ -768,21 +767,24 @@
 
 	case MC_IO_REG_WSM:{
 		struct mc_ioctl_reg_wsm reg;
+		phys_addr_t phys;
 		if (copy_from_user(&reg, uarg, sizeof(reg)))
 			return -EFAULT;
 
-		ret = mc_register_wsm_l2(instance, reg.buffer,
-			reg.len, &reg.handle, &reg.table_phys);
+		ret = mc_register_wsm_mmu(instance, (void *)reg.buffer,
+			reg.len, &reg.handle, &phys);
+		reg.table_phys = phys;
+
 		if (!ret) {
 			if (copy_to_user(uarg, &reg, sizeof(reg))) {
 				ret = -EFAULT;
-				mc_unregister_wsm_l2(instance, reg.handle);
+				mc_unregister_wsm_mmu(instance, reg.handle);
 			}
 		}
 		break;
 	}
 	case MC_IO_UNREG_WSM:
-		ret = mc_unregister_wsm_l2(instance, (uint32_t)arg);
+		ret = mc_unregister_wsm_mmu(instance, (uint32_t)arg);
 		break;
 
 	case MC_IO_VERSION:
@@ -803,7 +805,7 @@
 			return -EFAULT;
 
 		map.handle = buffer->handle;
-		map.phys_addr = (unsigned long)buffer->phys;
+		map.phys_addr = buffer->phys;
 		map.reused = 0;
 		if (copy_to_user(uarg, &map, sizeof(map)))
 			ret = -EFAULT;
@@ -812,7 +814,7 @@
 		break;
 	}
 	default:
-		MCDRV_DBG_ERROR(mcd, "unsupported cmd=%d\n", cmd);
+		MCDRV_DBG_ERROR(mcd, "unsupported cmd=0x%x", cmd);
 		ret = -ENOIOCTLCMD;
 		break;
 
@@ -836,7 +838,7 @@
 		return -EFAULT;
 
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
@@ -856,8 +858,8 @@
 			return -EFAULT;
 
 		ctx.mcp = ctx.mci_base.addr + init.mcp_offset;
-		ret = mc_init((uint32_t)ctx.mci_base.phys, init.nq_offset,
-			init.nq_length, init.mcp_offset, init.mcp_length);
+		ret = mc_init(ctx.mci_base.phys, init.nq_length,
+			init.mcp_offset, init.mcp_length);
 		break;
 	}
 	case MC_IO_INFO: {
@@ -890,14 +892,14 @@
 		ret = mc_unlock_handle(instance, (uint32_t)arg);
 		break;
 	case MC_IO_CLEAN_WSM:
-		ret = mc_clean_wsm_l2(instance);
+		ret = mc_clean_wsm_mmu(instance);
 		break;
 	case MC_IO_RESOLVE_WSM: {
-		uint32_t phys;
+		phys_addr_t phys;
 		struct mc_ioctl_resolv_wsm wsm;
 		if (copy_from_user(&wsm, uarg, sizeof(wsm)))
 			return -EFAULT;
-		phys = mc_find_wsm_l2(instance, wsm.handle, wsm.fd);
+		phys = mc_find_wsm_mmu(instance, wsm.handle, wsm.fd);
 		if (!phys)
 			return -EINVAL;
 
@@ -909,7 +911,8 @@
 	}
 	case MC_IO_RESOLVE_CONT_WSM: {
 		struct mc_ioctl_resolv_cont_wsm cont_wsm;
-		uint32_t phys = 0, len = 0;
+		phys_addr_t phys = 0;
+		uint32_t len = 0;
 		if (copy_from_user(&cont_wsm, uarg, sizeof(cont_wsm)))
 			return -EFAULT;
 		ret = mc_find_cont_wsm(instance, cont_wsm.handle, cont_wsm.fd,
@@ -928,7 +931,7 @@
 			return -EFAULT;
 
 		map.reused = (ctx.mci_base.phys != 0);
-		map.phys_addr = (unsigned long)get_mci_base_phys(map.len);
+		map.phys_addr = get_mci_base_phys(map.len);
 		if (!map.phys_addr) {
 			MCDRV_DBG_ERROR(mcd, "Failed to setup MCI buffer!");
 			return -EFAULT;
@@ -939,10 +942,6 @@
 		ret = 0;
 		break;
 	}
-	case MC_IO_MAP_PWSM:{
-		break;
-	}
-
 	case MC_IO_LOG_SETUP: {
 #ifdef MC_MEM_TRACES
 		ret = mobicore_log_setup();
@@ -985,27 +984,27 @@
 		return -EFAULT;
 
 	/* avoid debug output on non-error, because this is call quite often */
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
+	MCDRV_DBG_VERBOSE(mcd, "enter");
 
 	/* only the MobiCore Daemon is allowed to call this function */
 	if (WARN_ON(!is_daemon(instance))) {
-		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon\n");
+		MCDRV_DBG_ERROR(mcd, "caller not MobiCore Daemon");
 		return -EPERM;
 	}
 
 	if (buffer_len < sizeof(unsigned int)) {
-		MCDRV_DBG_ERROR(mcd, "invalid length\n");
+		MCDRV_DBG_ERROR(mcd, "invalid length");
 		return -EINVAL;
 	}
 
 	for (;;) {
 		if (wait_for_completion_interruptible(&ctx.isr_comp)) {
-			MCDRV_DBG_VERBOSE(mcd, "read interrupted\n");
+			MCDRV_DBG_VERBOSE(mcd, "read interrupted");
 			return -ERESTARTSYS;
 		}
 
 		ssiq_counter = atomic_read(&ctx.isr_counter);
-		MCDRV_DBG_VERBOSE(mcd, "ssiq_counter=%i, ctx.counter=%i\n",
+		MCDRV_DBG_VERBOSE(mcd, "ssiq_counter=%i, ctx.counter=%i",
 				  ssiq_counter, ctx.evt_counter);
 
 		if (ssiq_counter != ctx.evt_counter) {
@@ -1017,12 +1016,12 @@
 
 		/* end loop if non-blocking */
 		if (file->f_flags & O_NONBLOCK) {
-			MCDRV_DBG_ERROR(mcd, "non-blocking read\n");
+			MCDRV_DBG_ERROR(mcd, "non-blocking read");
 			return -EAGAIN;
 		}
 
 		if (signal_pending(current)) {
-			MCDRV_DBG_VERBOSE(mcd, "received signal.\n");
+			MCDRV_DBG_VERBOSE(mcd, "received signal.");
 			return -ERESTARTSYS;
 		}
 	}
@@ -1031,7 +1030,7 @@
 	ret = copy_to_user(buffer, &ctx.evt_counter, sizeof(unsigned int));
 
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "copy_to_user failed\n");
+		MCDRV_DBG_ERROR(mcd, "copy_to_user failed");
 		return -EFAULT;
 	}
 
@@ -1061,6 +1060,43 @@
 	return instance;
 }
 
+#if defined(TBASE_CORE_SWITCHER) && defined(DEBUG)
+static ssize_t mc_fd_write(struct file *file, const char __user *buffer,
+			size_t buffer_len, loff_t *x)
+{
+	uint32_t cpu_new;
+	/* we only consider one digit */
+	char buf[2];
+	struct mc_instance *instance = get_instance(file);
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	/* Invalid data, nothing to do */
+	if (buffer_len < 1)
+		return -EINVAL;
+
+	/* Invalid data, nothing to do */
+	if (copy_from_user(buf, buffer, min(sizeof(buf), buffer_len)))
+		return -EFAULT;
+
+	if (buf[0] == 'n') {
+		mc_nsiq();
+	/* If it's a digit then switch cores */
+	} else if ((buf[0] >= '0') && (buf[0] <= '9')) {
+		cpu_new = buf[0] - '0';
+		if (cpu_new <= 8) {
+			MCDRV_DBG_VERBOSE(mcd, "Set Active Cpu: %d\n", cpu_new);
+			mc_switch_core(cpu_new);
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return buffer_len;
+}
+#endif
+
 /*
  * Release a mobicore instance object and all objects related to it
  * @instance:	instance
@@ -1074,7 +1110,7 @@
 		return -EFAULT;
 
 	mutex_lock(&instance->lock);
-	mc_clear_l2_tables(instance);
+	mc_clear_mmu_tables(instance);
 
 	mutex_lock(&ctx.bufs_lock);
 	/* release all mapped data */
@@ -1112,7 +1148,7 @@
 {
 	struct mc_instance *instance;
 
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
+	MCDRV_DBG_VERBOSE(mcd, "enter");
 
 	instance = mc_alloc_instance();
 	if (instance == NULL)
@@ -1141,7 +1177,7 @@
 		return -ENOMEM;
 	instance = get_instance(file);
 
-	MCDRV_DBG(mcd, "accept this as MobiCore Daemon\n");
+	MCDRV_DBG(mcd, "accept this as MobiCore Daemon");
 
 	ctx.daemon_inst = instance;
 	ctx.daemon = current;
@@ -1172,7 +1208,7 @@
 
 	/* check if daemon closes us. */
 	if (is_daemon(instance)) {
-		MCDRV_DBG_WARN(mcd, "WARNING: MobiCore Daemon died\n");
+		MCDRV_DBG_WARN(mcd, "MobiCore Daemon died");
 		ctx.daemon_inst = NULL;
 		ctx.daemon = NULL;
 	}
@@ -1183,7 +1219,7 @@
 	 * ret is quite irrelevant here as most apps don't care about the
 	 * return value from close() and it's quite difficult to recover
 	 */
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
 
 	return (int)ret;
 }
@@ -1200,7 +1236,9 @@
 
 	/* signal the daemon */
 	complete(&ctx.isr_comp);
-
+#ifdef MC_MEM_TRACES
+	mobicore_log_read();
+#endif
 	return IRQ_HANDLED;
 }
 
@@ -1221,6 +1259,9 @@
 	.release	= mc_fd_release,
 	.unlocked_ioctl	= mc_fd_user_ioctl,
 	.mmap		= mc_fd_mmap,
+#if defined(TBASE_CORE_SWITCHER) && defined(DEBUG)
+	.write          = mc_fd_write,
+#endif
 };
 
 static int create_devices(void)
@@ -1239,17 +1280,17 @@
 
 	ret = alloc_chrdev_region(&mc_dev_admin, 0, MC_DEV_MAX, "mobicore");
 	if (ret < 0) {
-		MCDRV_DBG_ERROR(mcd, "failed to allocate char dev region\n");
+		MCDRV_DBG_ERROR(mcd, "failed to allocate char dev region");
 		goto error;
 	}
 	mc_dev_user = MKDEV(MAJOR(mc_dev_admin), 1);
 
-	MCDRV_DBG_VERBOSE(mcd, "%s: dev %d", "mobicore", MAJOR(mc_dev_region));
+	MCDRV_DBG_VERBOSE(mcd, "%s: dev %d", "mobicore", MAJOR(mc_dev_admin));
 
 	/* First the ADMIN node */
 	ret = cdev_add(&mc_admin_cdev,  mc_dev_admin, 1);
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "admin device register failed\n");
+		MCDRV_DBG_ERROR(mcd, "admin device register failed");
 		goto error;
 	}
 	mc_admin_cdev.owner = THIS_MODULE;
@@ -1260,7 +1301,7 @@
 
 	ret = cdev_add(&mc_user_cdev, mc_dev_user, 1);
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "user device register failed\n");
+		MCDRV_DBG_ERROR(mcd, "user device register failed");
 		goto error_unregister;
 	}
 	mc_user_cdev.owner = THIS_MODULE;
@@ -1301,13 +1342,13 @@
 	/* Hardware does not support ARM TrustZone -> Cannot continue! */
 	if (!has_security_extensions()) {
 		MCDRV_DBG_ERROR(mcd,
-				"Hardware doesn't support ARM TrustZone!\n");
+				"Hardware doesn't support ARM TrustZone!");
 		return -ENODEV;
 	}
 
 	/* Running in secure mode -> Cannot load the driver! */
 	if (is_secure_mode()) {
-		MCDRV_DBG_ERROR(mcd, "Running in secure MODE!\n");
+		MCDRV_DBG_ERROR(mcd, "Running in secure MODE!");
 		return -ENODEV;
 	}
 
@@ -1320,18 +1361,18 @@
 	/* initialize event counter for signaling of an IRQ to zero */
 	atomic_set(&ctx.isr_counter, 0);
 
-	/* set up S-SIQ interrupt handler */
+	/* set up S-SIQ interrupt handler ************************/
 	ret = request_irq(MC_INTR_SSIQ, mc_ssiq_isr, IRQF_TRIGGER_RISING,
 			MC_ADMIN_DEVNODE, &ctx);
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "interrupt request failed\n");
+		MCDRV_DBG_ERROR(mcd, "interrupt request failed");
 		goto err_req_irq;
 	}
 
 #ifdef MC_PM_RUNTIME
 	ret = mc_pm_initialize(&ctx);
 	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "Power Management init failed!\n");
+		MCDRV_DBG_ERROR(mcd, "Power Management init failed!");
 		goto free_isr;
 	}
 #endif
@@ -1340,7 +1381,7 @@
 	if (ret != 0)
 		goto free_pm;
 
-	ret = mc_init_l2_tables();
+	ret = mc_init_mmu_tables();
 
 #ifdef MC_CRYPTO_CLOCK_MANAGEMENT
 	ret = mc_pm_clock_initialize();
@@ -1361,7 +1402,7 @@
 	mutex_init(&ctx.bufs_lock);
 
 	memset(&ctx.mci_base, 0, sizeof(ctx.mci_base));
-	MCDRV_DBG(mcd, "initialized\n");
+	MCDRV_DBG(mcd, "initialized");
 	return 0;
 
 free_pm:
@@ -1381,12 +1422,12 @@
  */
 static void __exit mobicore_exit(void)
 {
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
+	MCDRV_DBG_VERBOSE(mcd, "enter");
 #ifdef MC_MEM_TRACES
 	mobicore_log_free();
 #endif
 
-	mc_release_l2_tables();
+	mc_release_mmu_tables();
 
 #ifdef MC_PM_RUNTIME
 	mc_pm_free();
@@ -1408,6 +1449,15 @@
 	MCDRV_DBG_VERBOSE(mcd, "exit");
 }
 
+bool mc_sleep_ready(void)
+{
+#ifdef MC_PM_RUNTIME
+	return mc_pm_sleep_ready();
+#else
+	return true;
+#endif
+}
+
 /* Linux Driver Module Macros */
 module_init(mobicore_init);
 module_exit(mobicore_exit);
diff --git a/drivers/gud/mobicore_driver/main.h b/drivers/gud/MobiCoreDriver/main.h
similarity index 89%
rename from drivers/gud/mobicore_driver/main.h
rename to drivers/gud/MobiCoreDriver/main.h
index 871191e..11e304c 100644
--- a/drivers/gud/mobicore_driver/main.h
+++ b/drivers/gud/MobiCoreDriver/main.h
@@ -52,7 +52,7 @@
 	/* virtual Userspace start address */
 	void			*uaddr;
 	/* physical start address */
-	void			*phys;
+	phys_addr_t		phys;
 	/* order of number of pages */
 	unsigned int		order;
 	uint32_t		len;
@@ -83,8 +83,8 @@
 };
 
 struct mc_sleep_mode {
-	uint16_t	SleepReq;
-	uint16_t	ReadyToSleep;
+	uint16_t	sleep_req;
+	uint16_t	ready_to_sleep;
 };
 
 /* MobiCore is idle. No scheduling required. */
@@ -129,14 +129,14 @@
 int mc_release_instance(struct mc_instance *instance);
 
 /*
- * mc_register_wsm_l2() - Create a L2 table from a virtual memory buffer which
+ * mc_register_wsm_mmu() - Create a MMU table from a virtual memory buffer which
  * can be vmalloc or user space virtual memory
  */
-int mc_register_wsm_l2(struct mc_instance *instance,
-	uint32_t buffer, uint32_t len,
-	uint32_t *handle, uint32_t *phys);
+int mc_register_wsm_mmu(struct mc_instance *instance,
+	void *buffer, uint32_t len,
+	uint32_t *handle, phys_addr_t *phys);
 /* Unregister the buffer mapped above */
-int mc_unregister_wsm_l2(struct mc_instance *instance, uint32_t handle);
+int mc_unregister_wsm_mmu(struct mc_instance *instance, uint32_t handle);
 
 /* Allocate one mc_buffer of contiguous space */
 int mc_get_buffer(struct mc_instance *instance,
@@ -147,4 +147,7 @@
 /* Check if the other end of the fd owns instance */
 bool mc_check_owner_fd(struct mc_instance *instance, int32_t fd);
 
+/* Test if sleep is possible */
+bool mc_sleep_ready(void);
+
 #endif /* _MC_MAIN_H_ */
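
main.h now also exposes mc_sleep_ready(), which reports whether the secure world is ready to sleep (and falls back to true when MC_PM_RUNTIME is disabled, as shown in main.c above). A sketch of how a caller might use it in a suspend path (the hook name is illustrative, not part of this patch):

#include <linux/errno.h>

/* Refuse to enter suspend while the secure world is still busy. */
static int mc_try_suspend(void)
{
	if (!mc_sleep_ready())
		return -EAGAIN;

	return 0;
}
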
diff --git a/drivers/gud/MobiCoreDriver/mem.c b/drivers/gud/MobiCoreDriver/mem.c
new file mode 100644
index 0000000..2d92f74
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/mem.c
@@ -0,0 +1,743 @@
+/*
+ * MobiCore Driver Kernel Module.
+ *
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ *
+ * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ * <-- Copyright Trustonic Limited 2013 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "main.h"
+#include "debug.h"
+#include "mem.h"
+
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+
+#ifdef LPAE_SUPPORT
+#define MMU_TYPE_PAGE	(3 << 0)
+#define MMU_BUFFERABLE	(1 << 2) /* AttrIndx[0] */
+#define MMU_CACHEABLE	(1 << 3) /* AttrIndx[1] */
+#define MMU_NS		(1 << 5)
+#define MMU_AP_RW_ALL	(1 << 6) /* AP[2:1], RW, at any privilege level */
+#define MMU_EXT_SHARED	(3 << 8) /* SH[1:0], inner shareable */
+#define MMU_EXT_AF	(1 << 10) /* Access Flag */
+#define MMU_EXT_NG	(1 << 11)
+#define MMU_EXT_XN      (((uint64_t)1) << 54) /* XN */
+#else
+#define MMU_TYPE_EXT	(3 << 0)	/* v5 */
+#define MMU_TYPE_SMALL	(2 << 0)
+#define MMU_BUFFERABLE	(1 << 2)
+#define MMU_CACHEABLE	(1 << 3)
+#define MMU_EXT_AP0	(1 << 4)
+#define MMU_EXT_AP1	(2 << 4)
+#define MMU_EXT_TEX(x)	((x) << 6)	/* v5 */
+#define MMU_EXT_SHARED	(1 << 10)	/* v6 */
+#define MMU_EXT_NG	(1 << 11)	/* v6 */
+#endif
+
+/* MobiCore memory context data */
+struct mc_mem_context mem_ctx;
+
+static inline void release_page(struct page *page)
+{
+	set_bit(PG_dirty, &page->flags);
+
+	page_cache_release(page);
+}
+
+static int lock_pages(struct task_struct *task, void *virt_start_page_addr,
+	int pages_no, struct page **pages)
+{
+	int locked_pages;
+
+	/* lock user pages, must hold the mmap_sem to do this. */
+	down_read(&(task->mm->mmap_sem));
+	locked_pages = get_user_pages(
+				task,
+				task->mm,
+				(unsigned long)virt_start_page_addr,
+				pages_no,
+				1, /* write access */
+				0,
+				pages,
+				NULL);
+	up_read(&(task->mm->mmap_sem));
+
+	/* check if we could lock all pages. */
+	if (locked_pages != pages_no) {
+		MCDRV_DBG_ERROR(mcd, "get_user_pages() failed, locked_pages=%d",
+				locked_pages);
+		if (locked_pages > 0) {
+			/* release all locked pages. */
+			release_pages(pages, locked_pages, 0);
+		}
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/* Get kernel pointer to shared MMU table given a per-process reference */
+static void *get_mmu_table_kernel_virt(struct mc_mmu_table *table)
+{
+	if (WARN(!table, "Invalid MMU table"))
+		return NULL;
+
+	if (WARN(!table->set, "Invalid MMU table set"))
+		return NULL;
+
+	if (WARN(!table->set->kernel_virt, "Invalid MMU pointer"))
+		return NULL;
+
+	return &(table->set->kernel_virt->table[table->idx]);
+}
+
+static inline int in_use(struct mc_mmu_table *table)
+{
+	return atomic_read(&table->usage) > 0;
+}
+
+/*
+ * Search the list of used MMU tables and return the one with the handle.
+ * Assumes the table_lock is taken.
+ */
+struct mc_mmu_table *find_mmu_table(unsigned int handle)
+{
+	struct mc_mmu_table *table;
+
+	list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
+		if (table->handle == handle)
+			return table;
+	}
+	return NULL;
+}
+
+/*
+ * Allocate a new MMU table store plus MMU_TABLES_PER_PAGE in the MMU free
+ * tables list. Assumes the table_lock is already taken by the caller above.
+ */
+static int alloc_mmu_table_store(void)
+{
+	unsigned long store;
+	struct mc_mmu_tables_set *mmutable_set;
+	struct mc_mmu_table *mmutable, *mmutable2;
+	struct page *page;
+	int ret = 0, i;
+	/* temp list for holding the MMU tables */
+	LIST_HEAD(temp);
+
+	store = get_zeroed_page(GFP_KERNEL);
+	if (!store)
+		return -ENOMEM;
+
+	/*
+	 * Actually, locking is not necessary, because kernel
+	 * memory is not supposed to get swapped out. But we
+	 * play safe....
+	 */
+	page = virt_to_page(store);
+	set_bit(PG_reserved, &page->flags);
+
+	/* add all the descriptors to the free descriptors list */
+	mmutable_set = kmalloc(sizeof(*mmutable_set), GFP_KERNEL | __GFP_ZERO);
+	if (mmutable_set == NULL) {
+		ret = -ENOMEM;
+		goto free_store;
+	}
+	/* initialize */
+	mmutable_set->kernel_virt = (void *)store;
+	mmutable_set->page = page;
+	mmutable_set->phys = virt_to_phys((void *)store);
+	/* the set is not yet used */
+	atomic_set(&mmutable_set->used_tables, 0);
+
+	/* init add to list. */
+	INIT_LIST_HEAD(&(mmutable_set->list));
+	list_add(&mmutable_set->list, &mem_ctx.mmu_tables_sets);
+
+	for (i = 0; i < MMU_TABLES_PER_PAGE; i++) {
+		/* allocate a WSM MMU descriptor */
+		mmutable  = kmalloc(sizeof(*mmutable), GFP_KERNEL | __GFP_ZERO);
+		if (mmutable == NULL) {
+			ret = -ENOMEM;
+			MCDRV_DBG_ERROR(mcd, "out of memory");
+			/* Free the full temp list and the store in this case */
+			goto free_temp_list;
+		}
+
+		/* set set reference */
+		mmutable->set = mmutable_set;
+		mmutable->idx = i;
+		mmutable->virt = get_mmu_table_kernel_virt(mmutable);
+		mmutable->phys = mmutable_set->phys+i*sizeof(struct mmutable);
+		atomic_set(&mmutable->usage, 0);
+
+		/* add to temp list. */
+		INIT_LIST_HEAD(&mmutable->list);
+		list_add_tail(&mmutable->list, &temp);
+	}
+
+	/*
+	 * If everything went ok then merge the temp list with the global
+	 * free list
+	 */
+	list_splice_tail(&temp, &mem_ctx.free_mmu_tables);
+	return 0;
+free_temp_list:
+	list_for_each_entry_safe(mmutable, mmutable2, &temp, list) {
+		kfree(mmutable);
+	}
+
+	list_del(&mmutable_set->list);
+
+free_store:
+	free_page(store);
+	return ret;
+}
+/*
+ * Get a MMU table from the free tables list or allocate a new one and
+ * initialize it. Assumes the table_lock is already taken.
+ */
+static struct mc_mmu_table *alloc_mmu_table(struct mc_instance *instance)
+{
+	int ret = 0;
+	struct mc_mmu_table *table = NULL;
+
+	if (list_empty(&mem_ctx.free_mmu_tables)) {
+		ret = alloc_mmu_table_store();
+		if (ret) {
+			MCDRV_DBG_ERROR(mcd, "Failed to allocate new store!");
+			return ERR_PTR(-ENOMEM);
+		}
+		/* if it's still empty something wrong has happened */
+		if (list_empty(&mem_ctx.free_mmu_tables)) {
+			MCDRV_DBG_ERROR(mcd,
+					"Free list not updated correctly!");
+			return ERR_PTR(-EFAULT);
+		}
+	}
+
+	/* get a WSM MMU descriptor */
+	table  = list_first_entry(&mem_ctx.free_mmu_tables,
+		struct mc_mmu_table, list);
+	if (table == NULL) {
+		MCDRV_DBG_ERROR(mcd, "out of memory");
+		return ERR_PTR(-ENOMEM);
+	}
+	/* Move it to the used MMU tables list */
+	list_move_tail(&table->list, &mem_ctx.mmu_tables);
+
+	table->handle = get_unique_id();
+	table->owner = instance;
+
+	atomic_inc(&table->set->used_tables);
+	atomic_inc(&table->usage);
+
+	MCDRV_DBG_VERBOSE(mcd,
+			  "chunkPhys=0x%llX, idx=%d",
+			  (u64)table->set->phys, table->idx);
+
+	return table;
+}
+
+/*
+ * Frees the object associated with a MMU table. Initially the object is moved
+ * to the free tables list, but if all the 4 lists of the store are free
+ * then the store is also released.
+ * Assumes the table_lock is already taken.
+ */
+static void free_mmu_table(struct mc_mmu_table *table)
+{
+	struct mc_mmu_tables_set *mmutable_set;
+
+	if (WARN(!table, "Invalid table"))
+		return;
+
+	mmutable_set = table->set;
+	if (WARN(!mmutable_set, "Invalid table set"))
+		return;
+
+	list_move_tail(&table->list, &mem_ctx.free_mmu_tables);
+
+	/* if nobody uses this set, we can release it. */
+	if (atomic_dec_and_test(&mmutable_set->used_tables)) {
+		struct mc_mmu_table *tmp;
+
+		/* remove from list */
+		list_del(&mmutable_set->list);
+		/*
+		 * All the MMU tables are in the free list for this set
+		 * so we can just remove them from there
+		 */
+		list_for_each_entry_safe(table, tmp, &mem_ctx.free_mmu_tables,
+					 list) {
+			if (table->set == mmutable_set) {
+				list_del(&table->list);
+				kfree(table);
+			}
+		} /* end while */
+
+		/*
+		 * We shouldn't recover from this since it was some data
+		 * corruption before
+		 */
+		BUG_ON(!mmutable_set->page);
+		clear_bit(PG_reserved, &(mmutable_set->page)->flags);
+
+
+		BUG_ON(!mmutable_set->kernel_virt);
+		free_page((unsigned long)mmutable_set->kernel_virt);
+
+		kfree(mmutable_set);
+	}
+}
+
+/*
+ * Create a MMU table in a WSM container that has been allocated previously.
+ * Assumes the table lock is already taken, or that it does not need to be
+ * taken, as when the MMU table is first created the full list is locked.
+ *
+ * @task	pointer to task owning WSM
+ * @wsm_buffer  user space WSM start
+ * @wsm_len     WSM length
+ * @table       Pointer to MMU table details
+ */
+static int map_buffer(struct task_struct *task, void *wsm_buffer,
+		      unsigned int wsm_len, struct mc_mmu_table *table)
+{
+	int		ret = 0;
+	unsigned int	i, nr_of_pages;
+	/* start address of the 4 KiB page of wsm_buffer */
+	void		*virt_addr_page;
+	struct page	*page;
+	struct mmutable	*mmutable;
+	struct page	**mmutable_as_array_of_pointers_to_page;
+	/* page offset in wsm buffer */
+	unsigned int offset;
+
+	if (WARN(!wsm_buffer, "Invalid WSM buffer pointer"))
+		return -EINVAL;
+
+	if (WARN(wsm_len == 0, "Invalid WSM buffer length"))
+		return -EINVAL;
+
+	if (WARN(!table, "Invalid mapping table for WSM"))
+		return -EINVAL;
+
+	/* no size > 1 MiB supported */
+	if (wsm_len > SZ_1M) {
+		MCDRV_DBG_ERROR(mcd, "size > 1 MiB");
+		return -EINVAL;
+	}
+
+	MCDRV_DBG_VERBOSE(mcd, "WSM addr=0x%p, len=0x%08x", wsm_buffer,
+			  wsm_len);
+
+	/* calculate page usage */
+	virt_addr_page = (void *)(((unsigned long)(wsm_buffer)) & PAGE_MASK);
+	offset = (unsigned int)	(((unsigned long)(wsm_buffer)) & (~PAGE_MASK));
+	nr_of_pages  = PAGE_ALIGN(offset + wsm_len) / PAGE_SIZE;
+
+	MCDRV_DBG_VERBOSE(mcd, "virt addr page start=0x%p, pages=%d",
+			  virt_addr_page, nr_of_pages);
+
+	/* MMU table can hold max 1MiB in 256 pages. */
+	if ((nr_of_pages * PAGE_SIZE) > SZ_1M) {
+		MCDRV_DBG_ERROR(mcd, "WSM pages exceed 1 MiB");
+		return -EINVAL;
+	}
+
+	mmutable = table->virt;
+	/*
+	 * We use the memory for the MMU table to hold the pointer
+	 * and convert them later. This works, as everything comes
+	 * down to a 32 bit value.
+	 */
+	mmutable_as_array_of_pointers_to_page = (struct page **)mmutable;
+
+	/* Request comes from user space */
+	if (task != NULL && !is_vmalloc_addr(wsm_buffer)) {
+		/*
+		 * lock user page in memory, so they do not get swapped
+		 * out.
+		 * REV axh: Kernel 2.6.27 added a new get_user_pages_fast()
+		 * function, maybe it is called fast_gup() in some versions.
+		 * handle user process doing a fork().
+		 * Child should not get things.
+		 * http://osdir.com/ml/linux-media/2009-07/msg00813.html
+		 * http://lwn.net/Articles/275808/
+		 */
+		ret = lock_pages(task, virt_addr_page, nr_of_pages,
+				 mmutable_as_array_of_pointers_to_page);
+		if (ret != 0) {
+			MCDRV_DBG_ERROR(mcd, "lock_user_pages() failed");
+			return ret;
+		}
+	}
+	/* Request comes from kernel space (cont buffer) */
+	else if (task == NULL && !is_vmalloc_addr(wsm_buffer)) {
+		void *uaddr = wsm_buffer;
+		for (i = 0; i < nr_of_pages; i++) {
+			page = virt_to_page(uaddr);
+			if (!page) {
+				MCDRV_DBG_ERROR(mcd, "failed to map address");
+				return -EINVAL;
+			}
+			get_page(page);
+			mmutable_as_array_of_pointers_to_page[i] = page;
+			uaddr += PAGE_SIZE;
+		}
+	}
+	/* Request comes from kernel space (vmalloc buffer) */
+	else {
+		void *uaddr = wsm_buffer;
+		for (i = 0; i < nr_of_pages; i++) {
+			page = vmalloc_to_page(uaddr);
+			if (!page) {
+				MCDRV_DBG_ERROR(mcd, "failed to map address");
+				return -EINVAL;
+			}
+			get_page(page);
+			mmutable_as_array_of_pointers_to_page[i] = page;
+			uaddr += PAGE_SIZE;
+		}
+	}
+
+	table->pages = nr_of_pages;
+
+	/*
+	 * Create the MMU table entries.
+	 * At this point mmutable contains a list of page pointers.
+	 * For a proper cleanup we have to ensure that the following code
+	 * either succeeds and mmutable contains a valid MMU table, or fails
+	 * and mmutable still contains the list of page pointers; any mixed
+	 * content would make cleanup difficult.
+	 * Fill the table in reverse order as it is used both as input and
+	 * output.
+	 */
+	i = MC_ARM_MMU_TABLE_ENTRIES-1;
+	do {
+		if (i < nr_of_pages) {
+#ifdef LPAE_SUPPORT
+			uint64_t pte;
+#elif defined(CONFIG_ARM_LPAE) && !defined(LPAE_SUPPORT)
+			/* Nwd supports 64bit addresses, SWD only 32bit */
+			uint64_t pte64;
+			uint32_t pte;
+#else
+			uint32_t pte;
+#endif
+			page = mmutable_as_array_of_pointers_to_page[i];
+
+			/*
+			 * Create the MMU table entry; see the ARM MMU
+			 * documentation for details about the flags stored in
+			 * the lowest 12 bits.
+			 * As a side reference, the article
+			 * "ARM's multiply-mapped memory mess"
+			 * at http://lwn.net/Articles/409032/
+			 * is also worth reading.
+			 */
+#ifdef LPAE_SUPPORT
+			pte = page_to_phys(page);
+			pte |=	MMU_EXT_XN
+				| MMU_EXT_NG
+				| MMU_EXT_AF
+				| MMU_AP_RW_ALL
+				| MMU_NS
+				| MMU_CACHEABLE | MMU_BUFFERABLE
+				| MMU_TYPE_PAGE;
+#elif defined(CONFIG_ARM_LPAE) && !defined(LPAE_SUPPORT)
+			/*
+			 * NWd uses 64-bit addresses, but SWd can handle only
+			 * short descriptors and physical addresses below 4 GiB.
+			 */
+			pte64 = page_to_phys(page);
+			if ((pte64 >> 32) != 0) {
+				MCDRV_DBG_ERROR(mcd,
+						"physical addresses bigger than 4GB not supported");
+				return -EINVAL;
+			}
+			pte = (uint32_t)pte64;
+			pte |= MMU_EXT_AP1 | MMU_EXT_AP0
+				| MMU_CACHEABLE | MMU_BUFFERABLE
+				| MMU_TYPE_SMALL | MMU_TYPE_EXT | MMU_EXT_NG;
+#else
+			pte = page_to_phys(page);
+			pte |= MMU_EXT_AP1 | MMU_EXT_AP0
+				| MMU_CACHEABLE | MMU_BUFFERABLE
+				| MMU_TYPE_SMALL | MMU_TYPE_EXT | MMU_EXT_NG;
+#endif /* LPAE_SUPPORT */
+			/*
+			 * Linux uses different mappings on SMP systems (the
+			 * sharing flag is set for the pte). In order not to
+			 * confuse things too much in MobiCore, make sure the
+			 * shared buffers have the same flags.
+			 * This should also be done on the SWd side.
+			 */
+#ifdef CONFIG_SMP
+#ifdef LPAE_SUPPORT
+			pte |= MMU_EXT_SHARED;
+#else
+			pte |= MMU_EXT_SHARED | MMU_EXT_TEX(1);
+#endif /* LPAE_SUPPORT */
+#endif /* CONFIG_SMP */
+
+			mmutable->table_entries[i] = pte;
+			MCDRV_DBG_VERBOSE(mcd, "MMU entry %d:  0x%llx", i,
+					  (u64)(pte));
+		} else {
+			/* ensure rest of table is empty */
+			mmutable->table_entries[i] = 0;
+		}
+	} while (i-- != 0);
+
+	return ret;
+}
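+
+/*
+ * Worked example (illustrative only, values are hypothetical): for a user
+ * buffer at 0x40001200 with wsm_len = 0x2000 and 4 KiB pages, map_buffer()
+ * above computes
+ *	virt_addr_page = 0x40001000	(buffer address rounded down to a page)
+ *	offset         = 0x200		(offset of the buffer within that page)
+ *	nr_of_pages    = PAGE_ALIGN(0x200 + 0x2000) / PAGE_SIZE = 3
+ * so three MMU entries are filled although the buffer holds only two pages
+ * worth of data.
+ */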
+
+/*
+ * Remove an MMU table from a WSM container. Afterwards the container may be
+ * released. Assumes both the table_lock and the table's lock are taken.
+ */
+static void unmap_buffers(struct mc_mmu_table *table)
+{
+	struct mmutable *mmutable;
+	int i;
+
+	if (WARN_ON(!table))
+		return;
+
+	/* found the table, now release the resources. */
+	MCDRV_DBG_VERBOSE(mcd,
+			  "clear MMU table, phys_base=0x%llX,nr_of_pages=%d",
+			  (u64)table->phys, table->pages);
+
+	mmutable = table->virt;
+
+	/* release all locked user space pages */
+	for (i = 0; i < table->pages; i++) {
+		/* convert physical entries from MMU table to page pointers */
+		struct page *page;
+		page = phys_to_page(mmutable->table_entries[i]);
+		release_page(page);
+	}
+
+	/* remember that all pages have been freed */
+	table->pages = 0;
+}
+
+/*
+ * Delete a used MMU table.
+ * Assumes both the table_lock and the table's lock are taken.
+ */
+static void unmap_mmu_table(struct mc_mmu_table *table)
+{
+	/* Check if it's not locked by other processes too! */
+	if (!atomic_dec_and_test(&table->usage))
+		return;
+
+	/* Release it if the NWd and SWd/MC no longer use it. */
+	unmap_buffers(table);
+	free_mmu_table(table);
+}
+
+int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle)
+{
+	struct mc_mmu_table *table;
+	int ret = 0;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	mutex_lock(&mem_ctx.table_lock);
+	table = find_mmu_table(handle);
+
+	if (table == NULL) {
+		MCDRV_DBG_VERBOSE(mcd, "entry not found");
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+	if (instance != table->owner && !is_daemon(instance)) {
+		MCDRV_DBG_ERROR(mcd, "instance does not own it");
+		ret = -EPERM;
+		goto err_unlock;
+	}
+	/* free table (if no further locks exist) */
+	unmap_mmu_table(table);
+err_unlock:
+	mutex_unlock(&mem_ctx.table_lock);
+
+	return ret;
+}
+
+int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle)
+{
+	int ret = 0;
+	struct mc_mmu_table *table = NULL;
+
+	if (WARN(!instance, "No instance data available"))
+		return -EFAULT;
+
+	mutex_lock(&mem_ctx.table_lock);
+	table = find_mmu_table(handle);
+
+	if (table == NULL) {
+		MCDRV_DBG_VERBOSE(mcd, "entry not found %u", handle);
+		ret = -EINVAL;
+		goto table_err;
+	}
+	if (instance != table->owner && !is_daemon(instance)) {
+		MCDRV_DBG_ERROR(mcd, "instance does not own it");
+		ret = -EPERM;
+		goto table_err;
+	}
+
+	/* lock entry */
+	atomic_inc(&table->usage);
+table_err:
+	mutex_unlock(&mem_ctx.table_lock);
+	return ret;
+}
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
+	struct task_struct *task, void *wsm_buffer, unsigned int wsm_len)
+{
+	int ret = 0;
+	struct mc_mmu_table *table;
+
+	if (WARN(!instance, "No instance data available"))
+		return ERR_PTR(-EFAULT);
+
+	mutex_lock(&mem_ctx.table_lock);
+	table = alloc_mmu_table(instance);
+	if (IS_ERR(table)) {
+		MCDRV_DBG_ERROR(mcd, "alloc_mmu_table() failed");
+		ret = -ENOMEM;
+		goto err_no_mem;
+	}
+
+	/* create the MMU page for the WSM */
+	ret = map_buffer(task, wsm_buffer, wsm_len, table);
+
+	if (ret != 0) {
+		MCDRV_DBG_ERROR(mcd, "map_buffer() failed");
+		unmap_mmu_table(table);
+		goto err_no_mem;
+	}
+	MCDRV_DBG_VERBOSE(mcd,
+			  "mapped buffer %p to table with handle %d @ 0x%llX",
+			  wsm_buffer, table->handle, (u64)table->phys);
+
+	mutex_unlock(&mem_ctx.table_lock);
+	return table;
+err_no_mem:
+	mutex_unlock(&mem_ctx.table_lock);
+	return ERR_PTR(ret);
+}
+
+phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd)
+{
+	phys_addr_t ret = 0;
+	struct mc_mmu_table *table = NULL;
+
+	mutex_lock(&mem_ctx.table_lock);
+	table = find_mmu_table(handle);
+
+	if (table == NULL) {
+		MCDRV_DBG_ERROR(mcd, "entry not found %u", handle);
+		ret = 0;
+		goto table_err;
+	}
+
+	/* It's safe here not to lock the instance since the owner of
+	 * the table will be cleared only with the table lock taken */
+	if (!mc_check_owner_fd(table->owner, fd)) {
+		MCDRV_DBG_ERROR(mcd, "not valid owner %u", handle);
+		ret = 0;
+		goto table_err;
+	}
+
+	ret = table->phys;
+table_err:
+	mutex_unlock(&mem_ctx.table_lock);
+	return ret;
+}
+
+void mc_clean_mmu_tables(void)
+{
+	struct mc_mmu_table *table, *tmp;
+
+	mutex_lock(&mem_ctx.table_lock);
+	/* Check if some WSM is orphaned. */
+	list_for_each_entry_safe(table, tmp, &mem_ctx.mmu_tables, list) {
+		if (table->owner == NULL) {
+			MCDRV_DBG(mcd,
+				  "clearing MMU: p=0x%llX pages=%d",
+				  (u64)table->phys,
+				  table->pages);
+			unmap_mmu_table(table);
+		}
+	}
+	mutex_unlock(&mem_ctx.table_lock);
+}
+
+void mc_clear_mmu_tables(struct mc_instance *instance)
+{
+	struct mc_mmu_table *table, *tmp;
+
+	mutex_lock(&mem_ctx.table_lock);
+	/* Check if some WSM is still in use. */
+	list_for_each_entry_safe(table, tmp, &mem_ctx.mmu_tables, list) {
+		if (table->owner == instance) {
+			MCDRV_DBG(mcd, "release WSM MMU: p=0x%llX pages=%d",
+				  (u64)table->phys,
+				  table->pages);
+			/* unlock app usage and free or mark it as orphan */
+			table->owner = NULL;
+			unmap_mmu_table(table);
+		}
+	}
+	mutex_unlock(&mem_ctx.table_lock);
+}
+
+int mc_init_mmu_tables(void)
+{
+	/* init list for WSM MMU chunks. */
+	INIT_LIST_HEAD(&mem_ctx.mmu_tables_sets);
+
+	/* MMU table descriptor list. */
+	INIT_LIST_HEAD(&mem_ctx.mmu_tables);
+
+	/* MMU free table descriptor list. */
+	INIT_LIST_HEAD(&mem_ctx.free_mmu_tables);
+
+	mutex_init(&mem_ctx.table_lock);
+
+	return 0;
+}
+
+void mc_release_mmu_tables(void)
+{
+	struct mc_mmu_table *table;
+	/* Check if some WSM is still in use. */
+	list_for_each_entry(table, &mem_ctx.mmu_tables, list) {
+		WARN(1, "WSM MMU still in use: phys=0x%llX, nr_of_pages=%d",
+		     (u64)table->phys, table->pages);
+	}
+}
diff --git a/drivers/gud/MobiCoreDriver/mem.h b/drivers/gud/MobiCoreDriver/mem.h
new file mode 100644
index 0000000..5c9006a
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/mem.h
@@ -0,0 +1,139 @@
+/*
+ * MobiCore driver module (interface to the secure world, SWd).
+ *
+ * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ * <-- Copyright Trustonic Limited 2013 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MC_MEM_H_
+#define _MC_MEM_H_
+
+#ifdef LPAE_SUPPORT
+/*
+ * Number of page table entries in one MMU table. This is ARM specific, an
+ * MMU table covers 2 MiB by using 512 entries referring to 4KiB pages each.
+ */
+#define MC_ARM_MMU_TABLE_ENTRIES		512
+
+/* ARM level 3 (MMU) table with 512 entries. Size: 4k */
+struct mmutable {
+	uint64_t	table_entries[MC_ARM_MMU_TABLE_ENTRIES];
+};
+
+/* There is 1 table in each page. */
+#define MMU_TABLES_PER_PAGE		1
+#else
+/*
+ * MobiCore specific page tables for world shared memory.
+ * Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
+ * MobiCore uses the default ARM format.
+ *
+ * Number of page table entries in one MMU table. This is ARM specific, an
+ * MMU table covers 1 MiB by using 256 entries referring to 4KiB pages each.
+ */
+#define MC_ARM_MMU_TABLE_ENTRIES		256
+
+/* ARM level 2 (MMU) table with 256 entries. Size: 1k */
+struct mmutable {
+	uint32_t	table_entries[MC_ARM_MMU_TABLE_ENTRIES];
+};
+
+/* There are 4 tables in each page. */
+#define MMU_TABLES_PER_PAGE		4
+#endif
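+
+/*
+ * Size check (for reference): with LPAE, 512 entries x 8 bytes give a 4 KiB
+ * table covering 512 x 4 KiB = 2 MiB, so one table fills a page. Without
+ * LPAE, 256 entries x 4 bytes give a 1 KiB table covering 1 MiB, so four
+ * tables share one 4 KiB page.
+ */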
+
+/* Store for MMU tables in one 4 KiB page */
+struct mc_mmu_table_store {
+	struct mmutable table[MMU_TABLES_PER_PAGE];
+};
+
+/* Usage and maintenance information about mc_mmu_table_store */
+struct mc_mmu_tables_set {
+	struct list_head		list;
+	/* kernel virtual address */
+	struct mc_mmu_table_store	*kernel_virt;
+	/* physical address */
+	phys_addr_t			phys;
+	/* pointer to page struct */
+	struct page			*page;
+	/* How many tables from this set are used */
+	atomic_t			used_tables;
+};
+
+/*
+ * MMU table allocated to the Daemon or a TLC describing a world shared
+ * buffer.
+ * When users map a malloc()ed area into the SWd, an MMU table is allocated.
+ * In addition, an area of at most 1 MiB of virtual address space is mapped
+ * into the MMU table and a handle for this table is returned to the user.
+ */
+struct mc_mmu_table {
+	struct list_head	list;
+	/* Table lock */
+	struct mutex		lock;
+	/* handle as communicated to user mode */
+	unsigned int		handle;
+	/* Number of references kept to this MMU table */
+	atomic_t		usage;
+	/* owner of this MMU table */
+	struct mc_instance	*owner;
+	/* set describing where our MMU table is stored */
+	struct mc_mmu_tables_set	*set;
+	/* index into MMU table set */
+	unsigned int		idx;
+	/* size of buffer */
+	unsigned int		pages;
+	/* virtual address */
+	void			*virt;
+	/* physical address */
+	phys_addr_t		phys;
+};
+
+/* MobiCore Driver Memory context data. */
+struct mc_mem_context {
+	struct mc_instance	*daemon_inst;
+	/* Backing store for MMU tables */
+	struct list_head	mmu_tables_sets;
+	/* Bookkeeping for used MMU tables */
+	struct list_head	mmu_tables;
+	/* Bookkeeping for free MMU tables */
+	struct list_head	free_mmu_tables;
+	/* mutex to synchronize access to the above lists */
+	struct mutex		table_lock;
+};
+
+/*
+ * Allocate MMU table and map buffer into it.
+ * That is, create respective table entries.
+ */
+struct mc_mmu_table *mc_alloc_mmu_table(struct mc_instance *instance,
+	struct task_struct *task, void *wsm_buffer, unsigned int wsm_len);
+
+/* Delete all the MMU tables associated with an instance */
+void mc_clear_mmu_tables(struct mc_instance *instance);
+
+/* Release all orphaned MMU tables */
+void mc_clean_mmu_tables(void);
+
+/* Delete a used MMU table. */
+int mc_free_mmu_table(struct mc_instance *instance, uint32_t handle);
+
+/*
+ * Lock an MMU table: the daemon increments the refcount of the MMU table,
+ * marking it as in use by the SWd, so it does not get released when the TLC
+ * dies.
+ */
+int mc_lock_mmu_table(struct mc_instance *instance, uint32_t handle);
+
+/* Return the physical address of the MMU table. */
+phys_addr_t mc_find_mmu_table(uint32_t handle, int32_t fd);
+/* Release all used MMU tables to Linux memory space */
+void mc_release_mmu_tables(void);
+
+/* Initialize the MMU table bookkeeping structures */
+int mc_init_mmu_tables(void);
+
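+/*
+ * Illustrative usage sketch (not part of the driver): a kernel caller that
+ * owns an mc_instance could wrap a physically contiguous buffer into world
+ * shared memory roughly as follows; "inst", "buf" and "len" are hypothetical.
+ *
+ *	struct mc_mmu_table *table;
+ *
+ *	table = mc_alloc_mmu_table(inst, NULL, buf, len);
+ *	if (IS_ERR(table))
+ *		return PTR_ERR(table);
+ *	(pass table->handle and table->phys to the secure world)
+ *	mc_free_mmu_table(inst, table->handle);
+ */
+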
+#endif /* _MC_MEM_H_ */
diff --git a/drivers/gud/MobiCoreDriver/ops.c b/drivers/gud/MobiCoreDriver/ops.c
new file mode 100644
index 0000000..96b4f4f
--- /dev/null
+++ b/drivers/gud/MobiCoreDriver/ops.c
@@ -0,0 +1,398 @@
+/*
+ * MobiCore Driver Kernel Module.
+ *
+ * This module is written as a Linux device driver.
+ * This driver represents the command proxy on the lowest layer, from the
+ * secure world to the non secure world, and vice versa.
+ * This driver is located in the non secure world (Linux).
+ * This driver offers IOCTL commands, for access to the secure world, and has
+ * the interface from the secure world to the normal world.
+ * The access to the driver is possible with a file descriptor,
+ * which has to be created by the fd = open(/dev/mobicore) command.
+ *
+ * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
+ * <-- Copyright Trustonic Limited 2013 -->
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/cpu.h>
+
+#include "main.h"
+#include "fastcall.h"
+#include "ops.h"
+#include "mem.h"
+#include "pm.h"
+#include "debug.h"
+
+/* MobiCore context data */
+static struct mc_context *ctx;
+#ifdef TBASE_CORE_SWITCHER
+static uint32_t active_cpu;
+
+static int mobicore_cpu_callback(struct notifier_block *nfb,
+				 unsigned long action, void *hcpu);
+static struct notifier_block mobicore_cpu_notifer = {
+	.notifier_call = mobicore_cpu_callback,
+};
+#endif
+
+static inline long smc(union fc_generic *fc)
+{
+	/*
+	 * If sleep has been requested, yields must be filtered out as
+	 * they make no sense.
+	 */
+	if (ctx->mcp)
+		if (ctx->mcp->flags.sleep_mode.sleep_req) {
+			if (fc->as_in.cmd == MC_SMC_N_YIELD)
+				return MC_FC_RET_ERR_INVALID;
+		}
+	return _smc(fc);
+}
+
+struct fastcall_work {
+#ifdef MC_FASTCALL_WORKER_THREAD
+	struct kthread_work work;
+#else
+	struct work_struct work;
+#endif
+	void *data;
+};
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+static void fastcall_work_func(struct kthread_work *work);
+#else
+static void fastcall_work_func(struct work_struct *work);
+#endif
+
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+
+static struct task_struct *fastcall_thread;
+static DEFINE_KTHREAD_WORKER(fastcall_worker);
+
+bool mc_fastcall(void *data)
+{
+	struct fastcall_work fc_work = {
+		KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
+		.data = data,
+	};
+
+	if (!queue_kthread_work(&fastcall_worker, &fc_work.work))
+		return false;
+	flush_kthread_work(&fc_work.work);
+	return true;
+}
+
+int mc_fastcall_init(struct mc_context *context)
+{
+	int ret = 0;
+	ctx = context;
+
+	fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
+					 "mc_fastcall");
+	if (IS_ERR(fastcall_thread)) {
+		ret = PTR_ERR(fastcall_thread);
+		fastcall_thread = NULL;
+		MCDRV_DBG_ERROR(mcd, "cannot create fastcall thread (%d)", ret);
+		return ret;
+	}
+
+	wake_up_process(fastcall_thread);
+
+	/* this thread MUST run on CPU 0 at startup */
+	set_cpus_allowed(fastcall_thread, CPU_MASK_CPU0);
+#ifdef TBASE_CORE_SWITCHER
+	register_cpu_notifier(&mobicore_cpu_notifer);
+#endif
+	return 0;
+}
+
+void mc_fastcall_destroy(void)
+{
+	if (!IS_ERR_OR_NULL(fastcall_thread)) {
+		kthread_stop(fastcall_thread);
+		fastcall_thread = NULL;
+	}
+}
+#else
+
+bool mc_fastcall(void *data)
+{
+	struct fastcall_work work = {
+		.data = data,
+	};
+	INIT_WORK(&work.work, fastcall_work_func);
+	if (!schedule_work_on(0, &work.work))
+		return false;
+	flush_work(&work.work);
+	return true;
+}
+
+int mc_fastcall_init(struct mc_context *context)
+{
+	ctx = context;
+	return 0;
+}
+
+void mc_fastcall_destroy(void) {}
+#endif
+
+#ifdef MC_FASTCALL_WORKER_THREAD
+static void fastcall_work_func(struct kthread_work *work)
+#else
+static void fastcall_work_func(struct work_struct *work)
+#endif
+{
+	struct fastcall_work *fc_work =
+		container_of(work, struct fastcall_work, work);
+	union fc_generic *fc_generic = fc_work->data;
+#ifdef TBASE_CORE_SWITCHER
+	uint32_t cpu_swap = 0, new_cpu;
+	uint32_t cpu_id[] = CPU_IDS;
+#endif
+
+	/* Check the request before enabling the crypto clock */
+	if (fc_generic == NULL)
+		return;
+
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+	mc_pm_clock_enable();
+#endif
+
+#ifdef TBASE_CORE_SWITCHER
+	if (fc_generic->as_in.cmd == MC_FC_SWITCH_CORE) {
+		cpu_swap = 1;
+		new_cpu = fc_generic->as_in.param[0];
+		fc_generic->as_in.param[0] = cpu_id[fc_generic->as_in.param[0]];
+	}
+#endif
+	smc(fc_work->data);
+#ifdef TBASE_CORE_SWITCHER
+	if (cpu_swap) {
+		if (fc_generic->as_out.ret == 0) {
+			cpumask_t cpu;
+			active_cpu = new_cpu;
+			MCDRV_DBG(mcd, "CoreSwap ok %d -> %d\n",
+				  raw_smp_processor_id(), active_cpu);
+			cpumask_clear(&cpu);
+			cpumask_set_cpu(active_cpu, &cpu);
+#ifdef MC_FASTCALL_WORKER_THREAD
+			set_cpus_allowed(fastcall_thread, cpu);
+#endif
+		} else {
+			MCDRV_DBG(mcd, "CoreSwap failed %d -> %d\n",
+				  raw_smp_processor_id(),
+				  fc_generic->as_in.param[0]);
+		}
+	}
+#endif
+#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
+	mc_pm_clock_disable();
+#endif
+}
+
+int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info)
+{
+	int ret = 0;
+	union mc_fc_info fc_info;
+
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+
+	memset(&fc_info, 0, sizeof(fc_info));
+	fc_info.as_in.cmd = MC_FC_INFO;
+	fc_info.as_in.ext_info_id = ext_info_id;
+
+	MCDRV_DBG(mcd, "<- cmd=0x%08x, ext_info_id=0x%08x",
+		  fc_info.as_in.cmd, fc_info.as_in.ext_info_id);
+
+	mc_fastcall(&(fc_info.as_generic));
+
+	MCDRV_DBG(mcd,
+		  "-> r=0x%08x ret=0x%08x state=0x%08x "
+		  "ext_info=0x%08x",
+		  fc_info.as_out.resp,
+		  fc_info.as_out.ret,
+		  fc_info.as_out.state,
+		  fc_info.as_out.ext_info);
+
+	ret = convert_fc_ret(fc_info.as_out.ret);
+
+	*state  = fc_info.as_out.state;
+	*ext_info = fc_info.as_out.ext_info;
+
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+	return ret;
+}
+
+#ifdef TBASE_CORE_SWITCHER
+int mc_switch_core(uint32_t core_num)
+{
+	int32_t ret = 0;
+	union mc_fc_swich_core fc_switch_core;
+
+	if (!cpu_online(core_num))
+		return 1;
+
+	MCDRV_DBG_VERBOSE(mcd, "enter\n");
+
+	memset(&fc_switch_core, 0, sizeof(fc_switch_core));
+	fc_switch_core.as_in.cmd = MC_FC_SWITCH_CORE;
+
+	if (core_num < COUNT_OF_CPUS)
+		fc_switch_core.as_in.core_id = core_num;
+	else
+		fc_switch_core.as_in.core_id = 0;
+
+	MCDRV_DBG(
+			mcd, "<- cmd=0x%08x, core_id=0x%08x, "
+			"core_num=0x%08x, active_cpu=0x%08x\n",
+			fc_switch_core.as_in.cmd,
+			fc_switch_core.as_in.core_id,
+			core_num, active_cpu);
+	mc_fastcall(&(fc_switch_core.as_generic));
+
+	ret = convert_fc_ret(fc_switch_core.as_out.ret);
+
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
+
+	return ret;
+}
+
+void mc_cpu_offfline(int cpu)
+{
+	if (active_cpu == cpu) {
+		int i;
+		/* Choose the first online CPU and switch! */
+		for_each_online_cpu(i) {
+			if (i == cpu) {
+				MCDRV_DBG(mcd, "Skipping CPU %d\n", cpu);
+				continue;
+			}
+			MCDRV_DBG(mcd, "CPU %d is dying, switching to %d\n",
+				  cpu, i);
+			mc_switch_core(i);
+			break;
+		}
+	} else {
+		MCDRV_DBG(mcd, "not active CPU, no action taken\n");
+	}
+}
+
+static int mobicore_cpu_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		dev_info(mcd, "Cpu %u is going to die\n", cpu);
+		mc_cpu_offfline(cpu);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		dev_info(mcd, "Cpu %u is dead\n", cpu);
+		break;
+	}
+	return NOTIFY_OK;
+}
+#endif
+
+/* Yield to MobiCore */
+int mc_yield(void)
+{
+	int ret = 0;
+	union fc_generic yield;
+
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+	memset(&yield, 0, sizeof(yield));
+	yield.as_in.cmd = MC_SMC_N_YIELD;
+	mc_fastcall(&yield);
+	ret = convert_fc_ret(yield.as_out.ret);
+
+	return ret;
+}
+
+/* call common notify */
+int mc_nsiq(void)
+{
+	int ret = 0;
+	union fc_generic nsiq;
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+	memset(&nsiq, 0, sizeof(nsiq));
+	nsiq.as_in.cmd = MC_SMC_N_SIQ;
+	mc_fastcall(&nsiq);
+	ret = convert_fc_ret(nsiq.as_out.ret);
+	return ret;
+}
+
+/* Call common notify directly via SMC, bypassing the fastcall worker */
+int _nsiq(void)
+{
+	int ret = 0;
+	union fc_generic nsiq;
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+	memset(&nsiq, 0, sizeof(nsiq));
+	nsiq.as_in.cmd = MC_SMC_N_SIQ;
+	_smc(&nsiq);
+	ret = convert_fc_ret(nsiq.as_out.ret);
+	return ret;
+}
+
+/* Call the INIT fastcall to set up MobiCore */
+int mc_init(phys_addr_t base, uint32_t nq_length,
+	uint32_t mcp_offset, uint32_t mcp_length)
+{
+	int ret = 0;
+	union mc_fc_init fc_init;
+	uint64_t base_addr = (uint64_t)base;
+	uint32_t base_high = (uint32_t)(base_addr >> 32);
+
+	MCDRV_DBG_VERBOSE(mcd, "enter");
+
+	memset(&fc_init, 0, sizeof(fc_init));
+
+	fc_init.as_in.cmd = MC_FC_INIT;
+	/* base address of mci buffer 4KB aligned */
+	fc_init.as_in.base = (uint32_t)base_addr;
+	/* notification buffer start/length [16:16] [start, length] */
+	fc_init.as_in.nq_info = ((base_high & 0xFFFF) << 16) |
+				(nq_length & 0xFFFF);
+	/* mcp buffer start/length [16:16] [start, length] */
+	fc_init.as_in.mcp_info = (mcp_offset << 16) | (mcp_length & 0xFFFF);
+
+	/*
+	 * Set the KMOD notification queue to the start of the MCI;
+	 * the MCI info was already set up in mmap().
+	 */
+	MCDRV_DBG(mcd,
+		  "cmd=0x%08x, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x",
+		  fc_init.as_in.cmd, fc_init.as_in.base, fc_init.as_in.nq_info,
+		  fc_init.as_in.mcp_info);
+	mc_fastcall(&fc_init.as_generic);
+	MCDRV_DBG(mcd, "out cmd=0x%08x, ret=0x%08x", fc_init.as_out.resp,
+		  fc_init.as_out.ret);
+
+	ret = convert_fc_ret(fc_init.as_out.ret);
+
+	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X", ret, ret);
+
+	return ret;
+}
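+
+/*
+ * Packing example (illustrative, hypothetical values): for a 36-bit MCI base
+ * of 0x123456000 with nq_length = 0x100, mcp_offset = 0x200 and
+ * mcp_length = 0x80, mc_init() above sends
+ *	base     = 0x23456000			(low 32 bits of the base)
+ *	nq_info  = (0x0001 << 16) | 0x0100	= 0x00010100
+ *	mcp_info = (0x0200 << 16) | 0x0080	= 0x02000080
+ */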
+
+/* Return MobiCore driver version */
+uint32_t mc_get_version(void)
+{
+	MCDRV_DBG(mcd, "MobiCore driver version is %i.%i",
+		  MCDRVMODULEAPI_VERSION_MAJOR,
+		  MCDRVMODULEAPI_VERSION_MINOR);
+
+	return MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
+					MCDRVMODULEAPI_VERSION_MINOR);
+}
diff --git a/drivers/gud/mobicore_driver/ops.h b/drivers/gud/MobiCoreDriver/ops.h
similarity index 78%
rename from drivers/gud/mobicore_driver/ops.h
rename to drivers/gud/MobiCoreDriver/ops.h
index 910c1f4..f04eb3e 100644
--- a/drivers/gud/mobicore_driver/ops.h
+++ b/drivers/gud/MobiCoreDriver/ops.h
@@ -21,10 +21,13 @@
 uint32_t mc_get_version(void);
 
 int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info);
-int mc_init(uint32_t base, uint32_t  nq_offset, uint32_t  nq_length,
-	    uint32_t mcp_offset, uint32_t  mcp_length);
+int mc_init(phys_addr_t base, uint32_t  nq_length, uint32_t mcp_offset,
+		uint32_t  mcp_length);
+#ifdef TBASE_CORE_SWITCHER
+int mc_switch_core(uint32_t core_num);
+#endif
 
-void mc_fastcall(void *data);
+bool mc_fastcall(void *data);
 
 int mc_fastcall_init(struct mc_context *context);
 void mc_fastcall_destroy(void);
diff --git a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h b/drivers/gud/MobiCoreDriver/platforms/MSM8960_SURF_STD/platform.h
similarity index 85%
rename from drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
rename to drivers/gud/MobiCoreDriver/platforms/MSM8960_SURF_STD/platform.h
index 9d128ae..72ea3ed 100644
--- a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
+++ b/drivers/gud/MobiCoreDriver/platforms/MSM8960_SURF_STD/platform.h
@@ -36,19 +36,12 @@
 }
 
 /* Enable mobicore mem traces */
-/* #define MC_MEM_TRACES */
+#define MC_MEM_TRACES
 
 /* Enable the use of vm_unamp instead of the deprecated do_munmap
  * and other 3.7 features
  */
-#ifndef CONFIG_ARCH_MSM8960
 #define MC_VM_UNMAP
-#endif
-
-#if defined (CONFIG_ARCH_MSM8974) || defined (CONFIG_ARCH_MSM8226)
-/* Perform clock enable/disable */
-#define MC_CRYPTO_CLOCK_MANAGEMENT
-#endif
 
 /* Enable Power Management for Crypto Engine */
 #define MC_CRYPTO_CLOCK_MANAGEMENT
diff --git a/drivers/gud/mobicore_driver/pm.c b/drivers/gud/MobiCoreDriver/pm.c
similarity index 66%
rename from drivers/gud/mobicore_driver/pm.c
rename to drivers/gud/MobiCoreDriver/pm.c
index 55a1ef7..40365ef 100644
--- a/drivers/gud/mobicore_driver/pm.c
+++ b/drivers/gud/MobiCoreDriver/pm.c
@@ -46,7 +46,7 @@
 	if (!ctx->mcp)
 		return false;
 
-	if (!ctx->mcp->flags.sleep_mode.ReadyToSleep & READY_TO_SLEEP)
+	if (!(ctx->mcp->flags.sleep_mode.ready_to_sleep & READY_TO_SLEEP))
 		return false;
 
 	return true;
@@ -57,7 +57,7 @@
 	if (!ctx->mcp)
 		return;
 
-	ctx->mcp->flags.sleep_mode.SleepReq = REQ_TO_SLEEP;
+	ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
 	_nsiq();
 }
 DECLARE_WORK(suspend_work, mc_suspend_handler);
@@ -66,9 +66,9 @@
 {
 	MCDRV_DBG(mcd, "MobiCore IDLE=%d!", flags->schedule);
 	MCDRV_DBG(mcd,
-		  "MobiCore Request Sleep=%d!", flags->sleep_mode.SleepReq);
+		  "MobiCore Request Sleep=%d!", flags->sleep_mode.sleep_req);
 	MCDRV_DBG(mcd,
-		  "MobiCore Sleep Ready=%d!", flags->sleep_mode.ReadyToSleep);
+		  "MobiCore Sleep Ready=%d!", flags->sleep_mode.ready_to_sleep);
 }
 
 static int mc_suspend_notifier(struct notifier_block *nb,
@@ -96,12 +96,12 @@
 		 */
 		dump_sleep_params(&mcp->flags);
 		if (!sleep_ready()) {
-			ctx->mcp->flags.sleep_mode.SleepReq = REQ_TO_SLEEP;
+			ctx->mcp->flags.sleep_mode.sleep_req = REQ_TO_SLEEP;
 			schedule_work_on(0, &suspend_work);
 			flush_work(&suspend_work);
 			if (!sleep_ready()) {
 				dump_sleep_params(&mcp->flags);
-				ctx->mcp->flags.sleep_mode.SleepReq = 0;
+				ctx->mcp->flags.sleep_mode.sleep_req = 0;
 				MCDRV_DBG_ERROR(mcd, "MobiCore can't SLEEP!");
 				return NOTIFY_BAD;
 			}
@@ -109,7 +109,7 @@
 		break;
 	case PM_POST_SUSPEND:
 		MCDRV_DBG(mcd, "Resume MobiCore system!");
-		ctx->mcp->flags.sleep_mode.SleepReq = 0;
+		ctx->mcp->flags.sleep_mode.sleep_req = 0;
 		break;
 	default:
 		break;
@@ -121,57 +121,6 @@
 	.notifier_call = mc_suspend_notifier,
 };
 
-#ifdef MC_BL_NOTIFIER
-
-static int bL_switcher_notifier_handler(struct notifier_block *this,
-			unsigned long event, void *ptr)
-{
-	unsigned int mpidr, cpu, cluster;
-	struct mc_mcp_buffer *mcp = ctx->mcp;
-
-	if (!mcp)
-		return 0;
-
-	asm volatile ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
-	cpu = mpidr & 0x3;
-	cluster = (mpidr >> 8) & 0xf;
-	MCDRV_DBG(mcd, "%s switching!!, cpu: %u, Out=%u\n",
-		  (event == SWITCH_ENTER ? "Before" : "After"), cpu, cluster);
-
-	if (cpu != 0)
-		return 0;
-
-	switch (event) {
-	case SWITCH_ENTER:
-		if (!sleep_ready()) {
-			ctx->mcp->flags.sleep_mode.SleepReq = REQ_TO_SLEEP;
-			_nsiq();
-			/* By this time we should be ready for sleep or we are
-			 * in the middle of something important */
-			if (!sleep_ready()) {
-				dump_sleep_params(&mcp->flags);
-				MCDRV_DBG(mcd,
-					  "MobiCore: Don't allow switch!\n");
-				ctx->mcp->flags.sleep_mode.SleepReq = 0;
-				return -EPERM;
-			}
-		}
-		break;
-	case SWITCH_EXIT:
-			ctx->mcp->flags.sleep_mode.SleepReq = 0;
-			break;
-	default:
-		MCDRV_DBG(mcd, "MobiCore: Unknown switch event!\n");
-	}
-
-	return 0;
-}
-
-static struct notifier_block switcher_nb = {
-	.notifier_call = bL_switcher_notifier_handler,
-};
-#endif
-
 int mc_pm_initialize(struct mc_context *context)
 {
 	int ret = 0;
@@ -180,12 +129,7 @@
 
 	ret = register_pm_notifier(&mc_notif_block);
 	if (ret)
-		MCDRV_DBG_ERROR(mcd, "device pm register failed\n");
-#ifdef MC_BL_NOTIFIER
-	if (register_bL_swicher_notifier(&switcher_nb))
-		MCDRV_DBG_ERROR(mcd,
-				"Failed to register to bL_switcher_notifier\n");
-#endif
+		MCDRV_DBG_ERROR(mcd, "device pm register failed");
 
 	return ret;
 }
@@ -194,15 +138,16 @@
 {
 	int ret = unregister_pm_notifier(&mc_notif_block);
 	if (ret)
-		MCDRV_DBG_ERROR(mcd, "device pm unregister failed\n");
-#ifdef MC_BL_NOTIFIER
-	ret = unregister_bL_swicher_notifier(&switcher_nb);
-	if (ret)
-		MCDRV_DBG_ERROR(mcd, "device bl unregister failed\n");
-#endif
+		MCDRV_DBG_ERROR(mcd, "device pm unregister failed");
 	return ret;
 }
 
+bool mc_pm_sleep_ready(void)
+{
+	if (!ctx)
+		return true;
+	return sleep_ready();
+}
 #endif /* MC_PM_RUNTIME */
 
 #ifdef MC_CRYPTO_CLOCK_MANAGEMENT
@@ -215,7 +160,7 @@
 	mc_ce_core_clk = clk_get(mcd, "core_clk");
 	if (IS_ERR(mc_ce_core_clk)) {
 		ret = PTR_ERR(mc_ce_core_clk);
-		MCDRV_DBG_ERROR(mcd, "cannot get core clock\n");
+		MCDRV_DBG_ERROR(mcd, "cannot get core clock");
 		goto error;
 	}
 	/* Get Interface clk */
@@ -223,7 +168,7 @@
 	if (IS_ERR(mc_ce_iface_clk)) {
 		clk_put(mc_ce_core_clk);
 		ret = PTR_ERR(mc_ce_iface_clk);
-		MCDRV_DBG_ERROR(mcd, "cannot get iface clock\n");
+		MCDRV_DBG_ERROR(mcd, "cannot get iface clock");
 		goto error;
 	}
 	/* Get AXI clk */
@@ -232,7 +177,7 @@
 		clk_put(mc_ce_iface_clk);
 		clk_put(mc_ce_core_clk);
 		ret = PTR_ERR(mc_ce_bus_clk);
-		MCDRV_DBG_ERROR(mcd, "cannot get AXI bus clock\n");
+		MCDRV_DBG_ERROR(mcd, "cannot get AXI bus clock");
 		goto error;
 	}
 	return ret;
@@ -263,17 +208,17 @@
 
 	rc = clk_prepare_enable(mc_ce_core_clk);
 	if (rc) {
-		MCDRV_DBG_ERROR(mcd, "cannot enable clock\n");
+		MCDRV_DBG_ERROR(mcd, "cannot enable clock");
 	} else {
 		rc = clk_prepare_enable(mc_ce_iface_clk);
 		if (rc) {
 			clk_disable_unprepare(mc_ce_core_clk);
-			MCDRV_DBG_ERROR(mcd, "cannot enable clock\n");
+			MCDRV_DBG_ERROR(mcd, "cannot enable clock");
 		} else {
 			rc = clk_prepare_enable(mc_ce_bus_clk);
 			if (rc) {
 				clk_disable_unprepare(mc_ce_iface_clk);
-				MCDRV_DBG_ERROR(mcd, "cannot enable clock\n");
+				MCDRV_DBG_ERROR(mcd, "cannot enable clock");
 			}
 		}
 	}
diff --git a/drivers/gud/mobicore_driver/pm.h b/drivers/gud/MobiCoreDriver/pm.h
similarity index 94%
rename from drivers/gud/mobicore_driver/pm.h
rename to drivers/gud/MobiCoreDriver/pm.h
index 332da34..b71c603 100644
--- a/drivers/gud/mobicore_driver/pm.h
+++ b/drivers/gud/MobiCoreDriver/pm.h
@@ -13,10 +13,6 @@
 #define _MC_PM_H_
 
 #include "main.h"
-#ifdef MC_BL_NOTIFIER
-#include <asm/bL_switcher.h>
-#endif
-
 
 #define NO_SLEEP_REQ	0
 #define REQ_TO_SLEEP	1
@@ -39,5 +35,7 @@
 int mc_pm_clock_enable(void);
 /* Disable secure crypto clocks */
 void mc_pm_clock_disable(void);
+/* Test if sleep is possible */
+bool mc_pm_sleep_ready(void);
 
 #endif /* _MC_PM_H_ */
diff --git a/drivers/gud/mobicore_driver/public/mc_kernel_api.h b/drivers/gud/MobiCoreDriver/public/mc_kernel_api.h
similarity index 89%
rename from drivers/gud/mobicore_driver/public/mc_kernel_api.h
rename to drivers/gud/MobiCoreDriver/public/mc_kernel_api.h
index cca0636..15fd4a2 100644
--- a/drivers/gud/mobicore_driver/public/mc_kernel_api.h
+++ b/drivers/gud/MobiCoreDriver/public/mc_kernel_api.h
@@ -35,13 +35,12 @@
  * @requested_size:	memory size requested in bytes
  * @handle:		pointer to handle
  * @kernel_virt_addr:	virtual user start address
- * @phys_addr:		physical start address
  *
  * Returns 0 if OK
  */
 int mobicore_allocate_wsm(struct mc_instance *instance,
 			  unsigned long requested_size, uint32_t *handle,
-			  void **virt_kernel_addr, void **phys_addr);
+			  void **virt_kernel_addr);
 
 /*
  * mobicore_free() - Free a WSM buffer allocated with mobicore_allocate_wsm
@@ -58,12 +57,11 @@
  * @addr:		address of the buffer (NB it must be kernel virtual!)
  * @len:		buffer length (in bytes)
  * @handle:		unique handle
- * @phys:		pointer for physical address of L2 table
  *
  * Returns 0 if no error
  */
 int mobicore_map_vmem(struct mc_instance *instance, void *addr,
-		      uint32_t len, uint32_t *handle, uint32_t *phys);
+		      uint32_t len, uint32_t *handle);
 
 /*
  * mobicore_unmap_vmem() - Unmap a virtual memory buffer from MobiCore
@@ -74,4 +72,12 @@
  */
 int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle);
 
+/*
+ * mobicore_sleep_ready() - Test if mobicore can sleep
+ *
+ * Returns true if mobicore can sleep, false if it can't sleep
+ */
+bool mobicore_sleep_ready(void);
+
 #endif /* _MC_KERNEL_API_H_ */
diff --git a/drivers/gud/mobicore_driver/public/mc_linux.h b/drivers/gud/MobiCoreDriver/public/mc_linux.h
similarity index 81%
rename from drivers/gud/mobicore_driver/public/mc_linux.h
rename to drivers/gud/MobiCoreDriver/public/mc_linux.h
index af027dc..98e7af1 100644
--- a/drivers/gud/mobicore_driver/public/mc_linux.h
+++ b/drivers/gud/MobiCoreDriver/public/mc_linux.h
@@ -1,7 +1,7 @@
 /*
  * The MobiCore Driver Kernel Module is a Linux device driver, which represents
  * the command proxy on the lowest layer to the secure world (Swd). Additional
- * services like memory allocation via mmap and generation of a L2 tables for
+ * services like memory allocation via mmap and generation of a MMU tables for
  * given virtual memory are also supported. IRQ functionality receives
  * information from the SWd in the non secure world (NWd).
  * As customary the driver is handled as linux device driver with "open",
@@ -55,8 +55,6 @@
  * INIT request data to SWD
  */
 struct mc_ioctl_init {
-	/* notification buffer start/length [16:16] [start, length] */
-	uint32_t  nq_offset;
 	/* length of notification queue */
 	uint32_t  nq_length;
 	/* mcp buffer start/length [16:16] [start, length] */
@@ -76,8 +74,7 @@
 };
 
 /*
- * Data exchange structure of the MC_IO_MAP_WSM, MC_IO_MAP_MCI, and
- *				  MC_IO_MAP_PWSM commands.
+ * Data exchange structure of the MC_IO_MAP_WSM and MC_IO_MAP_MCI commands.
  *
  * Allocate a contiguous memory buffer for a process.
  * The physical address can be used as for later calls to mmap.
@@ -86,19 +83,19 @@
  * already. I.e. Daemon was restarted.
  */
 struct mc_ioctl_map {
-	size_t	      len;	/* Buffer length */
-	uint32_t      handle;	/* WSM handle */
-	unsigned long addr;	/* Virtual address */
-	unsigned long phys_addr;/* physical address of WSM (or NULL) */
-	bool	      reused;	/* if WSM memory was reused, or new allocated */
+	size_t		len;	/* Buffer length */
+	uint32_t	handle;	/* WSM handle */
+	uint64_t	phys_addr; /* physical address of WSM (or 0) */
+	unsigned long	addr;	/* Virtual address */
+	bool		reused;	/* if WSM memory was reused or newly allocated */
 };
 
 /*
  * Data exchange structure of the MC_IO_REG_WSM command.
  *
- * Allocates a physical L2 table and maps the buffer into this page.
- * Returns the physical address of the L2 table.
- * The page alignment will be created and the appropriated pSize and pOffsetL2
+ * Allocates a physical MMU table and maps the buffer into this page.
+ * Returns the physical address of the MMU table.
+ * The page alignment will be created and the appropriated pSize and pOffsetMMU
  * will be modified to the used values.
  */
 struct mc_ioctl_reg_wsm {
@@ -106,19 +103,7 @@
 	uint32_t len;		/* size of the virtual address space */
 	uint32_t pid;		/* process id */
 	uint32_t handle;	/* driver handle for locked memory */
-	uint32_t table_phys;	/* physical address of the L2 table */
-};
-
-
-/*
- * Data exchange structure of the MC_DRV_MODULE_FC_EXECUTE ioctl command.
- * internal, unsupported
- */
-struct mc_ioctl_execute {
-	/* base address of mobicore binary */
-	uint32_t phys_start_addr;
-	/* length of DDR area */
-	uint32_t length;
+	uint64_t table_phys;	/* physical address of the MMU table */
 };
 
 /*
@@ -127,10 +112,10 @@
 struct mc_ioctl_resolv_cont_wsm {
 	/* driver handle for buffer */
 	uint32_t handle;
-	/* base address of memory */
-	uint32_t phys;
 	/* length memory */
 	uint32_t length;
+	/* base address of memory */
+	uint64_t phys;
 	/* fd to owner of the buffer */
 	int32_t fd;
 };
@@ -144,7 +129,7 @@
 	/* fd to owner of the buffer */
 	int32_t fd;
 	/* base address of memory */
-	uint32_t phys;
+	uint64_t phys;
 };
 
 
@@ -180,28 +165,24 @@
  */
 #define MC_IO_FREE		_IO(MC_IOC_MAGIC, 5)
 /*
- * Creates a L2 Table of the given base address and the size of the
+ * Creates an MMU table for the given base address and the size of the
  * data.
- * Parameter: mc_ioctl_app_reg_wsm_l2_params
+ * Parameter: mc_ioctl_reg_wsm
  */
 #define MC_IO_REG_WSM		_IOWR(MC_IOC_MAGIC, 6, struct mc_ioctl_reg_wsm)
 #define MC_IO_UNREG_WSM		_IO(MC_IOC_MAGIC, 7)
 #define MC_IO_LOCK_WSM		_IO(MC_IOC_MAGIC, 8)
 #define MC_IO_UNLOCK_WSM	_IO(MC_IOC_MAGIC, 9)
-#define MC_IO_EXECUTE		_IOWR(MC_IOC_MAGIC, 10, struct mc_ioctl_execute)
 
 /*
  * Allocate contiguous memory for a process for later mapping with mmap.
- * MC_DRV_KMOD_MMAP_WSM	usual operation, pages are registered in
+ * MC_IO_MAP_WSM	usual operation, pages are registered in
  *					device structure and freed later.
- * MC_DRV_KMOD_MMAP_MCI	get Instance of MCI, allocates or mmaps
+ * MC_IO_MAP_MCI	get Instance of MCI, allocates or mmaps
  *					the MCI to daemon
- * MC_DRV_KMOD_MMAP_PERSISTENTWSM	special operation, without
- *						registration of pages
  */
 #define MC_IO_MAP_WSM		_IOWR(MC_IOC_MAGIC, 11, struct mc_ioctl_map)
 #define MC_IO_MAP_MCI		_IOWR(MC_IOC_MAGIC, 12, struct mc_ioctl_map)
-#define MC_IO_MAP_PWSM		_IOWR(MC_IOC_MAGIC, 13, struct mc_ioctl_map)
 
 /*
  * Clean orphaned WSM buffers. Only available to the daemon and should
@@ -215,7 +196,7 @@
 #define MC_IO_CLEAN_WSM		_IO(MC_IOC_MAGIC, 14)
 
 /*
- * Get L2 phys address of a buffer handle allocated to the user.
+ * Get MMU phys address of a buffer handle allocated to the user.
  * Only available to the daemon.
  */
 #define MC_IO_RESOLVE_WSM	_IOWR(MC_IOC_MAGIC, 15, \
diff --git a/drivers/gud/mobicore_driver/public/version.h b/drivers/gud/MobiCoreDriver/public/version.h
similarity index 100%
rename from drivers/gud/mobicore_driver/public/version.h
rename to drivers/gud/MobiCoreDriver/public/version.h
diff --git a/drivers/gud/MobiCoreKernelApi/Makefile b/drivers/gud/MobiCoreKernelApi/Makefile
new file mode 100644
index 0000000..9b37eea
--- /dev/null
+++ b/drivers/gud/MobiCoreKernelApi/Makefile
@@ -0,0 +1,52 @@
+#
+# This Makefile is called from the kernel make system.
+ifeq ($(MODE),release)
+    ccflags-y += -O2 -DNDEBUG
+else  # DEBUG
+    # "-O" is needed to expand inlines
+    ccflags-y += -O -g3 -DDEBUG
+endif # DEBUG/RELEASE
+
+ifdef MOBICORE_CFLAGS
+    ccflags-y +=$(MOBICORE_CFLAGS)
+endif
+
+#Set the extra symbols
+ifdef MCDRV_SYMBOLS_FILE
+    KBUILD_EXTRA_SYMBOLS=$(MCDRV_SYMBOLS_FILE)
+endif
+
+ifeq ($(PLATFORM), ARM_VE_A9X4_QEMU)
+	ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+ifeq ($(PLATFORM), MSM8974_SURF_STD)
+	ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+ifeq ($(PLATFORM), EXYNOS_5422_STD)
+	ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+ifeq ($(PLATFORM), EXYNOS_5430_STD)
+	ccflags-y += -DMC_NETLINK_COMPAT_V37
+endif
+
+#EXTRA_CFLAGS += -DDEBUG -DDEBUG_VERBOSE
+#EXTRA_CFLAGS += -Wno-declaration-after-statement
+ccflags-y += -Wno-declaration-after-statement
+# add our module to kernel.
+obj-m += mcKernelApi.o
+
+mcKernelApi-objs := main.o clientlib.o device.o session.o connection.o
+
+clean:
+	rm -rf *.o *~ core .depend .*.cmd *.ko *.mod.c .tmp_versions \
+		Module.markers Module.symvers modules.order
+
+depend .depend dep:
+	$(CC) $(CFLAGS) -M *.c > .depend
+
+ifeq (.depend,$(wildcard .depend))
+    include .depend
+endif
diff --git a/drivers/gud/MobiCoreKernelApi/build.sh b/drivers/gud/MobiCoreKernelApi/build.sh
new file mode 100644
index 0000000..86fe1b8
--- /dev/null
+++ b/drivers/gud/MobiCoreKernelApi/build.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+if [ -z "$COMP_PATH_ROOT" ]; then
+	echo "The build environment is not set!"
+	echo "Trying to source setupDrivers.sh automatically!"
+	source ../setupDrivers.sh || exit 1
+fi
+
+ROOT_PATH=$(dirname $(readlink -f $BASH_SOURCE))
+# These folders need to be relative to the kernel dir or absolute!
+PLATFORM=EXYNOS_4X12_STD
+CODE_INCLUDE=$(readlink -f $ROOT_PATH/Locals/Code)
+
+MOBICORE_DRIVER=$COMP_PATH_MobiCoreDriverMod
+MOBICORE_DAEMON=$COMP_PATH_MobiCoreDriverLib/Public
+MOBICORE_CFLAGS="-I$MOBICORE_DRIVER/Public -I$MOBICORE_DAEMON -I$COMP_PATH_MobiCore/inc/Mci -I$COMP_PATH_MobiCore/inc -I$CODE_INCLUDE/include -I$CODE_INCLUDE/public"
+MCDRV_SYMBOLS_FILE="$COMP_PATH_ROOT/MobiCoreDriverMod/Locals/Code/Module.symvers"
+
+if [ ! -f "$MCDRV_SYMBOLS_FILE" ]; then
+	echo "Please build the Mobicore Driver Module first!"
+	echo "Otherwise you will see warnings of missing symbols"
+fi
+
+# Clean first
+make -C $CODE_INCLUDE clean
+
+make -C $LINUX_PATH \
+	MODE=$MODE \
+	ARCH=arm \
+	CROSS_COMPILE=$CROSS_COMPILE \
+	M=$CODE_INCLUDE \
+	"MOBICORE_CFLAGS=$MOBICORE_CFLAGS" \
+	MCDRV_SYMBOLS_FILE=$MCDRV_SYMBOLS_FILE \
+	modules
diff --git a/drivers/gud/mobicore_kernelapi/clientlib.c b/drivers/gud/MobiCoreKernelApi/clientlib.c
similarity index 95%
rename from drivers/gud/mobicore_kernelapi/clientlib.c
rename to drivers/gud/MobiCoreKernelApi/clientlib.c
index 16b52e5..65b4a1c 100644
--- a/drivers/gud/mobicore_kernelapi/clientlib.c
+++ b/drivers/gud/MobiCoreKernelApi/clientlib.c
@@ -25,6 +25,7 @@
 
 /* device list */
 LIST_HEAD(devices);
+atomic_t device_usage = ATOMIC_INIT(0);
 
 static struct mcore_device_t *resolve_device_id(uint32_t device_id)
 {
@@ -71,14 +72,20 @@
 	do {
 		struct mcore_device_t *device = resolve_device_id(device_id);
 		if (device != NULL) {
-			MCDRV_DBG_ERROR(mc_kapi,
-					"Device %d already opened", device_id);
-			mc_result = MC_DRV_ERR_INVALID_OPERATION;
+			MCDRV_DBG(mc_kapi,
+				  "Device %d already opened\n", device_id);
+			atomic_inc(&device_usage);
+			mc_result = MC_DRV_OK;
 			break;
 		}
 
 		/* Open new connection to device */
 		dev_con = connection_new();
+		if (dev_con == NULL) {
+			mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+			break;
+		}
+
 		if (!connection_connect(dev_con, MC_DAEMON_PID)) {
 			MCDRV_DBG_ERROR(
 				mc_kapi,
@@ -144,6 +151,10 @@
 		/* there is no payload to read */
 
 		device = mcore_device_create(device_id, dev_con);
+		if (device == NULL) {
+			mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+			break;
+		}
 		if (!mcore_device_open(device, MC_DRV_MOD_DEVNODE_FULLPATH)) {
 			mcore_device_cleanup(device);
 			MCDRV_DBG_ERROR(mc_kapi,
@@ -154,6 +165,7 @@
 		}
 
 		add_device(device);
+		atomic_inc(&device_usage);
 
 	} while (false);
 
@@ -177,6 +189,12 @@
 			mc_result = MC_DRV_ERR_UNKNOWN_DEVICE;
 			break;
 		}
+		/* Check if it's not used by other modules */
+		if (!atomic_dec_and_test(&device_usage)) {
+			mc_result = MC_DRV_OK;
+			break;
+		}
+
 		struct connection *dev_con = device->connection;
 
 		/* Return if not all sessions have been closed */
@@ -274,12 +292,12 @@
 		}
 		struct connection *dev_con = device->connection;
 
-		/* Get the physical address of the given TCI */
+		/* Get the wsm of the given TCI */
 		struct wsm *wsm =
 			mcore_device_find_contiguous_wsm(device, tci);
 		if (wsm == NULL) {
 			MCDRV_DBG_ERROR(mc_kapi,
-					"Could not resolve TCI phy address ");
+					"Could not resolve TCI address ");
 			mc_result = MC_DRV_ERR_INVALID_PARAMETER;
 			break;
 		}
@@ -292,14 +310,14 @@
 		}
 
 		/* Prepare open session command */
-		struct mc_drv_cmd_open_session_t cmdOpenSession = {
+		struct mc_drv_cmd_open_session_t cmd_open_session = {
 			{
 				MC_DRV_CMD_OPEN_SESSION
 			},
 			{
 				session->device_id,
 				*uuid,
-				(uint32_t)(wsm->phys_addr) & 0xFFF,
+				(uint32_t)(wsm->virt_addr) & 0xFFF,
 				wsm->handle,
 				len
 			}
@@ -307,9 +325,9 @@
 
 		/* Transmit command data */
 		int len = connection_write_data(dev_con,
-						&cmdOpenSession,
-						sizeof(cmdOpenSession));
-		if (len != sizeof(cmdOpenSession)) {
+						&cmd_open_session,
+						sizeof(cmd_open_session));
+		if (len != sizeof(cmd_open_session)) {
 			MCDRV_DBG_ERROR(mc_kapi,
 					"CMD_OPEN_SESSION writeData failed %d",
 					len);
@@ -370,6 +388,10 @@
 
 		/* Set up second channel for notifications */
 		struct connection *session_connection = connection_new();
+		if (session_connection == NULL) {
+			mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+			break;
+		}
 
 		if (!connection_connect(session_connection, MC_DAEMON_PID)) {
 			MCDRV_DBG_ERROR(
@@ -422,9 +444,13 @@
 		/* there is no payload. */
 
 		/* Session established, new session object must be created */
-		mcore_device_create_new_session(device,
-						session->session_id,
-						session_connection);
+		if (!mcore_device_create_new_session(device,
+						     session->session_id,
+						     session_connection)) {
+			connection_cleanup(session_connection);
+			mc_result = MC_DRV_ERR_NO_FREE_MEMORY;
+			break;
+		}
 
 	} while (false);
 
@@ -706,7 +732,6 @@
 	MCDRV_DBG_VERBOSE(mc_kapi, "===%s()===", __func__);
 
 	do {
-
 		/* Get the device associated wit the given session */
 		device = resolve_device_id(device_id);
 		if (device == NULL) {
@@ -805,7 +830,7 @@
 			{
 				session->session_id,
 				bulk_buf->handle,
-				(uint32_t)bulk_buf->phys_addr_wsm_l2,
+				0,
 				(uint32_t)(bulk_buf->virt_addr) & 0xFFF,
 				bulk_buf->len
 			}
@@ -819,8 +844,8 @@
 		/* Read command response */
 		struct mc_drv_response_header_t rsp_header;
 		int len = connection_read_datablock(dev_con,
-						    &rsp_header,
-						    sizeof(rsp_header));
+							&rsp_header,
+							sizeof(rsp_header));
 		if (len != sizeof(rsp_header)) {
 			MCDRV_DBG_ERROR(mc_kapi,
 					"CMD_MAP_BULK_BUF readRsp failed %d",
diff --git a/drivers/gud/mobicore_kernelapi/common.h b/drivers/gud/MobiCoreKernelApi/common.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/common.h
rename to drivers/gud/MobiCoreKernelApi/common.h
diff --git a/drivers/gud/mobicore_kernelapi/connection.c b/drivers/gud/MobiCoreKernelApi/connection.c
similarity index 96%
rename from drivers/gud/mobicore_kernelapi/connection.c
rename to drivers/gud/MobiCoreKernelApi/connection.c
index 03288a0..0372b82 100644
--- a/drivers/gud/mobicore_kernelapi/connection.c
+++ b/drivers/gud/MobiCoreKernelApi/connection.c
@@ -28,6 +28,10 @@
 	struct connection *conn;
 
 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+	if (conn == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	conn->sequence_magic = mcapi_unique_id();
 	mutex_init(&conn->data_lock);
 	sema_init(&conn->data_available_sem, SEM_NO_DATA_AVAILABLE);
@@ -36,14 +40,6 @@
 	return conn;
 }
 
-struct connection *connection_create(int socket_descriptor, pid_t dest)
-{
-	struct connection *conn = connection_new();
-
-	conn->peer_pid = dest;
-	return conn;
-}
-
 void connection_cleanup(struct connection *conn)
 {
 	if (!conn)
diff --git a/drivers/gud/mobicore_kernelapi/connection.h b/drivers/gud/MobiCoreKernelApi/connection.h
similarity index 95%
rename from drivers/gud/mobicore_kernelapi/connection.h
rename to drivers/gud/MobiCoreKernelApi/connection.h
index 6c3ff00..57e783b 100644
--- a/drivers/gud/mobicore_kernelapi/connection.h
+++ b/drivers/gud/MobiCoreKernelApi/connection.h
@@ -44,7 +44,6 @@
 };
 
 struct connection *connection_new(void);
-struct connection *connection_create(int socket_descriptor, pid_t dest);
 void connection_cleanup(struct connection *conn);
 bool connection_connect(struct connection *conn, pid_t dest);
 size_t connection_read_datablock(struct connection *conn, void *buffer,
diff --git a/drivers/gud/mobicore_kernelapi/device.c b/drivers/gud/MobiCoreKernelApi/device.c
similarity index 85%
rename from drivers/gud/mobicore_kernelapi/device.c
rename to drivers/gud/MobiCoreKernelApi/device.c
index a176322..04db4c3 100644
--- a/drivers/gud/mobicore_kernelapi/device.c
+++ b/drivers/gud/MobiCoreKernelApi/device.c
@@ -18,16 +18,18 @@
 #include "device.h"
 #include "common.h"
 
-struct wsm *wsm_create(void *virt_addr, uint32_t len, uint32_t handle,
-		       void *phys_addr)
+static struct wsm *wsm_create(void *virt_addr, uint32_t len, uint32_t handle)
 {
 	struct wsm *wsm;
 
 	wsm = kzalloc(sizeof(*wsm), GFP_KERNEL);
+	if (wsm == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	wsm->virt_addr = virt_addr;
 	wsm->len = len;
 	wsm->handle = handle;
-	wsm->phys_addr = phys_addr;
 	return wsm;
 }
 
@@ -37,11 +39,15 @@
 	struct mcore_device_t *dev;
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (dev == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	dev->device_id = device_id;
 	dev->connection = connection;
 
 	INIT_LIST_HEAD(&dev->session_vector);
-	INIT_LIST_HEAD(&dev->wsm_l2_vector);
+	INIT_LIST_HEAD(&dev->wsm_mmu_vector);
 
 	return dev;
 }
@@ -63,7 +69,7 @@
 	}
 
 	/* Free all allocated WSM descriptors */
-	list_for_each_safe(pos, q, &dev->wsm_l2_vector) {
+	list_for_each_safe(pos, q, &dev->wsm_mmu_vector) {
 		wsm = list_entry(pos, struct wsm, list);
 		list_del(pos);
 		kfree(wsm);
@@ -74,7 +80,7 @@
 	kfree(dev);
 }
 
-bool mcore_device_open(struct mcore_device_t *dev, const char *deviceName)
+bool mcore_device_open(struct mcore_device_t *dev, const char *device_name)
 {
 	dev->instance = mobicore_open();
 	return (dev->instance != NULL);
@@ -102,6 +108,8 @@
 	}
 	struct session *session =
 			session_create(session_id, dev->instance, connection);
+	if (session == NULL)
+		return false;
 	list_add_tail(&(session->list), &(dev->session_vector));
 	return true;
 }
@@ -154,16 +162,19 @@
 		/* Allocate shared memory */
 		void *virt_addr;
 		uint32_t handle;
-		void *phys_addr;
 		int ret = mobicore_allocate_wsm(dev->instance, len, &handle,
-						&virt_addr, &phys_addr);
+						&virt_addr);
 		if (ret != 0)
 			break;
 
-		/* Register (vaddr,paddr) with device */
-		wsm = wsm_create(virt_addr, len, handle, phys_addr);
+		/* Register (vaddr) with device */
+		wsm = wsm_create(virt_addr, len, handle);
+		if (wsm == NULL) {
+			mobicore_free_wsm(dev->instance, handle);
+			break;
+		}
 
-		list_add_tail(&(wsm->list), &(dev->wsm_l2_vector));
+		list_add_tail(&(wsm->list), &(dev->wsm_mmu_vector));
 
 	} while (0);
 
@@ -177,7 +188,7 @@
 	struct wsm *tmp;
 	struct list_head *pos;
 
-	list_for_each(pos, &dev->wsm_l2_vector) {
+	list_for_each(pos, &dev->wsm_mmu_vector) {
 		tmp = list_entry(pos, struct wsm, list);
 		if (tmp == wsm) {
 			ret = true;
@@ -205,7 +216,7 @@
 	struct wsm *wsm;
 	struct list_head *pos;
 
-	list_for_each(pos, &dev->wsm_l2_vector) {
+	list_for_each(pos, &dev->wsm_mmu_vector) {
 		wsm = list_entry(pos, struct wsm, list);
 		if (virt_addr == wsm->virt_addr)
 			return wsm;
diff --git a/drivers/gud/mobicore_kernelapi/device.h b/drivers/gud/MobiCoreKernelApi/device.h
similarity index 95%
rename from drivers/gud/mobicore_kernelapi/device.h
rename to drivers/gud/MobiCoreKernelApi/device.h
index 16626bd..c795ee8 100644
--- a/drivers/gud/mobicore_kernelapi/device.h
+++ b/drivers/gud/MobiCoreKernelApi/device.h
@@ -21,7 +21,7 @@
 struct mcore_device_t {
 	/* MobiCore Trustlet session associated with the device */
 	struct list_head	session_vector;
-	struct list_head	 wsm_l2_vector; /* WSM L2 Table  */
+	struct list_head	wsm_mmu_vector; /* WSM L2 or L3 Table  */
 
 	uint32_t		device_id;	/* Device identifier */
 	struct connection	*connection;	/* The device connection */
@@ -36,7 +36,7 @@
 void mcore_device_cleanup(struct mcore_device_t *dev);
 
 
-bool mcore_device_open(struct mcore_device_t *dev, const char *deviceName);
+bool mcore_device_open(struct mcore_device_t *dev, const char *device_name);
 void mcore_device_close(struct mcore_device_t *dev);
 bool mcore_device_has_sessions(struct mcore_device_t *dev);
 bool mcore_device_create_new_session(
diff --git a/drivers/gud/mobicore_kernelapi/include/mcinq.h b/drivers/gud/MobiCoreKernelApi/include/mcinq.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/include/mcinq.h
rename to drivers/gud/MobiCoreKernelApi/include/mcinq.h
diff --git a/drivers/gud/mobicore_kernelapi/include/mcuuid.h b/drivers/gud/MobiCoreKernelApi/include/mcuuid.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/include/mcuuid.h
rename to drivers/gud/MobiCoreKernelApi/include/mcuuid.h
diff --git a/drivers/gud/mobicore_kernelapi/main.c b/drivers/gud/MobiCoreKernelApi/main.c
similarity index 97%
rename from drivers/gud/mobicore_kernelapi/main.c
rename to drivers/gud/MobiCoreKernelApi/main.c
index 8943c26..5da3ef7 100644
--- a/drivers/gud/mobicore_kernelapi/main.c
+++ b/drivers/gud/MobiCoreKernelApi/main.c
@@ -150,6 +150,10 @@
 	dev_info(mc_kapi, "Mobicore API module initialized!\n");
 
 	mod_ctx = kzalloc(sizeof(struct mc_kernelapi_ctx), GFP_KERNEL);
+	if (mod_ctx == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return -ENOMEM;
+	}
 #ifdef MC_NETLINK_COMPAT_V37
 	mod_ctx->sk = netlink_kernel_create(&init_net, MC_DAEMON_NETLINK,
 					    &cfg);
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_api.h b/drivers/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
similarity index 100%
rename from drivers/gud/mobicore_kernelapi/public/mobicore_driver_api.h
rename to drivers/gud/MobiCoreKernelApi/public/mobicore_driver_api.h
diff --git a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h b/drivers/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
similarity index 99%
rename from drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
rename to drivers/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
index eaf7e6c..993d581 100644
--- a/drivers/gud/mobicore_kernelapi/public/mobicore_driver_cmd.h
+++ b/drivers/gud/MobiCoreKernelApi/public/mobicore_driver_cmd.h
@@ -171,7 +171,7 @@
 struct mc_drv_cmd_map_bulk_mem_payload_t {
 	uint32_t session_id;
 	uint32_t handle;
-	uint32_t phys_addr_l2;
+	uint32_t rfu;
 	uint32_t offset_payload;
 	uint32_t len_bulk_mem;
 };
diff --git a/drivers/gud/mobicore_kernelapi/session.c b/drivers/gud/MobiCoreKernelApi/session.c
similarity index 86%
rename from drivers/gud/mobicore_kernelapi/session.c
rename to drivers/gud/MobiCoreKernelApi/session.c
index dae2c00..2ea50e8 100644
--- a/drivers/gud/mobicore_kernelapi/session.c
+++ b/drivers/gud/MobiCoreKernelApi/session.c
@@ -14,15 +14,18 @@
 #include "session.h"
 
 struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
-	void *virt_addr, uint32_t len, uint32_t handle, void *phys_addr_wsm_l2)
+	void *virt_addr, uint32_t len, uint32_t handle)
 {
 	struct bulk_buffer_descriptor *desc;
 
 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (desc == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	desc->virt_addr = virt_addr;
 	desc->len = len;
 	desc->handle = handle;
-	desc->phys_addr_wsm_l2 = phys_addr_wsm_l2;
 
 	return desc;
 }
@@ -33,6 +36,10 @@
 	struct session *session;
 
 	session = kzalloc(sizeof(*session), GFP_KERNEL);
+	if (session == NULL) {
+		MCDRV_DBG_ERROR(mc_kapi, "Allocation failure");
+		return NULL;
+	}
 	session->session_id = session_id;
 	session->instance = instance;
 	session->notification_connection = connection;
@@ -47,19 +54,14 @@
 {
 	struct bulk_buffer_descriptor *bulk_buf_descr;
 	struct list_head *pos, *q;
-	unsigned int phys_addr_wsm_l2;
 
 	/* Unmap still mapped buffers */
 	list_for_each_safe(pos, q, &session->bulk_buffer_descriptors) {
 		bulk_buf_descr =
 			list_entry(pos, struct bulk_buffer_descriptor, list);
 
-		phys_addr_wsm_l2 =
-			(unsigned int)bulk_buf_descr->phys_addr_wsm_l2;
-
 		MCDRV_DBG_VERBOSE(mc_kapi,
-				  "Phys Addr of L2 Table = 0x%X, handle= %d",
-				  phys_addr_wsm_l2,
+				  "handle= %d",
 				  bulk_buf_descr->handle);
 
 		/* ignore any error, as we cannot do anything in this case. */
@@ -110,11 +112,10 @@
 		 * Prepare the interface structure for memory registration in
 		 * Kernel Module
 		 */
-		uint32_t l2_table_phys;
 		uint32_t handle;
 
 		int ret = mobicore_map_vmem(session->instance, buf, len,
-					    &handle, &l2_table_phys);
+					    &handle);
 
 		if (ret != 0) {
 			MCDRV_DBG_ERROR(mc_kapi,
@@ -123,15 +124,15 @@
 			break;
 		}
 
-		MCDRV_DBG_VERBOSE(mc_kapi,
-				  "Phys Addr of L2 Table = 0x%X, handle=%d",
-				  (unsigned int)l2_table_phys, handle);
+		MCDRV_DBG_VERBOSE(mc_kapi, "handle=%d", handle);
 
 		/* Create new descriptor */
 		bulk_buf_descr =
-			bulk_buffer_descriptor_create(buf, len,
-						      handle,
-						      (void *)l2_table_phys);
+			bulk_buffer_descriptor_create(buf, len, handle);
+		if (bulk_buf_descr == NULL) {
+			mobicore_unmap_vmem(session->instance, handle);
+			break;
+		}
 
 		/* Add to vector of descriptors */
 		list_add_tail(&(bulk_buf_descr->list),
@@ -165,8 +166,7 @@
 		MCDRV_DBG_ERROR(mc_kapi, "Virtual Address not found");
 		ret = false;
 	} else {
-		MCDRV_DBG_VERBOSE(mc_kapi, "WsmL2 phys=0x%X, handle=%d",
-				  (unsigned int)bulk_buf->phys_addr_wsm_l2,
+		MCDRV_DBG_VERBOSE(mc_kapi, "Wsm handle=%d",
 				  bulk_buf->handle);
 
 		/* ignore any error, as we cannot do anything */
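
Besides dropping the stored L2-table physical address, the session changes above make the mapping path unwind cleanly: when the descriptor allocation fails after mobicore_map_vmem() has already succeeded, the mapping is released again instead of being leaked. A minimal sketch of that map-then-unwind shape, reusing the call signatures visible in this hunk (struct mc_instance as the instance type is an assumption carried over from the driver side of this patch):

#include <linux/kernel.h>
#include <linux/types.h>

struct mc_instance;
struct bulk_buffer_descriptor;

/* declared elsewhere in the kernel API / driver; shown here for context */
int mobicore_map_vmem(struct mc_instance *instance, void *addr,
		      uint32_t len, uint32_t *handle);
int mobicore_unmap_vmem(struct mc_instance *instance, uint32_t handle);
struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
	void *virt_addr, uint32_t len, uint32_t handle);

static struct bulk_buffer_descriptor *map_and_track(struct mc_instance *inst,
						    void *buf, uint32_t len)
{
	struct bulk_buffer_descriptor *desc;
	uint32_t handle;

	if (mobicore_map_vmem(inst, buf, len, &handle) != 0)
		return NULL;			/* nothing to undo yet */

	desc = bulk_buffer_descriptor_create(buf, len, handle);
	if (desc == NULL) {
		mobicore_unmap_vmem(inst, handle);	/* undo the mapping */
		return NULL;
	}
	return desc;
}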
diff --git a/drivers/gud/mobicore_kernelapi/session.h b/drivers/gud/MobiCoreKernelApi/session.h
similarity index 96%
rename from drivers/gud/mobicore_kernelapi/session.h
rename to drivers/gud/MobiCoreKernelApi/session.h
index 4a834e5..edcadcd 100644
--- a/drivers/gud/mobicore_kernelapi/session.h
+++ b/drivers/gud/MobiCoreKernelApi/session.h
@@ -19,9 +19,6 @@
 	uint32_t	len;		/* Length of the Bulk buffer */
 	uint32_t	handle;
 
-	/* The physical address of the L2 table of the Bulk buffer*/
-	void		*phys_addr_wsm_l2;
-
 	/* The list param for using the kernel lists*/
 	struct list_head list;
 };
@@ -29,8 +26,7 @@
 struct bulk_buffer_descriptor *bulk_buffer_descriptor_create(
 	void		*virt_addr,
 	uint32_t	len,
-	uint32_t	handle,
-	void		*phys_addr_wsm_l2
+	uint32_t	handle
 );
 
 /*
diff --git a/drivers/gud/mobicore_kernelapi/wsm.h b/drivers/gud/MobiCoreKernelApi/wsm.h
similarity index 74%
rename from drivers/gud/mobicore_kernelapi/wsm.h
rename to drivers/gud/MobiCoreKernelApi/wsm.h
index f8a107c..3a1767d 100644
--- a/drivers/gud/mobicore_kernelapi/wsm.h
+++ b/drivers/gud/MobiCoreKernelApi/wsm.h
@@ -17,17 +17,7 @@
 	void			*virt_addr;
 	uint32_t		len;
 	uint32_t		handle;
-	void			*phys_addr;
 	struct list_head	list;
 };
 
-struct wsm *wsm_create(
-	void			*virt_addr,
-	uint32_t		len,
-	uint32_t		handle,
-
-	/* NULL this may be unknown, so is can be omitted */
-	void			*phys_addr
-);
-
 #endif /* _MC_KAPI_WSM_H_ */
diff --git a/drivers/gud/mobicore_driver/build_tag.h b/drivers/gud/build_tag.h
similarity index 96%
rename from drivers/gud/mobicore_driver/build_tag.h
rename to drivers/gud/build_tag.h
index 4a24275..18faf5a 100644
--- a/drivers/gud/mobicore_driver/build_tag.h
+++ b/drivers/gud/build_tag.h
@@ -26,4 +26,4 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #define MOBICORE_COMPONENT_BUILD_TAG \
-		"*** t-base-202_V001 ###"
+		"*** t-base-300-QC-8974-Android-V001 ###"
diff --git a/drivers/gud/mobicore_driver/mem.c b/drivers/gud/mobicore_driver/mem.c
deleted file mode 100644
index 33c51b6..0000000
--- a/drivers/gud/mobicore_driver/mem.c
+++ /dev/null
@@ -1,708 +0,0 @@
-/*
- * MobiCore Driver Kernel Module.
- *
- * This module is written as a Linux device driver.
- * This driver represents the command proxy on the lowest layer, from the
- * secure world to the non secure world, and vice versa.
- * This driver is located in the non secure world (Linux).
- * This driver offers IOCTL commands, for access to the secure world, and has
- * the interface from the secure world to the normal world.
- * The access to the driver is possible with a file descriptor,
- * which has to be created by the fd = open(/dev/mobicore) command.
- *
- * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
- * <-- Copyright Trustonic Limited 2013 -->
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include "main.h"
-#include "debug.h"
-#include "mem.h"
-
-#include <linux/highmem.h>
-#include <linux/slab.h>
-#include <linux/kthread.h>
-#include <linux/pagemap.h>
-#include <linux/device.h>
-
-
-/* MobiCore memory context data */
-struct mc_mem_context mem_ctx;
-
-/* convert L2 PTE to page pointer */
-static inline struct page *l2_pte_to_page(pte_t pte)
-{
-	unsigned long phys_page_addr = ((unsigned long)pte & PAGE_MASK);
-	unsigned int pfn = phys_page_addr >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(pfn);
-	return page;
-}
-
-/* convert page pointer to L2 PTE */
-static inline pte_t page_to_l2_pte(struct page *page)
-{
-	unsigned long pfn = page_to_pfn(page);
-	unsigned long phys_addr = (pfn << PAGE_SHIFT);
-	pte_t pte = (pte_t)(phys_addr & PAGE_MASK);
-	return pte;
-}
-
-static inline void release_page(struct page *page)
-{
-	SetPageDirty(page);
-
-	page_cache_release(page);
-}
-
-static int lock_pages(struct task_struct *task, void *virt_start_page_addr,
-	int pages_no, struct page **pages)
-{
-	int locked_pages;
-
-	/* lock user pages, must hold the mmap_sem to do this. */
-	down_read(&(task->mm->mmap_sem));
-	locked_pages = get_user_pages(
-				task,
-				task->mm,
-				(unsigned long)virt_start_page_addr,
-				pages_no,
-				1, /* write access */
-				0,
-				pages,
-				NULL);
-	up_read(&(task->mm->mmap_sem));
-
-	/* check if we could lock all pages. */
-	if (locked_pages != pages_no) {
-		MCDRV_DBG_ERROR(mcd, "get_user_pages() failed, locked_pages=%d",
-				locked_pages);
-		if (locked_pages > 0) {
-			/* release all locked pages. */
-			release_pages(pages, locked_pages, 0);
-		}
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-/* Get kernel pointer to shared L2 table given a per-process reference */
-struct l2table *get_l2_table_kernel_virt(struct mc_l2_table *table)
-{
-	if (WARN(!table, "Invalid L2 table"))
-		return NULL;
-
-	if (WARN(!table->set, "Invalid L2 table set"))
-		return NULL;
-
-	if (WARN(!table->set->kernel_virt, "Invalid L2 pointer"))
-		return NULL;
-
-	return &(table->set->kernel_virt->table[table->idx]);
-}
-
-/* Get physical address of a shared L2 table given a per-process reference */
-struct l2table *get_l2_table_phys(struct mc_l2_table *table)
-{
-	if (WARN(!table, "Invalid L2 table"))
-		return NULL;
-	if (WARN(!table->set, "Invalid L2 table set"))
-		return NULL;
-	if (WARN(!table->set->kernel_virt, "Invalid L2 phys pointer"))
-		return NULL;
-
-	return &(table->set->phys->table[table->idx]);
-}
-
-static inline int in_use(struct mc_l2_table *table)
-{
-	return atomic_read(&table->usage) > 0;
-}
-
-/*
- * Search the list of used l2 tables and return the one with the handle.
- * Assumes the table_lock is taken.
- */
-struct mc_l2_table *find_l2_table(unsigned int handle)
-{
-	struct mc_l2_table *table;
-
-	list_for_each_entry(table, &mem_ctx.l2_tables, list) {
-		if (table->handle == handle)
-			return table;
-	}
-	return NULL;
-}
-
-/*
- * Allocate a new l2 table store plus L2_TABLES_PER_PAGE in the l2 free tables
- * list. Assumes the table_lock is already taken by the caller above.
- */
-static int alloc_table_store(void)
-{
-	unsigned long store;
-	struct mc_l2_tables_set *l2table_set;
-	struct mc_l2_table *l2table, *l2table2;
-	struct page *page;
-	int ret = 0, i;
-	/* temp list for holding the l2 tables */
-	LIST_HEAD(temp);
-
-	store = get_zeroed_page(GFP_KERNEL);
-	if (!store)
-		return -ENOMEM;
-
-	/*
-	 * Actually, locking is not necessary, because kernel
-	 * memory is not supposed to get swapped out. But we
-	 * play safe....
-	 */
-	page = virt_to_page(store);
-	SetPageReserved(page);
-
-	/* add all the descriptors to the free descriptors list */
-	l2table_set = kmalloc(sizeof(*l2table_set), GFP_KERNEL | __GFP_ZERO);
-	if (l2table_set == NULL) {
-		ret = -ENOMEM;
-		goto free_store;
-	}
-	/* initialize */
-	l2table_set->kernel_virt = (void *)store;
-	l2table_set->page = page;
-	l2table_set->phys = (void *)virt_to_phys((void *)store);
-	/* the set is not yet used */
-	atomic_set(&l2table_set->used_tables, 0);
-
-	/* init add to list. */
-	INIT_LIST_HEAD(&(l2table_set->list));
-	list_add(&l2table_set->list, &mem_ctx.l2_tables_sets);
-
-	for (i = 0; i < L2_TABLES_PER_PAGE; i++) {
-		/* allocate a WSM L2 descriptor */
-		l2table  = kmalloc(sizeof(*l2table), GFP_KERNEL | __GFP_ZERO);
-		if (l2table == NULL) {
-			ret = -ENOMEM;
-			MCDRV_DBG_ERROR(mcd, "out of memory\n");
-			/* Free the full temp list and the store in this case */
-			goto free_temp_list;
-		}
-
-		/* set set reference */
-		l2table->set = l2table_set;
-		l2table->idx = i;
-		l2table->virt = get_l2_table_kernel_virt(l2table);
-		l2table->phys = (unsigned long)get_l2_table_phys(l2table);
-		atomic_set(&l2table->usage, 0);
-
-		/* add to temp list. */
-		INIT_LIST_HEAD(&l2table->list);
-		list_add_tail(&l2table->list, &temp);
-	}
-
-	/*
-	 * If everything went ok then merge the temp list with the global
-	 * free list
-	 */
-	list_splice_tail(&temp, &mem_ctx.free_l2_tables);
-	return 0;
-free_temp_list:
-	list_for_each_entry_safe(l2table, l2table2, &temp, list) {
-		kfree(l2table);
-	}
-
-	list_del(&l2table_set->list);
-
-free_store:
-	free_page(store);
-	return ret;
-
-}
-/*
- * Get a l2 table from the free tables list or allocate a new one and
- * initialize it. Assumes the table_lock is already taken.
- */
-static struct mc_l2_table *alloc_l2_table(struct mc_instance *instance)
-{
-	int ret = 0;
-	struct mc_l2_table *table = NULL;
-
-	if (list_empty(&mem_ctx.free_l2_tables)) {
-		ret = alloc_table_store();
-		if (ret) {
-			MCDRV_DBG_ERROR(mcd, "Failed to allocate new store!");
-			return ERR_PTR(-ENOMEM);
-		}
-		/* if it's still empty something wrong has happened */
-		if (list_empty(&mem_ctx.free_l2_tables)) {
-			MCDRV_DBG_ERROR(mcd,
-					"Free list not updated correctly!");
-			return ERR_PTR(-EFAULT);
-		}
-	}
-
-	/* get a WSM L2 descriptor */
-	table  = list_first_entry(&mem_ctx.free_l2_tables,
-		struct mc_l2_table, list);
-	if (table == NULL) {
-		MCDRV_DBG_ERROR(mcd, "out of memory\n");
-		return ERR_PTR(-ENOMEM);
-	}
-	/* Move it to the used l2 tables list */
-	list_move_tail(&table->list, &mem_ctx.l2_tables);
-
-	table->handle = get_unique_id();
-	table->owner = instance;
-
-	atomic_inc(&table->set->used_tables);
-	atomic_inc(&table->usage);
-
-	MCDRV_DBG_VERBOSE(mcd,
-			  "chunkPhys=%p,idx=%d", table->set->phys, table->idx);
-
-	return table;
-}
-
-/*
- * Frees the object associated with a l2 table. Initially the object is moved
- * to the free tables list, but if all the 4 lists of the store are free
- * then the store is also released.
- * Assumes the table_lock is already taken.
- */
-static void free_l2_table(struct mc_l2_table *table)
-{
-	struct mc_l2_tables_set *l2table_set;
-
-	if (WARN(!table, "Invalid table"))
-		return;
-
-	l2table_set = table->set;
-	if (WARN(!l2table_set, "Invalid table set"))
-		return;
-
-	list_move_tail(&table->list, &mem_ctx.free_l2_tables);
-
-	/* if nobody uses this set, we can release it. */
-	if (atomic_dec_and_test(&l2table_set->used_tables)) {
-		struct mc_l2_table *tmp;
-
-		/* remove from list */
-		list_del(&l2table_set->list);
-		/*
-		 * All the l2 tables are in the free list for this set
-		 * so we can just remove them from there
-		 */
-		list_for_each_entry_safe(table, tmp, &mem_ctx.free_l2_tables,
-					 list) {
-			if (table->set == l2table_set) {
-				list_del(&table->list);
-				kfree(table);
-			}
-		} /* end while */
-
-		/*
-		 * We shouldn't recover from this since it was some data
-		 * corruption before
-		 */
-		BUG_ON(!l2table_set->page);
-		ClearPageReserved(l2table_set->page);
-
-		BUG_ON(!l2table_set->kernel_virt);
-		free_page((unsigned long)l2table_set->kernel_virt);
-
-		kfree(l2table_set);
-	}
-}
-
-/*
- * Create a L2 table in a WSM container that has been allocates previously.
- * Assumes the table lock is already taken or there is no need to take like
- * when first creating the l2 table the full list is locked.
- *
- * @task	pointer to task owning WSM
- * @wsm_buffer	user space WSM start
- * @wsm_len	WSM length
- * @table	Pointer to L2 table details
- */
-static int map_buffer(struct task_struct *task, void *wsm_buffer,
-		      unsigned int wsm_len, struct mc_l2_table *table)
-{
-	int		ret = 0;
-	unsigned int	i, nr_of_pages;
-	/* start address of the 4 KiB page of wsm_buffer */
-	void		*virt_addr_page;
-	struct page	*page;
-	struct l2table	*l2table;
-	struct page	**l2table_as_array_of_pointers_to_page;
-	/* page offset in wsm buffer */
-	unsigned int offset;
-
-	if (WARN(!wsm_buffer, "Invalid WSM buffer pointer"))
-		return -EINVAL;
-
-	if (WARN(wsm_len == 0, "Invalid WSM buffer length"))
-		return -EINVAL;
-
-	if (WARN(!table, "Invalid mapping table for WSM"))
-		return -EINVAL;
-
-	/* no size > 1Mib supported */
-	if (wsm_len > SZ_1M) {
-		MCDRV_DBG_ERROR(mcd, "size > 1 MiB\n");
-		return -EINVAL;
-	}
-
-	MCDRV_DBG_VERBOSE(mcd, "WSM addr=0x%p, len=0x%08x\n", wsm_buffer,
-			  wsm_len);
-
-
-	/* calculate page usage */
-	virt_addr_page = (void *)(((unsigned long)(wsm_buffer)) & PAGE_MASK);
-	offset = (unsigned int)	(((unsigned long)(wsm_buffer)) & (~PAGE_MASK));
-	nr_of_pages  = PAGE_ALIGN(offset + wsm_len) / PAGE_SIZE;
-
-	MCDRV_DBG_VERBOSE(mcd, "virt addr page start=0x%p, pages=%d\n",
-			  virt_addr_page, nr_of_pages);
-
-	/* L2 table can hold max 1MiB in 256 pages. */
-	if ((nr_of_pages * PAGE_SIZE) > SZ_1M) {
-		MCDRV_DBG_ERROR(mcd, "WSM paged exceed 1 MiB\n");
-		return -EINVAL;
-	}
-
-	l2table = table->virt;
-	/*
-	 * We use the memory for the L2 table to hold the pointer
-	 * and convert them later. This works, as everything comes
-	 * down to a 32 bit value.
-	 */
-	l2table_as_array_of_pointers_to_page = (struct page **)l2table;
-
-	/* Request comes from user space */
-	if (task != NULL && !is_vmalloc_addr(wsm_buffer)) {
-		/*
-		 * lock user page in memory, so they do not get swapped
-		 * out.
-		 * REV axh: Kernel 2.6.27 added a new get_user_pages_fast()
-		 * function, maybe it is called fast_gup() in some versions.
-		 * handle user process doing a fork().
-		 * Child should not get things.
-		 * http://osdir.com/ml/linux-media/2009-07/msg00813.html
-		 * http://lwn.net/Articles/275808/
-		 */
-		ret = lock_pages(task, virt_addr_page, nr_of_pages,
-				 l2table_as_array_of_pointers_to_page);
-		if (ret != 0) {
-			MCDRV_DBG_ERROR(mcd, "lock_user_pages() failed\n");
-			return ret;
-		}
-	}
-	/* Request comes from kernel space(cont buffer) */
-	else if (task == NULL && !is_vmalloc_addr(wsm_buffer)) {
-		void *uaddr = wsm_buffer;
-		for (i = 0; i < nr_of_pages; i++) {
-			page = virt_to_page(uaddr);
-			if (!page) {
-				MCDRV_DBG_ERROR(mcd, "failed to map address");
-				return -EINVAL;
-			}
-			get_page(page);
-			l2table_as_array_of_pointers_to_page[i] = page;
-			uaddr += PAGE_SIZE;
-		}
-	}
-	/* Request comes from kernel space(vmalloc buffer) */
-	else {
-		void *uaddr = wsm_buffer;
-		for (i = 0; i < nr_of_pages; i++) {
-			page = vmalloc_to_page(uaddr);
-			if (!page) {
-				MCDRV_DBG_ERROR(mcd, "failed to map address");
-				return -EINVAL;
-			}
-			get_page(page);
-			l2table_as_array_of_pointers_to_page[i] = page;
-			uaddr += PAGE_SIZE;
-		}
-	}
-
-	table->pages = nr_of_pages;
-
-	/*
-	 * create L2 Table entries.
-	 * used_l2table->table contains a list of page pointers here.
-	 * For a proper cleanup we have to ensure that the following
-	 * code either works and used_l2table contains a valid L2 table
-	 * - or fails and used_l2table->table contains the list of page
-	 * pointers.
-	 * Any mixed contents will make cleanup difficult.
-	 */
-	for (i = 0; i < nr_of_pages; i++) {
-		pte_t pte;
-		page = l2table_as_array_of_pointers_to_page[i];
-
-		/*
-		 * create L2 table entry, see ARM MMU docu for details
-		 * about flags stored in the lowest 12 bits.
-		 * As a side reference, the Article
-		 * "ARM's multiply-mapped memory mess"
-		 * found in the collection at
-		 * http://lwn.net/Articles/409032/
-		 * is also worth reading.
-		 */
-		pte = page_to_l2_pte(page)
-				| PTE_EXT_AP1 | PTE_EXT_AP0
-				| PTE_CACHEABLE | PTE_BUFFERABLE
-				| PTE_TYPE_SMALL | PTE_TYPE_EXT | PTE_EXT_NG;
-		/*
-		 * Linux uses different mappings for SMP systems(the
-		 * sharing flag is set for the pte. In order not to
-		 * confuse things too much in Mobicore make sure the
-		 * shared buffers have the same flags.
-		 * This should also be done in SWD side
-		 */
-#ifdef CONFIG_SMP
-		pte |= PTE_EXT_SHARED | PTE_EXT_TEX(1);
-#endif
-
-		l2table->table_entries[i] = pte;
-		MCDRV_DBG_VERBOSE(mcd, "L2 entry %d:  0x%08x\n", i,
-				  (unsigned int)(pte));
-	}
-
-	/* ensure rest of table is empty */
-	while (i < 255)
-		l2table->table_entries[i++] = (pte_t)0;
-
-
-	return ret;
-}
-
-/*
- * Remove a L2 table in a WSM container. Afterwards the container may be
- * released. Assumes the table_lock and the lock is taken.
- */
-static void unmap_buffers(struct mc_l2_table *table)
-{
-	struct l2table *l2table;
-	int i;
-
-	if (WARN_ON(!table))
-		return;
-
-	/* found the table, now release the resources. */
-	MCDRV_DBG_VERBOSE(mcd, "clear L2 table, phys_base=%p, nr_of_pages=%d\n",
-			  (void *)table->phys, table->pages);
-
-	l2table = table->virt;
-
-	/* release all locked user space pages */
-	for (i = 0; i < table->pages; i++) {
-		/* convert physical entries from L2 table to page pointers */
-		pte_t pte = l2table->table_entries[i];
-		struct page *page = l2_pte_to_page(pte);
-		release_page(page);
-	}
-
-	/* remember that all pages have been freed */
-	table->pages = 0;
-}
-
-/* Delete a used l2 table. Assumes the table_lock and the lock is taken */
-static void unmap_l2_table(struct mc_l2_table *table)
-{
-	/* Check if it's not locked by other processes too! */
-	if (!atomic_dec_and_test(&table->usage))
-		return;
-
-	/* release if Nwd and Swd/MC do no longer use it. */
-	unmap_buffers(table);
-	free_l2_table(table);
-}
-
-int mc_free_l2_table(struct mc_instance *instance, uint32_t handle)
-{
-	struct mc_l2_table *table;
-	int ret = 0;
-
-	if (WARN(!instance, "No instance data available"))
-		return -EFAULT;
-
-	mutex_lock(&mem_ctx.table_lock);
-	table = find_l2_table(handle);
-
-	if (table == NULL) {
-		MCDRV_DBG_VERBOSE(mcd, "entry not found");
-		ret = -EINVAL;
-		goto err_unlock;
-	}
-	if (instance != table->owner && !is_daemon(instance)) {
-		MCDRV_DBG_ERROR(mcd, "instance does no own it");
-		ret = -EPERM;
-		goto err_unlock;
-	}
-	/* free table (if no further locks exist) */
-	unmap_l2_table(table);
-err_unlock:
-	mutex_unlock(&mem_ctx.table_lock);
-
-	return ret;
-}
-
-int mc_lock_l2_table(struct mc_instance *instance, uint32_t handle)
-{
-	int ret = 0;
-	struct mc_l2_table *table = NULL;
-
-	if (WARN(!instance, "No instance data available"))
-		return -EFAULT;
-
-	mutex_lock(&mem_ctx.table_lock);
-	table = find_l2_table(handle);
-
-	if (table == NULL) {
-		MCDRV_DBG_VERBOSE(mcd, "entry not found %u\n", handle);
-		ret = -EINVAL;
-		goto table_err;
-	}
-	if (instance != table->owner && !is_daemon(instance)) {
-		MCDRV_DBG_ERROR(mcd, "instance does no own it\n");
-		ret = -EPERM;
-		goto table_err;
-	}
-
-	/* lock entry */
-	atomic_inc(&table->usage);
-table_err:
-	mutex_unlock(&mem_ctx.table_lock);
-	return ret;
-}
-/*
- * Allocate L2 table and map buffer into it.
- * That is, create respective table entries.
- * Must hold Semaphore mem_ctx.wsm_l2_sem
- */
-struct mc_l2_table *mc_alloc_l2_table(struct mc_instance *instance,
-	struct task_struct *task, void *wsm_buffer, unsigned int wsm_len)
-{
-	int ret = 0;
-	struct mc_l2_table *table;
-
-	if (WARN(!instance, "No instance data available"))
-		return ERR_PTR(-EFAULT);
-
-	mutex_lock(&mem_ctx.table_lock);
-	table = alloc_l2_table(instance);
-	if (IS_ERR(table)) {
-		MCDRV_DBG_ERROR(mcd, "allocate_used_l2_table() failed\n");
-		ret = -ENOMEM;
-		goto err_no_mem;
-	}
-
-	/* create the L2 page for the WSM */
-	ret = map_buffer(task, wsm_buffer, wsm_len, table);
-
-	if (ret != 0) {
-		MCDRV_DBG_ERROR(mcd, "map_buffer() failed\n");
-		unmap_l2_table(table);
-		goto err_no_mem;
-	}
-	MCDRV_DBG(mcd, "mapped buffer %p to table with handle %d @ %lx",
-		  wsm_buffer, table->handle, table->phys);
-
-	mutex_unlock(&mem_ctx.table_lock);
-	return table;
-err_no_mem:
-	mutex_unlock(&mem_ctx.table_lock);
-	return ERR_PTR(ret);
-}
-
-uint32_t mc_find_l2_table(uint32_t handle, int32_t fd)
-{
-	uint32_t ret = 0;
-	struct mc_l2_table *table = NULL;
-
-	mutex_lock(&mem_ctx.table_lock);
-	table = find_l2_table(handle);
-
-	if (table == NULL) {
-		MCDRV_DBG_ERROR(mcd, "entry not found %u\n", handle);
-		ret = 0;
-		goto table_err;
-	}
-
-	/* It's safe here not to lock the instance since the owner of
-	 * the table will be cleared only with the table lock taken */
-	if (!mc_check_owner_fd(table->owner, fd)) {
-		MCDRV_DBG_ERROR(mcd, "not valid owner%u\n", handle);
-		ret = 0;
-		goto table_err;
-	}
-
-	ret = table->phys;
-table_err:
-	mutex_unlock(&mem_ctx.table_lock);
-	return ret;
-}
-
-void mc_clean_l2_tables(void)
-{
-	struct mc_l2_table *table, *tmp;
-
-	mutex_lock(&mem_ctx.table_lock);
-	/* Check if some WSM is orphaned. */
-	list_for_each_entry_safe(table, tmp, &mem_ctx.l2_tables, list) {
-		if (table->owner == NULL) {
-			MCDRV_DBG(mcd,
-				  "clearing orphaned WSM L2: p=%lx pages=%d\n",
-				  table->phys, table->pages);
-			unmap_l2_table(table);
-		}
-	}
-	mutex_unlock(&mem_ctx.table_lock);
-}
-
-void mc_clear_l2_tables(struct mc_instance *instance)
-{
-	struct mc_l2_table *table, *tmp;
-
-	mutex_lock(&mem_ctx.table_lock);
-	/* Check if some WSM is still in use. */
-	list_for_each_entry_safe(table, tmp, &mem_ctx.l2_tables, list) {
-		if (table->owner == instance) {
-			MCDRV_DBG(mcd, "release WSM L2: p=%lx pages=%d\n",
-				  table->phys, table->pages);
-			/* unlock app usage and free or mark it as orphan */
-			table->owner = NULL;
-			unmap_l2_table(table);
-		}
-	}
-	mutex_unlock(&mem_ctx.table_lock);
-}
-
-int mc_init_l2_tables(void)
-{
-	/* init list for WSM L2 chunks. */
-	INIT_LIST_HEAD(&mem_ctx.l2_tables_sets);
-
-	/* L2 table descriptor list. */
-	INIT_LIST_HEAD(&mem_ctx.l2_tables);
-
-	/* L2 table descriptor list. */
-	INIT_LIST_HEAD(&mem_ctx.free_l2_tables);
-
-	mutex_init(&mem_ctx.table_lock);
-
-	return 0;
-}
-
-void mc_release_l2_tables()
-{
-	struct mc_l2_table *table;
-	/* Check if some WSM is still in use. */
-	list_for_each_entry(table, &mem_ctx.l2_tables, list) {
-		WARN(1, "WSM L2 still in use: phys=%lx ,nr_of_pages=%d\n",
-		     table->phys, table->pages);
-	}
-}
diff --git a/drivers/gud/mobicore_driver/mem.h b/drivers/gud/mobicore_driver/mem.h
deleted file mode 100644
index 397a6cc..0000000
--- a/drivers/gud/mobicore_driver/mem.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * MobiCore driver module.(interface to the secure world SWD)
- *
- * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
- * <-- Copyright Trustonic Limited 2013 -->
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _MC_MEM_H_
-#define _MC_MEM_H_
-
-#define FREE_FROM_SWD	1
-#define FREE_FROM_NWD	0
-
-#define LOCKED_BY_APP	(1U << 0)
-#define LOCKED_BY_MC	(1U << 1)
-
-/*
- * MobiCore specific page tables for world shared memory.
- * Linux uses shadow page tables, see arch/arm/include/asm/pgtable-2level.
- * MobiCore uses the default ARM format.
- *
- * Number of page table entries in one L2 table. This is ARM specific, an
- * L2 table covers 1 MiB by using 256 entry referring to 4KiB pages each.
- */
-#define MC_ARM_L2_TABLE_ENTRIES		256
-
-/* ARM level 2 (L2) table with 256 entries. Size: 1k */
-struct l2table {
-	pte_t	table_entries[MC_ARM_L2_TABLE_ENTRIES];
-};
-
-/* Number of pages for L2 tables. There are 4 table in each page. */
-#define L2_TABLES_PER_PAGE		4
-
-/* Store for four L2 tables in one 4kb page*/
-struct mc_l2_table_store {
-	struct l2table table[L2_TABLES_PER_PAGE];
-};
-
-/* Usage and maintenance information about mc_l2_table_store */
-struct mc_l2_tables_set {
-	struct list_head		list;
-	/* kernel virtual address */
-	struct mc_l2_table_store	*kernel_virt;
-	/* physical address */
-	struct mc_l2_table_store	*phys;
-	/* pointer to page struct */
-	struct page			*page;
-	/* How many pages from this set are used */
-	atomic_t			used_tables;
-};
-
-/*
- * L2 table allocated to the Daemon or a TLC describing a world shared buffer.
- * When users map a malloc()ed area into SWd, a L2 table is allocated.
- * In addition, the area of maximum 1MB virtual address space is mapped into
- * the L2 table and a handle for this table is returned to the user.
- */
-struct mc_l2_table {
-	struct list_head	list;
-	/* Table lock */
-	struct mutex		lock;
-	/* handle as communicated to user mode */
-	unsigned int		handle;
-	/* Number of references kept to this l2 table */
-	atomic_t		usage;
-	/* owner of this L2 table */
-	struct mc_instance	*owner;
-	/* set describing where our L2 table is stored */
-	struct mc_l2_tables_set	*set;
-	/* index into L2 table set */
-	unsigned int		idx;
-	/* size of buffer */
-	unsigned int		pages;
-	/* virtual address*/
-	void			*virt;
-	unsigned long		phys;
-};
-
-/* MobiCore Driver Memory context data. */
-struct mc_mem_context {
-	struct mc_instance	*daemon_inst;
-	/* Backing store for L2 tables */
-	struct list_head	l2_tables_sets;
-	/* Bookkeeping for used L2 tables */
-	struct list_head	l2_tables;
-	/* Bookkeeping for free L2 tables */
-	struct list_head	free_l2_tables;
-	/* semaphore to synchronize access to above lists */
-	struct mutex		table_lock;
-};
-
-/*
- * Allocate L2 table and map buffer into it.
- * That is, create respective table entries.
- */
-struct mc_l2_table *mc_alloc_l2_table(struct mc_instance *instance,
-	struct task_struct *task, void *wsm_buffer, unsigned int wsm_len);
-
-/* Delete all the l2 tables associated with an instance */
-void mc_clear_l2_tables(struct mc_instance *instance);
-
-/* Release all orphaned L2 tables */
-void mc_clean_l2_tables(void);
-
-/* Delete a used l2 table. */
-int mc_free_l2_table(struct mc_instance *instance, uint32_t handle);
-
-/*
- * Lock a l2 table - the daemon adds +1 to refcount of the L2 table
- * marking it in use by SWD so it doesn't get released when the TLC dies.
- */
-int mc_lock_l2_table(struct mc_instance *instance, uint32_t handle);
-/* Unlock l2 table. */
-int mc_unlock_l2_table(struct mc_instance *instance, uint32_t handle);
-/* Return the phys address of l2 table. */
-uint32_t mc_find_l2_table(uint32_t handle, int32_t fd);
-/* Release all used l2 tables to Linux memory space */
-void mc_release_l2_tables(void);
-
-/* Initialize all l2 tables structure */
-int mc_init_l2_tables(void);
-
-#endif /* _MC_MEM_H_ */
diff --git a/drivers/gud/mobicore_driver/ops.c b/drivers/gud/mobicore_driver/ops.c
deleted file mode 100644
index 9d4af72..0000000
--- a/drivers/gud/mobicore_driver/ops.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * MobiCore Driver Kernel Module.
- *
- * This module is written as a Linux device driver.
- * This driver represents the command proxy on the lowest layer, from the
- * secure world to the non secure world, and vice versa.
- * This driver is located in the non secure world (Linux).
- * This driver offers IOCTL commands, for access to the secure world, and has
- * the interface from the secure world to the normal world.
- * The access to the driver is possible with a file descriptor,
- * which has to be created by the fd = open(/dev/mobicore) command.
- *
- * <-- Copyright Giesecke & Devrient GmbH 2009-2012 -->
- * <-- Copyright Trustonic Limited 2013 -->
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kthread.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/cpu.h>
-
-#include "main.h"
-#include "fastcall.h"
-#include "ops.h"
-#include "mem.h"
-#include "pm.h"
-#include "debug.h"
-
-/* MobiCore context data */
-static struct mc_context *ctx;
-
-static inline long smc(union fc_generic *fc)
-{
-	/* If we request sleep yields must be filtered out as they
-	 * make no sense */
-	if (ctx->mcp)
-		if (ctx->mcp->flags.sleep_mode.SleepReq) {
-			if (fc->as_in.cmd == MC_SMC_N_YIELD)
-				return MC_FC_RET_ERR_INVALID;
-		}
-	return _smc(fc);
-}
-
-#ifdef MC_FASTCALL_WORKER_THREAD
-
-static struct task_struct *fastcall_thread;
-static DEFINE_KTHREAD_WORKER(fastcall_worker);
-
-struct fastcall_work {
-	struct kthread_work work;
-	void *data;
-};
-
-static void fastcall_work_func(struct kthread_work *work)
-{
-	struct fastcall_work *fc_work =
-		container_of(work, struct fastcall_work, work);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
-	mc_pm_clock_enable();
-#endif
-
-	smc(fc_work->data);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
-	mc_pm_clock_disable();
-#endif
-}
-
-void mc_fastcall(void *data)
-{
-	struct fastcall_work fc_work = {
-		KTHREAD_WORK_INIT(fc_work.work, fastcall_work_func),
-		.data = data,
-	};
-
-	queue_kthread_work(&fastcall_worker, &fc_work.work);
-	flush_kthread_work(&fc_work.work);
-}
-
-int mc_fastcall_init(struct mc_context *context)
-{
-	int ret = 0;
-
-	ctx = context;
-
-	fastcall_thread = kthread_create(kthread_worker_fn, &fastcall_worker,
-					 "mc_fastcall");
-	if (IS_ERR(fastcall_thread)) {
-		ret = PTR_ERR(fastcall_thread);
-		fastcall_thread = NULL;
-		MCDRV_DBG_ERROR(mcd, "cannot create fastcall wq (%d)\n", ret);
-		return ret;
-	}
-
-	/* this thread MUST run on CPU 0 */
-	kthread_bind(fastcall_thread, 0);
-	wake_up_process(fastcall_thread);
-
-	return 0;
-}
-
-void mc_fastcall_destroy(void)
-{
-	if (!IS_ERR_OR_NULL(fastcall_thread)) {
-		kthread_stop(fastcall_thread);
-		fastcall_thread = NULL;
-	}
-}
-#else
-
-struct fastcall_work_struct {
-	struct work_struct work;
-	void *data;
-};
-
-static void fastcall_work_func(struct work_struct *work)
-{
-	struct fastcall_work_struct *fc_work =
-		container_of(work, struct fastcall_work_struct, work);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
-	mc_pm_clock_enable();
-#endif
-
-	smc(fc_work->data);
-
-#ifdef MC_CRYPTO_CLOCK_MANAGEMENT
-	mc_pm_clock_disable();
-#endif
-}
-
-void mc_fastcall(void *data)
-{
-	struct fastcall_work_struct work = {
-		.data = data,
-	};
-	INIT_WORK(&work.work, fastcall_work_func);
-	schedule_work_on(0, &work.work);
-
-	flush_work(&work.work);
-}
-
-int mc_fastcall_init(struct mc_context *context)
-{
-	ctx = context;
-	return 0;
-};
-
-void mc_fastcall_destroy(void) {};
-#endif
-
-int mc_info(uint32_t ext_info_id, uint32_t *state, uint32_t *ext_info)
-{
-	int ret = 0;
-	union mc_fc_info fc_info;
-
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&fc_info, 0, sizeof(fc_info));
-	fc_info.as_in.cmd = MC_FC_INFO;
-	fc_info.as_in.ext_info_id = ext_info_id;
-
-	MCDRV_DBG(mcd, "fc_info <- cmd=0x%08x, ext_info_id=0x%08x\n",
-		  fc_info.as_in.cmd, fc_info.as_in.ext_info_id);
-
-	mc_fastcall(&(fc_info.as_generic));
-
-	MCDRV_DBG(mcd,
-		  "fc_info -> r=0x%08x ret=0x%08x state=0x%08x ext_info=0x%08x",
-		  fc_info.as_out.resp,
-		  fc_info.as_out.ret,
-		  fc_info.as_out.state,
-		  fc_info.as_out.ext_info);
-
-	ret = convert_fc_ret(fc_info.as_out.ret);
-
-	*state  = fc_info.as_out.state;
-	*ext_info = fc_info.as_out.ext_info;
-
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
-
-	return ret;
-}
-
-/* Yield to MobiCore */
-int mc_yield(void)
-{
-	int ret = 0;
-	union fc_generic yield;
-
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&yield, 0, sizeof(yield));
-	yield.as_in.cmd = MC_SMC_N_YIELD;
-	mc_fastcall(&yield);
-	ret = convert_fc_ret(yield.as_out.ret);
-
-	return ret;
-}
-
-/* call common notify */
-int mc_nsiq(void)
-{
-	int ret = 0;
-	union fc_generic nsiq;
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&nsiq, 0, sizeof(nsiq));
-	nsiq.as_in.cmd = MC_SMC_N_SIQ;
-	mc_fastcall(&nsiq);
-	ret = convert_fc_ret(nsiq.as_out.ret);
-
-	return ret;
-}
-
-/* call common notify */
-int _nsiq(void)
-{
-	int ret = 0;
-	union fc_generic nsiq;
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&nsiq, 0, sizeof(nsiq));
-	nsiq.as_in.cmd = MC_SMC_N_SIQ;
-	_smc(&nsiq);
-	ret = convert_fc_ret(nsiq.as_out.ret);
-
-	return ret;
-}
-
-/* Call the INIT fastcall to setup MobiCore initialization */
-int mc_init(uint32_t base, uint32_t nq_offset, uint32_t nq_length,
-	uint32_t mcp_offset, uint32_t mcp_length)
-{
-	int ret = 0;
-	union mc_fc_init fc_init;
-
-	MCDRV_DBG_VERBOSE(mcd, "enter\n");
-
-	memset(&fc_init, 0, sizeof(fc_init));
-
-	fc_init.as_in.cmd = MC_FC_INIT;
-	/* base address of mci buffer 4KB aligned */
-	fc_init.as_in.base = base;
-	/* notification buffer start/length [16:16] [start, length] */
-	fc_init.as_in.nq_info = (nq_offset << 16) | (nq_length & 0xFFFF);
-	/* mcp buffer start/length [16:16] [start, length] */
-	fc_init.as_in.mcp_info = (mcp_offset << 16) | (mcp_length & 0xFFFF);
-
-	/*
-	 * Set KMOD notification queue to start of MCI
-	 * mciInfo was already set up in mmap
-	 */
-	MCDRV_DBG(mcd,
-		  "cmd=0x%08x, base=0x%08x,nq_info=0x%08x, mcp_info=0x%08x\n",
-		  fc_init.as_in.cmd, fc_init.as_in.base, fc_init.as_in.nq_info,
-		  fc_init.as_in.mcp_info);
-
-	mc_fastcall(&fc_init.as_generic);
-
-	MCDRV_DBG(mcd, "out cmd=0x%08x, ret=0x%08x\n", fc_init.as_out.resp,
-		  fc_init.as_out.ret);
-
-	ret = convert_fc_ret(fc_init.as_out.ret);
-
-	MCDRV_DBG_VERBOSE(mcd, "exit with %d/0x%08X\n", ret, ret);
-
-	return ret;
-}
-
-/* Return MobiCore driver version */
-uint32_t mc_get_version(void)
-{
-	MCDRV_DBG(mcd, "MobiCore driver version is %i.%i\n",
-		  MCDRVMODULEAPI_VERSION_MAJOR,
-		  MCDRVMODULEAPI_VERSION_MINOR);
-
-	return MC_VERSION(MCDRVMODULEAPI_VERSION_MAJOR,
-					MCDRVMODULEAPI_VERSION_MINOR);
-}
diff --git a/drivers/gud/setupDrivers.sh b/drivers/gud/setupDrivers.sh
new file mode 100644
index 0000000..8f877b7
--- /dev/null
+++ b/drivers/gud/setupDrivers.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+export COMP_PATH_ROOT=$(dirname $(readlink -f $BASH_SOURCE)) #set this to the absolute path of the folder containing this file
+
+# This part has to be set by the customer
+# To be set, absolute path of kernel folder
+export LINUX_PATH=
+# To be set, absolute path! CROSS_COMPILE variable needed by kernel eg /home/user/arm-2009q3/bin/arm-none-linux-gnueabi-
+export CROSS_COMPILE=
+# To be set, build mode debug or release
+export MODE=debug
+# To be set, the absolute path to the Linux Android NDK
+export NDK_PATH=
+
+# Global variables needed by build scripts
+export COMP_PATH_Logwrapper=$COMP_PATH_ROOT/Logwrapper/Out
+export COMP_PATH_MobiCore=$COMP_PATH_ROOT/MobiCore/Out
+export COMP_PATH_MobiCoreDriverMod=$COMP_PATH_ROOT/mobicore_driver/Out
+export COMP_PATH_MobiCoreDriverLib=$COMP_PATH_ROOT/daemon/Out
+export COMP_PATH_AndroidNdkLinux=$NDK_PATH
\ No newline at end of file
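
The script only exports paths for the out-of-tree MobiCore build scripts; a typical use would be to fill in LINUX_PATH, CROSS_COMPILE and NDK_PATH and then source it (for example, source drivers/gud/setupDrivers.sh) from the shell that runs those scripts. The build entry points themselves are outside this patch.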
diff --git a/drivers/input/misc/mma8x5x.c b/drivers/input/misc/mma8x5x.c
index d708d94..a605720 100644
--- a/drivers/input/misc/mma8x5x.c
+++ b/drivers/input/misc/mma8x5x.c
@@ -26,8 +26,8 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
-#include <linux/input-polldev.h>
 #include <linux/sensors.h>
+#include <linux/input.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of_gpio.h>
 #include <linux/irq.h>
@@ -44,8 +44,6 @@
 #define POLL_INTERVAL_MAX	10000
 #define POLL_INTERVAL		100 /* msecs */
 
-/* if sensor is standby ,set POLL_STOP_TIME to slow down the poll */
-#define POLL_STOP_TIME		10000
 #define INPUT_FUZZ			32
 #define INPUT_FLAT			32
 #define INPUT_DATA_DIVIDER	16
@@ -81,6 +79,7 @@
 #define MMA_INT_ROUTING_CFG	0x01
 
 #define MMA_POWER_CFG_MASK	0xFE
+#define MMA_ODR_MASK		0x38
 
 struct sensor_regulator {
 	struct regulator *vreg;
@@ -189,7 +188,7 @@
 };
 struct mma8x5x_data {
 	struct i2c_client *client;
-	struct input_polled_dev *poll_dev;
+	struct delayed_work dwork;
 	struct input_dev *idev;
 	struct mutex data_lock;
 	struct sensors_classdev cdev;
@@ -229,7 +228,6 @@
 	{{ 0,  1,  0}, { 1,  0,	0}, {0, 0,  -1} },
 	{{ 1,  0,  0}, { 0, -1,	0}, {0, 0,  -1} },
 };
-static struct mma8x5x_data *drv_data;
 static int mma8x5x_config_regulator(struct i2c_client *client, bool on)
 {
 	int rc = 0, i;
@@ -408,7 +406,7 @@
 	if (result < 0)
 		goto out;
 
-	val = (u8)result | val;
+	val = ((u8)result & ~MMA_ODR_MASK) | val;
 	result = i2c_smbus_write_byte_data(client, MMA8X5X_CTRL_REG1,
 					   (val & MMA_POWER_CFG_MASK));
 	if (result < 0)
@@ -487,32 +485,27 @@
 
 static void mma8x5x_report_data(struct mma8x5x_data *pdata)
 {
-	struct input_polled_dev *poll_dev = pdata->poll_dev;
 	struct mma8x5x_data_axis data;
+
 	mutex_lock(&pdata->data_lock);
-	if ((pdata->active & MMA_STATE_MASK) == MMA_STANDBY) {
-		poll_dev->poll_interval = POLL_STOP_TIME;
-		/* if standby ,set as 10s to slow the poll. */
-		goto out;
-	} else {
-		if (poll_dev->poll_interval == POLL_STOP_TIME)
-			poll_dev->poll_interval = pdata->poll_delay;
-	}
 	if (mma8x5x_read_data(pdata->client, &data) != 0)
 		goto out;
 	mma8x5x_data_convert(pdata, &data);
-	input_report_abs(poll_dev->input, ABS_X, data.x);
-	input_report_abs(poll_dev->input, ABS_Y, data.y);
-	input_report_abs(poll_dev->input, ABS_Z, data.z);
-	input_sync(poll_dev->input);
+	input_report_abs(pdata->idev, ABS_X, data.x);
+	input_report_abs(pdata->idev, ABS_Y, data.y);
+	input_report_abs(pdata->idev, ABS_Z, data.z);
+	input_sync(pdata->idev);
 out:
 	mutex_unlock(&pdata->data_lock);
 }
 
-static void mma8x5x_dev_poll(struct input_polled_dev *dev)
+static void mma8x5x_dev_poll(struct work_struct *work)
 {
-	struct mma8x5x_data *pdata = (struct mma8x5x_data *)dev->private;
+	struct mma8x5x_data *pdata = container_of((struct delayed_work *)work,
+				struct mma8x5x_data, dwork);
 	mma8x5x_report_data(pdata);
+	schedule_delayed_work(&pdata->dwork,
+				msecs_to_jiffies(pdata->poll_delay));
 }
 
 static irqreturn_t mma8x5x_interrupt(int vec, void *data)
@@ -577,12 +570,18 @@
 				dev_err(&client->dev, "change device state failed!");
 				goto err_failed;
 			}
+
+			schedule_delayed_work(&pdata->dwork,
+				msecs_to_jiffies(pdata->poll_delay));
+
 			pdata->active = MMA_ACTIVED;
 			dev_dbg(&client->dev, "%s:mma enable setting active.\n",
 					__func__);
 		}
 	} else if (enable == 0) {
 		if (pdata->active == MMA_ACTIVED) {
+			cancel_delayed_work_sync(&pdata->dwork);
+
 			val = i2c_smbus_read_byte_data(client,
 					MMA8X5X_CTRL_REG1);
 			if (val < 0) {
@@ -616,7 +615,7 @@
 static ssize_t mma8x5x_enable_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	struct i2c_client *client;
 	u8 val;
 	int enable;
@@ -641,7 +640,7 @@
 				    struct device_attribute *attr,
 				    const char *buf, size_t count)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	struct i2c_client *client;
 	int ret;
 	unsigned long enable;
@@ -663,7 +662,7 @@
 static ssize_t mma8x5x_position_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	int position = 0;
 
 	if (!pdata) {
@@ -680,7 +679,7 @@
 				    struct device_attribute *attr,
 				    const char *buf, size_t count)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	int position;
 	int ret;
 
@@ -714,7 +713,6 @@
 	} else {
 		mutex_lock(&pdata->data_lock);
 		pdata->poll_delay = delay_ms;
-		pdata->poll_dev->poll_interval = pdata->poll_delay;
 		mutex_unlock(&pdata->data_lock);
 	}
 
@@ -724,7 +722,7 @@
 static ssize_t mma8x5x_poll_delay_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 
 	if (!pdata) {
 		dev_err(dev, "Invalid driver private data!");
@@ -738,7 +736,7 @@
 				    struct device_attribute *attr,
 				    const char *buf, size_t count)
 {
-	struct mma8x5x_data *pdata = drv_data;
+	struct mma8x5x_data *pdata = dev_get_drvdata(dev);
 	int delay;
 	int ret;
 
@@ -825,7 +823,6 @@
 	struct input_dev *idev;
 	struct mma8x5x_data *pdata;
 	struct i2c_adapter *adapter;
-	struct input_polled_dev *poll_dev;
 	adapter = to_i2c_adapter(client->dev.parent);
 	/* power on the device */
 	result = mma8x5x_config_regulator(client, 1);
@@ -836,7 +833,7 @@
 					 I2C_FUNC_SMBUS_BYTE |
 					 I2C_FUNC_SMBUS_BYTE_DATA);
 	if (!result)
-		goto err_out;
+		goto err_check_id;
 
 	chip_id = i2c_smbus_read_byte_data(client, MMA8X5X_WHO_AM_I);
 
@@ -846,14 +843,14 @@
 			chip_id, MMA8451_ID, MMA8452_ID, MMA8453_ID,
 			MMA8652_ID, MMA8653_ID);
 		result = -EINVAL;
-		goto err_out;
+		goto err_check_id;
 	}
 	/* set the private data */
 	pdata = kzalloc(sizeof(struct mma8x5x_data), GFP_KERNEL);
 	if (!pdata) {
 		result = -ENOMEM;
 		dev_err(&client->dev, "alloc data memory error!\n");
-		goto err_out;
+		goto err_check_id;
 	}
 
 	if (client->dev.of_node) {
@@ -867,12 +864,10 @@
 	}
 
 	/* Initialize the MMA8X5X chip */
-	drv_data = pdata;
 	pdata->client = client;
 	pdata->chip_id = chip_id;
 	pdata->mode = MODE_2G;
-	pdata->poll_delay = POLL_STOP_TIME;
-	pdata->poll_dev = NULL;
+	pdata->poll_delay = POLL_INTERVAL;
 
 	mutex_init(&pdata->data_lock);
 	i2c_set_clientdata(client, pdata);
@@ -894,29 +889,9 @@
 			if (result) {
 				dev_err(&client->dev,
 					"set_direction for irq gpio failed\n");
-				goto err_set_direction;
+				goto err_set_gpio_direction;
 			}
 		}
-		idev = input_allocate_device();
-		if (!idev) {
-			result = -ENOMEM;
-			dev_err(&client->dev, "alloc input device failed!\n");
-			goto err_alloc_poll_device;
-		}
-		input_set_drvdata(idev, pdata);
-		idev->name = ACCEL_INPUT_DEV_NAME;
-		idev->uniq = mma8x5x_id2name(pdata->chip_id);
-		idev->id.bustype = BUS_I2C;
-		idev->evbit[0] = BIT_MASK(EV_ABS);
-		input_set_abs_params(idev, ABS_X, -0x7fff, 0x7fff, 0, 0);
-		input_set_abs_params(idev, ABS_Y, -0x7fff, 0x7fff, 0, 0);
-		input_set_abs_params(idev, ABS_Z, -0x7fff, 0x7fff, 0, 0);
-		result = input_register_device(idev);
-		if (result) {
-			dev_err(&client->dev, "register input device failed!\n");
-			goto err_register_device;
-		}
-		pdata->idev = idev;
 		device_init_wakeup(&client->dev, true);
 		enable_irq_wake(client->irq);
 		result = request_threaded_irq(client->irq, NULL,
@@ -930,34 +905,29 @@
 		}
 		mma8x5x_device_int_init(client);
 	} else {
-		/* create the input poll device */
-		poll_dev = input_allocate_polled_device();
-		if (!poll_dev) {
-			result = -ENOMEM;
-			dev_err(&client->dev, "alloc poll device failed!\n");
-			goto err_alloc_poll_device;
-		}
-		pdata->poll_dev = poll_dev;
-		pdata->idev = NULL;
-		poll_dev->poll = mma8x5x_dev_poll;
-		poll_dev->poll_interval = POLL_STOP_TIME;
-		poll_dev->poll_interval_min = POLL_INTERVAL_MIN;
-		poll_dev->poll_interval_max = POLL_INTERVAL_MAX;
-		poll_dev->private = pdata;
-		idev = poll_dev->input;
-		idev->name = ACCEL_INPUT_DEV_NAME;
-		idev->uniq = mma8x5x_id2name(pdata->chip_id);
-		idev->id.bustype = BUS_I2C;
-		idev->evbit[0] = BIT_MASK(EV_ABS);
-		input_set_abs_params(idev, ABS_X, -0x7fff, 0x7fff, 0, 0);
-		input_set_abs_params(idev, ABS_Y, -0x7fff, 0x7fff, 0, 0);
-		input_set_abs_params(idev, ABS_Z, -0x7fff, 0x7fff, 0, 0);
-		result = input_register_polled_device(pdata->poll_dev);
-		if (result) {
-			dev_err(&client->dev, "register poll device failed!\n");
-			goto err_register_device;
-		}
+		INIT_DELAYED_WORK(&pdata->dwork, mma8x5x_dev_poll);
 	}
+	idev = input_allocate_device();
+	if (!idev) {
+		result = -ENOMEM;
+		dev_err(&client->dev, "alloc input device failed!\n");
+		goto err_alloc_poll_device;
+	}
+	input_set_drvdata(idev, pdata);
+	idev->name = ACCEL_INPUT_DEV_NAME;
+	idev->uniq = mma8x5x_id2name(pdata->chip_id);
+	idev->id.bustype = BUS_I2C;
+	idev->evbit[0] = BIT_MASK(EV_ABS);
+	input_set_abs_params(idev, ABS_X, -0x7fff, 0x7fff, 0, 0);
+	input_set_abs_params(idev, ABS_Y, -0x7fff, 0x7fff, 0, 0);
+	input_set_abs_params(idev, ABS_Z, -0x7fff, 0x7fff, 0, 0);
+	result = input_register_device(idev);
+	if (result) {
+		dev_err(&client->dev, "register input device failed!\n");
+		goto err_register_device;
+	}
+	pdata->idev = idev;
+
 	result = sysfs_create_group(&idev->dev.kobj, &mma8x5x_attr_group);
 	if (result) {
 		dev_err(&client->dev, "create device file failed!\n");
@@ -983,23 +953,20 @@
 err_create_class_sysfs:
 	sysfs_remove_group(&idev->dev.kobj, &mma8x5x_attr_group);
 err_create_sysfs:
-	input_unregister_polled_device(pdata->poll_dev);
+	input_unregister_device(idev);
+err_register_device:
+	input_free_device(idev);
+err_alloc_poll_device:
 err_register_irq:
 	if (pdata->use_int)
 		device_init_wakeup(&client->dev, false);
-err_register_device:
-	if (pdata->use_int)
-		input_free_device(idev);
-	else
-		input_free_polled_device(pdata->poll_dev);
-err_alloc_poll_device:
-err_set_direction:
+err_set_gpio_direction:
 	if (gpio_is_valid(pdata->int_pin) && pdata->use_int)
 		gpio_free(pdata->int_pin);
 err_request_gpio:
 err_parse_dt:
 	kfree(pdata);
-err_out:
+err_check_id:
 	mma8x5x_config_regulator(client, 0);
 err_power_on:
 	return result;
@@ -1007,14 +974,22 @@
 static int __devexit mma8x5x_remove(struct i2c_client *client)
 {
 	struct mma8x5x_data *pdata = i2c_get_clientdata(client);
-	struct input_polled_dev *poll_dev;
+	struct input_dev *idev;
+
 	mma8x5x_device_stop(client);
 	if (pdata) {
-		poll_dev = pdata->poll_dev;
-		input_unregister_polled_device(poll_dev);
-		input_free_polled_device(poll_dev);
+		idev = pdata->idev;
+		sysfs_remove_group(&idev->dev.kobj, &mma8x5x_attr_group);
+		if (pdata->use_int) {
+			device_init_wakeup(&client->dev, false);
+			if (gpio_is_valid(pdata->int_pin))
+				gpio_free(pdata->int_pin);
+		}
+		input_unregister_device(idev);
+		input_free_device(idev);
 		kfree(pdata);
 	}
+	mma8x5x_config_regulator(client, 0);
 	return 0;
 }
 
@@ -1026,8 +1001,10 @@
 
 	if (pdata->use_int && pdata->active == MMA_ACTIVED)
 		return 0;
-	if (pdata->active == MMA_ACTIVED)
+	if (pdata->active == MMA_ACTIVED) {
 		mma8x5x_device_stop(client);
+		cancel_delayed_work_sync(&pdata->dwork);
+	}
 	if (pdata->active & MMA_SHUTTEDDOWN)
 		return 0;
 	if (!mma8x5x_config_regulator(client, 0))
@@ -1058,6 +1035,8 @@
 	if (pdata->active == MMA_ACTIVED) {
 		val = i2c_smbus_read_byte_data(client, MMA8X5X_CTRL_REG1);
 		i2c_smbus_write_byte_data(client, MMA8X5X_CTRL_REG1, val|0x01);
+		schedule_delayed_work(&pdata->dwork,
+				msecs_to_jiffies(pdata->poll_delay));
 	}
 
 	return 0;
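
The accelerometer now polls with a plain delayed work item instead of the input-polldev helper: polling starts when the sensor is enabled, stops with cancel_delayed_work_sync() on disable and suspend, and the work handler re-queues itself at the configured poll delay. A minimal self-contained sketch of that self-rearming pattern, with generic names (the real handler also reads the chip and reports the input events):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_poll_ctx {
	struct delayed_work dwork;
	unsigned int poll_delay_ms;
};

static void demo_poll_fn(struct work_struct *work)
{
	struct demo_poll_ctx *ctx = container_of(to_delayed_work(work),
						 struct demo_poll_ctx, dwork);

	/* ... read the hardware and report input events here ... */

	/* re-arm for the next sample */
	schedule_delayed_work(&ctx->dwork,
			      msecs_to_jiffies(ctx->poll_delay_ms));
}

static void demo_poll_start(struct demo_poll_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->dwork, demo_poll_fn);
	schedule_delayed_work(&ctx->dwork,
			      msecs_to_jiffies(ctx->poll_delay_ms));
}

static void demo_poll_stop(struct demo_poll_ctx *ctx)
{
	/* waits for a running handler and prevents further re-arming */
	cancel_delayed_work_sync(&ctx->dwork);
}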
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 5902a99..206620c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -1223,6 +1223,8 @@
 			msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info);
 			stream_info->state = ACTIVE;
 		}
+		vfe_dev->axi_data.src_info[
+			SRC_TO_INTF(stream_info->stream_src)].frame_id = 0;
 	}
 	msm_isp_update_stream_bandwidth(vfe_dev);
 	vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev, wm_reload_mask);
@@ -1236,16 +1238,6 @@
 			update_camif_state(vfe_dev, camif_update);
 	}
 
-	if (vfe_dev->axi_data.src_info[VFE_RAW_0].raw_stream_count > 0) {
-		vfe_dev->axi_data.src_info[VFE_RAW_0].frame_id = 0;
-	}
-	else if (vfe_dev->axi_data.src_info[VFE_RAW_1].raw_stream_count > 0) {
-		vfe_dev->axi_data.src_info[VFE_RAW_1].frame_id = 0;
-	}
-	else if (vfe_dev->axi_data.src_info[VFE_RAW_2].raw_stream_count > 0) {
-		vfe_dev->axi_data.src_info[VFE_RAW_2].frame_id = 0;
-	}
-
 	if (wait_for_complete)
 		rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index f0721c3..9dbecfb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1343,7 +1343,6 @@
 			mutex_lock(&inst->lock);
 		}
 		mutex_unlock(&inst->lock);
-		msm_smem_delete_client(inst->mem_client);
 		debugfs_remove_recursive(inst->debugfs_root);
 	}
 }
@@ -1401,7 +1400,9 @@
 	for (i = 0; i < MAX_PORT_NUM; i++)
 		vb2_queue_release(&inst->bufq[i].vb2_bufq);
 
+	msm_smem_delete_client(inst->mem_client);
 	pr_info(VIDC_DBG_TAG "Closed video instance: %p\n", VIDC_INFO, inst);
 	kfree(inst);
+
 	return 0;
 }
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 4fcd20e..d6000c9 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1561,9 +1561,6 @@
 
 	dev->intr_status = 0;
 	INIT_LIST_HEAD(&dev->sess_head);
-	mutex_init(&dev->read_lock);
-	mutex_init(&dev->write_lock);
-	mutex_init(&dev->session_lock);
 	venus_hfi_set_registers(dev);
 
 	if (!dev->hal_client) {
@@ -3301,7 +3298,6 @@
 			__func__, device);
 		return -EINVAL;
 	}
-	mutex_init(&device->clk_pwr_lock);
 	device->clk_gating_level = VCODEC_CLK;
 	rc = venus_hfi_iommu_attach(device);
 	if (rc) {
@@ -3574,6 +3570,11 @@
 		goto error_createq_pm;
 	}
 
+	mutex_init(&hdevice->read_lock);
+	mutex_init(&hdevice->write_lock);
+	mutex_init(&hdevice->session_lock);
+	mutex_init(&hdevice->clk_pwr_lock);
+
 	if (hal_ctxt.dev_count == 0)
 		INIT_LIST_HEAD(&hal_ctxt.dev_head);
 
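
The read/write, session and clock-power mutexes are now set up once, in the path that allocates the hal device, instead of in the later init routines shown above; that avoids running mutex_init() again on locks that may already be in use. A minimal sketch of the init-once-at-creation shape, with hypothetical names and trimmed error handling:

#include <linux/slab.h>
#include <linux/mutex.h>

struct demo_hal_device {		/* stand-in for the venus hal device */
	struct mutex read_lock;
	struct mutex write_lock;
	struct mutex session_lock;
	struct mutex clk_pwr_lock;
};

static struct demo_hal_device *demo_hal_device_create(void)
{
	struct demo_hal_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	/* initialise locks exactly once, before the device is published */
	mutex_init(&dev->read_lock);
	mutex_init(&dev->write_lock);
	mutex_init(&dev->session_lock);
	mutex_init(&dev->clk_pwr_lock);
	return dev;
}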
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9c6bef6..6de1cde 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2638,15 +2638,13 @@
 	struct mmc_host *host = card->host;
 	unsigned long flags;
 
+	if (req && !mq->mqrq_prev->req) {
+		mmc_rpm_hold(host, &card->dev);
 #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 	if (mmc_bus_needs_resume(card->host)) {
 		mmc_resume_bus(card->host);
-		mmc_blk_set_blksize(md, card);
 	}
 #endif
-
-	if (req && !mq->mqrq_prev->req) {
-		mmc_rpm_hold(host, &card->dev);
 		/* claim host only for the first request */
 		mmc_claim_host(card->host);
 		if (card->ext_csd.bkops_en)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c496077..1feb26b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -983,6 +983,10 @@
  */
 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
 {
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+	if (mmc_bus_needs_resume(host))
+		mmc_resume_bus(host);
+#endif
 	__mmc_start_req(host, mrq);
 	mmc_wait_for_req_done(host, mrq);
 }
@@ -2011,9 +2015,6 @@
 		host->bus_ops->resume(host);
 	}
 
-	if (host->bus_ops->detect && !host->bus_dead)
-		host->bus_ops->detect(host);
-
 	mmc_bus_put(host);
 	printk("%s: Deferred resume completed\n", mmc_hostname(host));
 	return 0;
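
With the deferred-resume check moved into mmc_wait_for_req(), any caller that reaches the core request path wakes a deferred bus before the request starts, so the block layer no longer has to reset the block size or re-run card detection afterwards. A hedged caller-side sketch showing that a simple command request needs no explicit resume of its own (the command choice is illustrative only):

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

static int demo_send_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	struct mmc_request mrq = {NULL};

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	mrq.cmd = &cmd;

	mmc_claim_host(card->host);
	/* mmc_wait_for_req() now resumes a deferred bus before starting */
	mmc_wait_for_req(card->host, &mrq);
	mmc_release_host(card->host);

	if (cmd.error)
		return cmd.error;
	*status = cmd.resp[0];
	return 0;
}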
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 32f5220..822548e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1503,7 +1503,8 @@
 		mrq->cmd->error = -EIO;
 		if (mrq->data)
 			mrq->data->error = -EIO;
-		tasklet_schedule(&host->finish_tasklet);
+		mmc_request_done(host->mmc, mrq);
+		sdhci_runtime_pm_put(host);
 		return;
 	}
 
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 66875f7..d2b381b 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -189,6 +189,7 @@
 
 /* max 20mhz channel count */
 #define WCNSS_MAX_CH_NUM			45
+#define WCNSS_MAX_PIL_RETRY			3
 
 #define VALID_VERSION(version) \
 	((strncmp(version, "INVALID", WCNSS_VERSION_LEN)) ? 1 : 0)
@@ -592,6 +593,14 @@
 	reg = readl_relaxed(reg_addr);
 	pr_info_ratelimited("%s: PRONTO_SAW2_SPM_STS %08x\n", __func__, reg);
 
+	reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET;
+	reg = readl_relaxed(reg_addr);
+	pr_err("PRONTO_PLL_STATUS %08x\n", reg);
+
+	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET;
+	reg4 = readl_relaxed(reg_addr);
+	pr_err("PMU_CPU_CMD_RCGR %08x\n", reg4);
+
 	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_COM_GDSCR_OFFSET;
 	reg = readl_relaxed(reg_addr);
 	pr_info_ratelimited("%s:  PRONTO_PMU_COM_GDSCR %08x\n",
@@ -640,10 +649,6 @@
 	reg = readl_relaxed(reg_addr);
 	pr_info_ratelimited("%s: CCU_CCPU_LAST_ADDR2 %08x\n", __func__, reg);
 
-	reg_addr = penv->pronto_pll_base + PRONTO_PLL_STATUS_OFFSET;
-	reg = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s: PRONTO_PLL_STATUS %08x\n", __func__, reg);
-
 	tst_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_OFFSET;
 	tst_ctrl_addr = penv->pronto_a2xb_base + A2XB_TSTBUS_CTRL_OFFSET;
 
@@ -717,10 +722,6 @@
 	reg3 = readl_relaxed(reg_addr);
 	pr_info_ratelimited("%s:  PMU_WLAN_AHB_CBCR %08x\n", __func__, reg3);
 
-	reg_addr = penv->msm_wcnss_base + PRONTO_PMU_CPU_AHB_CMD_RCGR_OFFSET;
-	reg4 = readl_relaxed(reg_addr);
-	pr_info_ratelimited("%s:  PMU_CPU_CMD_RCGR %08x\n", __func__, reg4);
-
 	if ((reg & PRONTO_PMU_WLAN_BCR_BLK_ARES) ||
 		(reg2 & PRONTO_PMU_WLAN_GDSCR_SW_COLLAPSE) ||
 		(!(reg4 & PRONTO_PMU_CPU_AHB_CMD_RCGR_ROOT_EN)) ||
@@ -2068,6 +2069,7 @@
 	unsigned long wcnss_phys_addr;
 	int size = 0;
 	struct resource *res;
+	int pil_retry = 0;
 	int has_pronto_hw = of_property_read_bool(pdev->dev.of_node,
 									"qcom,has-pronto-hw");
 
@@ -2259,12 +2261,17 @@
 		penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED;
 	}
 
-	/* trigger initialization of the WCNSS */
-	penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
-	if (IS_ERR(penv->pil)) {
-		dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
-		ret = PTR_ERR(penv->pil);
-		wcnss_pronto_log_debug_regs();
+	do {
+		/* trigger initialization of the WCNSS */
+		penv->pil = subsystem_get(WCNSS_PIL_DEVICE);
+		if (IS_ERR(penv->pil)) {
+			dev_err(&pdev->dev, "Peripheral Loader failed on WCNSS.\n");
+			ret = PTR_ERR(penv->pil);
+			wcnss_pronto_log_debug_regs();
+		}
+	} while (pil_retry++ < WCNSS_MAX_PIL_RETRY && IS_ERR(penv->pil));
+
+	if (pil_retry >= WCNSS_MAX_PIL_RETRY) {
 		penv->pil = NULL;
 		goto fail_pil;
 	}
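
Probe now retries the PIL bring-up a bounded number of times and dumps the Pronto debug registers after each failed attempt, instead of giving up on the first error. A minimal sketch of a bounded-retry loop of that general shape, with a hypothetical loader standing in for subsystem_get():

#include <linux/err.h>

#define DEMO_MAX_RETRY	3

/* hypothetical loader: returns a cookie, or ERR_PTR() on failure */
void *demo_load_subsystem(const char *name);

static void *demo_load_with_retry(const char *name)
{
	void *pil = ERR_PTR(-ENODEV);
	int attempt;

	for (attempt = 0; attempt <= DEMO_MAX_RETRY; attempt++) {
		pil = demo_load_subsystem(name);
		if (!IS_ERR(pil))
			break;		/* loaded successfully */
		/* log / dump debug state here before the next attempt */
	}

	return pil;			/* still an ERR_PTR() if every attempt failed */
}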
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
index b324130..1111aeb 100644
--- a/drivers/video/msm/mdss/mdp3_ctrl.c
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -798,7 +798,7 @@
 
 	rc = mdp3_dma->stop(mdp3_dma, mdp3_session->intf);
 	if (rc) {
-		pr_err("fail to stop the MDP3 dma\n");
+		pr_err("fail to stop the MDP3 dma %d\n", rc);
 		goto reset_error;
 	}
 
@@ -1011,7 +1011,11 @@
 	panel = mdp3_session->panel;
 	if (!mdp3_iommu_is_attached(MDP3_CLIENT_DMA_P)) {
 		pr_debug("continuous splash screen, IOMMU not attached\n");
-		mdp3_ctrl_reset(mfd);
+		rc = mdp3_ctrl_reset(mfd);
+		if (rc) {
+			pr_err("fail to reset display\n");
+			return -EINVAL;
+		}
 		reset_done = true;
 	}
 
@@ -1092,7 +1096,11 @@
 
 	if (!mdp3_iommu_is_attached(MDP3_CLIENT_DMA_P)) {
 		pr_debug("continuous splash screen, IOMMU not attached\n");
-		mdp3_ctrl_reset(mfd);
+		rc = mdp3_ctrl_reset(mfd);
+		if (rc) {
+			pr_err("fail to reset display\n");
+			return;
+		}
 	}
 
 	mutex_lock(&mdp3_session->lock);
diff --git a/drivers/video/msm/mdss/mdp3_dma.c b/drivers/video/msm/mdss/mdp3_dma.c
index 800c4b3..993a36f 100644
--- a/drivers/video/msm/mdss/mdp3_dma.c
+++ b/drivers/video/msm/mdss/mdp3_dma.c
@@ -18,7 +18,7 @@
 #include "mdp3_hwio.h"
 
 #define DMA_STOP_POLL_SLEEP_US 1000
-#define DMA_STOP_POLL_TIMEOUT_US 32000
+#define DMA_STOP_POLL_TIMEOUT_US 200000
 #define DMA_HISTO_RESET_TIMEOUT_MS 40
 #define DMA_LUT_CONFIG_MASK 0xfffffbe8
 #define DMA_CCS_CONFIG_MASK 0xfffffc17
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index ca35aaf..7420f95 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -2277,10 +2277,12 @@
 		struct mdp_overlay_list *ovlist,
 		struct mdp_overlay *overlays)
 {
+	struct mdss_mdp_pipe *right_plist[MDSS_MDP_MAX_STAGE] = { 0 };
+	struct mdss_mdp_pipe *left_plist[MDSS_MDP_MAX_STAGE] = { 0 };
 	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct mdss_mdp_pipe *pipe;
 	struct mdp_overlay *req;
-	int ret = 0;
+	int ret = 0, left_cnt = 0, right_cnt = 0;
 	int i;
 	u32 new_reqs = 0;
 
@@ -2309,8 +2311,29 @@
 		/* keep track of the new overlays to unset in case of errors */
 		if (pipe->play_cnt == 0)
 			new_reqs |= pipe->ndx;
+
+		if (pipe->flags & MDSS_MDP_RIGHT_MIXER) {
+			if (right_cnt >= MDSS_MDP_MAX_STAGE) {
+				pr_err("too many pipes on right mixer\n");
+				ret = -EINVAL;
+				goto validate_exit;
+			}
+			right_plist[right_cnt] = pipe;
+			right_cnt++;
+		} else {
+			if (left_cnt >= MDSS_MDP_MAX_STAGE) {
+				pr_err("too many pipes on left mixer\n");
+				ret = -EINVAL;
+				goto validate_exit;
+			}
+			left_plist[left_cnt] = pipe;
+			left_cnt++;
+		}
 	}
 
+	ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
+			right_plist, right_cnt);
+
 validate_exit:
 	if (IS_ERR_VALUE(ret))
 		mdss_mdp_overlay_release(mfd, new_reqs);
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index 5c9ad9c..54ec6f8 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -1625,7 +1625,7 @@
  */
 int mdss_mdp_pp_resume(struct mdss_mdp_ctl *ctl, u32 dspp_num)
 {
-	u32 flags = 0, disp_num, bl;
+	u32 flags = 0, disp_num, bl, ret = 0;
 	struct pp_sts_type pp_sts;
 	struct mdss_ad_info *ad;
 	struct mdss_data_type *mdata = ctl->mdata;
@@ -1636,7 +1636,9 @@
 	disp_num = ctl->mfd->index;
 
 	if (dspp_num < mdata->nad_cfgs) {
-		ad = &mdata->ad_cfgs[dspp_num];
+		ret = mdss_mdp_get_ad(ctl->mfd, &ad);
+		if (ret)
+			return ret;
 
 		if (PP_AD_STATE_CFG & ad->state)
 			pp_ad_cfg_write(&mdata->ad_off[dspp_num], ad);
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 69944a6..1c6ea04 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -579,6 +579,15 @@
 	/* Clients must set this field to zero.*/
 } __packed;
 
+#define ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2 (0x00010DD8)
+
+struct asm_aac_stereo_mix_coeff_selection_param_v2 {
+	struct apr_hdr          hdr;
+	u32                     param_id;
+	u32                     param_size;
+	u32                     aac_stereo_mix_coeff_flag;
+} __packed;
+
 /* Allows a client to connect the desired stream to
  * the desired AFE port through the stream router
  *
diff --git a/sound/soc/codecs/msm8x10-wcd.c b/sound/soc/codecs/msm8x10-wcd.c
index 452bbab..c73b2c8 100644
--- a/sound/soc/codecs/msm8x10-wcd.c
+++ b/sound/soc/codecs/msm8x10-wcd.c
@@ -1200,6 +1200,13 @@
 			  MSM8X10_WCD_A_CDC_TX2_VOL_CTL_GAIN,
 			  -84, 40, digital_gain),
 
+	SOC_SINGLE_TLV("ADC1 Volume", MSM8X10_WCD_A_TX_1_EN, 2,
+					19, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC2 Volume", MSM8X10_WCD_A_TX_2_EN, 2,
+					19, 0, analog_gain),
+	SOC_SINGLE_TLV("ADC3 Volume", MSM8X10_WCD_A_TX_3_EN, 2,
+					19, 0, analog_gain),
+
 	SOC_SINGLE_S8_TLV("IIR1 INP1 Volume",
 			  MSM8X10_WCD_A_CDC_IIR1_GAIN_B1_CTL,
 			  -84, 40, digital_gain),
diff --git a/sound/soc/codecs/wcd9306.c b/sound/soc/codecs/wcd9306.c
index d84ba90..95f2041 100644
--- a/sound/soc/codecs/wcd9306.c
+++ b/sound/soc/codecs/wcd9306.c
@@ -43,6 +43,8 @@
 #define TAPAN_HPH_PA_SETTLE_COMP_OFF 13000
 
 #define DAPM_MICBIAS2_EXTERNAL_STANDALONE "MIC BIAS2 External Standalone"
+#define TAPAN_VALIDATE_RX_SBPORT_RANGE(port) (((port) >= 16) && ((port) <= 20))
+#define TAPAN_CONVERT_RX_SBPORT_ID(port) ((port) - 16) /* RX1 port ID = 0 */
 
 #define TAPAN_VDD_CX_OPTIMAL_UA 10000
 #define TAPAN_VDD_CX_SLEEP_UA 2000
@@ -3236,6 +3238,8 @@
 }
 
 #define TAPAN_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
+#define TAPAN_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \
+				  SNDRV_PCM_FMTBIT_S24_LE)
 static int tapan_write(struct snd_soc_codec *codec, unsigned int reg,
 	unsigned int value)
 {
@@ -3643,6 +3647,68 @@
 	return 0;
 }
 
+static void tapan_set_rxsb_port_format(struct snd_pcm_hw_params *params,
+				       struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct tapan_priv *tapan_p = snd_soc_codec_get_drvdata(codec);
+	struct wcd9xxx_codec_dai_data *cdc_dai;
+	struct wcd9xxx_ch *ch;
+	int port;
+	u8 bit_sel;
+	u16 sb_ctl_reg, field_shift;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		bit_sel = 0x2;
+		tapan_p->dai[dai->id].bit_width = 16;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		bit_sel = 0x0;
+		tapan_p->dai[dai->id].bit_width = 24;
+		break;
+	default:
+		dev_err(codec->dev, "Invalid format %x\n",
+			params_format(params));
+		return;
+	}
+
+	cdc_dai = &tapan_p->dai[dai->id];
+
+	list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) {
+		port = wcd9xxx_get_slave_port(ch->ch_num);
+
+		if (IS_ERR_VALUE(port) ||
+		    !TAPAN_VALIDATE_RX_SBPORT_RANGE(port)) {
+			dev_warn(codec->dev,
+				 "%s: invalid port ID %d returned for RX DAI\n",
+				 __func__, port);
+			return;
+		}
+
+		port = TAPAN_CONVERT_RX_SBPORT_ID(port);
+
+		if (port <= 3) {
+			sb_ctl_reg = TAPAN_A_CDC_CONN_RX_SB_B1_CTL;
+			field_shift = port << 1;
+		} else if (port <= 4) {
+			sb_ctl_reg = TAPAN_A_CDC_CONN_RX_SB_B2_CTL;
+			field_shift = (port - 4) << 1;
+		} else { /* should not happen */
+			dev_warn(codec->dev,
+				 "%s: bad port ID %d\n", __func__, port);
+			return;
+		}
+
+		dev_dbg(codec->dev,
+			"%s: sb_ctl_reg %x field_shift %x bit_sel %x\n",
+			__func__, sb_ctl_reg, field_shift, bit_sel);
+		snd_soc_update_bits(codec, sb_ctl_reg, 0x3 << field_shift,
+				    bit_sel << field_shift);
+	}
+}
+
+
 static int tapan_hw_params(struct snd_pcm_substream *substream,
 			    struct snd_pcm_hw_params *params,
 			    struct snd_soc_dai *dai)
@@ -3755,29 +3821,7 @@
 			snd_soc_update_bits(codec, TAPAN_A_CDC_CLK_I2S_CTL,
 					    0x03, (rx_fs_rate >> 0x05));
 		} else {
-			switch (params_format(params)) {
-			case SNDRV_PCM_FORMAT_S16_LE:
-				snd_soc_update_bits(codec,
-					TAPAN_A_CDC_CONN_RX_SB_B1_CTL,
-					0xFF, 0xAA);
-				snd_soc_update_bits(codec,
-					TAPAN_A_CDC_CONN_RX_SB_B2_CTL,
-					0xFF, 0x2A);
-				tapan->dai[dai->id].bit_width = 16;
-				break;
-			case SNDRV_PCM_FORMAT_S24_LE:
-				snd_soc_update_bits(codec,
-					TAPAN_A_CDC_CONN_RX_SB_B1_CTL,
-					0xFF, 0x00);
-				snd_soc_update_bits(codec,
-					TAPAN_A_CDC_CONN_RX_SB_B2_CTL,
-					0xFF, 0x00);
-				tapan->dai[dai->id].bit_width = 24;
-				break;
-			default:
-				dev_err(codec->dev, "Invalid format\n");
-				break;
-			}
+			tapan_set_rxsb_port_format(params, dai);
 			tapan->dai[dai->id].rate   = params_rate(params);
 		}
 		break;
@@ -3894,7 +3938,7 @@
 		.playback = {
 			.stream_name = "AIF1 Playback",
 			.rates = WCD9306_RATES,
-			.formats = TAPAN_FORMATS,
+			.formats = TAPAN_FORMATS_S16_S24_LE,
 			.rate_max = 192000,
 			.rate_min = 8000,
 			.channels_min = 1,
@@ -3922,7 +3966,7 @@
 		.playback = {
 			.stream_name = "AIF2 Playback",
 			.rates = WCD9306_RATES,
-			.formats = TAPAN_FORMATS,
+			.formats = TAPAN_FORMATS_S16_S24_LE,
 			.rate_min = 8000,
 			.rate_max = 192000,
 			.channels_min = 1,
@@ -3964,7 +4008,7 @@
 		.playback = {
 			.stream_name = "AIF3 Playback",
 			.rates = WCD9306_RATES,
-			.formats = TAPAN_FORMATS,
+			.formats = TAPAN_FORMATS_S16_S24_LE,
 			.rate_min = 8000,
 			.rate_max = 192000,
 			.channels_min = 1,
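
A small standalone sketch of the port-to-register mapping that tapan_set_rxsb_port_format() above relies on, assuming each RX SlimBus port owns a 2-bit bit-width field and four such fields are packed per control register (converted ports 0-3 in the B1 control register, port 4 in B2). The enum values here are placeholders, not the actual TAPAN_A_CDC_CONN_RX_SB_B{1,2}_CTL codec registers.

/* Placeholder register ids standing in for the RX SB B1/B2 control registers */
enum rx_sb_reg { RX_SB_B1_CTL, RX_SB_B2_CTL };

/* Map a converted RX port (RX1 == 0) to its control register and 2-bit shift. */
static int rx_sb_field(int port, enum rx_sb_reg *reg, unsigned int *shift)
{
	if (port >= 0 && port <= 3) {
		*reg = RX_SB_B1_CTL;
		*shift = port << 1;		/* fields at bits 0, 2, 4, 6 */
	} else if (port == 4) {
		*reg = RX_SB_B2_CTL;
		*shift = (port - 4) << 1;	/* first field of the second register */
	} else {
		return -1;			/* outside RX1..RX5 */
	}
	return 0;
}
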
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 5dedec8..b72590f 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -1652,6 +1652,21 @@
 	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
 };
 
+static const char * const iir_inp2_text[] = {
+	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
+	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const iir_inp3_text[] = {
+	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
+	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
+static const char * const iir_inp4_text[] = {
+	"ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8",
+	"DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7"
+};
+
 static const struct soc_enum rx_mix1_inp1_chain_enum =
 	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_RX1_B1_CTL, 0, 12, rx_mix1_text);
 
@@ -1800,6 +1815,24 @@
 static const struct soc_enum iir2_inp1_mux_enum =
 	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B1_CTL, 0, 18, iir_inp1_text);
 
+static const struct soc_enum iir1_inp2_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B2_CTL, 0, 18, iir_inp2_text);
+
+static const struct soc_enum iir2_inp2_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B2_CTL, 0, 18, iir_inp2_text);
+
+static const struct soc_enum iir1_inp3_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B3_CTL, 0, 18, iir_inp3_text);
+
+static const struct soc_enum iir2_inp3_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B3_CTL, 0, 18, iir_inp3_text);
+
+static const struct soc_enum iir1_inp4_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ1_B4_CTL, 0, 18, iir_inp4_text);
+
+static const struct soc_enum iir2_inp4_mux_enum =
+	SOC_ENUM_SINGLE(TAIKO_A_CDC_CONN_EQ2_B4_CTL, 0, 18, iir_inp4_text);
+
 static const struct snd_kcontrol_new rx_mix1_inp1_mux =
 	SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
 
@@ -2025,6 +2058,24 @@
 static const struct snd_kcontrol_new iir2_inp1_mux =
 	SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
 
+static const struct snd_kcontrol_new iir1_inp2_mux =
+	SOC_DAPM_ENUM("IIR1 INP2 Mux", iir1_inp2_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp2_mux =
+	SOC_DAPM_ENUM("IIR2 INP2 Mux", iir2_inp2_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp3_mux =
+	SOC_DAPM_ENUM("IIR1 INP3 Mux", iir1_inp3_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp3_mux =
+	SOC_DAPM_ENUM("IIR2 INP3 Mux", iir2_inp3_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp4_mux =
+	SOC_DAPM_ENUM("IIR1 INP4 Mux", iir1_inp4_mux_enum);
+
+static const struct snd_kcontrol_new iir2_inp4_mux =
+	SOC_DAPM_ENUM("IIR2 INP4 Mux", iir2_inp4_mux_enum);
+
 static const struct snd_kcontrol_new anc1_mux =
 	SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum);
 
@@ -4012,6 +4063,120 @@
 	{"IIR2 INP1 MUX", "RX6", "SLIM RX6"},
 	{"IIR2 INP1 MUX", "RX7", "SLIM RX7"},
 
+	{"IIR1", NULL, "IIR1 INP2 MUX"},
+	{"IIR1 INP2 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR1 INP2 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR1 INP2 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR1 INP2 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR1 INP2 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR1 INP2 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR1 INP2 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR1 INP2 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR1 INP2 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR1 INP2 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR1 INP2 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP2 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP2 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP2 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP2 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP2 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP2 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR2", NULL, "IIR2 INP2 MUX"},
+	{"IIR2 INP2 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR2 INP2 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR2 INP2 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR2 INP2 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR2 INP2 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR2 INP2 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR2 INP2 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR2 INP2 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR2 INP2 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR2 INP2 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR2 INP2 MUX", "RX1", "SLIM RX1"},
+	{"IIR2 INP2 MUX", "RX2", "SLIM RX2"},
+	{"IIR2 INP2 MUX", "RX3", "SLIM RX3"},
+	{"IIR2 INP2 MUX", "RX4", "SLIM RX4"},
+	{"IIR2 INP2 MUX", "RX5", "SLIM RX5"},
+	{"IIR2 INP2 MUX", "RX6", "SLIM RX6"},
+	{"IIR2 INP2 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR1", NULL, "IIR1 INP3 MUX"},
+	{"IIR1 INP3 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR1 INP3 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR1 INP3 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR1 INP3 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR1 INP3 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR1 INP3 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR1 INP3 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR1 INP3 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR1 INP3 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR1 INP3 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR1 INP3 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP3 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP3 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP3 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP3 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP3 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP3 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR2", NULL, "IIR2 INP3 MUX"},
+	{"IIR2 INP3 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR2 INP3 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR2 INP3 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR2 INP3 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR2 INP3 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR2 INP3 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR2 INP3 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR2 INP3 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR2 INP3 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR2 INP3 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR2 INP3 MUX", "RX1", "SLIM RX1"},
+	{"IIR2 INP3 MUX", "RX2", "SLIM RX2"},
+	{"IIR2 INP3 MUX", "RX3", "SLIM RX3"},
+	{"IIR2 INP3 MUX", "RX4", "SLIM RX4"},
+	{"IIR2 INP3 MUX", "RX5", "SLIM RX5"},
+	{"IIR2 INP3 MUX", "RX6", "SLIM RX6"},
+	{"IIR2 INP3 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR1", NULL, "IIR1 INP4 MUX"},
+	{"IIR1 INP4 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR1 INP4 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR1 INP4 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR1 INP4 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR1 INP4 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR1 INP4 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR1 INP4 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR1 INP4 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR1 INP4 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR1 INP4 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR1 INP4 MUX", "RX1", "SLIM RX1"},
+	{"IIR1 INP4 MUX", "RX2", "SLIM RX2"},
+	{"IIR1 INP4 MUX", "RX3", "SLIM RX3"},
+	{"IIR1 INP4 MUX", "RX4", "SLIM RX4"},
+	{"IIR1 INP4 MUX", "RX5", "SLIM RX5"},
+	{"IIR1 INP4 MUX", "RX6", "SLIM RX6"},
+	{"IIR1 INP4 MUX", "RX7", "SLIM RX7"},
+
+	{"IIR2", NULL, "IIR2 INP4 MUX"},
+	{"IIR2 INP4 MUX", "DEC1", "DEC1 MUX"},
+	{"IIR2 INP4 MUX", "DEC2", "DEC2 MUX"},
+	{"IIR2 INP4 MUX", "DEC3", "DEC3 MUX"},
+	{"IIR2 INP4 MUX", "DEC4", "DEC4 MUX"},
+	{"IIR2 INP4 MUX", "DEC5", "DEC5 MUX"},
+	{"IIR2 INP4 MUX", "DEC6", "DEC6 MUX"},
+	{"IIR2 INP4 MUX", "DEC7", "DEC7 MUX"},
+	{"IIR2 INP4 MUX", "DEC8", "DEC8 MUX"},
+	{"IIR2 INP4 MUX", "DEC9", "DEC9 MUX"},
+	{"IIR2 INP4 MUX", "DEC10", "DEC10 MUX"},
+	{"IIR2 INP4 MUX", "RX1", "SLIM RX1"},
+	{"IIR2 INP4 MUX", "RX2", "SLIM RX2"},
+	{"IIR2 INP4 MUX", "RX3", "SLIM RX3"},
+	{"IIR2 INP4 MUX", "RX4", "SLIM RX4"},
+	{"IIR2 INP4 MUX", "RX5", "SLIM RX5"},
+	{"IIR2 INP4 MUX", "RX6", "SLIM RX6"},
+	{"IIR2 INP4 MUX", "RX7", "SLIM RX7"},
+
 	{"MIC BIAS1 Internal1", NULL, "LDO_H"},
 	{"MIC BIAS1 Internal2", NULL, "LDO_H"},
 	{"MIC BIAS1 External", NULL, "LDO_H"},
@@ -5667,12 +5832,36 @@
 		&iir1_inp1_mux,  taiko_codec_iir_mux_event,
 		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
+	SND_SOC_DAPM_MUX_E("IIR1 INP2 MUX", TAIKO_A_CDC_IIR1_GAIN_B2_CTL, 0, 0,
+		&iir1_inp2_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("IIR1 INP3 MUX", TAIKO_A_CDC_IIR1_GAIN_B3_CTL, 0, 0,
+		&iir1_inp3_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("IIR1 INP4 MUX", TAIKO_A_CDC_IIR1_GAIN_B4_CTL, 0, 0,
+		&iir1_inp4_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
 	SND_SOC_DAPM_MIXER("IIR1", TAIKO_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
 
 	SND_SOC_DAPM_MUX_E("IIR2 INP1 MUX", TAIKO_A_CDC_IIR2_GAIN_B1_CTL, 0, 0,
 		&iir2_inp1_mux,  taiko_codec_iir_mux_event,
 		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
+	SND_SOC_DAPM_MUX_E("IIR2 INP2 MUX", TAIKO_A_CDC_IIR2_GAIN_B2_CTL, 0, 0,
+		&iir2_inp2_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("IIR2 INP3 MUX", TAIKO_A_CDC_IIR2_GAIN_B3_CTL, 0, 0,
+		&iir2_inp3_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("IIR2 INP4 MUX", TAIKO_A_CDC_IIR2_GAIN_B4_CTL, 0, 0,
+		&iir2_inp4_mux,  taiko_codec_iir_mux_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
 	SND_SOC_DAPM_MIXER("IIR2", TAIKO_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0),
 
 	/* AUX PGA */
diff --git a/sound/soc/msm/msm8x10.c b/sound/soc/msm/msm8x10.c
index fe44a23..89df806 100644
--- a/sound/soc/msm/msm8x10.c
+++ b/sound/soc/msm/msm8x10.c
@@ -894,6 +894,21 @@
 		.codec_name = "snd-soc-dummy",
 		.be_id = MSM_FRONTEND_DAI_QCHAT,
 	},
+	{/* hw:x,15 */
+		.name = "MSM8X10 Media9",
+		.stream_name = "MultiMedia9",
+		.cpu_dai_name   = "MultiMedia9",
+		.platform_name  = "msm-pcm-dsp.0",
+		.dynamic = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		/* this dai-link has playback support */
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA9,
+	},
 	/* Backend I2S DAI Links */
 	{
 		.name = LPASS_BE_SEC_MI2S_RX,
@@ -1039,6 +1054,19 @@
 		.be_hw_params_fixup = msm_be_hw_params_fixup,
 		.ignore_suspend = 1,
 	},
+	/* Incall Music 2 BACK END DAI Link */
+	{
+		.name = LPASS_BE_VOICE2_PLAYBACK_TX,
+		.stream_name = "Voice2 Farend Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.32770",
+		.platform_name = "msm-pcm-routing",
+		.codec_name     = "msm-stub-codec.1",
+		.codec_dai_name = "msm-stub-rx",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ignore_suspend = 1,
+	},
 };
 
 struct snd_soc_card snd_soc_card_msm8x10 = {
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
index 63ac5d3..161904c 100644
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
@@ -211,6 +211,14 @@
 		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
 		{-320, -320, 144}
 	},
+	{PROXY, 6, DOLBY_ENDP_EXT_SPEAKERS,
+		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
+		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
+		 DOLBY_ENDDEP_PARAM_VMB_LENGTH},
+		{DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
+		 DOLBY_ENDDEP_PARAM_VMB_OFFSET},
+		{-320, -320, 144}
+	},
 	{FM, 2, DOLBY_ENDP_HDMI,
 		{DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
 		{DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
@@ -409,7 +417,8 @@
 	for (idx = 0; idx < NUM_DOLBY_ENDP_DEVICE; idx++) {
 		if (dolby_dap_endp_params[idx].device ==
 			dolby_dap_params_states.device) {
-			if (dolby_dap_params_states.device == AUX_DIGITAL) {
+			if (dolby_dap_params_states.device == AUX_DIGITAL ||
+			    dolby_dap_params_states.device == PROXY) {
 				if (dolby_dap_endp_params[idx].device_ch_caps ==
 					device_channels)
 					break;
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
index 4544fea..14586f4 100644
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.h
@@ -249,7 +249,7 @@
 #define DOLBY_AUTO_ENDDEP_IDX			(MAX_DOLBY_PARAMS+4)
 
 #define TOTAL_LENGTH_DOLBY_PARAM		745
-#define NUM_DOLBY_ENDP_DEVICE			23
+#define NUM_DOLBY_ENDP_DEVICE			24
 #define DOLBY_VIS_PARAM_HEADER_SIZE		 25
 
 #define DOLBY_INVALID_PORT_ID			-1
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index ca6189e..8e69a2b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -4267,7 +4267,7 @@
 				path_type,
 				bedai->sample_rate,
 				channels,
-				topology, false,
+				topology, fe_dai_perf_mode[i][session_type],
 				bits_per_sample);
 			}
 
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 74b79dd..c36b53a 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -2505,8 +2505,35 @@
 /* Support for selecting stereo mixing coefficients for B family not done */
 int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff)
 {
-	/* To Be Done */
+	struct asm_aac_stereo_mix_coeff_selection_param_v2 aac_mix_coeff;
+	int rc = 0;
+
+	q6asm_add_hdr(ac, &aac_mix_coeff.hdr, sizeof(aac_mix_coeff), TRUE);
+	aac_mix_coeff.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+	aac_mix_coeff.param_id =
+		ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2;
+	aac_mix_coeff.param_size =
+		sizeof(struct asm_aac_stereo_mix_coeff_selection_param_v2);
+	aac_mix_coeff.aac_stereo_mix_coeff_flag = mix_coeff;
+	pr_debug("%s: mix_coeff = %u\n", __func__, mix_coeff);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &aac_mix_coeff);
+	if (rc < 0) {
+		pr_err("%s: Command opcode[0x%x] paramid[0x%x] failed\n",
+			__func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+			ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG_V2);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+		(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout opcode[0x%x]\n",
+			__func__, aac_mix_coeff.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
 	return 0;
+fail_cmd:
+	return rc;
 }
 
 int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,