Merge changes Iaf22b758,I522944b8 into msm-3.4

* changes:
  msm: rotator: Add secure session flag to rotator start data
  msm_fb: display: Add DMA and Overlay blt mode address to MDP driver
diff --git a/arch/arm/boot/dts/msmcopper-iommu.dtsi b/arch/arm/boot/dts/msmcopper-iommu.dtsi
index e0ce8ac..697136a 100644
--- a/arch/arm/boot/dts/msmcopper-iommu.dtsi
+++ b/arch/arm/boot/dts/msmcopper-iommu.dtsi
@@ -17,6 +17,7 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfda64000 0x10000>;
+		vdd-supply = <&gdsc_jpeg>;
 
 		qcom,iommu-ctx@fda6c000 {
 			reg = <0xfda6c000 0x1000>;
@@ -44,6 +45,7 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfd928000 0x10000>;
+		vdd-supply = <&gdsc_mdss>;
 
 		qcom,iommu-ctx@fd930000 {
 			reg = <0xfd930000 0x1000>;
@@ -65,6 +67,7 @@
 		#size-cells = <1>;
 		ranges;
 		reg = <0xfdc84000 0x10000>;
+		vdd-supply = <&gdsc_venus>;
 
 		qcom,iommu-ctx@fdc8c000 {
 			reg = <0xfdc8c000 0x1000>;
diff --git a/arch/arm/boot/dts/msmcopper.dtsi b/arch/arm/boot/dts/msmcopper.dtsi
index 77cbbe0..79d6814 100644
--- a/arch/arm/boot/dts/msmcopper.dtsi
+++ b/arch/arm/boot/dts/msmcopper.dtsi
@@ -392,4 +392,18 @@
 	qcom,qseecom@fe806000 {
 		compatible = "qcom,qseecom";
 	};
+
+	qcom,mdss_mdp@fd900000 {
+		cell-index = <0>;
+		compatible = "qcom,mdss_mdp";
+		reg = <0xfd900000 0x22100>;
+		interrupts = <0 72 0>;
+	};
+
+	qcom,mdss_wb_panel {
+		cell-index = <1>;
+		compatible = "qcom,mdss_wb";
+		qcom,mdss_pan_res = <640 480>;
+		qcom,mdss_pan_bpp = <24>;
+	};
 };
diff --git a/arch/arm/configs/msm-copper_defconfig b/arch/arm/configs/msm-copper_defconfig
index a978c30..78ab155 100644
--- a/arch/arm/configs/msm-copper_defconfig
+++ b/arch/arm/configs/msm-copper_defconfig
@@ -155,7 +155,14 @@
 CONFIG_ION=y
 CONFIG_ION_MSM=y
 CONFIG_FB=y
-CONFIG_FB_VIRTUAL=y
+CONFIG_FB_MSM=y
+# CONFIG_FB_MSM_BACKLIGHT is not set
+CONFIG_FB_MSM_MDSS=y
+CONFIG_FB_MSM_MDSS_WRITEBACK=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
 # CONFIG_HID_SUPPORT is not set
 CONFIG_USB_GADGET=y
 CONFIG_USB_CI13XXX_MSM=y
@@ -168,8 +175,6 @@
 # CONFIG_MMC_BLOCK_BOUNCE is not set
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
 CONFIG_SWITCH=y
 CONFIG_STAGING=y
 CONFIG_ANDROID=y
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 1d231ef..e4958f5 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -29,6 +29,7 @@
 #include <linux/ion.h>
 #include <linux/memory.h>
 #include <linux/memblock.h>
+#include <linux/msm_thermal.h>
 #include <linux/i2c/atmel_mxt_ts.h>
 #include <linux/cyttsp-qc.h>
 #include <linux/i2c/isa1200.h>
@@ -1716,6 +1717,14 @@
 	.id = -1,
 };
 
+static struct msm_thermal_data msm_thermal_pdata = {
+	.sensor_id = 7,
+	.poll_ms = 1000,
+	.limit_temp = 60,
+	.temp_hysteresis = 10,
+	.limit_freq = 918000,
+};
+
 #define MSM_SHARED_RAM_PHYS 0x80000000
 static void __init apq8064_map_io(void)
 {
@@ -2864,6 +2873,7 @@
 static void __init apq8064_common_init(void)
 {
 	msm_tsens_early_init(&apq_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	if (socinfo_init() < 0)
 		pr_err("socinfo_init() failed!\n");
 	BUG_ON(msm_rpm_init(&apq8064_rpm_data));
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index a45f7cb..ab9fe5e 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -42,6 +42,7 @@
 #include <linux/gpio_keys.h>
 #include <linux/memory.h>
 #include <linux/memblock.h>
+#include <linux/msm_thermal.h>
 
 #include <linux/slimbus/slimbus.h>
 #include <linux/mfd/wcd9xxx/core.h>
@@ -1981,6 +1982,14 @@
 	.id = -1,
 };
 
+static struct msm_thermal_data msm_thermal_pdata = {
+	.sensor_id = 9,
+	.poll_ms = 1000,
+	.limit_temp = 60,
+	.temp_hysteresis = 10,
+	.limit_freq = 918000,
+};
+
 #ifdef CONFIG_MSM_FAKE_BATTERY
 static struct platform_device fish_battery_device = {
 	.name = "fish_battery",
@@ -2392,6 +2401,7 @@
 		pr_err("meminfo_init() failed!\n");
 
 	msm_tsens_early_init(&msm_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	BUG_ON(msm_rpm_init(&msm8930_rpm_data));
 	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
 
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index 22ef940..628a324 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -42,6 +42,7 @@
 #include <linux/i2c/isa1200.h>
 #include <linux/memory.h>
 #include <linux/memblock.h>
+#include <linux/msm_thermal.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -2427,6 +2428,14 @@
 	.id = -1,
 };
 
+static struct msm_thermal_data msm_thermal_pdata = {
+	.sensor_id = 0,
+	.poll_ms = 1000,
+	.limit_temp = 60,
+	.temp_hysteresis = 10,
+	.limit_freq = 918000,
+};
+
 #ifdef CONFIG_MSM_FAKE_BATTERY
 static struct platform_device fish_battery_device = {
 	.name = "fish_battery",
@@ -3044,6 +3053,7 @@
 
 	wdog_pdata->bark_time = 15000;
 	msm_tsens_early_init(&msm_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	BUG_ON(msm_rpm_init(&msm8960_rpm_data));
 	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
 	regulator_suppress_info_printing();
@@ -3076,6 +3086,7 @@
 static void __init msm8960_rumi3_init(void)
 {
 	msm_tsens_early_init(&msm_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	BUG_ON(msm_rpm_init(&msm8960_rpm_data));
 	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
 	regulator_suppress_info_printing();
@@ -3108,6 +3119,7 @@
 		pr_err("meminfo_init() failed!\n");
 
 	msm_tsens_early_init(&msm_tsens_pdata);
+	msm_thermal_init(&msm_thermal_pdata);
 	BUG_ON(msm_rpm_init(&msm8960_rpm_data));
 	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
 
diff --git a/arch/arm/mach-msm/board-9615.c b/arch/arm/mach-msm/board-9615.c
index 1089d61..568de46 100644
--- a/arch/arm/mach-msm/board-9615.c
+++ b/arch/arm/mach-msm/board-9615.c
@@ -758,10 +758,11 @@
 	.vbus_power		= msm_hsusb_vbus_power,
 	.disable_reset_on_disconnect	= true,
 	.enable_lpm_on_dev_suspend	= true,
+	.core_clk_always_on_workaround = true,
 };
 
 static struct msm_hsic_peripheral_platform_data msm_hsic_peripheral_pdata = {
-	.keep_core_clk_on_suspend_workaround = true,
+	.core_clk_always_on_workaround = true,
 };
 
 #define PID_MAGIC_ID		0x71432909
diff --git a/arch/arm/mach-msm/board-copper.c b/arch/arm/mach-msm/board-copper.c
index 4dda0b7..94a73c6 100644
--- a/arch/arm/mach-msm/board-copper.c
+++ b/arch/arm/mach-msm/board-copper.c
@@ -443,6 +443,12 @@
 	CLK_DUMMY("core_clk",	NULL,	"f9966000.i2c", 0),
 	CLK_DUMMY("iface_clk",	NULL,	"f9966000.i2c", 0),
 	CLK_DUMMY("core_clk",	NULL,	"fe12f000.slim",	OFF),
+	CLK_DUMMY("core_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("core_clk_src", "mdp.0", NULL, 0),
+	CLK_DUMMY("lut_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("vsync_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("iface_clk", "mdp.0", NULL, 0),
+	CLK_DUMMY("bus_clk", "mdp.0", NULL, 0),
 };
 
 struct clock_init_data msm_dummy_clock_init_data __initdata = {
@@ -511,6 +517,7 @@
 			"msm_rng", NULL),
 	OF_DEV_AUXDATA("qcom,qseecom", 0xFE806000, \
 			"qseecom", NULL),
+	OF_DEV_AUXDATA("qcom,mdss_mdp", 0xFD900000, "mdp.0", NULL),
 	{}
 };
 
diff --git a/arch/arm/mach-msm/clock-copper.c b/arch/arm/mach-msm/clock-copper.c
index 61706dc..87a8998 100644
--- a/arch/arm/mach-msm/clock-copper.c
+++ b/arch/arm/mach-msm/clock-copper.c
@@ -4706,10 +4706,10 @@
 	CLK_LOOKUP("core_clk", mdss_esc1_clk.c, ""),
 	CLK_LOOKUP("iface_clk", mdss_hdmi_ahb_clk.c, ""),
 	CLK_LOOKUP("core_clk", mdss_hdmi_clk.c, ""),
-	CLK_LOOKUP("core_clk", mdss_mdp_clk.c, ""),
-	CLK_LOOKUP("core_clk", mdss_mdp_lut_clk.c, ""),
-	CLK_LOOKUP("core_clk", mdp_clk_src.c, ""),
-	CLK_LOOKUP("core_clk", mdss_vsync_clk.c, ""),
+	CLK_LOOKUP("core_clk", mdss_mdp_clk.c, "mdp.0"),
+	CLK_LOOKUP("lut_clk", mdss_mdp_lut_clk.c, "mdp.0"),
+	CLK_LOOKUP("core_clk_src", mdp_clk_src.c, "mdp.0"),
+	CLK_LOOKUP("vsync_clk", mdss_vsync_clk.c, "mdp.0"),
 	CLK_LOOKUP("iface_clk", camss_cci_cci_ahb_clk.c, ""),
 	CLK_LOOKUP("core_clk", camss_cci_cci_clk.c, ""),
 	CLK_LOOKUP("iface_clk", camss_csi0_ahb_clk.c, ""),
@@ -4768,9 +4768,10 @@
 	CLK_LOOKUP("iface_clk", camss_vfe_vfe_ahb_clk.c, ""),
 	CLK_LOOKUP("bus_clk", camss_vfe_vfe_axi_clk.c, ""),
 	CLK_LOOKUP("bus_clk", camss_vfe_vfe_ocmemnoc_clk.c, ""),
+	CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "mdp.0"),
 	CLK_LOOKUP("iface_clk", mdss_ahb_clk.c, "fd928000.qcom,iommu"),
 	CLK_LOOKUP("core_clk", mdss_axi_clk.c, "fd928000.qcom,iommu"),
-	CLK_LOOKUP("bus_clk", mdss_axi_clk.c, ""),
+	CLK_LOOKUP("bus_clk", mdss_axi_clk.c, "mdp.0"),
 	CLK_LOOKUP("core_clk", oxili_gfx3d_clk.c, ""),
 	CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, ""),
 	CLK_LOOKUP("bus_clk", oxilicx_axi_clk.c, ""),
diff --git a/arch/arm/mach-msm/clock-rpm.c b/arch/arm/mach-msm/clock-rpm.c
index ab57cf8..2ec40ce 100644
--- a/arch/arm/mach-msm/clock-rpm.c
+++ b/arch/arm/mach-msm/clock-rpm.c
@@ -272,9 +272,11 @@
 	if (rc < 0)
 		return HANDOFF_DISABLED_CLK;
 
-	r->last_set_khz = iv.value;
-	r->last_set_sleep_khz = iv.value;
-	clk->rate = iv.value * 1000;
+	if (!r->branch) {
+		r->last_set_khz = iv.value;
+		r->last_set_sleep_khz = iv.value;
+		clk->rate = iv.value * 1000;
+	}
 
 	return HANDOFF_ENABLED_CLK;
 }
diff --git a/arch/arm/mach-msm/hsic_sysmon.c b/arch/arm/mach-msm/hsic_sysmon.c
index 07a9dbb..153e1b4 100644
--- a/arch/arm/mach-msm/hsic_sysmon.c
+++ b/arch/arm/mach-msm/hsic_sysmon.c
@@ -314,6 +314,8 @@
 static inline void hsic_sysmon_debugfs_cleanup(void) { }
 #endif
 
+static void hsic_sysmon_pdev_release(struct device *dev) { }
+
 static int
 hsic_sysmon_probe(struct usb_interface *ifc, const struct usb_device_id *id)
 {
@@ -371,6 +373,7 @@
 
 	hs->pdev.name = "sys_mon";
 	hs->pdev.id = SYSMON_SS_EXT_MODEM;
+	hs->pdev.dev.release = hsic_sysmon_pdev_release;
 	platform_device_register(&hs->pdev);
 
 	pr_debug("complete");
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index d8543f3..6d2c25a 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -94,6 +94,7 @@
 	VFE_MSG_V2X_CAPTURE,
 	VFE_MSG_OUTPUT_PRIMARY,
 	VFE_MSG_OUTPUT_SECONDARY,
+	VFE_MSG_OUTPUT_TERTIARY1,
 };
 
 enum vpe_resp_msg {
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index fdff32e..826ba9a 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -122,7 +122,7 @@
 }
 EXPORT_SYMBOL(clkdev_add);
 
-void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
+void clkdev_add_table(struct clk_lookup *cl, size_t num)
 {
 	mutex_lock(&clocks_mutex);
 	while (num--) {
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 4991a2e..f5cb888 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -267,7 +267,7 @@
 				KGSL_IOMMU_CONTEXT_USER))
 		goto done;
 
-	if (adreno_is_a225(adreno_dev))
+	if (cpu_is_msm8960())
 		cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
 					device->mmu.setstate_memory.gpuaddr +
 					KGSL_IOMMU_SETSTATE_NOP_OFFSET);
@@ -362,7 +362,7 @@
 		}
 	}
 
-	if (adreno_is_a225(adreno_dev))
+	if (cpu_is_msm8960())
 		cmds += adreno_add_change_mh_phys_limit_cmds(cmds,
 			reg_map_desc[num_iommu_units - 1]->gpuaddr - PAGE_SIZE,
 			device->mmu.setstate_memory.gpuaddr +
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index e42c7b6..d20cf7e 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/iommu.h>
 #include <linux/msm_kgsl.h>
+#include <mach/socinfo.h>
 
 #include "kgsl.h"
 #include "kgsl_device.h"
@@ -268,14 +269,17 @@
 	struct kgsl_iommu *iommu = mmu->priv;
 	int i, j;
 
-	BUG_ON(mmu->hwpagetable == NULL);
-	BUG_ON(mmu->hwpagetable->priv == NULL);
-
-	iommu_pt = mmu->hwpagetable->priv;
-
 	for (i = 0; i < iommu->unit_count; i++) {
 		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		iommu_pt = mmu->defaultpagetable->priv;
 		for (j = 0; j < iommu_unit->dev_count; j++) {
+			/*
+			 * If there is a 2nd default pagetable then priv domain
+			 * is attached to this pagetable
+			 */
+			if (mmu->priv_bank_table &&
+				(KGSL_IOMMU_CONTEXT_PRIV == j))
+				iommu_pt = mmu->priv_bank_table->priv;
 			if (iommu_unit->dev[j].attached) {
 				iommu_detach_device(iommu_pt->domain,
 						iommu_unit->dev[j].dev);
@@ -307,18 +311,21 @@
 	struct kgsl_iommu *iommu = mmu->priv;
 	int i, j, ret = 0;
 
-	BUG_ON(mmu->hwpagetable == NULL);
-	BUG_ON(mmu->hwpagetable->priv == NULL);
-
-	iommu_pt = mmu->hwpagetable->priv;
-
 	/*
 	 * Loop through all the iommu devcies under all iommu units and
 	 * attach the domain
 	 */
 	for (i = 0; i < iommu->unit_count; i++) {
 		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		iommu_pt = mmu->defaultpagetable->priv;
 		for (j = 0; j < iommu_unit->dev_count; j++) {
+			/*
+			 * If there is a 2nd default pagetable then priv domain
+			 * is attached to this pagetable
+			 */
+			if (mmu->priv_bank_table &&
+				(KGSL_IOMMU_CONTEXT_PRIV == j))
+				iommu_pt = mmu->priv_bank_table->priv;
 			if (!iommu_unit->dev[j].attached) {
 				ret = iommu_attach_device(iommu_pt->domain,
 							iommu_unit->dev[j].dev);
@@ -614,17 +621,32 @@
 	int i = 0;
 	struct kgsl_iommu *iommu = mmu->priv;
 	struct kgsl_iommu_pt *iommu_pt;
+	struct kgsl_pagetable *pagetable = NULL;
 
+	/* If chip is not 8960 then we use the 2nd context bank for pagetable
+	 * switching on the 3D side for which a separate table is allocated */
+	if (!cpu_is_msm8960()) {
+		mmu->priv_bank_table =
+			kgsl_mmu_getpagetable(KGSL_MMU_PRIV_BANK_TABLE_NAME);
+		if (mmu->priv_bank_table == NULL) {
+			status = -ENOMEM;
+			goto err;
+		}
+		iommu_pt = mmu->priv_bank_table->priv;
+		iommu_pt->asid = 1;
+	}
 	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
 	/* Return error if the default pagetable doesn't exist */
 	if (mmu->defaultpagetable == NULL) {
 		status = -ENOMEM;
 		goto err;
 	}
+	pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
+				mmu->defaultpagetable;
 	/* Map the IOMMU regsiters to only defaultpagetable */
 	for (i = 0; i < iommu->unit_count; i++) {
 		iommu->iommu_units[i].reg_map.priv |= KGSL_MEMFLAGS_GLOBAL;
-		status = kgsl_mmu_map(mmu->defaultpagetable,
+		status = kgsl_mmu_map(pagetable,
 			&(iommu->iommu_units[i].reg_map),
 			GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
 		if (status) {
@@ -644,10 +666,14 @@
 	return status;
 err:
 	for (i--; i >= 0; i--) {
-		kgsl_mmu_unmap(mmu->defaultpagetable,
+		kgsl_mmu_unmap(pagetable,
 				&(iommu->iommu_units[i].reg_map));
 		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMFLAGS_GLOBAL;
 	}
+	if (mmu->priv_bank_table) {
+		kgsl_mmu_putpagetable(mmu->priv_bank_table);
+		mmu->priv_bank_table = NULL;
+	}
 	if (mmu->defaultpagetable) {
 		kgsl_mmu_putpagetable(mmu->defaultpagetable);
 		mmu->defaultpagetable = NULL;
@@ -669,9 +695,9 @@
 		if (status)
 			return -ENOMEM;
 	}
-	/* We use the GPU MMU to control access to IOMMU registers on a225,
-	 * hence we still keep the MMU active on a225 */
-	if (adreno_is_a225(ADRENO_DEVICE(mmu->device))) {
+	/* We use the GPU MMU to control access to IOMMU registers on 8960 with
+	 * a225, hence we still keep the MMU active on 8960 */
+	if (cpu_is_msm8960()) {
 		struct kgsl_mh *mh = &(mmu->device->mh);
 		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
 		kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
@@ -707,6 +733,12 @@
 	 */
 	for (i = 0; i < iommu->unit_count; i++) {
 		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		/* Make sure that the ASID of the priv bank is set to 1.
+		 * When we use a different pagetable for the priv bank, the
+		 * iommu driver sets the ASID to 0 instead of 1. */
+		KGSL_IOMMU_SET_IOMMU_REG(iommu->iommu_units[i].reg_map.hostptr,
+					KGSL_IOMMU_CONTEXT_PRIV,
+					CONTEXTIDR, 1);
 		for (j = 0; j < iommu_unit->dev_count; j++)
 			iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(
 						KGSL_IOMMU_GET_IOMMU_REG(
@@ -816,14 +848,19 @@
 	struct kgsl_iommu *iommu = mmu->priv;
 	int i;
 	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_pagetable *pagetable = (mmu->priv_bank_table ?
+			mmu->priv_bank_table : mmu->defaultpagetable);
 		if (iommu->iommu_units[i].reg_map.gpuaddr)
-			kgsl_mmu_unmap(mmu->defaultpagetable,
+			kgsl_mmu_unmap(pagetable,
 			&(iommu->iommu_units[i].reg_map));
 		if (iommu->iommu_units[i].reg_map.hostptr)
 			iounmap(iommu->iommu_units[i].reg_map.hostptr);
 		kgsl_sg_free(iommu->iommu_units[i].reg_map.sg,
 				iommu->iommu_units[i].reg_map.sglen);
 	}
+
+	if (mmu->priv_bank_table)
+		kgsl_mmu_putpagetable(mmu->priv_bank_table);
 	if (mmu->defaultpagetable)
 		kgsl_mmu_putpagetable(mmu->defaultpagetable);
 	kfree(iommu->asids);
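
Note on the pagetable selection introduced above: on non-8960 chips a separate
priv_bank_table is allocated and the PRIV context bank is attached to it, while
every other context bank stays on defaultpagetable. A minimal standalone sketch
of that selection rule follows; the context-index values and struct shapes here
are illustrative assumptions, not the driver's real definitions.

#include <stdio.h>
#include <stddef.h>

#define KGSL_IOMMU_CONTEXT_USER 0	/* assumed index, illustration only */
#define KGSL_IOMMU_CONTEXT_PRIV 1	/* assumed index, illustration only */

struct pagetable { const char *name; };

/* Pick the pagetable a context bank is attached to: the PRIV bank uses the
 * dedicated priv_bank_table when one exists (non-8960 case), everything
 * else uses the default pagetable. */
static struct pagetable *pick_pagetable(struct pagetable *defaultpt,
					struct pagetable *priv_bank_table,
					int ctx)
{
	if (priv_bank_table && ctx == KGSL_IOMMU_CONTEXT_PRIV)
		return priv_bank_table;
	return defaultpt;
}

int main(void)
{
	struct pagetable def = { "default" }, priv = { "priv_bank" };

	printf("USER -> %s\n",
	       pick_pagetable(&def, &priv, KGSL_IOMMU_CONTEXT_USER)->name);
	printf("PRIV -> %s\n",
	       pick_pagetable(&def, &priv, KGSL_IOMMU_CONTEXT_PRIV)->name);
	printf("PRIV on 8960 (no priv table) -> %s\n",
	       pick_pagetable(&def, NULL, KGSL_IOMMU_CONTEXT_PRIV)->name);
	return 0;
}
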
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 5216b34..dfaadba 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -39,7 +39,8 @@
 	/* For IOMMU only unmap the global structures to global pt */
 	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
 		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
-		(KGSL_MMU_GLOBAL_PT !=  pt->name))
+		(KGSL_MMU_GLOBAL_PT !=  pt->name) &&
+		(KGSL_MMU_PRIV_BANK_TABLE_NAME !=  pt->name))
 		return 0;
 	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
 		struct kgsl_device *device = kgsl_driver.devp[i];
@@ -58,7 +59,8 @@
 	/* For IOMMU only map the global structures to global pt */
 	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
 		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
-		(KGSL_MMU_GLOBAL_PT !=  pt->name))
+		(KGSL_MMU_GLOBAL_PT !=  pt->name) &&
+		(KGSL_MMU_PRIV_BANK_TABLE_NAME !=  pt->name))
 		return 0;
 	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
 		struct kgsl_device *device = kgsl_driver.devp[i];
@@ -453,9 +455,9 @@
 	 * just once from this pool of the defaultpagetable
 	 */
 	if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
-		(KGSL_MMU_GLOBAL_PT == name)) {
-		pagetable->kgsl_pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT,
-						       -1);
+		((KGSL_MMU_GLOBAL_PT == name) ||
+		(KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
+		pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
 		if (pagetable->kgsl_pool == NULL) {
 			KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
 					KGSL_MMU_ALIGN_SHIFT);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 2db327b..4c0c015 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -29,6 +29,7 @@
    as an identifier */
 
 #define KGSL_MMU_GLOBAL_PT 0
+#define KGSL_MMU_PRIV_BANK_TABLE_NAME 0xFFFFFFFF
 
 struct kgsl_device;
 
@@ -165,6 +166,8 @@
 	struct kgsl_memdesc    setstate_memory;
 	/* current page table object being used by device mmu */
 	struct kgsl_pagetable  *defaultpagetable;
+	/* pagetable object used for priv bank of IOMMU */
+	struct kgsl_pagetable  *priv_bank_table;
 	struct kgsl_pagetable  *hwpagetable;
 	const struct kgsl_mmu_ops *mmu_ops;
 	void *priv;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index daf21b8..78fc3ec 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1769,7 +1769,7 @@
 		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
 		goto send_sense;
 	}
-	ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
+	ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
 	if (ret < 0) {
 		kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
 		if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index 834c9b0..9b42f9b 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -136,6 +136,15 @@
 			image_mode = -1;
 			break;
 		}
+	} else if (vfe_msg == VFE_MSG_OUTPUT_TERTIARY1) {
+		switch (pmctl->vfe_output_mode) {
+		case VFE_OUTPUTS_RDI0:
+			image_mode = MSM_V4L2_EXT_CAPTURE_MODE_PREVIEW;
+			break;
+		default:
+			image_mode = -1;
+			break;
+		}
 	} else
 		image_mode = -1;
 
@@ -331,6 +340,9 @@
 			case MSG_ID_OUTPUT_SECONDARY:
 				msgid = VFE_MSG_OUTPUT_SECONDARY;
 				break;
+			case MSG_ID_OUTPUT_TERTIARY1:
+				msgid = VFE_MSG_OUTPUT_TERTIARY1;
+				break;
 			default:
 				pr_err("%s: Invalid VFE output id: %d\n",
 					   __func__, isp_output->output_id);
@@ -673,6 +685,7 @@
 	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC:
 	case CMD_AXI_START:
 	case CMD_AXI_STOP:
+	case CMD_AXI_CFG_TERT1:
 		/* Dont need to pass buffer information.
 		 * subdev will get the buffer from media
 		 * controller free queue.
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/msm_vfe32.c
index 9382292..acff492 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/msm_vfe32.c
@@ -432,8 +432,15 @@
 	axi_ctrl->share_ctrl->outpath.out2.ch1 =
 		0x0000FFFF & (*ch_info++ >> 16);
 	axi_ctrl->share_ctrl->outpath.out2.ch2 = 0x0000FFFF & *ch_info++;
+	axi_ctrl->share_ctrl->outpath.out2.image_mode =
+		0x0000FFFF & (*ch_info++ >> 16);
+
 
 	switch (mode) {
+	case OUTPUT_TERT1:
+		axi_ctrl->share_ctrl->outpath.output_mode =
+			VFE32_OUTPUT_MODE_TERTIARY1;
+		break;
 	case OUTPUT_PRIM:
 		axi_ctrl->share_ctrl->outpath.output_mode =
 			VFE32_OUTPUT_MODE_PRIMARY;
@@ -714,7 +721,7 @@
 
 static void vfe32_start_common(struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	uint32_t irq_mask = 0x00E00021;
+	uint32_t irq_mask = 0x00E00021, irq_mask1;
 	vfe32_ctrl->start_ack_pending = TRUE;
 	CDBG("VFE opertaion mode = 0x%x, output mode = 0x%x\n",
 		vfe32_ctrl->share_ctrl->operation_mode,
@@ -723,19 +730,31 @@
 		irq_mask |= VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK;
 	else
 		irq_mask |= 0x000FE000;
-
+	irq_mask |=
+		msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
 	msm_camera_io_w(irq_mask,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
 	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
 
+	if (vfe32_ctrl->share_ctrl->operation_mode == VFE_OUTPUTS_RDI0) {
+		irq_mask1 =
+		msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_1);
+		irq_mask1 |= VFE_IRQ_STATUS1_RDI0_REG_UPDATE_MASK;
+		msm_camera_io_w(irq_mask1, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_1);
+		msm_camera_io_w_mb(2, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_REG_UPDATE_CMD);
+	} else {
+		msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_REG_UPDATE_CMD);
+		msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_CAMIF_COMMAND);
+	}
 	/* Ensure the write order while writing
 	to the command register using the barrier */
-	msm_camera_io_w_mb(1,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_REG_UPDATE_CMD);
-	msm_camera_io_w_mb(1,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_CAMIF_COMMAND);
-
 	atomic_set(&vfe32_ctrl->share_ctrl->vstate, 1);
 }
 
@@ -989,7 +1008,8 @@
 	struct msm_cam_media_controller *pmctl,
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
-	uint32_t irq_comp_mask = 0;
+	uint32_t irq_comp_mask = 0, irq_mask = 0;
+
 	irq_comp_mask	=
 		msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
 			VFE_IRQ_COMP_MASK);
@@ -1018,6 +1038,16 @@
 			0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch1 + 8) |
 			0x1 << (vfe32_ctrl->share_ctrl->outpath.out1.ch2 + 8));
 	}
+	if (vfe32_ctrl->share_ctrl->outpath.output_mode &
+		VFE32_OUTPUT_MODE_TERTIARY1) {
+		irq_mask = msm_camera_io_r(vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+		irq_mask |= (0x1 << (vfe32_ctrl->share_ctrl->outpath.out2.ch0 +
+			VFE_WM_OFFSET));
+		msm_camera_io_w(irq_mask, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_IRQ_MASK_0);
+	}
+
 	msm_camera_io_w(irq_comp_mask,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_COMP_MASK);
 
@@ -1235,6 +1265,8 @@
 		ch = &share_ctrl->outpath.out0;
 	else if (path == VFE_MSG_OUTPUT_SECONDARY)
 		ch = &share_ctrl->outpath.out1;
+	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
+		ch = &share_ctrl->outpath.out2;
 	else
 		pr_err("%s: Invalid path %d\n", __func__,
 			path);
@@ -1251,8 +1283,10 @@
 
 	if (path == VFE_MSG_OUTPUT_PRIMARY)
 		image_mode = axi_ctrl->share_ctrl->outpath.out0.image_mode;
-	else
+	else if (path == VFE_MSG_OUTPUT_SECONDARY)
 		image_mode = axi_ctrl->share_ctrl->outpath.out1.image_mode;
+	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
+		image_mode = axi_ctrl->share_ctrl->outpath.out2.image_mode;
 
 	vfe32_subdev_notify(id, path, image_mode,
 		&axi_ctrl->subdev, axi_ctrl->share_ctrl);
@@ -1269,8 +1303,10 @@
 	uint32_t image_mode = 0;
 	if (path == VFE_MSG_OUTPUT_PRIMARY)
 		image_mode = vfe32_ctrl->share_ctrl->outpath.out0.image_mode;
-	else
+	else if (path == VFE_MSG_OUTPUT_SECONDARY)
 		image_mode = vfe32_ctrl->share_ctrl->outpath.out1.image_mode;
+	else if (path == VFE_MSG_OUTPUT_TERTIARY1)
+		image_mode = vfe32_ctrl->share_ctrl->outpath.out2.image_mode;
 
 	vfe32_subdev_notify(id, path, image_mode,
 		&vfe32_ctrl->subdev, vfe32_ctrl->share_ctrl);
@@ -1286,8 +1322,9 @@
 			vfe32_ctrl->share_ctrl->vfebase, outch->ch0,
 			outch->pong.ch_paddr[0]);
 
-		if (vfe32_ctrl->share_ctrl->operation_mode !=
-			VFE_OUTPUTS_RAW) {
+		if ((vfe32_ctrl->share_ctrl->operation_mode !=
+			VFE_OUTPUTS_RAW) &&
+			(path != VFE_MSG_OUTPUT_TERTIARY1)) {
 			vfe32_put_ch_ping_addr(
 				vfe32_ctrl->share_ctrl->vfebase, outch->ch1,
 				outch->ping.ch_paddr[1]);
@@ -1377,6 +1414,11 @@
 			rc = vfe32_configure_pingpong_buffers(
 				VFE_MSG_V32_START, VFE_MSG_OUTPUT_PRIMARY,
 				vfe32_ctrl);
+		else if (vfe32_ctrl->share_ctrl->operation_mode ==
+				VFE_OUTPUTS_RDI0)
+			rc = vfe32_configure_pingpong_buffers(
+				VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY1,
+				vfe32_ctrl);
 		else
 			/* Configure secondary channel */
 			rc = vfe32_configure_pingpong_buffers(
@@ -2844,11 +2886,14 @@
 		CDBG("stop video triggered .\n");
 	}
 
+	spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
 	if (vfe32_ctrl->start_ack_pending == TRUE) {
+		vfe32_ctrl->start_ack_pending = FALSE;
+		spin_unlock_irqrestore(&vfe32_ctrl->start_ack_lock, flags);
 		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
 			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
-		vfe32_ctrl->start_ack_pending = FALSE;
 	} else {
+		spin_unlock_irqrestore(&vfe32_ctrl->start_ack_lock, flags);
 		if (vfe32_ctrl->recording_state ==
 				VFE_STATE_STOP_REQUESTED) {
 			vfe32_ctrl->recording_state = VFE_STATE_STOPPED;
@@ -2977,6 +3022,23 @@
 	} /* if snapshot mode. */
 }
 
+static void vfe32_process_rdi0_reg_update_irq(
+	struct vfe32_ctrl_type *vfe32_ctrl)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&vfe32_ctrl->start_ack_lock, flags);
+	if (vfe32_ctrl->start_ack_pending == TRUE) {
+		vfe32_ctrl->start_ack_pending = FALSE;
+		spin_unlock_irqrestore(
+				&vfe32_ctrl->start_ack_lock, flags);
+		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
+			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_START_ACK);
+	} else {
+		spin_unlock_irqrestore(
+				&vfe32_ctrl->start_ack_lock, flags);
+	}
+}
+
 static void vfe32_set_default_reg_values(
 			struct vfe32_ctrl_type *vfe32_ctrl)
 {
@@ -3381,6 +3443,48 @@
 	}
 }
 
+static void vfe32_process_output_path_irq_rdi0(
+			struct axi_ctrl_t *axi_ctrl)
+{
+	uint32_t ping_pong;
+	uint32_t ch0_paddr = 0;
+	/* this must be rdi image output. */
+	struct msm_free_buf *free_buf = NULL;
+	/*RDI0*/
+	if (axi_ctrl->share_ctrl->operation_mode == VFE_OUTPUTS_RDI0) {
+		free_buf = vfe32_check_free_buffer(VFE_MSG_OUTPUT_IRQ,
+			VFE_MSG_OUTPUT_TERTIARY1, axi_ctrl);
+		if (free_buf) {
+			ping_pong = msm_camera_io_r(axi_ctrl->
+				share_ctrl->vfebase +
+				VFE_BUS_PING_PONG_STATUS);
+
+			/* Y only channel */
+			ch0_paddr = vfe32_get_ch_addr(ping_pong,
+				axi_ctrl->share_ctrl->vfebase,
+				axi_ctrl->share_ctrl->outpath.out2.ch0);
+
+			pr_debug("%s ch0 = 0x%x\n",
+				__func__, ch0_paddr);
+
+			/* Y channel */
+			vfe32_put_ch_addr(ping_pong,
+				axi_ctrl->share_ctrl->vfebase,
+				axi_ctrl->share_ctrl->outpath.out2.ch0,
+				free_buf->ch_paddr[0]);
+
+			vfe_send_outmsg(axi_ctrl,
+				MSG_ID_OUTPUT_TERTIARY1, ch0_paddr,
+				0, 0,
+				axi_ctrl->share_ctrl->outpath.out2.image_mode);
+
+		} else {
+			axi_ctrl->share_ctrl->outpath.out2.frame_drop_cnt++;
+			pr_err("path_irq_2 irq - no free buffer for rdi0!\n");
+		}
+	}
+}
+
 static uint32_t  vfe32_process_stats_irq_common(
 	struct vfe32_ctrl_type *vfe32_ctrl,
 	uint32_t statsNum, uint32_t newAddr)
@@ -3756,6 +3860,10 @@
 		CDBG("irq	regUpdateIrq\n");
 		vfe32_process_reg_update_irq(vfe32_ctrl);
 		break;
+	case VFE_IRQ_STATUS1_RDI0_REG_UPDATE:
+		CDBG("irq	rdi0 regUpdateIrq\n");
+		vfe32_process_rdi0_reg_update_irq(vfe32_ctrl);
+		break;
 	case VFE_IMASK_WHILE_STOPPING_1:
 		CDBG("irq	resetAckIrq\n");
 		vfe32_process_reset_irq(vfe32_ctrl);
@@ -3845,6 +3953,12 @@
 				(void *)VFE_IRQ_STATUS0_REG_UPDATE_MASK);
 
 		if (qcmd->vfeInterruptStatus1 &
+				VFE_IRQ_STATUS1_RDI0_REG_UPDATE_MASK)
+			v4l2_subdev_notify(&axi_ctrl->subdev,
+				NOTIFY_VFE_IRQ,
+				(void *)VFE_IRQ_STATUS1_RDI0_REG_UPDATE);
+
+		if (qcmd->vfeInterruptStatus1 &
 				VFE_IMASK_WHILE_STOPPING_1)
 			v4l2_subdev_notify(&axi_ctrl->subdev,
 				NOTIFY_VFE_IRQ,
@@ -4284,6 +4398,7 @@
 	spin_lock_init(&vfe32_ctrl->state_lock);
 	spin_lock_init(&vfe32_ctrl->io_lock);
 	spin_lock_init(&vfe32_ctrl->update_ack_lock);
+	spin_lock_init(&vfe32_ctrl->start_ack_lock);
 
 	spin_lock_init(&vfe32_ctrl->aec_ack_lock);
 	spin_lock_init(&vfe32_ctrl->awb_ack_lock);
@@ -4366,6 +4481,11 @@
 				share_ctrl->outpath.out0.ch2]);
 		}
 		break;
+	case VFE_OUTPUTS_RDI0:
+		msm_camera_io_w(1, axi_ctrl->share_ctrl->vfebase +
+			vfe32_AXI_WM_CFG[axi_ctrl->
+			share_ctrl->outpath.out2.ch0]);
+		break;
 	default:
 		if (axi_ctrl->share_ctrl->outpath.output_mode &
 			VFE32_OUTPUT_MODE_SECONDARY) {
@@ -4467,8 +4587,8 @@
 		}
 		vfe32_config_axi(axi_ctrl, OUTPUT_PRIM, axio);
 		kfree(axio);
-	}
 		break;
+		}
 	case CMD_AXI_CFG_PRIM_ALL_CHNLS: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -4486,8 +4606,8 @@
 		}
 		vfe32_config_axi(axi_ctrl, OUTPUT_PRIM_ALL_CHNLS, axio);
 		kfree(axio);
-	}
 		break;
+		}
 	case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -4505,8 +4625,8 @@
 		}
 		vfe32_config_axi(axi_ctrl, OUTPUT_PRIM|OUTPUT_SEC, axio);
 		kfree(axio);
-	}
 		break;
+		}
 	case CMD_AXI_CFG_PRIM|CMD_AXI_CFG_SEC_ALL_CHNLS: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -4525,8 +4645,8 @@
 		vfe32_config_axi(axi_ctrl,
 			OUTPUT_PRIM|OUTPUT_SEC_ALL_CHNLS, axio);
 		kfree(axio);
-	}
 		break;
+		}
 	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC: {
 		uint32_t *axio = NULL;
 		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
@@ -4545,8 +4665,28 @@
 		vfe32_config_axi(axi_ctrl,
 			OUTPUT_PRIM_ALL_CHNLS|OUTPUT_SEC, axio);
 		kfree(axio);
-	}
 		break;
+		}
+	case CMD_AXI_CFG_TERT1: {
+		uint32_t *axio = NULL;
+		axio = kmalloc(vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length,
+				GFP_ATOMIC);
+		if (!axio) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(axio, (void __user *)(vfecmd.value),
+				vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length)) {
+			kfree(axio);
+			rc = -EFAULT;
+			break;
+		}
+		vfe32_config_axi(axi_ctrl,
+			OUTPUT_TERT1, axio);
+		kfree(axio);
+		break;
+		}
 	case CMD_AXI_CFG_PRIM_ALL_CHNLS|CMD_AXI_CFG_SEC_ALL_CHNLS:
 		pr_err("%s Invalid/Unsupported AXI configuration %x",
 			__func__, cfgcmd.cmd_type);
@@ -4585,6 +4725,13 @@
 		CDBG("Image composite done 1 irq occured.\n");
 		vfe32_process_output_path_irq_1(axi_ctrl);
 	}
+
+	if (axi_ctrl->share_ctrl->outpath.output_mode &
+		VFE32_OUTPUT_MODE_TERTIARY1)
+		if (irqstatus & (0x1 << (axi_ctrl->share_ctrl->outpath.out2.ch0
+			+ VFE_WM_OFFSET)))
+			vfe32_process_output_path_irq_rdi0(axi_ctrl);
+
 	/* in snapshot mode if done then send
 	snapshot done message */
 	if (axi_ctrl->share_ctrl->operation_mode ==
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/msm_vfe32.h
index d5da432..1746f3f 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/msm_vfe32.h
@@ -87,6 +87,9 @@
  * the luma samples.  JPEG 4:2:2 */
 #define VFE_CHROMA_UPSAMPLE_INTERPOLATED 0
 
+/* wm bit offset for IRQ MASK and IRQ STATUS register */
+#define VFE_WM_OFFSET 6
+
 /* constants for irq registers */
 #define VFE_DISABLE_ALL_IRQS 0
 /* bit =1 is to clear the corresponding bit in VFE_IRQ_STATUS.  */
@@ -115,6 +118,17 @@
 #define VFE_IRQ_STATUS0_ASYNC_TIMER2  0x40000000  /* bit 30 */
 #define VFE_IRQ_STATUS0_ASYNC_TIMER3  0x80000000  /* bit 32 */
 
+#define VFE_IRQ_STATUS1_RDI0_REG_UPDATE_MASK  0x4000000 /*bit 26*/
+#define VFE_IRQ_STATUS1_RDI1_REG_UPDATE_MASK  0x8000000 /*bit 27*/
+
+/* TODO: the irq status passed from the axi to the vfe irq handler does not
+ * account for the 2 irq status registers, so the macros below are added to
+ * differentiate between the same bit being set on both irq status registers.
+ * This will be fixed later by passing the entire payload to the vfe irq
+ * handler and parsing it there instead of passing just the status bit. */
+#define VFE_IRQ_STATUS1_RDI0_REG_UPDATE  0x84000000 /*bit 26*/
+#define VFE_IRQ_STATUS1_RDI1_REG_UPDATE  0x88000000 /*bit 27*/
+
 /* imask for while waiting for stop ack,  driver has already
  * requested stop, waiting for reset irq, and async timer irq.
  * For irq_status_0, bit 28-32 are for async timer. For
@@ -788,7 +802,7 @@
 
 	struct vfe32_output_ch out0; /* preview and thumbnail */
 	struct vfe32_output_ch out1; /* snapshot */
-	struct vfe32_output_ch out2; /* video    */
+	struct vfe32_output_ch out2; /* rdi0    */
 };
 
 struct vfe32_frame_extra {
@@ -893,6 +907,7 @@
 #define VFE32_OUTPUT_MODE_PRIMARY_ALL_CHNLS	BIT(7)
 #define VFE32_OUTPUT_MODE_SECONDARY		BIT(8)
 #define VFE32_OUTPUT_MODE_SECONDARY_ALL_CHNLS	BIT(9)
+#define VFE32_OUTPUT_MODE_TERTIARY1		BIT(10)
 
 struct vfe_stats_control {
 	uint8_t  ackPending;
@@ -946,6 +961,7 @@
 	uint32_t vfeImaskCompositePacked;
 
 	spinlock_t  update_ack_lock;
+	spinlock_t  start_ack_lock;
 	spinlock_t  state_lock;
 	spinlock_t  io_lock;
 
diff --git a/drivers/media/video/msm/sensors/ov7692_v4l2.c b/drivers/media/video/msm/sensors/ov7692_v4l2.c
index c25eba9..6fc1da1 100644
--- a/drivers/media/video/msm/sensors/ov7692_v4l2.c
+++ b/drivers/media/video/msm/sensors/ov7692_v4l2.c
@@ -585,7 +585,8 @@
 static struct msm_camera_i2c_reg_conf ov7692_wb_oem[][4] = {
 	{{-1, -1, -1}, {-1, -1, -1}, {-1, -1, -1},
 		{-1, -1, -1},},/*WHITEBALNACE OFF*/
-	{{0x13, 0xf7}, {0x15, 0x00},},		/*WHITEBALNACE AUTO*/
+	{{0x13, 0xf7}, {0x15, 0x00}, {-1, -1, -1},
+		{-1, -1, -1},}, /*WHITEBALNACE AUTO*/
 	{{0x13, 0xf5}, {0x01, 0x56}, {0x02, 0x50},
 		{0x15, 0x00},},	/*WHITEBALNACE CUSTOM*/
 	{{0x13, 0xf5}, {0x01, 0x66}, {0x02, 0x40},
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 8b1d5e6..e569132 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1016,13 +1016,8 @@
 		send_check_condition = 1;
 		goto attach_cmd;
 	}
-	/*
-	 * The Initiator Node has access to the LUN (the addressing method
-	 * is handled inside of iscsit_get_lun_for_cmd()).  Now it's time to
-	 * allocate 1->N transport tasks (depending on sector count and
-	 * maximum request size the physical HBA(s) can handle.
-	 */
-	transport_ret = transport_generic_allocate_tasks(&cmd->se_cmd, hdr->cdb);
+
+	transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
 	if (transport_ret == -ENOMEM) {
 		return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index a9b4eee..38dfac2 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -213,7 +213,7 @@
 	 * associated read buffers, go ahead and do that here for type
 	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
 	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
-	 * by target core in transport_generic_allocate_tasks() ->
+	 * by target core in target_setup_cmd_from_cdb() ->
 	 * transport_generic_cmd_sequencer().
 	 */
 	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
@@ -227,7 +227,7 @@
 		}
 	}
 
-	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+	ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
 	if (ret == -ENOMEM) {
 		transport_send_check_condition_and_sense(se_cmd,
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 443704f..843ad54 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1550,11 +1550,11 @@
 	return 0;
 }
 
-/*	transport_generic_allocate_tasks():
+/*	target_setup_cmd_from_cdb():
  *
  *	Called from fabric RX Thread.
  */
-int transport_generic_allocate_tasks(
+int target_setup_cmd_from_cdb(
 	struct se_cmd *cmd,
 	unsigned char *cdb)
 {
@@ -1620,7 +1620,7 @@
 	spin_unlock(&cmd->se_lun->lun_sep_lock);
 	return 0;
 }
-EXPORT_SYMBOL(transport_generic_allocate_tasks);
+EXPORT_SYMBOL(target_setup_cmd_from_cdb);
 
 /*
  * Used by fabric module frontends to queue tasks directly.
@@ -1701,6 +1701,8 @@
 	 */
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
 				data_length, data_dir, task_attr, sense);
+	if (flags & TARGET_SCF_UNKNOWN_SIZE)
+		se_cmd->unknown_data_length = 1;
 	/*
 	 * Obtain struct se_cmd->cmd_kref reference and add new cmd to
 	 * se_sess->sess_cmd_list.  A second kref_get here is necessary
@@ -1726,7 +1728,7 @@
 	 * Sanitize CDBs via transport_generic_cmd_sequencer() and
 	 * allocate the necessary tasks to complete the received CDB+data
 	 */
-	rc = transport_generic_allocate_tasks(se_cmd, cdb);
+	rc = target_setup_cmd_from_cdb(se_cmd, cdb);
 	if (rc != 0) {
 		transport_generic_request_failure(se_cmd);
 		return;
@@ -2581,7 +2583,7 @@
  *	Generic Command Sequencer that should work for most DAS transport
  *	drivers.
  *
- *	Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
+ *	Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
  *	RX Thread.
  *
  *	FIXME: Need to support other SCSI OPCODES where as well.
@@ -3142,6 +3144,9 @@
 		goto out_unsupported_cdb;
 	}
 
+	if (cmd->unknown_data_length)
+		cmd->data_length = size;
+
 	if (size != cmd->data_length) {
 		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index fbe0dd7..d99a02a 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -66,6 +66,7 @@
 config THERMAL_MONITOR
 	bool "Monitor thermal state and limit CPU Frequency"
 	depends on THERMAL_TSENS8960
+	depends on CPU_FREQ_MSM
 	default n
 	help
 	  This enables thermal monitoring capability in the kernel in the
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index e0d8d14..a8d3720 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -14,60 +14,51 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/cpufreq.h>
 #include <linux/mutex.h>
 #include <linux/msm_tsens.h>
 #include <linux/workqueue.h>
 #include <linux/cpu.h>
-
-#define DEF_TEMP_SENSOR      0
-#define DEF_THERMAL_CHECK_MS 1000
-#define DEF_ALLOWED_MAX_HIGH 60
-#define DEF_ALLOWED_MAX_FREQ 918000
+#include <linux/cpufreq.h>
+#include <linux/msm_tsens.h>
+#include <linux/msm_thermal.h>
+#include <mach/cpufreq.h>
 
 static int enabled;
-static int allowed_max_high = DEF_ALLOWED_MAX_HIGH;
-static int allowed_max_low = (DEF_ALLOWED_MAX_HIGH - 10);
-static int allowed_max_freq = DEF_ALLOWED_MAX_FREQ;
-static int check_interval_ms = DEF_THERMAL_CHECK_MS;
-
-module_param(allowed_max_high, int, 0);
-module_param(allowed_max_freq, int, 0);
-module_param(check_interval_ms, int, 0);
-
+static struct msm_thermal_data msm_thermal_info;
+static uint32_t limited_max_freq = MSM_CPUFREQ_NO_LIMIT;
 static struct delayed_work check_temp_work;
 
-static int update_cpu_max_freq(struct cpufreq_policy *cpu_policy,
-			       int cpu, int max_freq)
+static int update_cpu_max_freq(int cpu, uint32_t max_freq)
 {
 	int ret = 0;
 
-	if (!cpu_policy)
-		return -EINVAL;
-
-	cpufreq_verify_within_limits(cpu_policy,
-				cpu_policy->min, max_freq);
-	cpu_policy->user_policy.max = max_freq;
+	ret = msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT, max_freq);
+	if (ret)
+		return ret;
 
 	ret = cpufreq_update_policy(cpu);
-	if (!ret)
-		pr_info("msm_thermal: Limiting core%d max frequency to %d\n",
-			cpu, max_freq);
+	if (ret)
+		return ret;
+
+	limited_max_freq = max_freq;
+	if (max_freq != MSM_CPUFREQ_NO_LIMIT)
+		pr_info("msm_thermal: Limiting cpu%d max frequency to %d\n",
+				cpu, max_freq);
+	else
+		pr_info("msm_thermal: Max frequency reset for cpu%d\n", cpu);
 
 	return ret;
 }
 
 static void check_temp(struct work_struct *work)
 {
-	struct cpufreq_policy *cpu_policy = NULL;
 	struct tsens_device tsens_dev;
 	unsigned long temp = 0;
-	unsigned int max_freq = 0;
-	int update_policy = 0;
+	uint32_t max_freq = limited_max_freq;
 	int cpu = 0;
 	int ret = 0;
 
-	tsens_dev.sensor_num = DEF_TEMP_SENSOR;
+	tsens_dev.sensor_num = msm_thermal_info.sensor_id;
 	ret = tsens_get_temp(&tsens_dev, &temp);
 	if (ret) {
 		pr_debug("msm_thermal: Unable to read TSENS sensor %d\n",
@@ -75,61 +66,42 @@
 		goto reschedule;
 	}
 
+	if (temp >= msm_thermal_info.limit_temp)
+		max_freq = msm_thermal_info.limit_freq;
+	else if (temp <
+		msm_thermal_info.limit_temp - msm_thermal_info.temp_hysteresis)
+		max_freq = MSM_CPUFREQ_NO_LIMIT;
+
+	if (max_freq == limited_max_freq)
+		goto reschedule;
+
+	/* Update new limits */
 	for_each_possible_cpu(cpu) {
-		update_policy = 0;
-		cpu_policy = cpufreq_cpu_get(cpu);
-		if (!cpu_policy) {
-			pr_debug("msm_thermal: NULL policy on cpu %d\n", cpu);
-			continue;
-		}
-		if (temp >= allowed_max_high) {
-			if (cpu_policy->max > allowed_max_freq) {
-				update_policy = 1;
-				max_freq = allowed_max_freq;
-			} else {
-				pr_debug("msm_thermal: policy max for cpu %d "
-					 "already < allowed_max_freq\n", cpu);
-			}
-		} else if (temp < allowed_max_low) {
-			if (cpu_policy->max < cpu_policy->cpuinfo.max_freq) {
-				max_freq = cpu_policy->cpuinfo.max_freq;
-				update_policy = 1;
-			} else {
-				pr_debug("msm_thermal: policy max for cpu %d "
-					 "already at max allowed\n", cpu);
-			}
-		}
-
-		if (update_policy)
-			update_cpu_max_freq(cpu_policy, cpu, max_freq);
-
-		cpufreq_cpu_put(cpu_policy);
+		ret = update_cpu_max_freq(cpu, max_freq);
+		if (ret)
+			pr_debug("Unable to limit cpu%d max freq to %d\n",
+					cpu, max_freq);
 	}
 
 reschedule:
 	if (enabled)
 		schedule_delayed_work(&check_temp_work,
-				msecs_to_jiffies(check_interval_ms));
+				msecs_to_jiffies(msm_thermal_info.poll_ms));
 }
 
 static void disable_msm_thermal(void)
 {
 	int cpu = 0;
-	struct cpufreq_policy *cpu_policy = NULL;
 
 	/* make sure check_temp is no longer running */
 	cancel_delayed_work(&check_temp_work);
 	flush_scheduled_work();
 
+	if (limited_max_freq == MSM_CPUFREQ_NO_LIMIT)
+		return;
+
 	for_each_possible_cpu(cpu) {
-		cpu_policy = cpufreq_cpu_get(cpu);
-		if (cpu_policy) {
-			if (cpu_policy->max < cpu_policy->cpuinfo.max_freq)
-				update_cpu_max_freq(cpu_policy, cpu,
-						    cpu_policy->
-						    cpuinfo.max_freq);
-			cpufreq_cpu_put(cpu_policy);
-		}
+		update_cpu_max_freq(cpu, MSM_CPUFREQ_NO_LIMIT);
 	}
 }
 
@@ -156,16 +128,17 @@
 module_param_cb(enabled, &module_ops, &enabled, 0644);
 MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");
 
-static int __init msm_thermal_init(void)
+int __init msm_thermal_init(struct msm_thermal_data *pdata)
 {
 	int ret = 0;
 
+	BUG_ON(!pdata);
+	BUG_ON(pdata->sensor_id >= TSENS_MAX_SENSORS);
+	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
+
 	enabled = 1;
 	INIT_DELAYED_WORK(&check_temp_work, check_temp);
-
 	schedule_delayed_work(&check_temp_work, 0);
 
 	return ret;
 }
-fs_initcall(msm_thermal_init);
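
The rewritten check_temp() above reduces to a hysteresis decision driven by the
board's msm_thermal_data: cap to limit_freq at or above limit_temp, lift the cap
only once the temperature falls below limit_temp - temp_hysteresis, and otherwise
keep the current cap. A self-contained userspace sketch of that decision; the
NO_LIMIT constant stands in for MSM_CPUFREQ_NO_LIMIT and the sample temperatures
are made up:

#include <stdio.h>
#include <inttypes.h>

#define NO_LIMIT UINT32_MAX	/* stand-in for MSM_CPUFREQ_NO_LIMIT */

struct msm_thermal_data {
	uint32_t limit_temp;		/* degC: start limiting here          */
	uint32_t temp_hysteresis;	/* degC: release below limit - hyst   */
	uint32_t limit_freq;		/* kHz cap applied while hot          */
};

/* Same decision as check_temp(): apply the cap when hot, release it only
 * after the temperature has fallen past the hysteresis band, and leave the
 * current cap alone in between so the limit does not thrash. */
static uint32_t next_max_freq(const struct msm_thermal_data *d,
			      unsigned long temp, uint32_t current_cap)
{
	if (temp >= d->limit_temp)
		return d->limit_freq;
	if (temp < d->limit_temp - d->temp_hysteresis)
		return NO_LIMIT;
	return current_cap;
}

int main(void)
{
	const struct msm_thermal_data d = {
		.limit_temp = 60, .temp_hysteresis = 10, .limit_freq = 918000,
	};
	unsigned long temps[] = { 45, 61, 55, 49, 40 };	/* made-up samples */
	uint32_t cap = NO_LIMIT;
	size_t i;

	for (i = 0; i < sizeof(temps) / sizeof(temps[0]); i++) {
		cap = next_max_freq(&d, temps[i], cap);
		printf("temp=%lu -> cap=%" PRIu32 "\n", temps[i], cap);
	}
	return 0;
}
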
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 95f11c1..87b307c 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -854,6 +854,16 @@
 	  Say "y" to link the driver statically, or "m" to build
 	  a dynamically linked module called "g_mass_storage".
 
+config USB_GADGET_TARGET
+	tristate "USB Gadget Target Fabric Module"
+	depends on TARGET_CORE
+	help
+	  This fabric is a USB gadget. Two USB protocols are supported:
+	  BBB or BOT (Bulk Only Transport) and UAS (USB Attached SCSI). BOT is
+	  advertised on alternative interface 0 (primary) and UAS is on
+	  alternative interface 1. Both protocols can work on USB2.0 and USB3.0.
+	  UAS utilizes the USB 3.0 feature called streams support.
+
 config USB_G_SERIAL
 	tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
 	help
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index c646c9f..b8f5149 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -54,6 +54,7 @@
 g_webcam-y			:= webcam.o
 g_ncm-y				:= ncm.o
 g_acm_ms-y			:= acm_ms.o
+g_tcm_usb_gadget-y		:= tcm_usb_gadget.o
 g_android-y			:= android.o
 
 obj-$(CONFIG_USB_ZERO)		+= g_zero.o
@@ -74,4 +75,5 @@
 obj-$(CONFIG_USB_G_WEBCAM)	+= g_webcam.o
 obj-$(CONFIG_USB_G_NCM)		+= g_ncm.o
 obj-$(CONFIG_USB_G_ACM_MS)	+= g_acm_ms.o
+obj-$(CONFIG_USB_GADGET_TARGET)	+= tcm_usb_gadget.o
 obj-$(CONFIG_USB_G_ANDROID)	+= g_android.o
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index be8e6aa..b5a7291 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -1438,7 +1438,6 @@
 
 	sscanf(buff, "%d", &enabled);
 	if (enabled && !dev->enabled) {
-		cdev->next_string_id = 0;
 		/*
 		 * Update values in composite driver's copy of
 		 * device descriptor.
diff --git a/drivers/usb/gadget/ci13xxx_msm_hsic.c b/drivers/usb/gadget/ci13xxx_msm_hsic.c
index 30b45eb..f353b07 100644
--- a/drivers/usb/gadget/ci13xxx_msm_hsic.c
+++ b/drivers/usb/gadget/ci13xxx_msm_hsic.c
@@ -381,7 +381,7 @@
 	 */
 	mb();
 
-	if (!mhsic->pdata->keep_core_clk_on_suspend_workaround) {
+	if (!mhsic->pdata->core_clk_always_on_workaround) {
 		clk_disable(mhsic->iface_clk);
 		clk_disable(mhsic->core_clk);
 	}
@@ -438,7 +438,7 @@
 		dev_err(mhsic->dev, "%s failed to vote for TCXO %d\n",
 				__func__, ret);
 
-	if (!mhsic->pdata->keep_core_clk_on_suspend_workaround) {
+	if (!mhsic->pdata->core_clk_always_on_workaround) {
 		clk_enable(mhsic->iface_clk);
 		clk_enable(mhsic->core_clk);
 	}
diff --git a/drivers/usb/gadget/f_mbim.c b/drivers/usb/gadget/f_mbim.c
index 41a1777..6883b78 100644
--- a/drivers/usb/gadget/f_mbim.c
+++ b/drivers/usb/gadget/f_mbim.c
@@ -795,6 +795,7 @@
 		spin_unlock(&mbim->lock);
 		mbim_clear_queues(mbim);
 		mbim_reset_function_queue(mbim);
+		spin_lock(&mbim->lock);
 		break;
 	default:
 		pr_err("Unknown event %02x --> %d\n",
@@ -1450,8 +1451,6 @@
 
 	mbim->cdev = c->cdev;
 
-	spin_lock_init(&mbim->lock);
-
 	mbim_reset_values(mbim);
 
 	mbim->function.name = "usb_mbim";
@@ -1615,6 +1614,7 @@
 	pr_debug("Exit(%d)", count);
 
 	return count;
+
 }
 
 static int mbim_open(struct inode *ip, struct file *fp)
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
new file mode 100644
index 0000000..c46439c
--- /dev/null
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -0,0 +1,2480 @@
+/* Target based USB-Gadget
+ *
+ * UAS protocol handling, target callbacks, configfs handling,
+ * and BBB (USB Mass Storage Class Bulk-Only) transport protocol handling.
+ *
+ * Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
+ * License: GPLv2 as published by FSF.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/storage.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+#include <asm/unaligned.h>
+
+#include "usbstring.c"
+#include "epautoconf.c"
+#include "config.c"
+#include "composite.c"
+
+#include "tcm_usb_gadget.h"
+
+static struct target_fabric_configfs *usbg_fabric_configfs;
+
+static inline struct f_uas *to_f_uas(struct usb_function *f)
+{
+	return container_of(f, struct f_uas, function);
+}
+
+static void usbg_cmd_release(struct kref *);
+
+static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
+{
+	kref_put(&cmd->ref, usbg_cmd_release);
+}
+
+/* Start bot.c code */
+
+static int bot_enqueue_cmd_cbw(struct f_uas *fu)
+{
+	int ret;
+
+	if (fu->flags & USBG_BOT_CMD_PEND)
+		return 0;
+
+	ret = usb_ep_queue(fu->ep_out, fu->cmd.req, GFP_ATOMIC);
+	if (!ret)
+		fu->flags |= USBG_BOT_CMD_PEND;
+	return ret;
+}
+
+static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+	struct f_uas *fu = cmd->fu;
+
+	usbg_cleanup_cmd(cmd);
+	if (req->status < 0) {
+		pr_err("ERR %s(%d)\n", __func__, __LINE__);
+		return;
+	}
+
+	/* CSW completed, wait for next CBW */
+	bot_enqueue_cmd_cbw(fu);
+}
+
+static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
+{
+	struct bulk_cs_wrap *csw = &fu->bot_status.csw;
+	int ret;
+	u8 *sense;
+	unsigned int csw_stat;
+
+	csw_stat = cmd->csw_code;
+
+	/*
+	 * We can't send SENSE as a response. So we take ASC & ASCQ from our
+	 * sense buffer and queue it and hope the host sends a REQUEST_SENSE
+	 * command where it learns why we failed.
+	 */
+	sense = cmd->sense_iu.sense;
+
+	csw->Tag = cmd->bot_tag;
+	csw->Status = csw_stat;
+	fu->bot_status.req->context = cmd;
+	ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
+	if (ret)
+		pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
+}
+
+static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+	struct f_uas *fu = cmd->fu;
+
+	if (req->status < 0)
+		pr_err("ERR %s(%d)\n", __func__, __LINE__);
+
+	if (cmd->data_len) {
+		if (cmd->data_len > ep->maxpacket) {
+			req->length = ep->maxpacket;
+			cmd->data_len -= ep->maxpacket;
+		} else {
+			req->length = cmd->data_len;
+			cmd->data_len = 0;
+		}
+
+		usb_ep_queue(ep, req, GFP_ATOMIC);
+		return;
+	}
+	bot_enqueue_sense_code(fu, cmd);
+}
+
+static void bot_send_bad_status(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct bulk_cs_wrap *csw = &fu->bot_status.csw;
+	struct usb_request *req;
+	struct usb_ep *ep;
+
+	csw->Residue = cpu_to_le32(cmd->data_len);
+
+	if (cmd->data_len) {
+		if (cmd->is_read) {
+			ep = fu->ep_in;
+			req = fu->bot_req_in;
+		} else {
+			ep = fu->ep_out;
+			req = fu->bot_req_out;
+		}
+
+		if (cmd->data_len > fu->ep_in->maxpacket) {
+			req->length = ep->maxpacket;
+			cmd->data_len -= ep->maxpacket;
+		} else {
+			req->length = cmd->data_len;
+			cmd->data_len = 0;
+		}
+		req->complete = bot_err_compl;
+		req->context = cmd;
+		req->buf = fu->cmd.buf;
+		usb_ep_queue(ep, req, GFP_KERNEL);
+	} else {
+		bot_enqueue_sense_code(fu, cmd);
+	}
+}
+
+static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
+{
+	struct f_uas *fu = cmd->fu;
+	struct bulk_cs_wrap *csw = &fu->bot_status.csw;
+	int ret;
+
+	if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
+		if (!moved_data && cmd->data_len) {
+			/*
+			 * the host wants to move data, we don't. Fill / empty
+			 * the pipe and then send the csw with residue set.
+			 */
+			cmd->csw_code = US_BULK_STAT_OK;
+			bot_send_bad_status(cmd);
+			return 0;
+		}
+
+		csw->Tag = cmd->bot_tag;
+		csw->Residue = cpu_to_le32(0);
+		csw->Status = US_BULK_STAT_OK;
+		fu->bot_status.req->context = cmd;
+
+		ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
+		if (ret)
+			pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
+	} else {
+		cmd->csw_code = US_BULK_STAT_FAIL;
+		bot_send_bad_status(cmd);
+	}
+	return 0;
+}
+
+/*
+ * Called after command (no data transfer) or after the write (to device)
+ * operation is completed
+ */
+static int bot_send_status_response(struct usbg_cmd *cmd)
+{
+	bool moved_data = false;
+
+	if (!cmd->is_read)
+		moved_data = true;
+	return bot_send_status(cmd, moved_data);
+}
+
+/* Read request completed, now we have to send the CSW */
+static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+
+	if (req->status < 0)
+		pr_err("ERR %s(%d)\n", __func__, __LINE__);
+
+	bot_send_status(cmd, true);
+}
+
+static int bot_send_read_response(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+	int ret;
+
+	if (!cmd->data_len) {
+		cmd->csw_code = US_BULK_STAT_PHASE;
+		bot_send_bad_status(cmd);
+		return 0;
+	}
+
+	if (!gadget->sg_supported) {
+		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+		if (!cmd->data_buf)
+			return -ENOMEM;
+
+		sg_copy_to_buffer(se_cmd->t_data_sg,
+				se_cmd->t_data_nents,
+				cmd->data_buf,
+				se_cmd->data_length);
+
+		fu->bot_req_in->buf = cmd->data_buf;
+	} else {
+		fu->bot_req_in->buf = NULL;
+		fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
+		fu->bot_req_in->sg = se_cmd->t_data_sg;
+	}
+
+	fu->bot_req_in->complete = bot_read_compl;
+	fu->bot_req_in->length = se_cmd->data_length;
+	fu->bot_req_in->context = cmd;
+	ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
+	if (ret)
+		pr_err("%s(%d)\n", __func__, __LINE__);
+	return 0;
+}
+
+static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
+static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
+
+static int bot_send_write_request(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+	int ret;
+
+	init_completion(&cmd->write_complete);
+	cmd->fu = fu;
+
+	if (!cmd->data_len) {
+		cmd->csw_code = US_BULK_STAT_PHASE;
+		return -EINVAL;
+	}
+
+	if (!gadget->sg_supported) {
+		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
+		if (!cmd->data_buf)
+			return -ENOMEM;
+
+		fu->bot_req_out->buf = cmd->data_buf;
+	} else {
+		fu->bot_req_out->buf = NULL;
+		fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
+		fu->bot_req_out->sg = se_cmd->t_data_sg;
+	}
+
+	fu->bot_req_out->complete = usbg_data_write_cmpl;
+	fu->bot_req_out->length = se_cmd->data_length;
+	fu->bot_req_out->context = cmd;
+
+	ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
+	if (ret)
+		goto cleanup;
+	ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
+	if (ret)
+		pr_err("%s(%d)\n", __func__, __LINE__);
+
+	wait_for_completion(&cmd->write_complete);
+	transport_generic_process_write(se_cmd);
+cleanup:
+	return ret;
+}
+
+static int bot_submit_command(struct f_uas *, void *, unsigned int);
+
+static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_uas *fu = req->context;
+	int ret;
+
+	fu->flags &= ~USBG_BOT_CMD_PEND;
+
+	if (req->status < 0)
+		return;
+
+	ret = bot_submit_command(fu, req->buf, req->actual);
+	if (ret)
+		pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
+}
+
+static int bot_prepare_reqs(struct f_uas *fu)
+{
+	int ret;
+
+	fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
+	if (!fu->bot_req_in)
+		goto err;
+
+	fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+	if (!fu->bot_req_out)
+		goto err_out;
+
+	fu->cmd.req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+	if (!fu->cmd.req)
+		goto err_cmd;
+
+	fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
+	if (!fu->bot_status.req)
+		goto err_sts;
+
+	fu->bot_status.req->buf = &fu->bot_status.csw;
+	fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
+	fu->bot_status.req->complete = bot_status_complete;
+	fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
+
+	fu->cmd.buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
+	if (!fu->cmd.buf)
+		goto err_buf;
+
+	fu->cmd.req->complete = bot_cmd_complete;
+	fu->cmd.req->buf = fu->cmd.buf;
+	fu->cmd.req->length = fu->ep_out->maxpacket;
+	fu->cmd.req->context = fu;
+
+	ret = bot_enqueue_cmd_cbw(fu);
+	if (ret)
+		goto err_queue;
+	return 0;
+err_queue:
+	kfree(fu->cmd.buf);
+	fu->cmd.buf = NULL;
+err_buf:
+	usb_ep_free_request(fu->ep_in, fu->bot_status.req);
+err_sts:
+	usb_ep_free_request(fu->ep_out, fu->cmd.req);
+	fu->cmd.req = NULL;
+err_cmd:
+	usb_ep_free_request(fu->ep_out, fu->bot_req_out);
+	fu->bot_req_out = NULL;
+err_out:
+	usb_ep_free_request(fu->ep_in, fu->bot_req_in);
+	fu->bot_req_in = NULL;
+err:
+	pr_err("BOT: endpoint setup failed\n");
+	return -ENOMEM;
+}
+
+void bot_cleanup_old_alt(struct f_uas *fu)
+{
+	if (!(fu->flags & USBG_ENABLED))
+		return;
+
+	usb_ep_disable(fu->ep_in);
+	usb_ep_disable(fu->ep_out);
+
+	if (!fu->bot_req_in)
+		return;
+
+	usb_ep_free_request(fu->ep_in, fu->bot_req_in);
+	usb_ep_free_request(fu->ep_out, fu->bot_req_out);
+	usb_ep_free_request(fu->ep_out, fu->cmd.req);
+	usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+
+	kfree(fu->cmd.buf);
+
+	fu->bot_req_in = NULL;
+	fu->bot_req_out = NULL;
+	fu->cmd.req = NULL;
+	fu->bot_status.req = NULL;
+	fu->cmd.buf = NULL;
+}
+
+static void bot_set_alt(struct f_uas *fu)
+{
+	struct usb_function *f = &fu->function;
+	struct usb_gadget *gadget = f->config->cdev->gadget;
+	int ret;
+
+	fu->flags = USBG_IS_BOT;
+
+	config_ep_by_speed(gadget, f, fu->ep_in);
+	ret = usb_ep_enable(fu->ep_in);
+	if (ret)
+		goto err_b_in;
+
+	config_ep_by_speed(gadget, f, fu->ep_out);
+	ret = usb_ep_enable(fu->ep_out);
+	if (ret)
+		goto err_b_out;
+
+	ret = bot_prepare_reqs(fu);
+	if (ret)
+		goto err_wq;
+	fu->flags |= USBG_ENABLED;
+	pr_info("Using the BOT protocol\n");
+	return;
+err_wq:
+	usb_ep_disable(fu->ep_out);
+err_b_out:
+	usb_ep_disable(fu->ep_in);
+err_b_in:
+	fu->flags = USBG_IS_BOT;
+}
+
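+/*
+ * Class-specific control requests for the BOT alt setting: Get Max LUN and
+ * Bulk-Only Mass Storage Reset.
+ */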
+static int usbg_bot_setup(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_uas *fu = to_f_uas(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+	int luns;
+	u8 *ret_lun;
+
+	switch (ctrl->bRequest) {
+	case US_BULK_GET_MAX_LUN:
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
+					USB_RECIP_INTERFACE))
+			return -ENOTSUPP;
+
+		if (w_length < 1)
+			return -EINVAL;
+		if (w_value != 0)
+			return -EINVAL;
+		luns = atomic_read(&fu->tpg->tpg_port_count);
+		if (!luns) {
+			pr_err("No LUNs configured?\n");
+			return -EINVAL;
+		}
+		/*
+		 * If 4 LUNs are present we return 3, i.e. LUNs 0..3 can be
+		 * accessed. The upper limit is 0xf.
+		 */
+		luns--;
+		if (luns > 0xf) {
+			pr_info_once("Limiting the number of luns to 16\n");
+			luns = 0xf;
+		}
+		ret_lun = cdev->req->buf;
+		*ret_lun = luns;
+		cdev->req->length = 1;
+		return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+
+	case US_BULK_RESET_REQUEST:
+		/* XXX maybe we should remove previous requests for IN + OUT */
+		bot_enqueue_cmd_cbw(fu);
+		return 0;
+	}
+	return -ENOTSUPP;
+}
+
+/* Start uas.c code */
+
+static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
+{
+	/* We have either all three allocated or none */
+	if (!stream->req_in)
+		return;
+
+	usb_ep_free_request(fu->ep_in, stream->req_in);
+	usb_ep_free_request(fu->ep_out, stream->req_out);
+	usb_ep_free_request(fu->ep_status, stream->req_status);
+
+	stream->req_in = NULL;
+	stream->req_out = NULL;
+	stream->req_status = NULL;
+}
+
+static void uasp_free_cmdreq(struct f_uas *fu)
+{
+	usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
+	kfree(fu->cmd.buf);
+	fu->cmd.req = NULL;
+	fu->cmd.buf = NULL;
+}
+
+static void uasp_cleanup_old_alt(struct f_uas *fu)
+{
+	int i;
+
+	if (!(fu->flags & USBG_ENABLED))
+		return;
+
+	usb_ep_disable(fu->ep_in);
+	usb_ep_disable(fu->ep_out);
+	usb_ep_disable(fu->ep_status);
+	usb_ep_disable(fu->ep_cmd);
+
+	for (i = 0; i < UASP_SS_EP_COMP_NUM_STREAMS; i++)
+		uasp_cleanup_one_stream(fu, &fu->stream[i]);
+	uasp_free_cmdreq(fu);
+}
+
+static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
+
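+/*
+ * Prepare the Data-In request for a read: copy into a bounce buffer if the
+ * UDC has no scatter-gather support, otherwise hand over the se_cmd sg list
+ * directly, then advance the state machine to SEND_STATUS.
+ */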
+static int uasp_prepare_r_request(struct usbg_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct f_uas *fu = cmd->fu;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+	struct uas_stream *stream = cmd->stream;
+
+	if (!gadget->sg_supported) {
+		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+		if (!cmd->data_buf)
+			return -ENOMEM;
+
+		sg_copy_to_buffer(se_cmd->t_data_sg,
+				se_cmd->t_data_nents,
+				cmd->data_buf,
+				se_cmd->data_length);
+
+		stream->req_in->buf = cmd->data_buf;
+	} else {
+		stream->req_in->buf = NULL;
+		stream->req_in->num_sgs = se_cmd->t_data_nents;
+		stream->req_in->sg = se_cmd->t_data_sg;
+	}
+
+	stream->req_in->complete = uasp_status_data_cmpl;
+	stream->req_in->length = se_cmd->data_length;
+	stream->req_in->context = cmd;
+
+	cmd->state = UASP_SEND_STATUS;
+	return 0;
+}
+
+static void uasp_prepare_status(struct usbg_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct sense_iu *iu = &cmd->sense_iu;
+	struct uas_stream *stream = cmd->stream;
+
+	cmd->state = UASP_QUEUE_COMMAND;
+	iu->iu_id = IU_ID_STATUS;
+	iu->tag = cpu_to_be16(cmd->tag);
+
+	/*
+	 * iu->status_qual = cpu_to_be16(STATUS QUALIFIER SAM-4. Where R U?);
+	 */
+	iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
+	iu->status = se_cmd->scsi_status;
+	stream->req_status->context = cmd;
+	stream->req_status->length = se_cmd->scsi_sense_length + 16;
+	stream->req_status->buf = iu;
+	stream->req_status->complete = uasp_status_data_cmpl;
+}
+
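+/*
+ * Completion handler that drives the UAS state machine: SEND_DATA and
+ * RECEIVE_DATA queue the data stage, SEND_STATUS queues the sense IU and
+ * QUEUE_COMMAND frees the command and re-arms the command endpoint.
+ */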
+static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+	struct uas_stream *stream = cmd->stream;
+	struct f_uas *fu = cmd->fu;
+	int ret;
+
+	if (req->status < 0)
+		goto cleanup;
+
+	switch (cmd->state) {
+	case UASP_SEND_DATA:
+		ret = uasp_prepare_r_request(cmd);
+		if (ret)
+			goto cleanup;
+		ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+		break;
+
+	case UASP_RECEIVE_DATA:
+		ret = usbg_prepare_w_request(cmd, stream->req_out);
+		if (ret)
+			goto cleanup;
+		ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+		break;
+
+	case UASP_SEND_STATUS:
+		uasp_prepare_status(cmd);
+		ret = usb_ep_queue(fu->ep_status, stream->req_status,
+				GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+		break;
+
+	case UASP_QUEUE_COMMAND:
+		usbg_cleanup_cmd(cmd);
+		usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+		break;
+
+	default:
+		BUG();
+	}
+	return;
+
+cleanup:
+	usbg_cleanup_cmd(cmd);
+}
+
+static int uasp_send_status_response(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct uas_stream *stream = cmd->stream;
+	struct sense_iu *iu = &cmd->sense_iu;
+
+	iu->tag = cpu_to_be16(cmd->tag);
+	stream->req_status->complete = uasp_status_data_cmpl;
+	stream->req_status->context = cmd;
+	cmd->fu = fu;
+	uasp_prepare_status(cmd);
+	return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
+}
+
+static int uasp_send_read_response(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct uas_stream *stream = cmd->stream;
+	struct sense_iu *iu = &cmd->sense_iu;
+	int ret;
+
+	cmd->fu = fu;
+
+	iu->tag = cpu_to_be16(cmd->tag);
+	if (fu->flags & USBG_USE_STREAMS) {
+
+		ret = uasp_prepare_r_request(cmd);
+		if (ret)
+			goto out;
+		ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
+		if (ret) {
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+			kfree(cmd->data_buf);
+			cmd->data_buf = NULL;
+		}
+
+	} else {
+
+		iu->iu_id = IU_ID_READ_READY;
+		iu->tag = cpu_to_be16(cmd->tag);
+
+		stream->req_status->complete = uasp_status_data_cmpl;
+		stream->req_status->context = cmd;
+
+		cmd->state = UASP_SEND_DATA;
+		stream->req_status->buf = iu;
+		stream->req_status->length = sizeof(struct iu);
+
+		ret = usb_ep_queue(fu->ep_status, stream->req_status,
+				GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
+	}
+out:
+	return ret;
+}
+
+static int uasp_send_write_request(struct usbg_cmd *cmd)
+{
+	struct f_uas *fu = cmd->fu;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct uas_stream *stream = cmd->stream;
+	struct sense_iu *iu = &cmd->sense_iu;
+	int ret;
+
+	init_completion(&cmd->write_complete);
+	cmd->fu = fu;
+
+	iu->tag = cpu_to_be16(cmd->tag);
+
+	if (fu->flags & USBG_USE_STREAMS) {
+
+		ret = usbg_prepare_w_request(cmd, stream->req_out);
+		if (ret)
+			goto cleanup;
+		ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d)\n", __func__, __LINE__);
+
+	} else {
+
+		iu->iu_id = IU_ID_WRITE_READY;
+		iu->tag = cpu_to_be16(cmd->tag);
+
+		stream->req_status->complete = uasp_status_data_cmpl;
+		stream->req_status->context = cmd;
+
+		cmd->state = UASP_RECEIVE_DATA;
+		stream->req_status->buf = iu;
+		stream->req_status->length = sizeof(struct iu);
+
+		ret = usb_ep_queue(fu->ep_status, stream->req_status,
+				GFP_ATOMIC);
+		if (ret)
+			pr_err("%s(%d)\n", __func__, __LINE__);
+	}
+
+	wait_for_completion(&cmd->write_complete);
+	transport_generic_process_write(se_cmd);
+cleanup:
+	return ret;
+}
+
+static int usbg_submit_command(struct f_uas *, void *, unsigned int);
+
+static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_uas *fu = req->context;
+	int ret;
+
+	if (req->status < 0)
+		return;
+
+	ret = usbg_submit_command(fu, req->buf, req->actual);
+	/*
+	 * Once we tune for performance, enqueue the command req here again so
+	 * we can receive a second command while we are processing this one.
+	 * Pay attention to properly syncing the STATUS endpoint with DATA IN
+	 * and OUT so you don't break HS.
+	 */
+	if (!ret)
+		return;
+	usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+}
+
+static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
+{
+	stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
+	if (!stream->req_in)
+		goto out;
+
+	stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
+	if (!stream->req_out)
+		goto err_out;
+
+	stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
+	if (!stream->req_status)
+		goto err_sts;
+
+	return 0;
+err_sts:
+	usb_ep_free_request(fu->ep_status, stream->req_status);
+	stream->req_status = NULL;
+err_out:
+	usb_ep_free_request(fu->ep_out, stream->req_out);
+	stream->req_out = NULL;
+out:
+	return -ENOMEM;
+}
+
+static int uasp_alloc_cmd(struct f_uas *fu)
+{
+	fu->cmd.req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
+	if (!fu->cmd.req)
+		goto err;
+
+	fu->cmd.buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
+	if (!fu->cmd.buf)
+		goto err_buf;
+
+	fu->cmd.req->complete = uasp_cmd_complete;
+	fu->cmd.req->buf = fu->cmd.buf;
+	fu->cmd.req->length = fu->ep_cmd->maxpacket;
+	fu->cmd.req->context = fu;
+	return 0;
+
+err_buf:
+	usb_ep_free_request(fu->ep_cmd, fu->cmd.req);
+err:
+	return -ENOMEM;
+}
+
+static void uasp_setup_stream_res(struct f_uas *fu, int max_streams)
+{
+	int i;
+
+	for (i = 0; i < max_streams; i++) {
+		struct uas_stream *s = &fu->stream[i];
+
+		s->req_in->stream_id = i + 1;
+		s->req_out->stream_id = i + 1;
+		s->req_status->stream_id = i + 1;
+	}
+}
+
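+/*
+ * Allocate one request triple (IN, OUT, status) per stream: SuperSpeed uses
+ * all UASP_SS_EP_COMP_NUM_STREAMS streams, other speeds only stream 0.
+ */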
+static int uasp_prepare_reqs(struct f_uas *fu)
+{
+	int ret;
+	int i;
+	int max_streams;
+
+	if (fu->flags & USBG_USE_STREAMS)
+		max_streams = UASP_SS_EP_COMP_NUM_STREAMS;
+	else
+		max_streams = 1;
+
+	for (i = 0; i < max_streams; i++) {
+		ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
+		if (ret)
+			goto err_cleanup;
+	}
+
+	ret = uasp_alloc_cmd(fu);
+	if (ret)
+		goto err_free_stream;
+	uasp_setup_stream_res(fu, max_streams);
+
+	ret = usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
+	if (ret)
+		goto err_free_stream;
+
+	return 0;
+
+err_free_stream:
+	uasp_free_cmdreq(fu);
+
+err_cleanup:
+	if (i) {
+		do {
+			uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
+			i--;
+		} while (i);
+	}
+	pr_err("UASP: endpoint setup failed\n");
+	return ret;
+}
+
+static void uasp_set_alt(struct f_uas *fu)
+{
+	struct usb_function *f = &fu->function;
+	struct usb_gadget *gadget = f->config->cdev->gadget;
+	int ret;
+
+	fu->flags = USBG_IS_UAS;
+
+	if (gadget->speed == USB_SPEED_SUPER)
+		fu->flags |= USBG_USE_STREAMS;
+
+	config_ep_by_speed(gadget, f, fu->ep_in);
+	ret = usb_ep_enable(fu->ep_in);
+	if (ret)
+		goto err_b_in;
+
+	config_ep_by_speed(gadget, f, fu->ep_out);
+	ret = usb_ep_enable(fu->ep_out);
+	if (ret)
+		goto err_b_out;
+
+	config_ep_by_speed(gadget, f, fu->ep_cmd);
+	ret = usb_ep_enable(fu->ep_cmd);
+	if (ret)
+		goto err_cmd;
+	config_ep_by_speed(gadget, f, fu->ep_status);
+	ret = usb_ep_enable(fu->ep_status);
+	if (ret)
+		goto err_status;
+
+	ret = uasp_prepare_reqs(fu);
+	if (ret)
+		goto err_wq;
+	fu->flags |= USBG_ENABLED;
+
+	pr_info("Using the UAS protocol\n");
+	return;
+err_wq:
+	usb_ep_disable(fu->ep_status);
+err_status:
+	usb_ep_disable(fu->ep_cmd);
+err_cmd:
+	usb_ep_disable(fu->ep_out);
+err_b_out:
+	usb_ep_disable(fu->ep_in);
+err_b_in:
+	fu->flags = 0;
+}
+
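+/*
+ * Derive the expected DMA data direction from the SCSI opcode so the command
+ * can be submitted to the target core with the correct direction.
+ */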
+static int get_cmd_dir(const unsigned char *cdb)
+{
+	int ret;
+
+	switch (cdb[0]) {
+	case READ_6:
+	case READ_10:
+	case READ_12:
+	case READ_16:
+	case INQUIRY:
+	case MODE_SENSE:
+	case MODE_SENSE_10:
+	case SERVICE_ACTION_IN:
+	case MAINTENANCE_IN:
+	case PERSISTENT_RESERVE_IN:
+	case SECURITY_PROTOCOL_IN:
+	case ACCESS_CONTROL_IN:
+	case REPORT_LUNS:
+	case READ_BLOCK_LIMITS:
+	case READ_POSITION:
+	case READ_CAPACITY:
+	case READ_TOC:
+	case READ_FORMAT_CAPACITIES:
+	case REQUEST_SENSE:
+		ret = DMA_FROM_DEVICE;
+		break;
+
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_12:
+	case WRITE_16:
+	case MODE_SELECT:
+	case MODE_SELECT_10:
+	case WRITE_VERIFY:
+	case WRITE_VERIFY_12:
+	case PERSISTENT_RESERVE_OUT:
+	case MAINTENANCE_OUT:
+	case SECURITY_PROTOCOL_OUT:
+	case ACCESS_CONTROL_OUT:
+		ret = DMA_TO_DEVICE;
+		break;
+	case ALLOW_MEDIUM_REMOVAL:
+	case TEST_UNIT_READY:
+	case SYNCHRONIZE_CACHE:
+	case START_STOP:
+	case ERASE:
+	case REZERO_UNIT:
+	case SEEK_10:
+	case SPACE:
+	case VERIFY:
+	case WRITE_FILEMARKS:
+		ret = DMA_NONE;
+		break;
+	default:
+		pr_warn("target: Unknown data direction for SCSI Opcode "
+				"0x%02x\n", cdb[0]);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
+{
+	struct usbg_cmd *cmd = req->context;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	if (req->status < 0) {
+		pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
+		goto cleanup;
+	}
+
+	if (req->num_sgs == 0) {
+		sg_copy_from_buffer(se_cmd->t_data_sg,
+				se_cmd->t_data_nents,
+				cmd->data_buf,
+				se_cmd->data_length);
+	}
+
+	complete(&cmd->write_complete);
+	return;
+
+cleanup:
+	usbg_cleanup_cmd(cmd);
+}
+
+static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct f_uas *fu = cmd->fu;
+	struct usb_gadget *gadget = fuas_to_gadget(fu);
+
+	if (!gadget->sg_supported) {
+		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
+		if (!cmd->data_buf)
+			return -ENOMEM;
+
+		req->buf = cmd->data_buf;
+	} else {
+		req->buf = NULL;
+		req->num_sgs = se_cmd->t_data_nents;
+		req->sg = se_cmd->t_data_sg;
+	}
+
+	req->complete = usbg_data_write_cmpl;
+	req->length = se_cmd->data_length;
+	req->context = cmd;
+	return 0;
+}
+
+static int usbg_send_status_response(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	struct f_uas *fu = cmd->fu;
+
+	if (fu->flags & USBG_IS_BOT)
+		return bot_send_status_response(cmd);
+	else
+		return uasp_send_status_response(cmd);
+}
+
+static int usbg_send_write_request(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	struct f_uas *fu = cmd->fu;
+
+	if (fu->flags & USBG_IS_BOT)
+		return bot_send_write_request(cmd);
+	else
+		return uasp_send_write_request(cmd);
+}
+
+static int usbg_send_read_response(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	struct f_uas *fu = cmd->fu;
+
+	if (fu->flags & USBG_IS_BOT)
+		return bot_send_read_response(cmd);
+	else
+		return uasp_send_read_response(cmd);
+}
+
+static void usbg_cmd_work(struct work_struct *work)
+{
+	struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
+	struct se_cmd *se_cmd;
+	struct tcm_usbg_nexus *tv_nexus;
+	struct usbg_tpg *tpg;
+	int dir;
+
+	se_cmd = &cmd->se_cmd;
+	tpg = cmd->fu->tpg;
+	tv_nexus = tpg->tpg_nexus;
+	dir = get_cmd_dir(cmd->cmd_buf);
+	if (dir < 0) {
+		transport_init_se_cmd(se_cmd,
+				tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+				tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+				cmd->prio_attr, cmd->sense_iu.sense);
+
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+		usbg_cleanup_cmd(cmd);
+		return;
+	}
+
+	target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+			cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
+			0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE);
+}
+
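+/*
+ * Parse a UAS command IU: copy the CDB, pick the stream matching the tag
+ * (stream 0 when streams are not used) and defer submission to the target
+ * core to the tpg workqueue.
+ */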
+static int usbg_submit_command(struct f_uas *fu,
+		void *cmdbuf, unsigned int len)
+{
+	struct command_iu *cmd_iu = cmdbuf;
+	struct usbg_cmd *cmd;
+	struct usbg_tpg *tpg;
+	struct se_cmd *se_cmd;
+	struct tcm_usbg_nexus *tv_nexus;
+	u32 cmd_len;
+	int ret;
+
+	if (cmd_iu->iu_id != IU_ID_COMMAND) {
+		pr_err("Unsupported type %d\n", cmd_iu->iu_id);
+		return -EINVAL;
+	}
+
+	cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->fu = fu;
+
+	/* XXX until I figure out why I can't free it on complete */
+	kref_init(&cmd->ref);
+	kref_get(&cmd->ref);
+
+	tpg = fu->tpg;
+	cmd_len = (cmd_iu->len & ~0x3) + 16;
+	if (cmd_len > USBG_MAX_CMD)
+		goto err;
+
+	memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
+
+	cmd->tag = be16_to_cpup(&cmd_iu->tag);
+	if (fu->flags & USBG_USE_STREAMS) {
+		if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
+			goto err;
+		if (!cmd->tag)
+			cmd->stream = &fu->stream[0];
+		else
+			cmd->stream = &fu->stream[cmd->tag - 1];
+	} else {
+		cmd->stream = &fu->stream[0];
+	}
+
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		pr_err("Missing nexus, ignoring command\n");
+		goto err;
+	}
+
+	switch (cmd_iu->prio_attr & 0x7) {
+	case UAS_HEAD_TAG:
+		cmd->prio_attr = MSG_HEAD_TAG;
+		break;
+	case UAS_ORDERED_TAG:
+		cmd->prio_attr = MSG_ORDERED_TAG;
+		break;
+	case UAS_ACA:
+		cmd->prio_attr = MSG_ACA_TAG;
+		break;
+	default:
+		pr_debug_once("Unsupported prio_attr: %02x.\n",
+				cmd_iu->prio_attr);
+	case UAS_SIMPLE_TAG:
+		cmd->prio_attr = MSG_SIMPLE_TAG;
+		break;
+	}
+
+	se_cmd = &cmd->se_cmd;
+	cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
+
+	INIT_WORK(&cmd->work, usbg_cmd_work);
+	ret = queue_work(tpg->workqueue, &cmd->work);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(cmd);
+	return -EINVAL;
+}
+
+static void bot_cmd_work(struct work_struct *work)
+{
+	struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
+	struct se_cmd *se_cmd;
+	struct tcm_usbg_nexus *tv_nexus;
+	struct usbg_tpg *tpg;
+	int dir;
+
+	se_cmd = &cmd->se_cmd;
+	tpg = cmd->fu->tpg;
+	tv_nexus = tpg->tpg_nexus;
+	dir = get_cmd_dir(cmd->cmd_buf);
+	if (dir < 0) {
+		transport_init_se_cmd(se_cmd,
+				tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
+				tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
+				cmd->prio_attr, cmd->sense_iu.sense);
+
+		transport_send_check_condition_and_sense(se_cmd,
+				TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+		usbg_cleanup_cmd(cmd);
+		return;
+	}
+
+	target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+			cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
+			cmd->data_len, cmd->prio_attr, dir, 0);
+}
+
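+/*
+ * Parse and validate a 31-byte CBW (signature, CDB length), then defer
+ * submission to the target core to the tpg workqueue.
+ */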
+static int bot_submit_command(struct f_uas *fu,
+		void *cmdbuf, unsigned int len)
+{
+	struct bulk_cb_wrap *cbw = cmdbuf;
+	struct usbg_cmd *cmd;
+	struct usbg_tpg *tpg;
+	struct se_cmd *se_cmd;
+	struct tcm_usbg_nexus *tv_nexus;
+	u32 cmd_len;
+	int ret;
+
+	if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
+		pr_err("Wrong signature on CBW\n");
+		return -EINVAL;
+	}
+	if (len != 31) {
+		pr_err("Wrong length for CBW\n");
+		return -EINVAL;
+	}
+
+	cmd_len = cbw->Length;
+	if (cmd_len < 1 || cmd_len > 16)
+		return -EINVAL;
+
+	cmd = kzalloc(sizeof *cmd, GFP_ATOMIC);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->fu = fu;
+
+	/* XXX until I figure out why I can't free it on complete */
+	kref_init(&cmd->ref);
+	kref_get(&cmd->ref);
+
+	tpg = fu->tpg;
+
+	memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
+
+	cmd->bot_tag = cbw->Tag;
+
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		pr_err("Missing nexus, ignoring command\n");
+		goto err;
+	}
+
+	cmd->prio_attr = MSG_SIMPLE_TAG;
+	se_cmd = &cmd->se_cmd;
+	cmd->unpacked_lun = cbw->Lun;
+	cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
+	cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
+
+	INIT_WORK(&cmd->work, bot_cmd_work);
+	ret = queue_work(tpg->workqueue, &cmd->work);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+err:
+	kfree(cmd);
+	return -EINVAL;
+}
+
+/* Start fabric.c code */
+
+static int usbg_check_true(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int usbg_check_false(struct se_portal_group *se_tpg)
+{
+	return 0;
+}
+
+static char *usbg_get_fabric_name(void)
+{
+	return "usb_gadget";
+}
+
+static u8 usbg_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+	u8 proto_id;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+	default:
+		proto_id = sas_get_fabric_proto_ident(se_tpg);
+		break;
+	}
+
+	return proto_id;
+}
+
+static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+
+	return &tport->tport_name[0];
+}
+
+static u16 usbg_get_tag(struct se_portal_group *se_tpg)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	return tpg->tport_tpgt;
+}
+
+static u32 usbg_get_default_depth(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static u32 usbg_get_pr_transport_id(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code,
+	unsigned char *buf)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+	int ret = 0;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+	default:
+		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+					format_code, buf);
+		break;
+	}
+
+	return ret;
+}
+
+static u32 usbg_get_pr_transport_id_len(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl,
+	struct t10_pr_registration *pr_reg,
+	int *format_code)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+	int ret = 0;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+	default:
+		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+					format_code);
+		break;
+	}
+
+	return ret;
+}
+
+static char *usbg_parse_pr_out_transport_id(
+	struct se_portal_group *se_tpg,
+	const char *buf,
+	u32 *out_tid_len,
+	char **port_nexus_ptr)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+	struct usbg_tport *tport = tpg->tport;
+	char *tid = NULL;
+
+	switch (tport->tport_proto_id) {
+	case SCSI_PROTOCOL_SAS:
+	default:
+		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+					port_nexus_ptr);
+	}
+
+	return tid;
+}
+
+static struct se_node_acl *usbg_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+	struct usbg_nacl *nacl;
+
+	nacl = kzalloc(sizeof(struct usbg_nacl), GFP_KERNEL);
+	if (!nacl) {
+		printk(KERN_ERR "Unable to allocate struct usbg_nacl\n");
+		return NULL;
+	}
+
+	return &nacl->se_node_acl;
+}
+
+static void usbg_release_fabric_acl(
+	struct se_portal_group *se_tpg,
+	struct se_node_acl *se_nacl)
+{
+	struct usbg_nacl *nacl = container_of(se_nacl,
+			struct usbg_nacl, se_node_acl);
+	kfree(nacl);
+}
+
+static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+	return 1;
+}
+
+static int usbg_new_cmd(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	int ret;
+
+	ret = target_setup_cmd_from_cdb(se_cmd, cmd->cmd_buf);
+	if (ret)
+		return ret;
+
+	return transport_generic_map_mem_to_cmd(se_cmd, NULL, 0, NULL, 0);
+}
+
+static void usbg_cmd_release(struct kref *ref)
+{
+	struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
+			ref);
+
+	transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+static void usbg_release_cmd(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	kfree(cmd->data_buf);
+	kfree(cmd);
+	return;
+}
+
+static int usbg_shutdown_session(struct se_session *se_sess)
+{
+	return 0;
+}
+
+static void usbg_close_session(struct se_session *se_sess)
+{
+	return;
+}
+
+static u32 usbg_sess_get_index(struct se_session *se_sess)
+{
+	return 0;
+}
+
+/*
+ * XXX Error recovery: return != 0 if we expect writes. Dunno when that could be
+ */
+static int usbg_write_pending_status(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static void usbg_set_default_node_attrs(struct se_node_acl *nacl)
+{
+	return;
+}
+
+static u32 usbg_get_task_tag(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+	struct f_uas *fu = cmd->fu;
+
+	if (fu->flags & USBG_IS_BOT)
+		return le32_to_cpu(cmd->bot_tag);
+	else
+		return cmd->tag;
+}
+
+static int usbg_get_cmd_state(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static int usbg_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+	return 0;
+}
+
+static u16 usbg_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)
+{
+	return 0;
+}
+
+static u16 usbg_get_fabric_sense_len(void)
+{
+	return 0;
+}
+
+static const char *usbg_check_wwn(const char *name)
+{
+	const char *n;
+	unsigned int len;
+
+	n = strstr(name, "naa.");
+	if (!n)
+		return NULL;
+	n += 4;
+	len = strlen(n);
+	if (len == 0 || len > USBG_NAMELEN - 1)
+		return NULL;
+	return n;
+}
+
+static struct se_node_acl *usbg_make_nodeacl(
+	struct se_portal_group *se_tpg,
+	struct config_group *group,
+	const char *name)
+{
+	struct se_node_acl *se_nacl, *se_nacl_new;
+	struct usbg_nacl *nacl;
+	u64 wwpn = 0;
+	u32 nexus_depth;
+	const char *wnn_name;
+
+	wnn_name = usbg_check_wwn(name);
+	if (!wnn_name)
+		return ERR_PTR(-EINVAL);
+	se_nacl_new = usbg_alloc_fabric_acl(se_tpg);
+	if (!(se_nacl_new))
+		return ERR_PTR(-ENOMEM);
+
+	nexus_depth = 1;
+	/*
+	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+	 */
+	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+				name, nexus_depth);
+	if (IS_ERR(se_nacl)) {
+		usbg_release_fabric_acl(se_tpg, se_nacl_new);
+		return se_nacl;
+	}
+	/*
+	 * Locate our struct usbg_nacl and set the FC Nport WWPN
+	 */
+	nacl = container_of(se_nacl, struct usbg_nacl, se_node_acl);
+	nacl->iport_wwpn = wwpn;
+	snprintf(nacl->iport_name, sizeof(nacl->iport_name), "%s", name);
+	return se_nacl;
+}
+
+static void usbg_drop_nodeacl(struct se_node_acl *se_acl)
+{
+	struct usbg_nacl *nacl = container_of(se_acl,
+				struct usbg_nacl, se_node_acl);
+	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
+	kfree(nacl);
+}
+
+struct usbg_tpg *the_only_tpg_I_currently_have;
+
+static struct se_portal_group *usbg_make_tpg(
+	struct se_wwn *wwn,
+	struct config_group *group,
+	const char *name)
+{
+	struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
+			tport_wwn);
+	struct usbg_tpg *tpg;
+	unsigned long tpgt;
+	int ret;
+
+	if (strstr(name, "tpgt_") != name)
+		return ERR_PTR(-EINVAL);
+	if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
+		return ERR_PTR(-EINVAL);
+	if (the_only_tpg_I_currently_have) {
+		pr_err("The gadget framework can't handle multiple gadgets,\n");
+		pr_err("so only one TPG can exist at a time.\n");
+		return ERR_PTR(-EBUSY);
+	}
+
+	tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
+	if (!tpg) {
+		printk(KERN_ERR "Unable to allocate struct usbg_tpg");
+		return ERR_PTR(-ENOMEM);
+	}
+	mutex_init(&tpg->tpg_mutex);
+	atomic_set(&tpg->tpg_port_count, 0);
+	tpg->workqueue = alloc_workqueue("tcm_usb_gadget", 0, 1);
+	if (!tpg->workqueue) {
+		kfree(tpg);
+		return NULL;
+	}
+
+	tpg->tport = tport;
+	tpg->tport_tpgt = tpgt;
+
+	ret = core_tpg_register(&usbg_fabric_configfs->tf_ops, wwn,
+				&tpg->se_tpg, tpg,
+				TRANSPORT_TPG_TYPE_NORMAL);
+	if (ret < 0) {
+		destroy_workqueue(tpg->workqueue);
+		kfree(tpg);
+		return NULL;
+	}
+	the_only_tpg_I_currently_have = tpg;
+	return &tpg->se_tpg;
+}
+
+static void usbg_drop_tpg(struct se_portal_group *se_tpg)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg,
+				struct usbg_tpg, se_tpg);
+
+	core_tpg_deregister(se_tpg);
+	destroy_workqueue(tpg->workqueue);
+	kfree(tpg);
+	the_only_tpg_I_currently_have = NULL;
+}
+
+static struct se_wwn *usbg_make_tport(
+	struct target_fabric_configfs *tf,
+	struct config_group *group,
+	const char *name)
+{
+	struct usbg_tport *tport;
+	const char *wnn_name;
+	u64 wwpn = 0;
+
+	wnn_name = usbg_check_wwn(name);
+	if (!wnn_name)
+		return ERR_PTR(-EINVAL);
+
+	tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
+	if (!(tport)) {
+		printk(KERN_ERR "Unable to allocate struct usbg_tport");
+		return ERR_PTR(-ENOMEM);
+	}
+	tport->tport_wwpn = wwpn;
+	snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
+	return &tport->tport_wwn;
+}
+
+static void usbg_drop_tport(struct se_wwn *wwn)
+{
+	struct usbg_tport *tport = container_of(wwn,
+				struct usbg_tport, tport_wwn);
+	kfree(tport);
+}
+
+/*
+ * If somebody feels like dropping the version property, go ahead.
+ */
+static ssize_t usbg_wwn_show_attr_version(
+	struct target_fabric_configfs *tf,
+	char *page)
+{
+	return sprintf(page, "usb-gadget fabric module\n");
+}
+TF_WWN_ATTR_RO(usbg, version);
+
+static struct configfs_attribute *usbg_wwn_attrs[] = {
+	&usbg_wwn_version.attr,
+	NULL,
+};
+
+static ssize_t tcm_usbg_tpg_show_enable(
+		struct se_portal_group *se_tpg,
+		char *page)
+{
+	struct usbg_tpg  *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", tpg->gadget_connect);
+}
+
+static int usbg_attach(struct usbg_tpg *);
+static void usbg_detach(struct usbg_tpg *);
+
+static ssize_t tcm_usbg_tpg_store_enable(
+		struct se_portal_group *se_tpg,
+		const char *page,
+		size_t count)
+{
+	struct usbg_tpg  *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+	unsigned long op;
+	ssize_t ret;
+
+	ret = kstrtoul(page, 0, &op);
+	if (ret < 0)
+		return -EINVAL;
+	if (op > 1)
+		return -EINVAL;
+
+	if (op && tpg->gadget_connect)
+		goto out;
+	if (!op && !tpg->gadget_connect)
+		goto out;
+
+	if (op) {
+		ret = usbg_attach(tpg);
+		if (ret)
+			goto out;
+	} else {
+		usbg_detach(tpg);
+	}
+	tpg->gadget_connect = op;
+out:
+	return count;
+}
+TF_TPG_BASE_ATTR(tcm_usbg, enable, S_IRUGO | S_IWUSR);
+
+static ssize_t tcm_usbg_tpg_show_nexus(
+		struct se_portal_group *se_tpg,
+		char *page)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+	struct tcm_usbg_nexus *tv_nexus;
+	ssize_t ret;
+
+	mutex_lock(&tpg->tpg_mutex);
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus) {
+		ret = -ENODEV;
+		goto out;
+	}
+	ret = snprintf(page, PAGE_SIZE, "%s\n",
+			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+out:
+	mutex_unlock(&tpg->tpg_mutex);
+	return ret;
+}
+
+static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
+{
+	struct se_portal_group *se_tpg;
+	struct tcm_usbg_nexus *tv_nexus;
+	int ret;
+
+	mutex_lock(&tpg->tpg_mutex);
+	if (tpg->tpg_nexus) {
+		ret = -EEXIST;
+		pr_debug("tpg->tpg_nexus already exists\n");
+		goto err_unlock;
+	}
+	se_tpg = &tpg->se_tpg;
+
+	ret = -ENOMEM;
+	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
+	if (!tv_nexus) {
+		pr_err("Unable to allocate struct tcm_usbg_nexus\n");
+		goto err_unlock;
+	}
+	tv_nexus->tvn_se_sess = transport_init_session();
+	if (IS_ERR(tv_nexus->tvn_se_sess))
+		goto err_free;
+
+	/*
+	 * Since we are running in 'demo mode' this call will generate a
+	 * struct se_node_acl for the usb-gadget struct se_portal_group with
+	 * the SCSI Initiator port name of the passed configfs group 'name'.
+	 */
+	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+			se_tpg, name);
+	if (!tv_nexus->tvn_se_sess->se_node_acl) {
+		pr_debug("core_tpg_check_initiator_node_acl() failed"
+				" for %s\n", name);
+		goto err_session;
+	}
+	/*
+	 * Now register the usb-gadget virtual I_T Nexus as active with the
+	 * call to __transport_register_session()
+	 */
+	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+			tv_nexus->tvn_se_sess, tv_nexus);
+	tpg->tpg_nexus = tv_nexus;
+	mutex_unlock(&tpg->tpg_mutex);
+	return 0;
+
+err_session:
+	transport_free_session(tv_nexus->tvn_se_sess);
+err_free:
+	kfree(tv_nexus);
+err_unlock:
+	mutex_unlock(&tpg->tpg_mutex);
+	return ret;
+}
+
+static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
+{
+	struct se_session *se_sess;
+	struct tcm_usbg_nexus *tv_nexus;
+	int ret = -ENODEV;
+
+	mutex_lock(&tpg->tpg_mutex);
+	tv_nexus = tpg->tpg_nexus;
+	if (!tv_nexus)
+		goto out;
+
+	se_sess = tv_nexus->tvn_se_sess;
+	if (!se_sess)
+		goto out;
+
+	if (atomic_read(&tpg->tpg_port_count)) {
+		ret = -EPERM;
+		pr_err("Unable to remove Host I_T Nexus with"
+				" active TPG port count: %d\n",
+				atomic_read(&tpg->tpg_port_count));
+		goto out;
+	}
+
+	pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
+			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+	/*
+	 * Release the SCSI I_T Nexus to the emulated usb-gadget Target Port
+	 */
+	transport_deregister_session(tv_nexus->tvn_se_sess);
+	tpg->tpg_nexus = NULL;
+
+	kfree(tv_nexus);
+	ret = 0;
+out:
+	mutex_unlock(&tpg->tpg_mutex);
+	return ret;
+}
+
+static ssize_t tcm_usbg_tpg_store_nexus(
+		struct se_portal_group *se_tpg,
+		const char *page,
+		size_t count)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+	unsigned char i_port[USBG_NAMELEN], *ptr;
+	int ret;
+
+	if (!strncmp(page, "NULL", 4)) {
+		ret = tcm_usbg_drop_nexus(tpg);
+		return (!ret) ? count : ret;
+	}
+	if (strlen(page) > USBG_NAMELEN) {
+		pr_err("Emulated NAA SAS Address: %s, exceeds"
+				" max: %d\n", page, USBG_NAMELEN);
+		return -EINVAL;
+	}
+	snprintf(i_port, USBG_NAMELEN, "%s", page);
+
+	ptr = strstr(i_port, "naa.");
+	if (!ptr) {
+		pr_err("Missing 'naa.' prefix\n");
+		return -EINVAL;
+	}
+
+	if (i_port[strlen(i_port) - 1] == '\n')
+		i_port[strlen(i_port) - 1] = '\0';
+
+	ret = tcm_usbg_make_nexus(tpg, &i_port[4]);
+	if (ret < 0)
+		return ret;
+	return count;
+}
+TF_TPG_BASE_ATTR(tcm_usbg, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *usbg_base_attrs[] = {
+	&tcm_usbg_tpg_enable.attr,
+	&tcm_usbg_tpg_nexus.attr,
+	NULL,
+};
+
+static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+
+	atomic_inc(&tpg->tpg_port_count);
+	smp_mb__after_atomic_inc();
+	return 0;
+}
+
+static void usbg_port_unlink(struct se_portal_group *se_tpg,
+		struct se_lun *se_lun)
+{
+	struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
+
+	atomic_dec(&tpg->tpg_port_count);
+	smp_mb__after_atomic_dec();
+}
+
+static int usbg_check_stop_free(struct se_cmd *se_cmd)
+{
+	struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
+			se_cmd);
+
+	kref_put(&cmd->ref, usbg_cmd_release);
+	return 1;
+}
+
+static struct target_core_fabric_ops usbg_ops = {
+	.get_fabric_name		= usbg_get_fabric_name,
+	.get_fabric_proto_ident		= usbg_get_fabric_proto_ident,
+	.tpg_get_wwn			= usbg_get_fabric_wwn,
+	.tpg_get_tag			= usbg_get_tag,
+	.tpg_get_default_depth		= usbg_get_default_depth,
+	.tpg_get_pr_transport_id	= usbg_get_pr_transport_id,
+	.tpg_get_pr_transport_id_len	= usbg_get_pr_transport_id_len,
+	.tpg_parse_pr_out_transport_id	= usbg_parse_pr_out_transport_id,
+	.tpg_check_demo_mode		= usbg_check_true,
+	.tpg_check_demo_mode_cache	= usbg_check_false,
+	.tpg_check_demo_mode_write_protect = usbg_check_false,
+	.tpg_check_prod_mode_write_protect = usbg_check_false,
+	.tpg_alloc_fabric_acl		= usbg_alloc_fabric_acl,
+	.tpg_release_fabric_acl		= usbg_release_fabric_acl,
+	.tpg_get_inst_index		= usbg_tpg_get_inst_index,
+	.new_cmd_map			= usbg_new_cmd,
+	.release_cmd			= usbg_release_cmd,
+	.shutdown_session		= usbg_shutdown_session,
+	.close_session			= usbg_close_session,
+	.sess_get_index			= usbg_sess_get_index,
+	.sess_get_initiator_sid		= NULL,
+	.write_pending			= usbg_send_write_request,
+	.write_pending_status		= usbg_write_pending_status,
+	.set_default_node_attributes	= usbg_set_default_node_attrs,
+	.get_task_tag			= usbg_get_task_tag,
+	.get_cmd_state			= usbg_get_cmd_state,
+	.queue_data_in			= usbg_send_read_response,
+	.queue_status			= usbg_send_status_response,
+	.queue_tm_rsp			= usbg_queue_tm_rsp,
+	.get_fabric_sense_len		= usbg_get_fabric_sense_len,
+	.set_fabric_sense_len		= usbg_set_fabric_sense_len,
+	.check_stop_free		= usbg_check_stop_free,
+
+	.fabric_make_wwn		= usbg_make_tport,
+	.fabric_drop_wwn		= usbg_drop_tport,
+	.fabric_make_tpg		= usbg_make_tpg,
+	.fabric_drop_tpg		= usbg_drop_tpg,
+	.fabric_post_link		= usbg_port_link,
+	.fabric_pre_unlink		= usbg_port_unlink,
+	.fabric_make_np			= NULL,
+	.fabric_drop_np			= NULL,
+	.fabric_make_nodeacl		= usbg_make_nodeacl,
+	.fabric_drop_nodeacl		= usbg_drop_nodeacl,
+};
+
+static int usbg_register_configfs(void)
+{
+	struct target_fabric_configfs *fabric;
+	int ret;
+
+	fabric = target_fabric_configfs_init(THIS_MODULE, "usb_gadget");
+	if (IS_ERR(fabric)) {
+		printk(KERN_ERR "target_fabric_configfs_init() failed\n");
+		return PTR_ERR(fabric);
+	}
+
+	fabric->tf_ops = usbg_ops;
+	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
+	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+	ret = target_fabric_configfs_register(fabric);
+	if (ret < 0) {
+		printk(KERN_ERR "target_fabric_configfs_register() failed"
+				" for usb-gadget\n");
+		return ret;
+	}
+	usbg_fabric_configfs = fabric;
+	return 0;
+}
+
+static void usbg_deregister_configfs(void)
+{
+	if (!(usbg_fabric_configfs))
+		return;
+
+	target_fabric_configfs_deregister(usbg_fabric_configfs);
+	usbg_fabric_configfs = NULL;
+}
+
+/* Start gadget.c code */
+
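+/*
+ * Interface and endpoint descriptors: alt setting 0 is plain Bulk-Only
+ * Transport with two bulk endpoints, alt setting 1 is UAS with four pipes
+ * (command, status, data-in, data-out) tagged by pipe usage descriptors.
+ */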
+static struct usb_interface_descriptor bot_intf_desc = {
+	.bLength =              sizeof(bot_intf_desc),
+	.bDescriptorType =      USB_DT_INTERFACE,
+	.bNumEndpoints =        2,
+	.bAlternateSetting =	USB_G_ALT_INT_BBB,
+	.bInterfaceClass =      USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass =   USB_SC_SCSI,
+	.bInterfaceProtocol =   USB_PR_BULK,
+	.iInterface =           USB_G_STR_INT_BBB,
+};
+
+static struct usb_interface_descriptor uasp_intf_desc = {
+	.bLength =		sizeof(uasp_intf_desc),
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	4,
+	.bAlternateSetting =	USB_G_ALT_INT_UAS,
+	.bInterfaceClass =	USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass =	USB_SC_SCSI,
+	.bInterfaceProtocol =	USB_PR_UAS,
+	.iInterface =		USB_G_STR_INT_UAS,
+};
+
+static struct usb_endpoint_descriptor uasp_bi_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
+	.bLength =		sizeof(uasp_bi_pipe_desc),
+	.bDescriptorType =	USB_DT_PIPE_USAGE,
+	.bPipeID =		DATA_IN_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
+	.bLength =		sizeof(uasp_bi_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst =		0,
+	.bmAttributes =		UASP_SS_EP_COMP_LOG_STREAMS,
+	.wBytesPerInterval =	0,
+};
+
+static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
+	.bLength =		sizeof(bot_bi_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bMaxBurst =		0,
+};
+
+static struct usb_endpoint_descriptor uasp_bo_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
+	.bLength =		sizeof(uasp_bo_pipe_desc),
+	.bDescriptorType =	USB_DT_PIPE_USAGE,
+	.bPipeID =		DATA_OUT_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(0x400),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
+	.bLength =		sizeof(uasp_bo_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bmAttributes =		UASP_SS_EP_COMP_LOG_STREAMS,
+};
+
+static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
+	.bLength =		sizeof(bot_bo_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_endpoint_descriptor uasp_status_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_status_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
+	.bLength =		sizeof(uasp_status_pipe_desc),
+	.bDescriptorType =	USB_DT_PIPE_USAGE,
+	.bPipeID =		STATUS_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_status_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
+	.bLength =		sizeof(uasp_status_in_ep_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+	.bmAttributes =		UASP_SS_EP_COMP_LOG_STREAMS,
+};
+
+static struct usb_endpoint_descriptor uasp_cmd_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
+	.bLength =		sizeof(uasp_cmd_pipe_desc),
+	.bDescriptorType =	USB_DT_PIPE_USAGE,
+	.bPipeID =		CMD_PIPE_ID,
+};
+
+static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
+	.bLength =		sizeof(uasp_cmd_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_descriptor_header *uasp_fs_function_desc[] = {
+	(struct usb_descriptor_header *) &bot_intf_desc,
+	(struct usb_descriptor_header *) &uasp_fs_bi_desc,
+	(struct usb_descriptor_header *) &uasp_fs_bo_desc,
+
+	(struct usb_descriptor_header *) &uasp_intf_desc,
+	(struct usb_descriptor_header *) &uasp_fs_bi_desc,
+	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_fs_bo_desc,
+	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_fs_status_desc,
+	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_fs_cmd_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
+};
+
+static struct usb_descriptor_header *uasp_hs_function_desc[] = {
+	(struct usb_descriptor_header *) &bot_intf_desc,
+	(struct usb_descriptor_header *) &uasp_bi_desc,
+	(struct usb_descriptor_header *) &uasp_bo_desc,
+
+	(struct usb_descriptor_header *) &uasp_intf_desc,
+	(struct usb_descriptor_header *) &uasp_bi_desc,
+	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_bo_desc,
+	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_status_desc,
+	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *uasp_ss_function_desc[] = {
+	(struct usb_descriptor_header *) &bot_intf_desc,
+	(struct usb_descriptor_header *) &uasp_ss_bi_desc,
+	(struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
+	(struct usb_descriptor_header *) &uasp_ss_bo_desc,
+	(struct usb_descriptor_header *) &bot_bo_ep_comp_desc,
+
+	(struct usb_descriptor_header *) &uasp_intf_desc,
+	(struct usb_descriptor_header *) &uasp_ss_bi_desc,
+	(struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
+	(struct usb_descriptor_header *) &uasp_bi_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_ss_bo_desc,
+	(struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
+	(struct usb_descriptor_header *) &uasp_bo_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_ss_status_desc,
+	(struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
+	(struct usb_descriptor_header *) &uasp_status_pipe_desc,
+	(struct usb_descriptor_header *) &uasp_ss_cmd_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_comp_desc,
+	(struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
+	NULL,
+};
+
+#define UAS_VENDOR_ID	0x0525	/* NetChip */
+#define UAS_PRODUCT_ID	0xa4a5	/* Linux-USB File-backed Storage Gadget */
+
+static struct usb_device_descriptor usbg_device_desc = {
+	.bLength =		sizeof(usbg_device_desc),
+	.bDescriptorType =	USB_DT_DEVICE,
+	.bcdUSB =		cpu_to_le16(0x0200),
+	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
+	.idVendor =		cpu_to_le16(UAS_VENDOR_ID),
+	.idProduct =		cpu_to_le16(UAS_PRODUCT_ID),
+	.iManufacturer =	USB_G_STR_MANUFACTOR,
+	.iProduct =		USB_G_STR_PRODUCT,
+	.iSerialNumber =	USB_G_STR_SERIAL,
+
+	.bNumConfigurations =   1,
+};
+
+static struct usb_string	usbg_us_strings[] = {
+	{ USB_G_STR_MANUFACTOR,	"Target Manufacturer"},
+	{ USB_G_STR_PRODUCT,	"Target Product"},
+	{ USB_G_STR_SERIAL,	"000000000001"},
+	{ USB_G_STR_CONFIG,	"default config"},
+	{ USB_G_STR_INT_UAS,	"USB Attached SCSI"},
+	{ USB_G_STR_INT_BBB,	"Bulk Only Transport"},
+	{ },
+};
+
+static struct usb_gadget_strings usbg_stringtab = {
+	.language = 0x0409,
+	.strings = usbg_us_strings,
+};
+
+static struct usb_gadget_strings *usbg_strings[] = {
+	&usbg_stringtab,
+	NULL,
+};
+
+static int guas_unbind(struct usb_composite_dev *cdev)
+{
+	return 0;
+}
+
+static struct usb_configuration usbg_config_driver = {
+	.label                  = "Linux Target",
+	.bConfigurationValue    = 1,
+	.iConfiguration		= USB_G_STR_CONFIG,
+	.bmAttributes           = USB_CONFIG_ATT_SELFPOWER,
+};
+
+static void give_back_ep(struct usb_ep **pep)
+{
+	struct usb_ep *ep = *pep;
+	if (!ep)
+		return;
+	ep->driver_data = NULL;
+}
+
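+/*
+ * Claim the four endpoints via SuperSpeed autoconfig and reuse the assigned
+ * endpoint addresses for the high- and full-speed descriptors.
+ */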
+static int usbg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_uas		*fu = to_f_uas(f);
+	struct usb_gadget	*gadget = c->cdev->gadget;
+	struct usb_ep		*ep;
+	int			iface;
+
+	iface = usb_interface_id(c, f);
+	if (iface < 0)
+		return iface;
+
+	bot_intf_desc.bInterfaceNumber = iface;
+	uasp_intf_desc.bInterfaceNumber = iface;
+	fu->iface = iface;
+	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc,
+			&uasp_bi_ep_comp_desc);
+	if (!ep)
+		goto ep_fail;
+
+	ep->driver_data = fu;
+	fu->ep_in = ep;
+
+	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc,
+			&uasp_bo_ep_comp_desc);
+	if (!ep)
+		goto ep_fail;
+	ep->driver_data = fu;
+	fu->ep_out = ep;
+
+	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc,
+			&uasp_status_in_ep_comp_desc);
+	if (!ep)
+		goto ep_fail;
+	ep->driver_data = fu;
+	fu->ep_status = ep;
+
+	ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc,
+			&uasp_cmd_comp_desc);
+	if (!ep)
+		goto ep_fail;
+	ep->driver_data = fu;
+	fu->ep_cmd = ep;
+
+	/* Assume endpoint addresses are the same for both speeds */
+	uasp_bi_desc.bEndpointAddress =	uasp_ss_bi_desc.bEndpointAddress;
+	uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+	uasp_status_desc.bEndpointAddress =
+		uasp_ss_status_desc.bEndpointAddress;
+	uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+
+	uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress;
+	uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress;
+	uasp_fs_status_desc.bEndpointAddress =
+		uasp_ss_status_desc.bEndpointAddress;
+	uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
+
+	return 0;
+ep_fail:
+	pr_err("Can't claim all required eps\n");
+
+	give_back_ep(&fu->ep_in);
+	give_back_ep(&fu->ep_out);
+	give_back_ep(&fu->ep_status);
+	give_back_ep(&fu->ep_cmd);
+	return -ENOTSUPP;
+}
+
+static void usbg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_uas *fu = to_f_uas(f);
+
+	kfree(fu);
+}
+
+struct guas_setup_wq {
+	struct work_struct work;
+	struct f_uas *fu;
+	unsigned int alt;
+};
+
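+/*
+ * Alt-setting changes arrive in interrupt context, but tearing down and
+ * re-enabling the endpoints can sleep (GFP_KERNEL allocations), so defer the
+ * switch to a work item and complete the delayed status stage afterwards.
+ */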
+static void usbg_delayed_set_alt(struct work_struct *wq)
+{
+	struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
+			work);
+	struct f_uas *fu = work->fu;
+	int alt = work->alt;
+
+	kfree(work);
+
+	if (fu->flags & USBG_IS_BOT)
+		bot_cleanup_old_alt(fu);
+	if (fu->flags & USBG_IS_UAS)
+		uasp_cleanup_old_alt(fu);
+
+	if (alt == USB_G_ALT_INT_BBB)
+		bot_set_alt(fu);
+	else if (alt == USB_G_ALT_INT_UAS)
+		uasp_set_alt(fu);
+	usb_composite_setup_continue(fu->function.config->cdev);
+}
+
+static int usbg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_uas *fu = to_f_uas(f);
+
+	if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
+		struct guas_setup_wq *work;
+
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work)
+			return -ENOMEM;
+		INIT_WORK(&work->work, usbg_delayed_set_alt);
+		work->fu = fu;
+		work->alt = alt;
+		schedule_work(&work->work);
+		return USB_GADGET_DELAYED_STATUS;
+	}
+	return -EOPNOTSUPP;
+}
+
+static void usbg_disable(struct usb_function *f)
+{
+	struct f_uas *fu = to_f_uas(f);
+
+	if (fu->flags & USBG_IS_UAS)
+		uasp_cleanup_old_alt(fu);
+	else if (fu->flags & USBG_IS_BOT)
+		bot_cleanup_old_alt(fu);
+	fu->flags = 0;
+}
+
+static int usbg_setup(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct f_uas *fu = to_f_uas(f);
+
+	if (!(fu->flags & USBG_IS_BOT))
+		return -EOPNOTSUPP;
+
+	return usbg_bot_setup(f, ctrl);
+}
+
+static int usbg_cfg_bind(struct usb_configuration *c)
+{
+	struct f_uas *fu;
+	int ret;
+
+	fu = kzalloc(sizeof(*fu), GFP_KERNEL);
+	if (!fu)
+		return -ENOMEM;
+	fu->function.name = "Target Function";
+	fu->function.descriptors = uasp_fs_function_desc;
+	fu->function.hs_descriptors = uasp_hs_function_desc;
+	fu->function.ss_descriptors = uasp_ss_function_desc;
+	fu->function.bind = usbg_bind;
+	fu->function.unbind = usbg_unbind;
+	fu->function.set_alt = usbg_set_alt;
+	fu->function.setup = usbg_setup;
+	fu->function.disable = usbg_disable;
+	fu->tpg = the_only_tpg_I_currently_have;
+
+	ret = usb_add_function(c, &fu->function);
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	kfree(fu);
+	return ret;
+}
+
+static int usb_target_bind(struct usb_composite_dev *cdev)
+{
+	int ret;
+
+	ret = usb_add_config(cdev, &usbg_config_driver,
+			usbg_cfg_bind);
+	return ret;
+}
+
+static struct usb_composite_driver usbg_driver = {
+	.name           = "g_target",
+	.dev            = &usbg_device_desc,
+	.strings        = usbg_strings,
+	.max_speed      = USB_SPEED_SUPER,
+	.unbind         = guas_unbind,
+};
+
+static int usbg_attach(struct usbg_tpg *tpg)
+{
+	return usb_composite_probe(&usbg_driver, usb_target_bind);
+}
+
+static void usbg_detach(struct usbg_tpg *tpg)
+{
+	usb_composite_unregister(&usbg_driver);
+}
+
+static int __init usb_target_gadget_init(void)
+{
+	int ret;
+
+	ret = usbg_register_configfs();
+	return ret;
+}
+module_init(usb_target_gadget_init);
+
+static void __exit usb_target_gadget_exit(void)
+{
+	usbg_deregister_configfs();
+}
+module_exit(usb_target_gadget_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
+MODULE_DESCRIPTION("usb-gadget fabric");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/tcm_usb_gadget.h b/drivers/usb/gadget/tcm_usb_gadget.h
new file mode 100644
index 0000000..bb18999
--- /dev/null
+++ b/drivers/usb/gadget/tcm_usb_gadget.h
@@ -0,0 +1,146 @@
+#ifndef __TARGET_USB_GADGET_H__
+#define __TARGET_USB_GADGET_H__
+
+#include <linux/kref.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/uas.h>
+#include <linux/usb/storage.h>
+#include <scsi/scsi.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#define USBG_NAMELEN 32
+
+#define fuas_to_gadget(f)	(f->function.config->cdev->gadget)
+#define UASP_SS_EP_COMP_LOG_STREAMS 4
+#define UASP_SS_EP_COMP_NUM_STREAMS (1 << UASP_SS_EP_COMP_LOG_STREAMS)
+
+#define USB_G_STR_MANUFACTOR    1
+#define USB_G_STR_PRODUCT       2
+#define USB_G_STR_SERIAL        3
+#define USB_G_STR_CONFIG        4
+#define USB_G_STR_INT_UAS       5
+#define USB_G_STR_INT_BBB       6
+
+#define USB_G_ALT_INT_BBB       0
+#define USB_G_ALT_INT_UAS       1
+
+struct usbg_nacl {
+	/* Binary World Wide unique Port Name for SAS Initiator port */
+	u64 iport_wwpn;
+	/* ASCII formatted WWPN for SAS Initiator port */
+	char iport_name[USBG_NAMELEN];
+	/* Returned by usbg_make_nodeacl() */
+	struct se_node_acl se_node_acl;
+};
+
+struct tcm_usbg_nexus {
+	struct se_session *tvn_se_sess;
+};
+
+struct usbg_tpg {
+	struct mutex tpg_mutex;
+	/* SAS port target portal group tag for TCM */
+	u16 tport_tpgt;
+	/* Pointer back to usbg_tport */
+	struct usbg_tport *tport;
+	struct workqueue_struct *workqueue;
+	/* Returned by usbg_make_tpg() */
+	struct se_portal_group se_tpg;
+	u32 gadget_connect;
+	struct tcm_usbg_nexus *tpg_nexus;
+	atomic_t tpg_port_count;
+};
+
+struct usbg_tport {
+	/* SCSI protocol the tport is providing */
+	u8 tport_proto_id;
+	/* Binary World Wide unique Port Name for SAS Target port */
+	u64 tport_wwpn;
+	/* ASCII formatted WWPN for SAS Target port */
+	char tport_name[USBG_NAMELEN];
+	/* Returned by usbg_make_tport() */
+	struct se_wwn tport_wwn;
+};
+
+enum uas_state {
+	UASP_SEND_DATA,
+	UASP_RECEIVE_DATA,
+	UASP_SEND_STATUS,
+	UASP_QUEUE_COMMAND,
+};
+
+#define USBG_MAX_CMD    64
+struct usbg_cmd {
+	/* common */
+	u8 cmd_buf[USBG_MAX_CMD];
+	u32 data_len;
+	struct work_struct work;
+	int unpacked_lun;
+	struct se_cmd se_cmd;
+	void *data_buf; /* used if no sg support available */
+	struct f_uas *fu;
+	struct completion write_complete;
+	struct kref ref;
+
+	/* UAS only */
+	u16 tag;
+	u16 prio_attr;
+	struct sense_iu sense_iu;
+	enum uas_state state;
+	struct uas_stream *stream;
+
+	/* BOT only */
+	__le32 bot_tag;
+	unsigned int csw_code;
+	unsigned is_read:1;
+
+};
+
+struct uas_stream {
+	struct usb_request	*req_in;
+	struct usb_request	*req_out;
+	struct usb_request	*req_status;
+};
+
+struct usbg_cdb {
+	struct usb_request	*req;
+	void			*buf;
+};
+
+struct bot_status {
+	struct usb_request	*req;
+	struct bulk_cs_wrap	csw;
+};
+
+struct f_uas {
+	struct usbg_tpg		*tpg;
+	struct usb_function	function;
+	u16			iface;
+
+	u32			flags;
+#define USBG_ENABLED		(1 << 0)
+#define USBG_IS_UAS		(1 << 1)
+#define USBG_USE_STREAMS	(1 << 2)
+#define USBG_IS_BOT		(1 << 3)
+#define USBG_BOT_CMD_PEND	(1 << 4)
+
+	struct usbg_cdb		cmd;
+	struct usb_ep		*ep_in;
+	struct usb_ep		*ep_out;
+
+	/* UAS */
+	struct usb_ep		*ep_status;
+	struct usb_ep		*ep_cmd;
+	struct uas_stream	stream[UASP_SS_EP_COMP_NUM_STREAMS];
+
+	/* BOT */
+	struct bot_status	bot_status;
+	struct usb_request	*bot_req_in;
+	struct usb_request	*bot_req_out;
+};
+
+extern struct usbg_tpg *the_only_tpg_I_currently_have;
+
+#endif
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index c0f9346..dedad53 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -827,8 +827,10 @@
 
 	/* Ensure that above operation is completed before turning off clocks */
 	mb();
-	clk_disable_unprepare(motg->pclk);
-	clk_disable_unprepare(motg->core_clk);
+	if (!motg->pdata->core_clk_always_on_workaround) {
+		clk_disable_unprepare(motg->pclk);
+		clk_disable_unprepare(motg->core_clk);
+	}
 
 	/* usb phy no more require TCXO clock, hence vote for TCXO disable */
 	if (!host_bus_suspend) {
@@ -891,9 +893,10 @@
 		motg->lpm_flags &= ~XO_SHUTDOWN;
 	}
 
-	clk_prepare_enable(motg->core_clk);
-
-	clk_prepare_enable(motg->pclk);
+	if (!motg->pdata->core_clk_always_on_workaround) {
+		clk_prepare_enable(motg->core_clk);
+		clk_prepare_enable(motg->pclk);
+	}
 
 	if (motg->lpm_flags & PHY_PWR_COLLAPSED) {
 		msm_hsusb_ldo_enable(motg, 1);
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index 2a61f07..780e0c6 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -1,5 +1,8 @@
 mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o
+mdss-mdp-objs += mdss_mdp_pp.o
+mdss-mdp-objs += mdss_mdp_intf_video.o
 mdss-mdp-objs += mdss_mdp_intf_writeback.o
+mdss-mdp-objs += mdss_mdp_rotator.o
 mdss-mdp-objs += mdss_mdp_overlay.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index aaf6690..a58c3e6 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -23,13 +23,20 @@
 
 extern unsigned char *mdss_reg_base;
 
+enum mdss_mdp_clk_type {
+	MDSS_CLK_AHB,
+	MDSS_CLK_AXI,
+	MDSS_CLK_MDP_SRC,
+	MDSS_CLK_MDP_CORE,
+	MDSS_CLK_MDP_LUT,
+	MDSS_CLK_MDP_VSYNC,
+	MDSS_MAX_CLK
+};
+
 struct mdss_res_type {
 	u32 rev;
 	u32 mdp_rev;
-	struct clk *mdp_clk;
-	struct clk *mdp_pclk;
-	struct clk *mdp_lut_clk;
-	struct clk *vsync_clk;
+	struct clk *mdp_clk[MDSS_MAX_CLK];
 	struct regulator *fs;
 
 	struct workqueue_struct *clk_ctrl_wq;
@@ -40,6 +47,8 @@
 	u32 irq_ena;
 	u32 irq_buzy;
 
+	u32 mdp_irq_mask;
+
 	u32 clk_ena;
 	u32 suspend;
 	u32 timeout;
@@ -60,4 +69,22 @@
 	u32 *mixer_type_map;
 };
 extern struct mdss_res_type *mdss_res;
+
+enum mdss_hw_index {
+	MDSS_HW_MDP,
+	MDSS_HW_DSI0,
+	MDSS_HW_DSI1,
+	MDSS_HW_HDMI,
+	MDSS_HW_EDP,
+	MDSS_MAX_HW_BLK
+};
+
+struct mdss_hw {
+	u32 hw_ndx;
+	irqreturn_t (*irq_handler)(int irq, void *ptr);
+};
+
+void mdss_enable_irq(struct mdss_hw *hw);
+void mdss_disable_irq(struct mdss_hw *hw);
+void mdss_disable_irq_nosync(struct mdss_hw *hw);
 #endif /* MDSS_H */
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index d1847c3..41e0c18 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -39,14 +39,13 @@
 #include <mach/board.h>
 #include <mach/clk.h>
 #include <mach/hardware.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
 
 #include "mdss.h"
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 
-/* 1.15 mdp clk factor */
-#define MDP_CLK_FACTOR(rate) (((rate) * 23) / 20)
-
 unsigned char *mdss_reg_base;
 
 struct mdss_res_type *mdss_res;
@@ -75,45 +74,261 @@
 	MDSS_MDP_MIXER_TYPE_WRITEBACK,
 };
 
-irqreturn_t mdss_irq_handler(int mdss_irq, void *ptr)
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+	{ \
+		.src = MSM_BUS_MASTER_MDP_PORT0,	\
+		.dst = MSM_BUS_SLAVE_EBI_CH0,		\
+		.ab = (ab_val),				\
+		.ib = (ib_val),				\
+	}
+
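+/*
+ * usecase level n votes for n x 100 MB/s average (ab) and
+ * n x 200 MB/s instantaneous (ib) bandwidth
+ */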
+#define MDP_BUS_VECTOR_ENTRY_NDX(n) \
+		MDP_BUS_VECTOR_ENTRY((n) * 100000000, (n) * 200000000)
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+	MDP_BUS_VECTOR_ENTRY_NDX(0),
+	MDP_BUS_VECTOR_ENTRY_NDX(1),
+	MDP_BUS_VECTOR_ENTRY_NDX(2),
+	MDP_BUS_VECTOR_ENTRY_NDX(3),
+	MDP_BUS_VECTOR_ENTRY_NDX(4),
+	MDP_BUS_VECTOR_ENTRY_NDX(5),
+	MDP_BUS_VECTOR_ENTRY_NDX(6),
+	MDP_BUS_VECTOR_ENTRY_NDX(7),
+	MDP_BUS_VECTOR_ENTRY_NDX(8),
+	MDP_BUS_VECTOR_ENTRY_NDX(9),
+	MDP_BUS_VECTOR_ENTRY_NDX(10),
+	MDP_BUS_VECTOR_ENTRY(200000000, 200000000)
+};
+static struct msm_bus_paths mdp_bus_usecases[ARRAY_SIZE(mdp_bus_vectors)];
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+	.usecase = mdp_bus_usecases,
+	.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+	.name = "mdss_mdp",
+};
+
+struct mdss_hw mdss_mdp_hw = {
+	.hw_ndx = MDSS_HW_MDP,
+	.irq_handler = mdss_mdp_isr,
+};
+
+static DEFINE_SPINLOCK(mdss_lock);
+struct mdss_hw *mdss_irq_handlers[MDSS_MAX_HW_BLK];
+
+static inline int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
+{
+	struct mdss_hw *hw;
+
+	spin_lock(&mdss_lock);
+	hw = mdss_irq_handlers[hw_ndx];
+	spin_unlock(&mdss_lock);
+	if (hw)
+		return hw->irq_handler(irq, ptr);
+
+	return -ENODEV;
+}
+
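+/*
+ * top-level MDSS ISR: reads the combined HW interrupt status and
+ * dispatches to the handler registered for each asserted block
+ */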
+static irqreturn_t mdss_irq_handler(int irq, void *ptr)
 {
 	u32 intr = MDSS_MDP_REG_READ(MDSS_REG_HW_INTR_STATUS);
 
 	mdss_res->irq_buzy = true;
 
 	if (intr & MDSS_INTR_MDP)
-		mdss_mdp_isr(mdss_irq, ptr);
+		mdss_irq_dispatch(MDSS_HW_MDP, irq, ptr);
+
+	if (intr & MDSS_INTR_DSI0)
+		mdss_irq_dispatch(MDSS_HW_DSI0, irq, ptr);
+
+	if (intr & MDSS_INTR_DSI1)
+		mdss_irq_dispatch(MDSS_HW_DSI1, irq, ptr);
+
+	if (intr & MDSS_INTR_EDP)
+		mdss_irq_dispatch(MDSS_HW_EDP, irq, ptr);
+
+	if (intr & MDSS_INTR_HDMI)
+		mdss_irq_dispatch(MDSS_HW_HDMI, irq, ptr);
 
 	mdss_res->irq_buzy = false;
 
 	return IRQ_HANDLED;
 }
 
+
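+/*
+ * per-block IRQ control: the shared MDSS interrupt line is enabled
+ * when the first block registers and disabled when the last one is
+ * removed
+ */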
+void mdss_enable_irq(struct mdss_hw *hw)
+{
+	unsigned long irq_flags;
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Enable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			mdss_res->irq_ena, mdss_res->irq_mask);
+
+	spin_lock_irqsave(&mdss_lock, irq_flags);
+	if (mdss_res->irq_mask & ndx_bit) {
+		pr_debug("MDSS HW ndx=%d is already set, mask=%x\n",
+				hw->hw_ndx, mdss_res->irq_mask);
+	} else {
+		mdss_irq_handlers[hw->hw_ndx] = hw;
+		mdss_res->irq_mask |= ndx_bit;
+		if (!mdss_res->irq_ena) {
+			mdss_res->irq_ena = true;
+			enable_irq(mdss_res->irq);
+		}
+	}
+	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+EXPORT_SYMBOL(mdss_enable_irq);
+
+void mdss_disable_irq(struct mdss_hw *hw)
+{
+	unsigned long irq_flags;
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			mdss_res->irq_ena, mdss_res->irq_mask);
+
+	spin_lock_irqsave(&mdss_lock, irq_flags);
+	if (!(mdss_res->irq_mask & ndx_bit)) {
+		pr_warn("MDSS HW ndx=%d is NOT set, mask=%x\n",
+			hw->hw_ndx, mdss_res->irq_mask);
+	} else {
+		mdss_irq_handlers[hw->hw_ndx] = NULL;
+		mdss_res->irq_mask &= ~ndx_bit;
+		if (mdss_res->irq_mask == 0) {
+			mdss_res->irq_ena = false;
+			disable_irq(mdss_res->irq);
+		}
+	}
+	spin_unlock_irqrestore(&mdss_lock, irq_flags);
+}
+EXPORT_SYMBOL(mdss_disable_irq);
+
+void mdss_disable_irq_nosync(struct mdss_hw *hw)
+{
+	u32 ndx_bit;
+
+	if (hw->hw_ndx >= MDSS_MAX_HW_BLK)
+		return;
+
+	ndx_bit = BIT(hw->hw_ndx);
+
+	pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
+			mdss_res->irq_ena, mdss_res->irq_mask);
+
+	spin_lock(&mdss_lock);
+	if (!(mdss_res->irq_mask & ndx_bit)) {
+		pr_warn("MDSS HW ndx=%d is NOT set, mask=%x\n",
+			hw->hw_ndx, mdss_res->irq_mask);
+	} else {
+		mdss_irq_handlers[hw->hw_ndx] = NULL;
+		mdss_res->irq_mask &= ~ndx_bit;
+		if (mdss_res->irq_mask == 0) {
+			mdss_res->irq_ena = false;
+			disable_irq_nosync(mdss_res->irq);
+		}
+	}
+	spin_unlock(&mdss_lock);
+}
+EXPORT_SYMBOL(mdss_disable_irq_nosync);
+
+static int mdss_mdp_bus_scale_register(void)
+{
+	if (!mdss_res->bus_hdl) {
+		struct msm_bus_scale_pdata *bus_pdata = &mdp_bus_scale_table;
+		int i;
+
+		for (i = 0; i < bus_pdata->num_usecases; i++) {
+			mdp_bus_usecases[i].num_paths = 1;
+			mdp_bus_usecases[i].vectors = &mdp_bus_vectors[i];
+		}
+
+		mdss_res->bus_hdl = msm_bus_scale_register_client(bus_pdata);
+		if (!mdss_res->bus_hdl) {
+			pr_err("not able to get bus scale\n");
+			return -ENOMEM;
+		}
+
+		pr_debug("register bus_hdl=%x\n", mdss_res->bus_hdl);
+	}
+	return 0;
+}
+
+static void mdss_mdp_bus_scale_unregister(void)
+{
+	pr_debug("unregister bus_hdl=%x\n", mdss_res->bus_hdl);
+
+	if (mdss_res->bus_hdl)
+		msm_bus_scale_unregister_client(mdss_res->bus_hdl);
+}
+
+int mdss_mdp_bus_scale_set_min_quota(u32 quota)
+{
+	struct msm_bus_scale_pdata *bus_pdata = &mdp_bus_scale_table;
+	struct msm_bus_vectors *vect = NULL;
+	int lvl;
+
+	if (mdss_res->bus_hdl < 1) {
+		pr_err("invalid bus handle %d\n", mdss_res->bus_hdl);
+		return -EINVAL;
+	}
+
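+	/* select the lowest usecase level whose ab vote covers the quota */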
+	for (lvl = 0; lvl < bus_pdata->num_usecases; lvl++) {
+		if (bus_pdata->usecase[lvl].num_paths) {
+			vect = &bus_pdata->usecase[lvl].vectors[0];
+			if (vect->ab >= quota) {
+				pr_debug("lvl=%d quota=%u ab=%u\n", lvl, quota,
+						vect->ab);
+				break;
+			}
+		}
+	}
+
+	if (lvl == bus_pdata->num_usecases) {
+		pr_warn("cannot match quota=%u try with max level\n", quota);
+		lvl--;
+	}
+
+	return msm_bus_scale_client_update_request(mdss_res->bus_hdl, lvl);
+}
+
+static inline u32 mdss_mdp_irq_mask(u32 intr_type, u32 intf_num)
+{
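+	/* underrun and vsync sources use two status bits per interface */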
+	if (intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN ||
+	    intr_type == MDSS_MDP_IRQ_INTF_VSYNC)
+		intf_num = (intf_num - MDSS_MDP_INTF0) * 2;
+	return 1 << (intr_type + intf_num);
+}
+
 int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
 {
 	u32 irq;
 	unsigned long irq_flags;
 	int ret = 0;
 
-	if (intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN ||
-	    intr_type == MDSS_MDP_IRQ_INTF_VSYNC)
-		intf_num = intf_num << 1;
-
-	irq =  BIT(intr_type + intf_num);
+	irq = mdss_mdp_irq_mask(intr_type, intf_num);
 
 	spin_lock_irqsave(&mdp_lock, irq_flags);
-	if (mdss_res->irq_mask & irq) {
-		pr_warn("MDSS IRQ-0x%x is already set, mask=%x irq=%d\n",
-			irq, mdss_res->irq_mask, mdss_res->irq_ena);
+	if (mdss_res->mdp_irq_mask & irq) {
+		pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n",
+				irq, mdss_res->mdp_irq_mask);
 		ret = -EBUSY;
 	} else {
-		mdss_res->irq_mask |= irq;
+		pr_debug("MDP IRQ mask old=%x new=%x\n",
+				mdss_res->mdp_irq_mask, irq);
+		mdss_res->mdp_irq_mask |= irq;
 		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_CLEAR, irq);
-		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN, mdss_res->irq_mask);
-		if (!mdss_res->irq_ena) {
-			mdss_res->irq_ena = true;
-			enable_irq(mdss_res->irq);
-		}
+		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
+				mdss_res->mdp_irq_mask);
+		mdss_enable_irq(&mdss_mdp_hw);
 	}
 	spin_unlock_irqrestore(&mdp_lock, irq_flags);
 
@@ -125,24 +340,17 @@
 	u32 irq;
 	unsigned long irq_flags;
 
-
-	if (intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN ||
-	    intr_type == MDSS_MDP_IRQ_INTF_VSYNC)
-		intf_num = intf_num << 1;
-
-	irq = BIT(intr_type + intf_num);
+	irq = mdss_mdp_irq_mask(intr_type, intf_num);
 
 	spin_lock_irqsave(&mdp_lock, irq_flags);
-	if (!(mdss_res->irq_mask & irq)) {
-		pr_warn("MDSS IRQ-%x is NOT set, mask=%x irq=%d\n",
-			irq, mdss_res->irq_mask, mdss_res->irq_ena);
+	if (!(mdss_res->mdp_irq_mask & irq)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+				irq, mdss_res->mdp_irq_mask);
 	} else {
-		mdss_res->irq_mask &= ~irq;
-		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN, mdss_res->irq_mask);
-		if (!mdss_res->irq_mask) {
-			mdss_res->irq_ena = false;
-			disable_irq(mdss_res->irq);
-		}
+		mdss_res->mdp_irq_mask &= ~irq;
+		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
+				mdss_res->mdp_irq_mask);
+		mdss_disable_irq(&mdss_mdp_hw);
 	}
 	spin_unlock_irqrestore(&mdp_lock, irq_flags);
 }
@@ -151,34 +359,114 @@
 {
 	u32 irq;
 
-	if (intr_type == MDSS_MDP_IRQ_INTF_UNDER_RUN ||
-	    intr_type == MDSS_MDP_IRQ_INTF_VSYNC)
-		intf_num = intf_num << 1;
-
-	irq = BIT(intr_type + intf_num);
+	irq = mdss_mdp_irq_mask(intr_type, intf_num);
 
 	spin_lock(&mdp_lock);
-	if (!(mdss_res->irq_mask & irq)) {
-		pr_warn("MDSS IRQ-%x is NOT set, mask=%x irq=%d\n",
-			irq, mdss_res->irq_mask, mdss_res->irq_ena);
+	if (!(mdss_res->mdp_irq_mask & irq)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+				irq, mdss_res->mdp_irq_mask);
 	} else {
-		mdss_res->irq_mask &= ~irq;
-		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN, mdss_res->irq_mask);
-		if (!mdss_res->irq_mask) {
-			mdss_res->irq_ena = false;
-			disable_irq_nosync(mdss_res->irq);
-		}
+		mdss_res->mdp_irq_mask &= ~irq;
+		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
+				mdss_res->mdp_irq_mask);
+		mdss_disable_irq_nosync(&mdss_mdp_hw);
 	}
 	spin_unlock(&mdp_lock);
 }
 
+static inline struct clk *mdss_mdp_get_clk(u32 clk_idx)
+{
+	if (clk_idx < MDSS_MAX_CLK)
+		return mdss_res->mdp_clk[clk_idx];
+	return NULL;
+}
+
+static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
+{
+	int ret = -ENODEV;
+	struct clk *clk = mdss_mdp_get_clk(clk_idx);
+
+	if (clk) {
+		pr_debug("clk=%d en=%d\n", clk_idx, enable);
+		if (enable) {
+			ret = clk_prepare_enable(clk);
+		} else {
+			clk_disable_unprepare(clk);
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+int mdss_mdp_vsync_clk_enable(int enable)
+{
+	int ret = 0;
+	pr_debug("clk enable=%d\n", enable);
+	mutex_lock(&mdp_clk_lock);
+	if (mdss_res->vsync_ena != enable) {
+		mdss_res->vsync_ena = enable;
+		ret = mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
+	}
+	mutex_unlock(&mdp_clk_lock);
+	return ret;
+}
+
+void mdss_mdp_set_clk_rate(unsigned long min_clk_rate)
+{
+	unsigned long clk_rate;
+	struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_SRC);
+	if (clk) {
+		mutex_lock(&mdp_clk_lock);
+		clk_rate = clk_round_rate(clk, min_clk_rate);
+		if (IS_ERR_VALUE(clk_rate)) {
+			pr_err("unable to round rate err=%ld\n", clk_rate);
+		} else if (clk_rate != clk_get_rate(clk)) {
+			if (IS_ERR_VALUE(clk_set_rate(clk, clk_rate)))
+				pr_err("clk_set_rate failed\n");
+			else
+				pr_debug("mdp clk rate=%lu\n", clk_rate);
+		}
+		mutex_unlock(&mdp_clk_lock);
+	}
+}
+
+unsigned long mdss_mdp_get_clk_rate(u32 clk_idx)
+{
+	unsigned long clk_rate = 0;
+	struct clk *clk = mdss_mdp_get_clk(clk_idx);
+	mutex_lock(&mdp_clk_lock);
+	if (clk)
+		clk_rate = clk_get_rate(clk);
+	mutex_unlock(&mdp_clk_lock);
+
+	return clk_rate;
+}
+
 static void mdss_mdp_clk_ctrl_update(int enable)
 {
 	if (mdss_res->clk_ena == enable)
 		return;
 
 	pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
+
+	mutex_lock(&mdp_clk_lock);
 	mdss_res->clk_ena = enable;
+	mb();
+
+	mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
+	mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
+
+	mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
+	mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
+	if (mdss_res->vsync_ena)
+		mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
+
+	mutex_unlock(&mdp_clk_lock);
+}
+
+static void mdss_mdp_clk_ctrl_workqueue_handler(struct work_struct *work)
+{
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 }
 
 void mdss_mdp_clk_ctrl(int enable, int isr)
@@ -233,14 +521,30 @@
 	}
 }
 
-static void mdss_mdp_clk_ctrl_workqueue_handler(struct work_struct *work)
+static inline int mdss_mdp_irq_clk_register(struct platform_device *pdev,
+					    char *clk_name, int clk_idx)
 {
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	struct clk *tmp;
+	if (clk_idx >= MDSS_MAX_CLK) {
+		pr_err("invalid clk index %d\n", clk_idx);
+		return -EINVAL;
+	}
+
+
+	tmp = clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(tmp)) {
+		pr_err("unable to get clk: %s\n", clk_name);
+		return PTR_ERR(tmp);
+	}
+
+	mdss_res->mdp_clk[clk_idx] = tmp;
+	return 0;
 }
 
-static int mdss_mdp_irq_clk_setup(void)
+static int mdss_mdp_irq_clk_setup(struct platform_device *pdev)
 {
 	int ret;
+	int i;
 
 	ret = request_irq(mdss_res->irq, mdss_irq_handler, IRQF_DISABLED,
 			  "MDSS", 0);
@@ -250,15 +554,38 @@
 	}
 	disable_irq(mdss_res->irq);
 
-	mdss_res->fs = regulator_get(NULL, "fs_mdp");
-	if (IS_ERR(mdss_res->fs))
+	mdss_res->fs = regulator_get(NULL, "gdsc_mdss");
+	if (IS_ERR_OR_NULL(mdss_res->fs)) {
 		mdss_res->fs = NULL;
-	else {
-		regulator_enable(mdss_res->fs);
-		mdss_res->fs_ena = true;
+		pr_err("unable to get gdsc_mdss regulator\n");
+		goto error;
 	}
+	regulator_enable(mdss_res->fs);
+
+	if (mdss_mdp_irq_clk_register(pdev, "bus_clk", MDSS_CLK_AXI) ||
+	    mdss_mdp_irq_clk_register(pdev, "iface_clk", MDSS_CLK_AHB) ||
+	    mdss_mdp_irq_clk_register(pdev, "core_clk_src", MDSS_CLK_MDP_SRC) ||
+	    mdss_mdp_irq_clk_register(pdev, "core_clk", MDSS_CLK_MDP_CORE) ||
+	    mdss_mdp_irq_clk_register(pdev, "lut_clk", MDSS_CLK_MDP_LUT) ||
+	    mdss_mdp_irq_clk_register(pdev, "vsync_clk", MDSS_CLK_MDP_VSYNC))
+		goto error;
+
+	mdss_mdp_set_clk_rate(MDP_CLK_DEFAULT_RATE);
+	pr_debug("mdp clk rate=%ld\n", mdss_mdp_get_clk_rate(MDSS_CLK_MDP_SRC));
 
 	return 0;
+error:
+	for (i = 0; i < MDSS_MAX_CLK; i++) {
+		if (mdss_res->mdp_clk[i])
+			clk_put(mdss_res->mdp_clk[i]);
+	}
+	if (mdss_res->fs)
+		regulator_put(mdss_res->fs);
+	if (mdss_res->irq)
+		free_irq(mdss_res->irq, 0);
+
+	return -EINVAL;
+
 }
 
 static struct msm_panel_common_pdata *mdss_mdp_populate_pdata(
@@ -272,11 +599,11 @@
 	return pdata;
 }
 
-static u32 mdss_mdp_res_init(void)
+static u32 mdss_mdp_res_init(struct platform_device *pdev)
 {
 	u32 rc;
 
-	rc = mdss_mdp_irq_clk_setup();
+	rc = mdss_mdp_irq_clk_setup(pdev);
 	if (rc)
 		return rc;
 
@@ -365,12 +692,12 @@
 		goto probe_done;
 	}
 
-	rc = mdss_mdp_res_init();
+	rc = mdss_mdp_res_init(pdev);
 	if (rc) {
 		pr_err("unable to initialize mdss mdp resources\n");
 		goto probe_done;
 	}
-
+	rc = mdss_mdp_bus_scale_register();
 probe_done:
 	if (IS_ERR_VALUE(rc)) {
 		if (mdss_res) {
@@ -448,6 +775,7 @@
 		regulator_put(mdss_res->fs);
 	iounmap(mdss_reg_base);
 	pm_runtime_disable(&pdev->dev);
+	mdss_mdp_bus_scale_unregister();
 	return 0;
 }
 
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index c65d5a7..2cdd9f6 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -93,9 +93,18 @@
 	MDSS_MDP_BLOCK_MAX
 };
 
+enum mdss_mdp_csc_type {
+	MDSS_MDP_CSC_RGB2RGB,
+	MDSS_MDP_CSC_YUV2RGB,
+	MDSS_MDP_CSC_RGB2YUV,
+	MDSS_MDP_CSC_YUV2YUV,
+	MDSS_MDP_MAX_CSC
+};
+
 struct mdss_mdp_ctl {
 	u32 num;
 	u32 ref_cnt;
+	int power_on;
 
 	u32 intf_num;
 	u32 intf_type;
@@ -109,6 +118,8 @@
 	u16 height;
 	u32 dst_format;
 
+	u32 bus_quota;
+
 	struct msm_fb_data_type *mfd;
 	struct mdss_mdp_mixer *mixer_left;
 	struct mdss_mdp_mixer *mixer_right;
@@ -133,6 +144,8 @@
 	u8 cursor_enabled;
 	u8 rotator_mode;
 
+	u32 bus_quota;
+
 	struct mdss_mdp_ctl *ctl;
 	struct mdss_mdp_pipe *stage_pipe[MDSS_MDP_MAX_STAGE];
 };
@@ -218,6 +231,7 @@
 	struct mdss_mdp_format_params *src_fmt;
 	struct mdss_mdp_plane_sizes src_planes;
 
+	u32 bus_quota;
 	u8 mixer_stage;
 	u8 is_fg;
 	u8 alpha;
@@ -253,17 +267,22 @@
 int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
 			       void (*fnc_ptr)(void *), void *arg);
 
+int mdss_mdp_bus_scale_set_min_quota(u32 quota);
+void mdss_mdp_set_clk_rate(unsigned long min_clk_rate);
 unsigned long mdss_mdp_get_clk_rate(u32 clk_idx);
 int mdss_mdp_vsync_clk_enable(int enable);
 void mdss_mdp_clk_ctrl(int enable, int isr);
 void mdss_mdp_footswitch_ctrl(int on);
 
 int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
+int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
 int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
 
 int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd);
 int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd);
 
+struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator);
+int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer);
 struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux);
 struct mdss_mdp_pipe *mdss_mdp_mixer_stage_pipe(struct mdss_mdp_ctl *ctl,
 						int mux, int stage);
@@ -271,6 +290,9 @@
 int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe);
 int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg);
 
+int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 tbl_idx, u32 csc_type);
+int mdss_mdp_dspp_setup(struct mdss_mdp_ctl *ctl, struct mdss_mdp_mixer *mixer);
+
 struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_pnum(u32 pnum);
 struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_locked(u32 type);
 struct mdss_mdp_pipe *mdss_mdp_pipe_get_locked(u32 ndx);
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index d89347e..c80527d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -20,10 +20,127 @@
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 
+enum {
+	MDSS_MDP_BUS_UPDATE_SKIP,
+	MDSS_MDP_BUS_UPDATE_EARLY,
+	MDSS_MDP_BUS_UPDATE_LATE,
+};
+
 static DEFINE_MUTEX(mdss_mdp_ctl_lock);
 static struct mdss_mdp_ctl mdss_mdp_ctl_list[MDSS_MDP_MAX_CTL];
 static struct mdss_mdp_mixer mdss_mdp_mixer_list[MDSS_MDP_MAX_LAYERMIXER];
 
+static int mdss_mdp_ctl_update_clk_rate(void)
+{
+	struct mdss_mdp_ctl *ctl;
+	int cnum;
+	unsigned long clk_rate = MDP_CLK_DEFAULT_RATE;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	for (cnum = 0; cnum < MDSS_MDP_MAX_CTL; cnum++) {
+		ctl = &mdss_mdp_ctl_list[cnum];
+		if (ctl->power_on && ctl->mfd) {
+			unsigned long tmp;
+			pr_debug("ctl=%d pclk_rate=%u\n", ctl->num,
+					ctl->mfd->panel_info.clk_rate);
+			tmp = (ctl->mfd->panel_info.clk_rate * 23) / 20;
+			if (tmp > clk_rate)
+				clk_rate = tmp;
+		}
+	}
+	mdss_mdp_set_clk_rate(clk_rate);
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return 0;
+}
+
+static int mdss_mdp_ctl_update_bus_scale(void)
+{
+	struct mdss_mdp_ctl *ctl;
+	int cnum;
+	u32 bus_quota = 0;
+
+	mutex_lock(&mdss_mdp_ctl_lock);
+	for (cnum = 0; cnum < MDSS_MDP_MAX_CTL; cnum++) {
+		ctl = &mdss_mdp_ctl_list[cnum];
+		if (ctl->power_on)
+			bus_quota += ctl->bus_quota;
+	}
+	mdss_mdp_bus_scale_set_min_quota(bus_quota);
+	mutex_unlock(&mdss_mdp_ctl_lock);
+
+	return 0;
+}
+
+static void mdss_mdp_bus_update_pipe_quota(struct mdss_mdp_pipe *pipe)
+{
+	u32 quota;
+
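+	/* worst case: whole source surface refreshed at 60 fps, plus 25% overhead */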
+	quota = pipe->img_width * pipe->img_height * 60 * pipe->src_fmt->bpp;
+	quota = (quota * 5) / 4; /* 1.25 factor */
+
+	pr_debug("pipe=%d quota old=%u new=%u\n", pipe->num,
+		   pipe->bus_quota, quota);
+	pipe->bus_quota = quota;
+}
+
+static int mdss_mdp_bus_update_mixer_quota(struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_mdp_pipe *pipe;
+	u32 quota, stage;
+
+	if (!mixer)
+		return 0;
+
+	quota = 0;
+	for (stage = 0; stage < MDSS_MDP_MAX_STAGE; stage++) {
+		pipe = mixer->stage_pipe[stage];
+		if (pipe == NULL)
+			continue;
+
+		quota += pipe->bus_quota;
+	}
+
+	pr_debug("mixer=%d quota old=%u new=%u\n", mixer->num,
+		   mixer->bus_quota, quota);
+
+	if (quota != mixer->bus_quota) {
+		mixer->bus_quota = quota;
+		return 1;
+	}
+
+	return 0;
+}
+
+static int mdss_mdp_bus_update_ctl_quota(struct mdss_mdp_ctl *ctl)
+{
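+	/*
+	 * return EARLY when the quota grows (vote before kickoff) and
+	 * LATE when it shrinks (vote after the frame is committed)
+	 */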
+	int ret = MDSS_MDP_BUS_UPDATE_SKIP;
+
+	if (mdss_mdp_bus_update_mixer_quota(ctl->mixer_left) ||
+			mdss_mdp_bus_update_mixer_quota(ctl->mixer_right)) {
+		u32 quota = 0;
+
+		if (ctl->mixer_left)
+			quota += ctl->mixer_left->bus_quota;
+		if (ctl->mixer_right)
+			quota += ctl->mixer_right->bus_quota;
+
+		pr_debug("ctl=%d quota old=%u new=%u\n",
+			   ctl->num, ctl->bus_quota, quota);
+
+		if (quota != ctl->bus_quota) {
+			if (quota > ctl->bus_quota)
+				ret = MDSS_MDP_BUS_UPDATE_EARLY;
+			else
+				ret = MDSS_MDP_BUS_UPDATE_LATE;
+
+			ctl->bus_quota = quota;
+		}
+	}
+
+	return ret;
+}
+
 static struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(void)
 {
 	struct mdss_mdp_ctl *ctl = NULL;
@@ -110,6 +227,71 @@
 	return 0;
 }
 
+struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator)
+{
+	struct mdss_mdp_ctl *ctl = NULL;
+	struct mdss_mdp_mixer *mixer = NULL;
+
+	ctl = mdss_mdp_ctl_alloc();
+
+	if (!ctl)
+		return NULL;
+
+	mixer = mdss_mdp_mixer_alloc(MDSS_MDP_MIXER_TYPE_WRITEBACK);
+	if (!mixer)
+		goto error;
+
+	mixer->rotator_mode = rotator;
+
+	switch (mixer->num) {
+	case MDSS_MDP_LAYERMIXER3:
+		ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT0_MODE :
+			       MDSS_MDP_CTL_OP_WB0_MODE);
+		break;
+	case MDSS_MDP_LAYERMIXER4:
+		ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT1_MODE :
+			       MDSS_MDP_CTL_OP_WB1_MODE);
+		break;
+	default:
+		pr_err("invalid layer mixer=%d\n", mixer->num);
+		goto error;
+	}
+
+	ctl->mixer_left = mixer;
+	mixer->ctl = ctl;
+
+	ctl->start_fnc = mdss_mdp_writeback_start;
+
+	if (ctl->start_fnc)
+		ctl->start_fnc(ctl);
+
+	return mixer;
+error:
+	if (mixer)
+		mdss_mdp_mixer_free(mixer);
+	if (ctl)
+		mdss_mdp_ctl_free(ctl);
+
+	return NULL;
+}
+
+int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer)
+{
+	struct mdss_mdp_ctl *ctl;
+
+	ctl = mixer->ctl;
+
+	pr_debug("destroy ctl=%d mixer=%d\n", ctl->num, mixer->num);
+
+	if (ctl->stop_fnc)
+		ctl->stop_fnc(ctl);
+
+	mdss_mdp_mixer_free(mixer);
+	mdss_mdp_ctl_free(ctl);
+
+	return 0;
+}
+
 static int mdss_mdp_ctl_init(struct msm_fb_data_type *mfd)
 {
 	struct mdss_mdp_ctl *ctl;
@@ -166,6 +348,27 @@
 	}
 
 	switch (mfd->panel_info.type) {
+	case EDP_PANEL:
+		ctl->intf_num = MDSS_MDP_INTF0;
+		ctl->intf_type = MDSS_INTF_EDP;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->start_fnc = mdss_mdp_video_start;
+		break;
+	case MIPI_VIDEO_PANEL:
+		if (mfd->panel_info.pdest == DISPLAY_1)
+			ctl->intf_num = MDSS_MDP_INTF1;
+		else
+			ctl->intf_num = MDSS_MDP_INTF2;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->start_fnc = mdss_mdp_video_start;
+		break;
+	case DTV_PANEL:
+		ctl->intf_num = MDSS_MDP_INTF3;
+		ctl->intf_type = MDSS_INTF_HDMI;
+		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
+		ctl->start_fnc = mdss_mdp_video_start;
+		break;
 	case WRITEBACK_PANEL:
 		ctl->intf_num = MDSS_MDP_NO_INTF;
 		ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE;
@@ -237,6 +440,10 @@
 	ctl = mfd->ctl;
 
 	mutex_lock(&ctl->lock);
+
+	ctl->power_on = true;
+	mdss_mdp_ctl_update_clk_rate();
+
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 	if (ctl->start_fnc)
 		ret = ctl->start_fnc(ctl);
@@ -255,7 +462,7 @@
 	mixer->params_changed++;
 
 	temp = MDSS_MDP_REG_READ(MDSS_MDP_REG_DISP_INTF_SEL);
-	temp |= (ctl->intf_type << (ctl->intf_num * 8));
+	temp |= (ctl->intf_type << ((ctl->intf_num - MDSS_MDP_INTF0) * 8));
 	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_DISP_INTF_SEL, temp);
 
 	outsize = (mixer->height << 16) | mixer->width;
@@ -308,6 +515,8 @@
 	pr_debug("ctl_num=%d\n", mfd->ctl->num);
 
 	mutex_lock(&ctl->lock);
+	ctl->power_on = false;
+
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 	if (ctl->stop_fnc)
 		ret = ctl->stop_fnc(ctl);
@@ -321,6 +530,10 @@
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
 	ctl->play_cnt = 0;
+
+	mdss_mdp_ctl_update_bus_scale();
+	mdss_mdp_ctl_update_clk_rate();
+
 	mutex_unlock(&ctl->lock);
 
 	mdss_mdp_pipe_release_all(mfd);
@@ -328,7 +541,6 @@
 	if (!mfd->ref_cnt)
 		mdss_mdp_ctl_destroy(mfd);
 
-
 	return ret;
 }
 
@@ -493,6 +705,7 @@
 	if (params_changed) {
 		mixer->params_changed++;
 		mixer->stage_pipe[pipe->mixer_stage] = pipe;
+		mdss_mdp_bus_update_pipe_quota(pipe);
 	}
 
 	if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA)
@@ -537,6 +750,9 @@
 {
 	mixer->params_changed = 0;
 
+	if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF)
+		mdss_mdp_dspp_setup(mixer->ctl, mixer);
+
 	/* skip mixer setup for rotator */
 	if (!mixer->rotator_mode)
 		mdss_mdp_mixer_setup(mixer->ctl, mixer);
@@ -548,6 +764,7 @@
 {
 	int mixer1_changed, mixer2_changed;
 	int ret = 0;
+	int bus_update = MDSS_MDP_BUS_UPDATE_SKIP;
 
 	if (!ctl) {
 		pr_err("display function not set\n");
@@ -564,6 +781,8 @@
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 	if (mixer1_changed || mixer2_changed) {
+		bus_update = mdss_mdp_bus_update_ctl_quota(ctl);
+
 		if (ctl->prepare_fnc)
 			ret = ctl->prepare_fnc(ctl, arg);
 		if (ret) {
@@ -571,6 +790,9 @@
 			goto done;
 		}
 
+		if (bus_update == MDSS_MDP_BUS_UPDATE_EARLY)
+			mdss_mdp_ctl_update_bus_scale();
+
 		if (mixer1_changed)
 			mdss_mdp_mixer_update(ctl->mixer_left);
 		if (mixer2_changed)
@@ -591,6 +813,9 @@
 
 	ctl->play_cnt++;
 
+	if (bus_update == MDSS_MDP_BUS_UPDATE_LATE)
+		mdss_mdp_ctl_update_bus_scale();
+
 done:
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
new file mode 100644
index 0000000..21ef290
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -0,0 +1,319 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+
+/* intf timing settings */
+struct intf_timing_params {
+	u32 width;
+	u32 height;
+	u32 xres;
+	u32 yres;
+
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 hsync_pulse_width;
+	u32 vsync_pulse_width;
+
+	u32 border_clr;
+	u32 underflow_clr;
+	u32 hsync_skew;
+};
+
+#define MAX_SESSIONS 3
+struct mdss_mdp_video_ctx {
+	u32 ctl_num;
+	u32 pp_num;
+	u8 ref_cnt;
+
+	u8 timegen_en;
+	struct completion pp_comp;
+	struct completion vsync_comp;
+};
+
+static struct mdss_mdp_video_ctx mdss_mdp_video_ctx_list[MAX_SESSIONS];
+
+static int mdss_mdp_video_timegen_setup(struct mdss_mdp_ctl *ctl,
+					struct intf_timing_params *p)
+{
+	u32 hsync_period, vsync_period;
+	u32 hsync_start_x, hsync_end_x, display_v_start, display_v_end;
+	u32 active_h_start, active_h_end, active_v_start, active_v_end;
+	u32 display_hctl, active_hctl, hsync_ctl, polarity_ctl;
+	int off;
+
+	off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+
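+	/*
+	 * horizontal timings are in pixel clocks; vertical start/end are
+	 * converted from lines to pixel clocks using hsync_period
+	 */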
+	hsync_period = p->hsync_pulse_width + p->h_back_porch +
+			p->width + p->h_front_porch;
+	vsync_period = p->vsync_pulse_width + p->v_back_porch +
+			p->height + p->v_front_porch;
+
+	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+			hsync_period) + p->hsync_skew;
+	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+			p->hsync_skew - 1;
+
+	if (ctl->intf_type == MDSS_INTF_EDP) {
+		display_v_start += p->hsync_pulse_width + p->h_back_porch;
+		display_v_end -= p->h_front_porch;
+	}
+
+	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+	hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+	if (p->width != p->xres) {
+		active_h_start = hsync_start_x;
+		active_h_end = active_h_start + p->xres - 1;
+	} else {
+		active_h_start = 0;
+		active_h_end = 0;
+	}
+
+	if (p->height != p->yres) {
+		active_v_start = display_v_start;
+		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		active_hctl |= BIT(31);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		active_v_start |= BIT(31); /* ACTIVE_V_ENABLE */
+
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+	polarity_ctl = (0 << 2) |	/* DEN Polarity */
+		       (0 << 1) |      /* VSYNC Polarity */
+		       (0);	       /* HSYNC Polarity */
+
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_HSYNC_CTL, hsync_ctl);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+			   vsync_period * hsync_period);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0,
+			   p->vsync_pulse_width * hsync_period);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_DISPLAY_HCTL,
+			   display_hctl);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_DISPLAY_V_START_F0,
+			   display_v_start);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_DISPLAY_V_END_F0,
+			   display_v_end);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_ACTIVE_HCTL, active_hctl);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_ACTIVE_V_START_F0,
+			   active_v_start);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_ACTIVE_V_END_F0,
+			   active_v_end);
+
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_BORDER_COLOR,
+			   p->border_clr);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_UNDERFLOW_COLOR,
+			   p->underflow_clr);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_HSYNC_SKEW,
+			   p->hsync_skew);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_POLARITY_CTL,
+			   polarity_ctl);
+
+	return 0;
+}
+
+static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	int off;
+
+	pr_debug("stop ctl=%d\n", ctl->num);
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+
+	if (ctx->timegen_en) {
+		off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+		ctx->timegen_en = false;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
+
+static void mdss_mdp_video_pp_intr_done(void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+
+	ctx = (struct mdss_mdp_video_ctx *) arg;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	pr_debug("intr mixer=%d\n", ctx->pp_num);
+
+	complete(&ctx->pp_comp);
+}
+
+static void mdss_mdp_video_vsync_intr_done(void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+
+	ctx = (struct mdss_mdp_video_ctx *) arg;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	pr_debug("intr ctl=%d\n", ctx->ctl_num);
+
+	complete(&ctx->vsync_comp);
+}
+
+static int mdss_mdp_video_prepare(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	if (ctx->timegen_en) {
+		u32 intr_type = MDSS_MDP_IRQ_PING_PONG_COMP;
+
+		pr_debug("waiting for ping pong %d done\n", ctx->pp_num);
+		mdss_mdp_set_intr_callback(intr_type, ctx->pp_num,
+					   mdss_mdp_video_pp_intr_done, ctx);
+		mdss_mdp_irq_enable(intr_type, ctx->pp_num);
+
+		wait_for_completion_interruptible(&ctx->pp_comp);
+		mdss_mdp_irq_disable(intr_type, ctx->pp_num);
+	}
+
+	return 0;
+}
+
+static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	u32 intr_type = MDSS_MDP_IRQ_INTF_VSYNC;
+
+	pr_debug("kickoff ctl=%d\n", ctl->num);
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+	mdss_mdp_set_intr_callback(intr_type, ctl->intf_num,
+				   mdss_mdp_video_vsync_intr_done, ctx);
+	mdss_mdp_irq_enable(intr_type, ctl->intf_num);
+
+	if (!ctx->timegen_en) {
+		int off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+
+		pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
+		ctx->timegen_en = true;
+		wmb();
+	}
+
+	wait_for_completion_interruptible(&ctx->vsync_comp);
+	mdss_mdp_irq_disable(intr_type, ctl->intf_num);
+
+	return 0;
+}
+
+int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl)
+{
+	struct msm_fb_data_type *mfd;
+	struct mdss_panel_info *pinfo;
+	struct mdss_mdp_video_ctx *ctx;
+	struct mdss_mdp_mixer *mixer;
+	struct intf_timing_params itp = {0};
+	struct fb_info *fbi;
+	int i;
+
+	mfd = ctl->mfd;
+	fbi = mfd->fbi;
+	pinfo = &mfd->panel_info;
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+
+	if (!mixer) {
+		pr_err("mixer not setup correctly\n");
+		return -ENODEV;
+	}
+
+	pr_debug("start ctl=%u\n", ctl->num);
+
+	for (i = 0; i < MAX_SESSIONS; i++) {
+		ctx = &mdss_mdp_video_ctx_list[i];
+		if (ctx->ref_cnt == 0) {
+			ctx->ref_cnt++;
+			break;
+		}
+	}
+	if (i == MAX_SESSIONS) {
+		pr_err("too many sessions\n");
+		return -ENOMEM;
+	}
+	ctl->priv_data = ctx;
+	ctx->ctl_num = ctl->num;
+	ctx->pp_num = mixer->num;
+	init_completion(&ctx->pp_comp);
+	init_completion(&ctx->vsync_comp);
+
+	itp.width = pinfo->xres + pinfo->lcdc.xres_pad;
+	itp.height = pinfo->yres + pinfo->lcdc.yres_pad;
+	itp.border_clr = pinfo->lcdc.border_clr;
+	itp.underflow_clr = pinfo->lcdc.underflow_clr;
+	itp.hsync_skew = pinfo->lcdc.hsync_skew;
+
+	itp.xres = fbi->var.xres;
+	itp.yres = fbi->var.yres;
+	itp.h_back_porch = fbi->var.left_margin;
+	itp.h_front_porch = fbi->var.right_margin;
+	itp.v_back_porch = fbi->var.upper_margin;
+	itp.v_front_porch = fbi->var.lower_margin;
+	itp.hsync_pulse_width = fbi->var.hsync_len;
+	itp.vsync_pulse_width = fbi->var.vsync_len;
+
+	if (mdss_mdp_video_timegen_setup(ctl, &itp)) {
+		pr_err("unable to get timing parameters\n");
+		return -EINVAL;
+	}
+
+	ctl->stop_fnc = mdss_mdp_video_stop;
+	ctl->prepare_fnc = mdss_mdp_video_prepare;
+	ctl->display_fnc = mdss_mdp_video_display;
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index 99d4b4c..c1bc58a 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -15,6 +15,7 @@
 
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
+#include "mdss_mdp_rotator.h"
 
 #define ROT_BLK_SIZE	128
 
@@ -193,6 +194,35 @@
 	return 0;
 }
 
+static int mdss_mdp_writeback_rot_setup(struct mdss_mdp_ctl *ctl,
+					struct mdss_mdp_writeback_ctx *ctx,
+					struct mdss_mdp_rotator_session *rot)
+{
+	pr_debug("rotator wb_num=%d\n", ctx->wb_num);
+
+	ctx->opmode = BIT(6); /* ROT EN */
+	if (ROT_BLK_SIZE == 128)
+		ctx->opmode |= BIT(4); /* block size 128 */
+
+	ctx->opmode |= rot->bwc_mode;
+
+	ctx->width = rot->src_rect.w;
+	ctx->height = rot->src_rect.h;
+
+	ctx->format = rot->format;
+
+	ctx->rot90 = !!(rot->rotations & MDP_ROT_90);
+	if (ctx->rot90) {
+		ctx->opmode |= BIT(5); /* ROT 90 */
+		swap(ctx->width, ctx->height);
+	}
+
+	if (mdss_mdp_writeback_format_setup(ctx))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int mdss_mdp_writeback_stop(struct mdss_mdp_ctl *ctl)
 {
 	struct mdss_mdp_writeback_ctx *ctx;
@@ -208,6 +238,27 @@
 	return 0;
 }
 
+static int mdss_mdp_writeback_prepare(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_writeback_ctx *ctx;
+	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR) {
+		struct mdss_mdp_rotator_session *rot;
+		rot = (struct mdss_mdp_rotator_session *) arg;
+		if (!rot) {
+			pr_err("unable to retrieve rot session ctl=%d\n",
+			       ctl->num);
+			return -ENODEV;
+		}
+		mdss_mdp_writeback_rot_setup(ctl, ctx, rot);
+	}
+
+	return 0;
+}
+
 static void mdss_mdp_writeback_intr_done(void *arg)
 {
 	struct mdss_mdp_writeback_ctx *ctx;
@@ -229,6 +280,7 @@
 static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
 {
 	struct mdss_mdp_writeback_ctx *ctx;
+	struct mdss_mdp_rotator_session *rot = NULL;
 	struct mdss_mdp_data *wb_data;
 	u32 flush_bits;
 	int ret;
@@ -237,7 +289,17 @@
 	if (!ctx)
 		return -ENODEV;
 
-	wb_data = &ctx->wb_data;
+	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR) {
+		rot = (struct mdss_mdp_rotator_session *) arg;
+		if (!rot) {
+			pr_err("unable to retrieve rot session ctl=%d\n",
+			       ctl->num);
+			return -ENODEV;
+		}
+		wb_data = rot->dst_data;
+	} else {
+		wb_data = &ctx->wb_data;
+	}
 
 	ret = mdss_mdp_writeback_addr_setup(ctx, wb_data);
 	if (ret) {
@@ -257,8 +319,16 @@
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
 	wmb();
 
-	pr_debug("writeback kickoff wb_num=%d\n", ctx->wb_num);
-	wait_for_completion_interruptible(&ctx->comp);
+	if (rot) {
+		pr_debug("rotator kickoff wb_num=%d\n", ctx->wb_num);
+		mutex_lock(&rot->lock);
+		rot->comp = &ctx->comp;
+		rot->busy = 1;
+		mutex_unlock(&rot->lock);
+	} else {
+		pr_debug("writeback kickoff wb_num=%d\n", ctx->wb_num);
+		wait_for_completion_interruptible(&ctx->comp);
+	}
 
 	return 0;
 }
@@ -290,6 +360,8 @@
 
 	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_WFD)
 		ret = mdss_mdp_writeback_wfd_setup(ctl, ctx);
+	else if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
+		ctl->prepare_fnc = mdss_mdp_writeback_prepare;
 	else /* line mode not supported */
 		return -ENOSYS;
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index bd4a974..f1b158d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -21,6 +21,7 @@
 
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
+#include "mdss_mdp_rotator.h"
 
 #define CHECK_BOUNDS(offset, size, max_size) \
 	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
@@ -85,28 +86,30 @@
 		dst_h = req->dst_rect.h;
 	}
 
-	if ((req->src_rect.w * MAX_UPSCALE_RATIO) < dst_w) {
-		pr_err("too much upscaling Width %d->%d\n",
-		       req->src_rect.w, req->dst_rect.w);
-		return -EINVAL;
-	}
+	if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
+		if ((req->src_rect.w * MAX_UPSCALE_RATIO) < dst_w) {
+			pr_err("too much upscaling Width %d->%d\n",
+			       req->src_rect.w, req->dst_rect.w);
+			return -EINVAL;
+		}
 
-	if ((req->src_rect.h * MAX_UPSCALE_RATIO) < dst_h) {
-		pr_err("too much upscaling. Height %d->%d\n",
-		       req->src_rect.h, req->dst_rect.h);
-		return -EINVAL;
-	}
+		if ((req->src_rect.h * MAX_UPSCALE_RATIO) < dst_h) {
+			pr_err("too much upscaling. Height %d->%d\n",
+			       req->src_rect.h, req->dst_rect.h);
+			return -EINVAL;
+		}
 
-	if (req->src_rect.w > (dst_w * MAX_DOWNSCALE_RATIO)) {
-		pr_err("too much downscaling. Width %d->%d\n",
-		       req->src_rect.w, req->dst_rect.w);
-		return -EINVAL;
-	}
+		if (req->src_rect.w > (dst_w * MAX_DOWNSCALE_RATIO)) {
+			pr_err("too much downscaling. Width %d->%d\n",
+			       req->src_rect.w, req->dst_rect.w);
+			return -EINVAL;
+		}
 
-	if (req->src_rect.h > (dst_h * MAX_DOWNSCALE_RATIO)) {
-		pr_err("too much downscaling. Height %d->%d\n",
-		       req->src_rect.h, req->dst_rect.h);
-		return -EINVAL;
+		if (req->src_rect.h > (dst_h * MAX_DOWNSCALE_RATIO)) {
+			pr_err("too much downscaling. Height %d->%d\n",
+			       req->src_rect.h, req->dst_rect.h);
+			return -EINVAL;
+		}
 	}
 
 	if (fmt->is_yuv) {
@@ -141,6 +144,61 @@
 	return 0;
 }
 
+static int mdss_mdp_overlay_rotator_setup(struct msm_fb_data_type *mfd,
+					  struct mdp_overlay *req)
+{
+	struct mdss_mdp_rotator_session *rot;
+	struct mdss_mdp_format_params *fmt;
+	int ret = 0;
+
+	pr_debug("rot ctl=%u req id=%x\n", mfd->ctl->num, req->id);
+
+	fmt = mdss_mdp_get_format_params(req->src.format);
+	if (!fmt) {
+		pr_err("invalid rot format %d\n", req->src.format);
+		return -EINVAL;
+	}
+
+	ret = mdss_mdp_overlay_req_check(mfd, req, fmt);
+	if (ret)
+		return ret;
+
+	if (req->id == MSMFB_NEW_REQUEST) {
+		rot = mdss_mdp_rotator_session_alloc();
+
+		if (!rot) {
+			pr_err("unable to allocate rotator session\n");
+			return -ENOMEM;
+		}
+	} else if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
+		rot = mdss_mdp_rotator_session_get(req->id);
+
+		if (!rot) {
+			pr_err("rotator session=%x not found\n", req->id);
+			return -ENODEV;
+		}
+	} else {
+		pr_err("invalid rotator session id=%x\n", req->id);
+		return -EINVAL;
+	}
+
+	rot->rotations = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD);
+
+	rot->format = fmt->format;
+	rot->img_width = req->src.width;
+	rot->img_height = req->src.height;
+	rot->src_rect.x = req->src_rect.x;
+	rot->src_rect.y = req->src_rect.y;
+	rot->src_rect.w = req->src_rect.w;
+	rot->src_rect.h = req->src_rect.h;
+
+	rot->params_changed++;
+
+	req->id = rot->session_id;
+
+	return ret;
+}
+
 static int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
 				       struct mdp_overlay *req,
 				       struct mdss_mdp_pipe **ppipe)
@@ -256,14 +314,19 @@
 				struct mdp_overlay *req)
 {
 	int ret;
-	struct mdss_mdp_pipe *pipe;
 
-	/* userspace zorder start with stage 0 */
-	req->z_order += MDSS_MDP_STAGE_0;
+	if (req->flags & MDSS_MDP_ROT_ONLY) {
+		ret = mdss_mdp_overlay_rotator_setup(mfd, req);
+	} else {
+		struct mdss_mdp_pipe *pipe;
 
-	ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe);
+		/* userspace zorder start with stage 0 */
+		req->z_order += MDSS_MDP_STAGE_0;
 
-	req->z_order -= MDSS_MDP_STAGE_0;
+		ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe);
+
+		req->z_order -= MDSS_MDP_STAGE_0;
+	}
 
 	return ret;
 }
@@ -280,6 +343,19 @@
 
 	pr_debug("unset ndx=%x\n", ndx);
 
+	if (ndx & MDSS_MDP_ROT_SESSION_MASK) {
+		struct mdss_mdp_rotator_session *rot;
+		rot = mdss_mdp_rotator_session_get(ndx);
+		if (rot) {
+			mdss_mdp_rotator_finish(rot);
+		} else {
+			pr_warn("unknown session id=%x\n", ndx);
+			ret = -ENODEV;
+		}
+
+		return ret;
+	}
+
 	for (i = 0; unset_ndx != ndx && i < MDSS_MDP_MAX_SSPP; i++) {
 		pipe_ndx = BIT(i);
 		if (pipe_ndx & ndx) {
@@ -319,6 +395,28 @@
 	return ret;
 }
 
+static int mdss_mdp_overlay_rotate(struct msmfb_overlay_data *req,
+				   struct mdss_mdp_data *src_data,
+				   struct mdss_mdp_data *dst_data)
+{
+	struct mdss_mdp_rotator_session *rot;
+	int ret;
+
+	rot = mdss_mdp_rotator_session_get(req->id);
+	if (!rot) {
+		pr_err("invalid session id=%x\n", req->id);
+		return -ENODEV;
+	}
+
+	ret = mdss_mdp_rotator_queue(rot, src_data, dst_data);
+	if (ret) {
+		pr_err("rotator queue error session id=%x\n", req->id);
+		return ret;
+	}
+
+	return 0;
+}
+
 static int mdss_mdp_overlay_queue(struct msmfb_overlay_data *req,
 				  struct mdss_mdp_data *src_data)
 {
@@ -364,7 +462,23 @@
 	}
 	src_data.num_planes = 1;
 
-	ret = mdss_mdp_overlay_queue(req, &src_data);
+	if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
+		struct mdss_mdp_data dst_data;
+		memset(&dst_data, 0, sizeof(dst_data));
+
+		mdss_mdp_get_img(mfd->iclient, &req->dst_data, &dst_data.p[0]);
+		if (dst_data.p[0].len == 0) {
+			pr_err("dst data pmem error\n");
+			return -ENOMEM;
+		}
+		dst_data.num_planes = 1;
+
+		ret = mdss_mdp_overlay_rotate(req, &src_data, &dst_data);
+
+		mdss_mdp_put_img(&dst_data.p[0]);
+	} else {
+		ret = mdss_mdp_overlay_queue(req, &src_data);
+	}
 
 	mdss_mdp_put_img(&src_data.p[0]);
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index b52cff5..52f4324 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -581,6 +581,11 @@
 			  (1 << 18) |	/* SRC_DATA=YCBCR */
 			  (1 << 17);	/* CSC_1_EN */
 
+	/* only need to program once */
+	if (pipe->play_cnt == 0) {
+		mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num, 1,
+				   MDSS_MDP_CSC_YUV2RGB);
+	}
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE, opmode);
 
 	return 0;
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
new file mode 100644
index 0000000..db840a8
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -0,0 +1,175 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include "mdss_mdp.h"
+
+struct mdp_csc_cfg mdp_csc_convert[MDSS_MDP_MAX_CSC] = {
+	[MDSS_MDP_CSC_RGB2RGB] = {
+		0,
+		{
+			0x0200, 0x0000, 0x0000,
+			0x0000, 0x0200, 0x0000,
+			0x0000, 0x0000, 0x0200,
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_YUV2RGB] = {
+		0,
+		{
+			0x0254, 0x0000, 0x0331,
+			0x0254, 0xff37, 0xfe60,
+			0x0254, 0x0409, 0x0000,
+		},
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+	[MDSS_MDP_CSC_RGB2YUV] = {
+		0,
+		{
+			0x0083, 0x0102, 0x0032,
+			0x1fb5, 0x1f6c, 0x00e1,
+			0x00e1, 0x1f45, 0x1fdc
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0010, 0x0080, 0x0080,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0010, 0x00eb, 0x0010, 0x00f0, 0x0010, 0x00f0,},
+	},
+	[MDSS_MDP_CSC_YUV2YUV] = {
+		0,
+		{
+			0x0200, 0x0000, 0x0000,
+			0x0000, 0x0200, 0x0000,
+			0x0000, 0x0000, 0x0200,
+		},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0x0, 0x0,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+		{ 0x0, 0xff, 0x0, 0xff, 0x0, 0xff,},
+	},
+};
+
+#define CSC_MV_OFF	0x0
+#define CSC_BV_OFF	0x2C
+#define CSC_LV_OFF	0x14
+#define CSC_POST_OFF	0xC
+
+static int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, u32 tbl_idx,
+				   struct mdp_csc_cfg *data)
+{
+	int i, ret = 0;
+	u32 *off, base, val = 0;
+
+	if (data == NULL) {
+		pr_err("no csc matrix specified\n");
+		return -EINVAL;
+	}
+
+	switch (block) {
+	case MDSS_MDP_BLOCK_SSPP:
+		if (blk_idx < MDSS_MDP_SSPP_RGB0) {
+			base = MDSS_MDP_REG_SSPP_OFFSET(blk_idx);
+			if (tbl_idx == 1)
+				base += MDSS_MDP_REG_VIG_CSC_1_BASE;
+			else
+				base += MDSS_MDP_REG_VIG_CSC_0_BASE;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case MDSS_MDP_BLOCK_WB:
+		if (blk_idx < MDSS_MDP_MAX_WRITEBACK) {
+			base = MDSS_MDP_REG_WB_OFFSET(blk_idx) +
+			       MDSS_MDP_REG_WB_CSC_BASE;
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	if (ret != 0) {
+		pr_err("unsupported block id for csc\n");
+		return ret;
+	}
+
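+	/* pack two 16-bit matrix coefficients per 32-bit register */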
+	off = (u32 *) (base + CSC_MV_OFF);
+	for (i = 0; i < 9; i++) {
+		if (i & 0x1) {
+			val |= data->csc_mv[i] << 16;
+			MDSS_MDP_REG_WRITE(off, val);
+			off++;
+		} else {
+			val = data->csc_mv[i];
+		}
+	}
+	MDSS_MDP_REG_WRITE(off, val); /* COEFF_33 */
+
+	off = (u32 *) (base + CSC_BV_OFF);
+	for (i = 0; i < 3; i++) {
+		MDSS_MDP_REG_WRITE(off, data->csc_pre_bv[i]);
+		MDSS_MDP_REG_WRITE((u32 *)(((u32)off) + CSC_POST_OFF),
+				   data->csc_post_bv[i]);
+		off++;
+	}
+
+	off = (u32 *) (base + CSC_LV_OFF);
+	for (i = 0; i < 6; i += 2) {
+		val = (data->csc_pre_lv[i] << 8) | data->csc_pre_lv[i+1];
+		MDSS_MDP_REG_WRITE(off, val);
+
+		val = (data->csc_post_lv[i] << 8) | data->csc_post_lv[i+1];
+		MDSS_MDP_REG_WRITE((u32 *)(((u32)off) + CSC_POST_OFF), val);
+		off++;
+	}
+
+	return ret;
+}
+
+int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 tbl_idx, u32 csc_type)
+{
+	struct mdp_csc_cfg *data;
+
+	if (csc_type >= MDSS_MDP_MAX_CSC) {
+		pr_err("invalid csc matrix index %d\n", csc_type);
+		return -ERANGE;
+	}
+
+	pr_debug("csc type=%d blk=%d idx=%d tbl=%d\n", csc_type,
+		 block, blk_idx, tbl_idx);
+
+	data = &mdp_csc_convert[csc_type];
+	return mdss_mdp_csc_setup_data(block, blk_idx, tbl_idx, data);
+}
+
+int mdss_mdp_dspp_setup(struct mdss_mdp_ctl *ctl, struct mdss_mdp_mixer *mixer)
+{
+	int dspp_num;
+
+	if (!ctl || !mixer)
+		return -EINVAL;
+
+	dspp_num = mixer->num;
+
+	ctl->flush_bits |= BIT(13 + dspp_num);	/* DSPP */
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
new file mode 100644
index 0000000..628b7f5
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -0,0 +1,228 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "mdss_mdp.h"
+#include "mdss_mdp_rotator.h"
+
+#define MAX_ROTATOR_SESSIONS 8
+
+static DEFINE_MUTEX(rotator_lock);
+static struct mdss_mdp_rotator_session rotator_session[MAX_ROTATOR_SESSIONS];
+static LIST_HEAD(rotator_queue);
+
+struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_alloc(void)
+{
+	struct mdss_mdp_rotator_session *rot;
+	int i;
+
+	mutex_lock(&rotator_lock);
+	for (i = 0; i < MAX_ROTATOR_SESSIONS; i++) {
+		rot = &rotator_session[i];
+		if (rot->ref_cnt == 0) {
+			rot->ref_cnt++;
+			rot->session_id = i | MDSS_MDP_ROT_SESSION_MASK;
+			mutex_init(&rot->lock);
+			break;
+		}
+	}
+	mutex_unlock(&rotator_lock);
+	if (i == MAX_ROTATOR_SESSIONS) {
+		pr_err("max rotator sessions reached\n");
+		return NULL;
+	}
+
+	return rot;
+}
+
+struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_get(u32 session_id)
+{
+	struct mdss_mdp_rotator_session *rot;
+	u32 ndx;
+
+	ndx = session_id & ~MDSS_MDP_ROT_SESSION_MASK;
+	if (ndx < MAX_ROTATOR_SESSIONS) {
+		rot = &rotator_session[ndx];
+		if (rot->ref_cnt && rot->session_id == session_id)
+			return rot;
+	}
+	return NULL;
+}
+
+static int mdss_mdp_rotator_busy_wait(struct mdss_mdp_rotator_session *rot)
+{
+	mutex_lock(&rot->lock);
+	if (rot->busy) {
+		pr_debug("waiting for rot=%d to complete\n", rot->pipe->num);
+		wait_for_completion_interruptible(rot->comp);
+		rot->busy = 0;
+	}
+	mutex_unlock(&rot->lock);
+
+	return 0;
+}
+
+static struct mdss_mdp_pipe *mdss_mdp_rotator_pipe_alloc(void)
+{
+	struct mdss_mdp_mixer *mixer;
+	struct mdss_mdp_pipe *pipe = NULL;
+	int pnum;
+
+	mixer = mdss_mdp_wb_mixer_alloc(1);
+	if (!mixer)
+		return NULL;
+
+	switch (mixer->num) {
+	case MDSS_MDP_LAYERMIXER3:
+		pnum = MDSS_MDP_SSPP_DMA0;
+		break;
+	case MDSS_MDP_LAYERMIXER4:
+		pnum = MDSS_MDP_SSPP_DMA1;
+		break;
+	default:
+		goto done;
+	}
+
+	pipe = mdss_mdp_pipe_alloc_pnum(pnum);
+
+	if (pipe)
+		pipe->mixer = mixer;
+done:
+	if (!pipe)
+		mdss_mdp_wb_mixer_destroy(mixer);
+
+	return pipe;
+}
+
+static int mdss_mdp_rotator_pipe_dequeue(struct mdss_mdp_rotator_session *rot)
+{
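+	/*
+	 * reuse this session's pipe if it has one; otherwise allocate a
+	 * new rotator pipe or wait for and take over the oldest queued one
+	 */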
+	if (rot->pipe) {
+		pr_debug("reusing existing session=%d\n", rot->pipe->num);
+		mdss_mdp_rotator_busy_wait(rot);
+		list_move_tail(&rot->head, &rotator_queue);
+	} else {
+		struct mdss_mdp_rotator_session *tmp;
+
+		rot->params_changed++;
+		rot->pipe = mdss_mdp_rotator_pipe_alloc();
+		if (rot->pipe) {
+			pr_debug("use new rotator pipe=%d\n", rot->pipe->num);
+
+			rot->pipe->mixer_stage = MDSS_MDP_STAGE_UNUSED;
+			list_add_tail(&rot->head, &rotator_queue);
+		} else if (!list_empty(&rotator_queue)) {
+			tmp = list_first_entry(&rotator_queue,
+					       struct mdss_mdp_rotator_session,
+					       head);
+
+			pr_debug("wait for rotator pipe=%d\n", tmp->pipe->num);
+			mdss_mdp_rotator_busy_wait(tmp);
+			rot->pipe = tmp->pipe;
+			tmp->pipe = NULL;
+
+			list_del(&tmp->head);
+			list_add_tail(&rot->head, &rotator_queue);
+		} else {
+			pr_err("no available rotator pipes\n");
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
+			   struct mdss_mdp_data *src_data,
+			   struct mdss_mdp_data *dst_data)
+{
+	struct mdss_mdp_pipe *rot_pipe;
+	struct mdss_mdp_ctl *ctl;
+	int ret;
+
+	if (!rot)
+		return -ENODEV;
+
+	mutex_lock(&rotator_lock);
+	ret = mdss_mdp_rotator_pipe_dequeue(rot);
+	if (ret) {
+		pr_err("unable to acquire rotator\n");
+		goto done;
+	}
+
+	rot_pipe = rot->pipe;
+
+	pr_debug("queue rotator pnum=%d\n", rot_pipe->num);
+
+	ctl = rot_pipe->mixer->ctl;
+
+	if (rot->params_changed) {
+		rot->params_changed = 0;
+		rot_pipe->flags = rot->rotations;
+		rot_pipe->src_fmt = mdss_mdp_get_format_params(rot->format);
+		rot_pipe->img_width = rot->img_width;
+		rot_pipe->img_height = rot->img_height;
+		rot_pipe->src = rot->src_rect;
+		rot_pipe->bwc_mode = rot->bwc_mode;
+		rot_pipe->params_changed++;
+	}
+
+	rot->dst_data = dst_data;
+
+	ret = mdss_mdp_pipe_queue_data(rot->pipe, src_data);
+	if (ret) {
+		pr_err("unable to queue rot data\n");
+		goto done;
+	}
+
+	ret = mdss_mdp_display_commit(ctl, rot);
+
+done:
+	mutex_unlock(&rotator_lock);
+
+	if (!rot->no_wait)
+		mdss_mdp_rotator_busy_wait(rot);
+
+	return ret;
+}
+
+int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
+{
+	struct mdss_mdp_pipe *rot_pipe;
+
+	if (!rot)
+		return -ENODEV;
+
+	pr_debug("finish rot id=%x\n", rot->session_id);
+
+	mutex_lock(&rotator_lock);
+	rot_pipe = rot->pipe;
+	if (rot_pipe) {
+		mdss_mdp_rotator_busy_wait(rot);
+		list_del(&rot->head);
+	}
+	memset(rot, 0, sizeof(*rot));
+	if (rot_pipe) {
+		struct mdss_mdp_mixer *mixer = rot_pipe->mixer;
+		mdss_mdp_pipe_destroy(rot_pipe);
+		mdss_mdp_wb_mixer_destroy(mixer);
+	}
+	mutex_unlock(&rotator_lock);
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
new file mode 100644
index 0000000..8940c46
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDSS_MDP_ROTATOR_H
+#define MDSS_MDP_ROTATOR_H
+
+#include <linux/types.h>
+
+#include "mdss_mdp.h"
+
+#define MDSS_MDP_ROT_SESSION_MASK	0x80000000
+
+struct mdss_mdp_rotator_session {
+	u32 session_id;
+	u32 ref_cnt;
+	u32 params_changed;
+
+	u32 format;
+	u32 rotations;
+
+	u16 img_width;
+	u16 img_height;
+	struct mdss_mdp_img_rect src_rect;
+
+	u32 bwc_mode;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_data *dst_data;
+
+	struct mutex lock;
+	u8 busy;
+	u8 no_wait;
+	struct completion *comp;
+
+	struct list_head head;
+};
+
+struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_alloc(void);
+struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_get(u32 session_id);
+
+int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
+			   struct mdss_mdp_data *src_data,
+			   struct mdss_mdp_data *dst_data);
+int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot);
+int mdss_mdp_rotator_ctl_busy_wait(struct mdss_mdp_ctl *ctl);
+
+#endif /* MDSS_MDP_ROTATOR_H */
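
The new header above is the entire rotator session API. Below is a minimal caller sketch (hypothetical, not part of this patch); the mdss_mdp_img_rect members (x/y/w/h) and the MDP_RGB_888 / MDP_ROT_90 values are assumed from msm_mdp.h.

#include "mdss_mdp.h"
#include "mdss_mdp_rotator.h"

/* Hypothetical caller, not part of this patch: rotate one frame through
 * a writeback rotator session using the API declared above. */
static int example_rotate_one_frame(struct mdss_mdp_data *src,
				    struct mdss_mdp_data *dst)
{
	struct mdss_mdp_rotator_session *rot;
	int ret;

	rot = mdss_mdp_rotator_session_alloc();
	if (!rot)
		return -ENOMEM;

	/* describe the source surface and the requested operation */
	rot->format = MDP_RGB_888;	/* illustrative source format */
	rot->img_width = 640;
	rot->img_height = 480;
	rot->src_rect.x = 0;
	rot->src_rect.y = 0;
	rot->src_rect.w = 640;
	rot->src_rect.h = 480;
	rot->rotations = MDP_ROT_90;	/* illustrative rotation flag */
	rot->params_changed++;		/* force pipe reprogramming */

	/* queue one frame; waits for completion unless no_wait is set */
	ret = mdss_mdp_rotator_queue(rot, src, dst);

	/* tear down the pipe and writeback mixer backing the session */
	mdss_mdp_rotator_finish(rot);

	return ret;
}

The queue call picks or reuses a DMA pipe internally (mdss_mdp_rotator_pipe_dequeue above), so callers never touch rot->pipe directly.
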
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 25c9ac4..2e86806 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -10,7 +10,6 @@
  * GNU General Public License for more details.
  *
  */
-
 #define pr_fmt(fmt)	"%s: " fmt, __func__
 
 #include <linux/android_pmem.h>
@@ -53,7 +52,7 @@
 	int index = -1;
 	switch (intr_type) {
 	case MDSS_MDP_IRQ_INTF_VSYNC:
-		index = MDP_INTR_VSYNC_INTF_0 + intf_num;
+		index = MDP_INTR_VSYNC_INTF_0 + (intf_num - MDSS_MDP_INTF0);
 		break;
 	case MDSS_MDP_IRQ_PING_PONG_COMP:
 		index = MDP_INTR_PING_PONG_0 + intf_num;
@@ -116,11 +115,12 @@
 
 
 	isr = MDSS_MDP_REG_READ(MDSS_MDP_REG_INTR_STATUS);
+
+	pr_debug("isr=%x\n", isr);
+
 	if (isr == 0)
 		goto done;
 
-	pr_devel("isr=%x\n", isr);
-
 	mask = MDSS_MDP_REG_READ(MDSS_MDP_REG_INTR_EN);
 	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_CLEAR, isr);
 
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index 8b6351f..19728fe 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -120,6 +120,7 @@
 	NUM_HSIC_PARAM,
 };
 
+#define MDSS_MDP_ROT_ONLY		0x80
 #define MDSS_MDP_RIGHT_MIXER		0x100
 
 /* mdp_blit_req flag values */
@@ -249,6 +250,7 @@
 	uint32_t version_key;
 	struct msmfb_data plane1_data;
 	struct msmfb_data plane2_data;
+	struct msmfb_data dst_data;
 };
 
 struct msmfb_img {
diff --git a/include/linux/msm_thermal.h b/include/linux/msm_thermal.h
new file mode 100644
index 0000000..fe9be89
--- /dev/null
+++ b/include/linux/msm_thermal.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_THERMAL_H
+#define __MSM_THERMAL_H
+
+struct msm_thermal_data {
+	uint32_t sensor_id;
+	uint32_t poll_ms;
+	uint32_t limit_temp;
+	uint32_t temp_hysteresis;
+	uint32_t limit_freq;
+};
+
+#ifdef CONFIG_THERMAL_MONITOR
+extern int msm_thermal_init(struct msm_thermal_data *pdata);
+#else
+static inline int msm_thermal_init(struct msm_thermal_data *pdata)
+{
+	return -ENOSYS;
+}
+#endif
+
+#endif /*__MSM_THERMAL_H*/
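
The struct and init call above are the whole interface a board file needs. A minimal board-side sketch follows (hypothetical names and values, not taken from any shipping target).

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/msm_thermal.h>

/* Hypothetical board data: sensor, poll interval and limits below are
 * illustrative only. */
static struct msm_thermal_data example_thermal_pdata = {
	.sensor_id	 = 0,		/* TSENS sensor to poll */
	.poll_ms	 = 1000,	/* sample once per second */
	.limit_temp	 = 60,		/* start mitigation at this reading */
	.temp_hysteresis = 10,		/* stop mitigation 10 units lower */
	.limit_freq	 = 1026000,	/* frequency cap while mitigating */
};

static void __init example_board_thermal_init(void)
{
	/* the inline stub returns -ENOSYS when CONFIG_THERMAL_MONITOR is off */
	if (msm_thermal_init(&example_thermal_pdata))
		pr_err("msm_thermal_init failed\n");
}

Because the disabled configuration falls back to the static inline stub, board code can call msm_thermal_init() unconditionally.
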
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index 5eb1845..9da1999 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -192,6 +192,8 @@
  * @enable_lpm_on_suspend: Enable the USB core to go into Low
  *              Power Mode, when USB bus is suspended but cable
  *              is connected.
+ * @core_clk_always_on_workaround: Don't disable core_clk when
+ *              USB enters LPM.
  * @bus_scale_table: parameters for bus bandwidth requirements
  */
 struct msm_otg_platform_data {
@@ -208,6 +210,7 @@
 	bool disable_reset_on_disconnect;
 	bool enable_dcd;
 	bool enable_lpm_on_dev_suspend;
+	bool core_clk_always_on_workaround;
 	struct msm_bus_scale_pdata *bus_scale_table;
 };
 
@@ -366,8 +369,14 @@
 	unsigned int dock_connect_irq;
 };
 
+/**
+ * struct msm_hsic_peripheral_platform_data - HSIC peripheral
+ * platform data.
+ * @core_clk_always_on_workaround: Don't disable core_clk when
+ *                                 HSIC enters LPM.
+ */
 struct msm_hsic_peripheral_platform_data {
-	bool keep_core_clk_on_suspend_workaround;
+	bool core_clk_always_on_workaround;
 };
 
 struct usb_bam_pipe_connect {
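
With the rename above, the OTG and HSIC-peripheral platform data spell the workaround flag identically. A hypothetical board hookup (struct names here are illustrative; only the flag itself comes from this patch) is simply:

#include <linux/types.h>
#include <linux/usb/msm_hsusb.h>

/* Illustrative only: boards declare and register these structs
 * themselves alongside their other platform data. */
static struct msm_otg_platform_data example_otg_pdata = {
	.core_clk_always_on_workaround = true,
};

static struct msm_hsic_peripheral_platform_data example_hsic_pdata = {
	.core_clk_always_on_workaround = true,
};
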
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index 320ac8b..3308243 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -446,10 +446,12 @@
 #define CMD_VFE_BUFFER_RELEASE 51
 #define CMD_VFE_PROCESS_IRQ 52
 
-#define CMD_AXI_CFG_PRIM		0xF1
-#define CMD_AXI_CFG_PRIM_ALL_CHNLS	0xF2
-#define CMD_AXI_CFG_SEC			0xF4
-#define CMD_AXI_CFG_SEC_ALL_CHNLS	0xF8
+#define CMD_AXI_CFG_PRIM		0xc1
+#define CMD_AXI_CFG_PRIM_ALL_CHNLS	0xc2
+#define CMD_AXI_CFG_SEC			0xc4
+#define CMD_AXI_CFG_SEC_ALL_CHNLS	0xc8
+#define CMD_AXI_CFG_TERT1		0xd0
+
 
 #define CMD_AXI_START  0xE1
 #define CMD_AXI_STOP   0xE2
@@ -549,10 +551,11 @@
 #define OUTPUT_ZSL_ALL_CHNLS 10
 #define LAST_AXI_OUTPUT_MODE_ENUM = OUTPUT_ZSL_ALL_CHNLS
 
-#define OUTPUT_PRIM		0xF1
-#define OUTPUT_PRIM_ALL_CHNLS	0xF2
-#define OUTPUT_SEC		0xF4
-#define OUTPUT_SEC_ALL_CHNLS	0xF8
+#define OUTPUT_PRIM		0xC1
+#define OUTPUT_PRIM_ALL_CHNLS	0xC2
+#define OUTPUT_SEC		0xC4
+#define OUTPUT_SEC_ALL_CHNLS	0xC8
+#define OUTPUT_TERT1		0xD0
 
 
 #define MSM_FRAME_PREV_1	0
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index 333d0df..93f6c8b 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -59,6 +59,8 @@
 #define MSG_ID_OUTPUT_PRIMARY           40
 #define MSG_ID_OUTPUT_SECONDARY         41
 #define MSG_ID_STATS_COMPOSITE          42
+#define MSG_ID_OUTPUT_TERTIARY1         43
+
 
 /* ISP command IDs */
 #define VFE_CMD_DUMMY_0                                 0
@@ -326,6 +328,9 @@
 #define VFE_OUTPUTS_RAW			BIT(8)
 #define VFE_OUTPUTS_JPEG_AND_THUMB	BIT(9)
 #define VFE_OUTPUTS_THUMB_AND_JPEG	BIT(10)
+#define VFE_OUTPUTS_RDI0	BIT(11)
+
+
 
 struct msm_frame_info {
 	uint32_t image_mode;
diff --git a/include/sound/apr_audio.h b/include/sound/apr_audio.h
index ec8d73e..431dedf 100644
--- a/include/sound/apr_audio.h
+++ b/include/sound/apr_audio.h
@@ -1176,6 +1176,7 @@
 } __attribute__((packed));
 
 #define ADM_CMD_CONNECT_AFE_PORT 0x00010320
+#define ADM_CMD_DISCONNECT_AFE_PORT 0x00010321
 
 struct adm_cmd_connect_afe_port {
 	struct apr_hdr     hdr;
diff --git a/include/sound/q6adm.h b/include/sound/q6adm.h
index 29fb606..56594d4 100644
--- a/include/sound/q6adm.h
+++ b/include/sound/q6adm.h
@@ -41,6 +41,7 @@
 				unsigned int *port_id, int copp_id);
 
 int adm_connect_afe_port(int mode, int session_id, int port_id);
+int adm_disconnect_afe_port(int mode, int session_id, int port_id);
 
 #ifdef CONFIG_RTAC
 int adm_get_copp_id(int port_id);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index aaccc5f..372c60d 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -234,6 +234,7 @@
 enum target_sc_flags_table {
 	TARGET_SCF_BIDI_OP		= 0x01,
 	TARGET_SCF_ACK_KREF		= 0x02,
+	TARGET_SCF_UNKNOWN_SIZE		= 0x04,
 };
 
 /* fabric independent task management function values */
@@ -538,6 +539,7 @@
 	/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
 	unsigned		check_release:1;
 	unsigned		cmd_wait_set:1;
+	unsigned		unknown_data_length:1;
 	/* See se_cmd_flags_table */
 	u32			se_cmd_flags;
 	u32			se_ordered_id;
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 10c6908..f27f575 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -112,7 +112,7 @@
 void	transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
 		struct se_session *, u32, int, int, unsigned char *);
 int	transport_lookup_cmd_lun(struct se_cmd *, u32);
-int	transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
+int	target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
 void	target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
 		unsigned char *, u32, u32, int, int, int);
 int	target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
diff --git a/sound/soc/msm/msm-compr-q6.c b/sound/soc/msm/msm-compr-q6.c
index 8061b06..d4045e1 100644
--- a/sound/soc/msm/msm-compr-q6.c
+++ b/sound/soc/msm/msm-compr-q6.c
@@ -103,7 +103,6 @@
 			break;
 		} else
 			atomic_set(&prtd->pending_buffer, 0);
-
 		if (runtime->status->hw_ptr >= runtime->control->appl_ptr)
 			break;
 		buf = prtd->audio_client->port[IN].buf;
@@ -261,7 +260,8 @@
 			compr->info.codec_param.codec.bit_rate/8;
 		wma_pro_cfg.block_align = compr->info.codec_param.codec.align;
 		wma_pro_cfg.valid_bits_per_sample =
-		compr->info.codec_param.codec.options.wma.bits_per_sample;
+			compr->info.codec_param.codec
+				.options.wma.bits_per_sample;
 		wma_pro_cfg.ch_mask =
 			compr->info.codec_param.codec.options.wma.channelmask;
 		wma_pro_cfg.encode_opt =
@@ -297,7 +297,7 @@
 				SND_AUDIOCODEC_AC3_PASS_THROUGH) {
 			msm_pcm_routing_reg_psthr_stream(
 				soc_prtd->dai_link->be_id,
-				prtd->session_id, substream->stream);
+				prtd->session_id, substream->stream, 1);
 		}
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
@@ -307,6 +307,12 @@
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		if (compr->info.codec_param.codec.id ==
+				SND_AUDIOCODEC_AC3_PASS_THROUGH) {
+			msm_pcm_routing_reg_psthr_stream(
+				soc_prtd->dai_link->be_id,
+				prtd->session_id, substream->stream, 0);
+		}
 		atomic_set(&prtd->start, 0);
 		break;
 	case SNDRV_PCM_TRIGGER_SUSPEND:
@@ -452,9 +458,11 @@
 	compressed_audio.prtd = NULL;
 	q6asm_audio_client_buf_free_contiguous(dir,
 				prtd->audio_client);
-
-	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
-	SNDRV_PCM_STREAM_PLAYBACK);
+	if (compr->info.codec_param.codec.id !=
+			SND_AUDIOCODEC_AC3_PASS_THROUGH)
+		msm_pcm_routing_dereg_phy_stream(
+			soc_prtd->dai_link->be_id,
+			SNDRV_PCM_STREAM_PLAYBACK);
 	q6asm_audio_client_free(prtd->audio_client);
 	kfree(prtd);
 	return 0;
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index 8051c92..cc51a0f6 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -244,7 +244,7 @@
 }
 
 void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
-					int stream_type)
+					int stream_type, int enable)
 {
 	int i, session_type, path_type, port_type;
 	u32 mode = 0;
@@ -274,8 +274,13 @@
 		   (msm_bedais[i].active) &&
 		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
 			mode = afe_get_port_type(msm_bedais[i].port_id);
-			adm_connect_afe_port(mode, dspst_id,
+			if (enable)
+				adm_connect_afe_port(mode, dspst_id,
 					    msm_bedais[i].port_id);
+			else
+				adm_disconnect_afe_port(mode, dspst_id,
+						msm_bedais[i].port_id);
+
 			break;
 		}
 	}
diff --git a/sound/soc/msm/msm-pcm-routing.h b/sound/soc/msm/msm-pcm-routing.h
index 2f213e7..45dbf40 100644
--- a/sound/soc/msm/msm-pcm-routing.h
+++ b/sound/soc/msm/msm-pcm-routing.h
@@ -114,7 +114,7 @@
 void msm_pcm_routing_reg_phy_stream(int fedai_id, int dspst_id,
 	int stream_type);
 void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
-		int stream_type);
+		int stream_type, int enable);
 
 void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type);
 
diff --git a/sound/soc/msm/qdsp6/q6adm.c b/sound/soc/msm/qdsp6/q6adm.c
index bf6f743..bc57ef3 100644
--- a/sound/soc/msm/qdsp6/q6adm.c
+++ b/sound/soc/msm/qdsp6/q6adm.c
@@ -278,6 +278,7 @@
 			case ADM_CMD_MEMORY_UNMAP_REGIONS:
 			case ADM_CMD_MATRIX_MAP_ROUTINGS:
 			case ADM_CMD_CONNECT_AFE_PORT:
+			case ADM_CMD_DISCONNECT_AFE_PORT:
 				atomic_set(&this_adm.copp_stat[index], 1);
 				wake_up(&this_adm.wait);
 				break;
@@ -523,6 +524,76 @@
 	return ret;
 }
 
+int adm_disconnect_afe_port(int mode, int session_id, int port_id)
+{
+	struct adm_cmd_connect_afe_port	cmd;
+	int ret = 0;
+	int index;
+
+	pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
+				port_id, session_id, mode);
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+
+	if (afe_validate_port(port_id) < 0) {
+		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
+		return -ENODEV;
+	}
+	if (this_adm.apr == NULL) {
+		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+						0xFFFFFFFF, &this_adm);
+		if (this_adm.apr == NULL) {
+			pr_err("%s: Unable to register ADM\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_adm_handle(this_adm.apr);
+	}
+	index = afe_get_port_index(port_id);
+	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+
+	cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cmd.hdr.pkt_size = sizeof(cmd);
+	cmd.hdr.src_svc = APR_SVC_ADM;
+	cmd.hdr.src_domain = APR_DOMAIN_APPS;
+	cmd.hdr.src_port = port_id;
+	cmd.hdr.dest_svc = APR_SVC_ADM;
+	cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+	cmd.hdr.dest_port = port_id;
+	cmd.hdr.token = port_id;
+	cmd.hdr.opcode = ADM_CMD_DISCONNECT_AFE_PORT;
+
+	cmd.mode = mode;
+	cmd.session_id = session_id;
+	cmd.afe_port_id = port_id;
+
+	atomic_set(&this_adm.copp_stat[index], 0);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
+	if (ret < 0) {
+		pr_err("%s:ADM enable for port %d failed\n",
+					__func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback acknowledging the disconnect */
+	ret = wait_event_timeout(this_adm.wait,
+		atomic_read(&this_adm.copp_stat[index]),
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s ADM connect AFE failed for port %d\n", __func__,
+							port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	atomic_dec(&this_adm.copp_cnt[index]);
+	return 0;
+
+fail_cmd:
+
+	return ret;
+}
+
 int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
 {
 	struct adm_copp_open_command	open;