Merge "msm_fb: Handle multiple histogram reader errors during suspend/resume" into msm-3.4
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 40b3bc3..a2b7dfc 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -3,7 +3,13 @@
 Required properties :
 - compatible : should be "qcom,dwc-usb3-msm"
 - reg : offset and length of the register set in the memory map
-- interrupts: IRQ line
+- interrupts: IRQ lines used by this controller
+- interrupt-names : Required interrupt resource entries are:
+	"irq" : Interrupt for DWC3 core
+	"otg_irq" : Interrupt for DWC3 core's OTG Events
+- <supply-name>-supply: phandle to the regulator device tree node
+  Required "supply-name" examples are "SSUSB_VDDCX", "SSUSB_1p8",
+  "HSUSB_VDDCX", "HSUSB_1p8", "HSUSB_3p3".
 - qcom,dwc-usb3-msm-dbm-eps: Number of endpoints avaliable for
   the DBM (Device Bus Manager). The DBM is HW unit which is part of
   the MSM USB3.0 core (which also includes the Synopsys DesignWare
@@ -12,7 +18,13 @@
 Example MSM USB3.0 controller device node :
 	usb@f9200000 {
 		compatible = "qcom,dwc-usb3-msm";
-		reg = <0xf9200000 0xCCFF>;
-		interrupts = <0 131 0>
+		reg = <0xF9200000 0xFA000>;
+		interrupts = <0 131 0 0 179 0>;
+		interrupt-names = "irq", "otg_irq";
+		SSUSB_VDDCX-supply = <&pm8841_s2>;
+		SSUSB_1p8-supply = <&pm8941_l6>;
+		HSUSB_VDDCX-supply = <&pm8841_s2>;
+		HSUSB_1p8-supply = <&pm8941_l6>;
+		HSUSB_3p3-supply = <&pm8941_l24>;
 		qcom,dwc-usb3-msm-dbm-eps = <4>
 	};
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 5205a9c..ad6f99b 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -321,7 +321,8 @@
 	qcom,ssusb@F9200000 {
 		compatible = "qcom,dwc-usb3-msm";
 		reg = <0xF9200000 0xFA000>;
-		interrupts = <0 131 0>;
+		interrupts = <0 131 0 0 179 0>;
+		interrupt-names = "irq", "otg_irq";
 		SSUSB_VDDCX-supply = <&pm8841_s2>;
 		SSUSB_1p8-supply = <&pm8941_l6>;
 		HSUSB_VDDCX-supply = <&pm8841_s2>;
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index 6c14efa..9809bdf 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -154,6 +154,8 @@
 	struct l2_level *l2_vote;
 	struct vreg vreg[NUM_VREG];
 	unsigned int *hfpll_vdd_tbl;
+	bool regulators_initialized;
+	bool clocks_initialized;
 };
 
 static unsigned int hfpll_vdd_tbl_8960[] = {
@@ -842,6 +844,8 @@
 	[PVS_FAST] = acpu_freq_tbl_8930_fast,
 };
 
+static struct acpu_level *max_acpu_level;
+
 static unsigned long acpuclk_8960_get_rate(int cpu)
 {
 	return scalable[cpu].current_speed->khz;
@@ -1305,7 +1309,7 @@
 }
 
 /* Initialize a HFPLL at a given rate and enable it. */
-static void __init hfpll_init(struct scalable *sc, struct core_speed *tgt_s)
+static void __cpuinit hfpll_init(struct scalable *sc, struct core_speed *tgt_s)
 {
 	pr_debug("Initializing HFPLL%d\n", sc - scalable);
 
@@ -1326,69 +1330,66 @@
 }
 
 /* Voltage regulator initialization. */
-static void __init regulator_init(struct acpu_level *lvl)
+static void __cpuinit regulator_init(int cpu, struct acpu_level *lvl)
 {
-	int cpu, ret;
-	struct scalable *sc;
+	int ret;
+	struct scalable *sc = &scalable[cpu];
 	unsigned int vdd_mem, vdd_dig, vdd_core;
 
 	vdd_mem = calculate_vdd_mem(lvl);
 	vdd_dig = calculate_vdd_dig(lvl);
 
-	for_each_possible_cpu(cpu) {
-		sc = &scalable[cpu];
-
-		/* Set initial vdd_mem vote. */
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
-				sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
-				sc->vreg[VREG_MEM].max_vdd, 0);
-		if (ret) {
-			pr_err("%s initialization failed (%d)\n",
-				sc->vreg[VREG_MEM].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_MEM].cur_vdd  = vdd_mem;
-
-		/* Set initial vdd_dig vote. */
-		ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
-				sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
-				sc->vreg[VREG_DIG].max_vdd, 0);
-		if (ret) {
-			pr_err("%s initialization failed (%d)\n",
-				sc->vreg[VREG_DIG].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_DIG].cur_vdd  = vdd_dig;
-
-		/* Setup Krait CPU regulators and initial core voltage. */
-		sc->vreg[VREG_CORE].reg = regulator_get(NULL,
-					  sc->vreg[VREG_CORE].name);
-		if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
-			pr_err("regulator_get(%s) failed (%ld)\n",
-			       sc->vreg[VREG_CORE].name,
-			       PTR_ERR(sc->vreg[VREG_CORE].reg));
-			BUG();
-		}
-		vdd_core = calculate_vdd_core(lvl);
-		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
-					    sc->vreg[VREG_CORE].max_vdd);
-		if (ret) {
-			pr_err("%s initialization failed (%d)\n",
-				sc->vreg[VREG_CORE].name, ret);
-			BUG();
-		}
-		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
-		ret = regulator_enable(sc->vreg[VREG_CORE].reg);
-		if (ret) {
-			pr_err("regulator_enable(%s) failed (%d)\n",
-			       sc->vreg[VREG_CORE].name, ret);
-			BUG();
-		}
+	/* Set initial vdd_mem vote. */
+	ret = rpm_vreg_set_voltage(sc->vreg[VREG_MEM].rpm_vreg_id,
+			sc->vreg[VREG_MEM].rpm_vreg_voter, vdd_mem,
+			sc->vreg[VREG_MEM].max_vdd, 0);
+	if (ret) {
+		pr_err("%s initialization failed (%d)\n",
+			sc->vreg[VREG_MEM].name, ret);
+		BUG();
 	}
+	sc->vreg[VREG_MEM].cur_vdd  = vdd_mem;
+
+	/* Set initial vdd_dig vote. */
+	ret = rpm_vreg_set_voltage(sc->vreg[VREG_DIG].rpm_vreg_id,
+			sc->vreg[VREG_DIG].rpm_vreg_voter, vdd_dig,
+			sc->vreg[VREG_DIG].max_vdd, 0);
+	if (ret) {
+		pr_err("%s initialization failed (%d)\n",
+			sc->vreg[VREG_DIG].name, ret);
+		BUG();
+	}
+	sc->vreg[VREG_DIG].cur_vdd  = vdd_dig;
+
+	/* Setup Krait CPU regulators and initial core voltage. */
+	sc->vreg[VREG_CORE].reg = regulator_get(NULL,
+				  sc->vreg[VREG_CORE].name);
+	if (IS_ERR(sc->vreg[VREG_CORE].reg)) {
+		pr_err("regulator_get(%s) failed (%ld)\n",
+		       sc->vreg[VREG_CORE].name,
+		       PTR_ERR(sc->vreg[VREG_CORE].reg));
+		BUG();
+	}
+	vdd_core = calculate_vdd_core(lvl);
+	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
+				    sc->vreg[VREG_CORE].max_vdd);
+	if (ret) {
+		pr_err("%s initialization failed (%d)\n",
+			sc->vreg[VREG_CORE].name, ret);
+		BUG();
+	}
+	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
+	if (ret) {
+		pr_err("regulator_enable(%s) failed (%d)\n",
+		       sc->vreg[VREG_CORE].name, ret);
+		BUG();
+	}
+	sc->regulators_initialized = true;
 }
 
 /* Set initial rate for a given core. */
-static void __init init_clock_sources(struct scalable *sc,
+static void __cpuinit init_clock_sources(struct scalable *sc,
 				      struct core_speed *tgt_s)
 {
 	uint32_t regval;
@@ -1412,13 +1413,13 @@
 	sc->current_speed = tgt_s;
 }
 
-static void __init per_cpu_init(void *data)
+static void __cpuinit per_cpu_init(void *data)
 {
-	struct acpu_level *max_acpu_level = data;
 	int cpu = smp_processor_id();
 
 	init_clock_sources(&scalable[cpu], &max_acpu_level->speed);
 	scalable[cpu].l2_vote = max_acpu_level->l2_level;
+	scalable[cpu].clocks_initialized = true;
 }
 
 /* Register with bus driver. */
@@ -1502,17 +1503,23 @@
 		/* Fall through. */
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		acpuclk_8960_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
+		if (scalable[cpu].clocks_initialized)
+			acpuclk_8960_set_rate(cpu, HOT_UNPLUG_KHZ,
+					      SETRATE_HOTPLUG);
 		break;
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (WARN_ON(!prev_khz[cpu]))
-			return NOTIFY_BAD;
-		acpuclk_8960_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
+		if (scalable[cpu].clocks_initialized)
+			acpuclk_8960_set_rate(cpu, prev_khz[cpu],
+					      SETRATE_HOTPLUG);
+		if (!scalable[cpu].regulators_initialized)
+			regulator_init(cpu, max_acpu_level);
 		break;
 	case CPU_STARTING:
 	case CPU_STARTING_FROZEN:
-		if (cpu_is_krait_v1() || cpu_is_apq8064()) {
+		if (!scalable[cpu].clocks_initialized) {
+			per_cpu_init(NULL);
+		} else if (cpu_is_krait_v1() || cpu_is_apq8064()) {
 			set_sec_clk_src(&scalable[cpu], prev_sec_src[cpu]);
 			set_pri_clk_src(&scalable[cpu], prev_pri_src[cpu]);
 		}
@@ -1578,9 +1585,9 @@
 	}
 }
 
-static struct acpu_level * __init select_freq_plan(void)
+static void __init select_freq_plan(void)
 {
-	struct acpu_level *l, *max_acpu_level = NULL;
+	struct acpu_level *l;
 
 	/* Select frequency tables. */
 	if (cpu_is_msm8960()) {
@@ -1628,8 +1635,6 @@
 			max_acpu_level = l;
 	BUG_ON(!max_acpu_level);
 	pr_info("Max ACPU freq: %u KHz\n", max_acpu_level->speed.khz);
-
-	return max_acpu_level;
 }
 
 static struct acpuclk_data acpuclk_8960_data = {
@@ -1641,13 +1646,16 @@
 
 static int __init acpuclk_8960_probe(struct platform_device *pdev)
 {
-	struct acpu_level *max_acpu_level = select_freq_plan();
+	int cpu;
 
-	regulator_init(max_acpu_level);
+	select_freq_plan();
+
+	for_each_online_cpu(cpu)
+		regulator_init(cpu, max_acpu_level);
 	bus_init(max_acpu_level->l2_level->bw_level);
 
 	init_clock_sources(&scalable[L2], &max_acpu_level->l2_level->speed);
-	on_each_cpu(per_cpu_init, max_acpu_level, true);
+	on_each_cpu(per_cpu_init, NULL, true);
 
 	cpufreq_table_init();
 
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index a53f771..a33b62b 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -251,6 +251,7 @@
 	.pin_data	= &mmc_slot_pin_data[SDCC1],
 	.vreg_data	= &mmc_slot_vreg_data[SDCC1],
 	.uhs_caps	= MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50,
+	.uhs_caps2	= MMC_CAP2_HS200_1_8V_SDR,
 	.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
 	.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
 };
diff --git a/arch/arm/mach-msm/board-8930-storage.c b/arch/arm/mach-msm/board-8930-storage.c
index 5c0a84c..861db6e 100644
--- a/arch/arm/mach-msm/board-8930-storage.c
+++ b/arch/arm/mach-msm/board-8930-storage.c
@@ -245,6 +245,7 @@
 	.pin_data	= &mmc_slot_pin_data[SDCC1],
 	.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
 	.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
+	.uhs_caps2	= MMC_CAP2_HS200_1_8V_SDR,
 };
 #endif
 
@@ -265,7 +266,6 @@
 #endif
 	.vreg_data	= &mmc_slot_vreg_data[SDCC3],
 	.pin_data	= &mmc_slot_pin_data[SDCC3],
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 /*TODO: Insert right replacement for PM8038 */
 #ifndef MSM8930_PHASE_2
 	.status_gpio	= PM8921_GPIO_PM_TO_SYS(26),
@@ -276,7 +276,6 @@
 #endif
 	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 	.is_status_gpio_active_low = true,
-#endif
 	.xpc_cap	= 1,
 	.uhs_caps	= (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
 			MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 |
diff --git a/arch/arm/mach-msm/board-8960-storage.c b/arch/arm/mach-msm/board-8960-storage.c
index 4b09f82..67f44aa 100644
--- a/arch/arm/mach-msm/board-8960-storage.c
+++ b/arch/arm/mach-msm/board-8960-storage.c
@@ -295,6 +295,7 @@
 	.pin_data	= &mmc_slot_pin_data[SDCC1],
 	.mpm_sdiowakeup_int = MSM_MPM_PIN_SDC1_DAT1,
 	.msm_bus_voting_data = &sps_to_ddr_bus_voting_data,
+	.uhs_caps2	= MMC_CAP2_HS200_1_8V_SDR,
 };
 #endif
 
@@ -326,12 +327,10 @@
 #endif
 	.vreg_data	= &mmc_slot_vreg_data[SDCC3],
 	.pin_data	= &mmc_slot_pin_data[SDCC3],
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status_gpio	= PM8921_GPIO_PM_TO_SYS(26),
 	.status_irq	= PM8921_GPIO_IRQ(PM8921_IRQ_BASE, 26),
 	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 	.is_status_gpio_active_low = true,
-#endif
 	.xpc_cap	= 1,
 	.uhs_caps	= (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
 			MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 |
diff --git a/arch/arm/mach-msm/board-9615-storage.c b/arch/arm/mach-msm/board-9615-storage.c
index 2025bd0..6cb34f8 100644
--- a/arch/arm/mach-msm/board-9615-storage.c
+++ b/arch/arm/mach-msm/board-9615-storage.c
@@ -178,11 +178,9 @@
 	.sup_clk_cnt	= ARRAY_SIZE(sdc1_sup_clk_rates),
 	.vreg_data	= &mmc_slot_vreg_data[SDCC1],
 	.pin_data	= &mmc_slot_pin_data[SDCC1],
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status_gpio	= GPIO_SDC1_HW_DET,
 	.status_irq	= MSM_GPIO_TO_INT(GPIO_SDC1_HW_DET),
 	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
 	.xpc_cap	= 1,
 	.uhs_caps	= (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
 			   MMC_CAP_MAX_CURRENT_400),
diff --git a/arch/arm/mach-msm/board-msm7627a-storage.c b/arch/arm/mach-msm/board-msm7627a-storage.c
index e2184f4..49ff393 100644
--- a/arch/arm/mach-msm/board-msm7627a-storage.c
+++ b/arch/arm/mach-msm/board-msm7627a-storage.c
@@ -233,8 +233,7 @@
 	return rc;
 }
 
-#if defined(CONFIG_MMC_MSM_SDC1_SUPPORT) \
-	&& defined(CONFIG_MMC_MSM_CARD_HW_DETECTION)
+#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
 static unsigned int msm7627a_sdcc_slot_status(struct device *dev)
 {
 	int status;
@@ -266,9 +265,7 @@
 	}
 	return status;
 }
-#endif
 
-#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
 static struct mmc_platform_data sdc1_plat_data = {
 	.ocr_mask       = MMC_VDD_28_29,
 	.translate_vdd  = msm_sdcc_setup_power,
@@ -276,10 +273,8 @@
 	.msmsdcc_fmin   = 144000,
 	.msmsdcc_fmid   = 24576000,
 	.msmsdcc_fmax   = 49152000,
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status      = msm7627a_sdcc_slot_status,
 	.irq_flags   = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
 };
 #endif
 
@@ -382,13 +377,11 @@
 	gpio_sdc1_config();
 	if (mmc_regulator_init(1, "mmc", 2850000))
 		return;
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	/* 8x25 EVT do not use hw detector */
 	if (!(machine_is_msm8625_evt()))
 		sdc1_plat_data.status_irq = MSM_GPIO_TO_INT(gpio_sdc1_hw_det);
 	if (machine_is_msm8625_evt())
 		sdc1_plat_data.status = NULL;
-#endif
 
 	msm_add_sdcc(1, &sdc1_plat_data);
 #endif
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 2834f24..bb94474 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -255,7 +255,6 @@
 		}
 	};
 
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	struct pm8xxx_gpio_init_info sdcc_det = {
 		PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SD_DET - 1),
 		{
@@ -275,7 +274,6 @@
 		pr_err("%s PMIC_GPIO_SD_DET config failed\n", __func__);
 		return rc;
 	}
-#endif
 
 	if (machine_is_msm8x55_svlte_surf() || machine_is_msm8x55_svlte_ffa() ||
 						machine_is_msm7x30_fluid())
@@ -6206,14 +6204,12 @@
 #endif
 
 #ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 static unsigned int msm7x30_sdcc_slot_status(struct device *dev)
 {
 	return (unsigned int)
 		gpio_get_value_cansleep(
 			PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SD_DET - 1));
 }
-#endif
 
 static int msm_sdcc_get_wpswitch(struct device *dv)
 {
@@ -6302,11 +6298,9 @@
 	.ocr_mask	= MMC_VDD_27_28 | MMC_VDD_28_29,
 	.translate_vdd	= msm_sdcc_setup_power,
 	.mmc_bus_width  = MMC_CAP_4_BIT_DATA,
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status      = msm7x30_sdcc_slot_status,
 	.status_irq  = PM8058_GPIO_IRQ(PMIC8058_IRQ_BASE, PMIC_GPIO_SD_DET - 1),
 	.irq_flags   = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
 	.wpswitch    = msm_sdcc_get_wpswitch,
 	.msmsdcc_fmin	= 144000,
 	.msmsdcc_fmid	= 24576000,
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 4e50ce5..0459240 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -5216,6 +5216,7 @@
 	&msm_pil_modem,
 	&msm_pil_tzapps,
 	&msm_pil_dsps,
+	&msm_pil_vidc,
 	&qseecom_device,
 #ifdef CONFIG_I2C_QUP
 	&msm_gsbi3_qup_i2c_device,
@@ -5768,7 +5769,6 @@
 				.inv_int_pol    = 0,
 			},
 		},
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 		{
 			PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SDC3_DET - 1),
 			{
@@ -5779,7 +5779,6 @@
 				.inv_int_pol    = 0,
 			},
 		},
-#endif
 		{ /* core&surf gpio expander */
 			PM8058_GPIO_PM_TO_SYS(UI_INT1_N),
 			{
@@ -8429,7 +8428,6 @@
 }
 
 #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 static unsigned int msm8x60_sdcc_slot_status(struct device *dev)
 {
 	int status;
@@ -8451,7 +8449,6 @@
 }
 #endif
 #endif
-#endif
 
 #define MSM_MPM_PIN_SDC3_DAT1	21
 #define MSM_MPM_PIN_SDC4_DAT1	23
@@ -8497,12 +8494,10 @@
 	.translate_vdd  = msm_sdcc_setup_power,
 	.mmc_bus_width  = MMC_CAP_4_BIT_DATA,
 	.wpswitch  	= msm_sdc3_get_wpswitch,
-#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
 	.status      = msm8x60_sdcc_slot_status,
 	.status_irq  = PM8058_GPIO_IRQ(PM8058_IRQ_BASE,
 				       PMIC_GPIO_SDC3_DET - 1),
 	.irq_flags   = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-#endif
 	.msmsdcc_fmin	= 400000,
 	.msmsdcc_fmid	= 24000000,
 	.msmsdcc_fmax	= 48000000,
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index 7826498..3f4eb8e 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -6077,18 +6077,19 @@
 	 */
 	/*
 	 * Initialize MM AHB registers: Enable the FPB clock and disable HW
-	 * gating on non-8960 for all clocks. Also set VFE_AHB's
+	 * gating on 8627 for all clocks. Also set VFE_AHB's
 	 * FORCE_CORE_ON bit to prevent its memory from being collapsed when
 	 * the clock is halted. The sleep and wake-up delays are set to safe
 	 * values.
 	 */
-	if (cpu_is_msm8960() || cpu_is_apq8064()) {
-		rmwreg(0x44000000, AHB_EN_REG,  0x6C000103);
-		writel_relaxed(0x3C7097F9, AHB_EN2_REG);
-	} else {
+	if (cpu_is_msm8627()) {
 		rmwreg(0x00000003, AHB_EN_REG,  0x6C000103);
 		writel_relaxed(0x000007F9, AHB_EN2_REG);
+	} else {
+		rmwreg(0x44000000, AHB_EN_REG,  0x6C000103);
+		writel_relaxed(0x3C7097F9, AHB_EN2_REG);
 	}
+
 	if (cpu_is_apq8064())
 		rmwreg(0x00000001, AHB_EN3_REG, 0x00000001);
 
@@ -6100,25 +6101,26 @@
 	 * support it. Also set FORCE_CORE_ON bits, and any sleep and wake-up
 	 * delays to safe values. */
 	if ((cpu_is_msm8960() &&
-			SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) ||
-			cpu_is_apq8064()) {
-		rmwreg(0x0003AFF9, MAXI_EN_REG,  0x0803FFFF);
-		rmwreg(0x3A27FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
-		rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
-	} else {
+			SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 3) ||
+			cpu_is_msm8627()) {
 		rmwreg(0x000007F9, MAXI_EN_REG,  0x0803FFFF);
 		rmwreg(0x3027FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
-		rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
+	} else {
+		rmwreg(0x0003AFF9, MAXI_EN_REG,  0x0803FFFF);
+		rmwreg(0x3A27FCFF, MAXI_EN2_REG, 0x3A3FFFFF);
 	}
+
 	rmwreg(0x0027FCFF, MAXI_EN3_REG, 0x003FFFFF);
+	rmwreg(0x0027FCFF, MAXI_EN4_REG, 0x017FFFFF);
+
 	if (cpu_is_apq8064())
 		rmwreg(0x019FECFF, MAXI_EN5_REG, 0x01FFEFFF);
 	if (cpu_is_msm8930())
 		rmwreg(0x000004FF, MAXI_EN5_REG, 0x00000FFF);
-	if (cpu_is_msm8960() || cpu_is_apq8064())
-		rmwreg(0x00003C38, SAXI_EN_REG,  0x00003FFF);
-	else
+	if (cpu_is_msm8627())
 		rmwreg(0x000003C7, SAXI_EN_REG,  0x00003FFF);
+	else
+		rmwreg(0x00003C38, SAXI_EN_REG,  0x00003FFF);
 
 	/* Enable IMEM's clk_on signal */
 	imem_reg = ioremap(0x04b00040, 4);
diff --git a/arch/arm/mach-msm/clock-8x60.c b/arch/arm/mach-msm/clock-8x60.c
index becb227..4493ddc 100644
--- a/arch/arm/mach-msm/clock-8x60.c
+++ b/arch/arm/mach-msm/clock-8x60.c
@@ -3723,6 +3723,8 @@
 	CLK_LOOKUP("vcodec_iommu0_clk", vcodec_axi_clk.c, "msm_vidc.0"),
 	CLK_LOOKUP("vcodec_iommu1_clk", vcodec_axi_clk.c, "msm_vidc.0"),
 	CLK_LOOKUP("smmu_iface_clk", smmu_p_clk.c,	"msm_vidc.0"),
+	CLK_LOOKUP("core_clk",		vcodec_axi_clk.c,  "pil_vidc"),
+	CLK_LOOKUP("smmu_iface_clk",	smmu_p_clk.c,  "pil_vidc"),
 
 	CLK_LOOKUP("dfab_dsps_clk",	dfab_dsps_clk.c, NULL),
 	CLK_LOOKUP("core_clk",		dfab_usb_hs_clk.c,	"msm_otg"),
diff --git a/arch/arm/mach-msm/clock-debug.c b/arch/arm/mach-msm/clock-debug.c
index e8c3e05..7263512 100644
--- a/arch/arm/mach-msm/clock-debug.c
+++ b/arch/arm/mach-msm/clock-debug.c
@@ -169,16 +169,23 @@
 
 static int clock_debug_print_clock(struct clk *c)
 {
-	size_t ln = 0;
-	char s[128];
+	char *start = "";
 
 	if (!c || !c->count)
 		return 0;
 
-	ln += snprintf(s, sizeof(s), "\t%s", c->dbg_name);
-	while (ln < sizeof(s) && (c = clk_get_parent(c)))
-		ln += snprintf(s + ln, sizeof(s) - ln, " -> %s", c->dbg_name);
-	pr_info("%s\n", s);
+	pr_info("\t");
+	do {
+		if (c->vdd_class)
+			pr_cont("%s%s [%ld, %lu]", start, c->dbg_name, c->rate,
+				c->vdd_class->cur_level);
+		else
+			pr_cont("%s%s [%ld]", start, c->dbg_name, c->rate);
+		start = " -> ";
+	} while ((c = clk_get_parent(c)));
+
+	pr_cont("\n");
+
 	return 1;
 }
 
diff --git a/arch/arm/mach-msm/devices-msm8x60.c b/arch/arm/mach-msm/devices-msm8x60.c
index f36b4ec..5bfd823 100644
--- a/arch/arm/mach-msm/devices-msm8x60.c
+++ b/arch/arm/mach-msm/devices-msm8x60.c
@@ -231,6 +231,11 @@
 	.dev.platform_data = "dsps",
 };
 
+struct platform_device msm_pil_vidc = {
+	.name = "pil_vidc",
+	.id = -1,
+};
+
 static struct resource msm_uart1_dm_resources[] = {
 	{
 		.start = MSM_UART1DM_PHYS,
diff --git a/arch/arm/mach-msm/mdm_common.c b/arch/arm/mach-msm/mdm_common.c
index c0aed69..5b181e1 100644
--- a/arch/arm/mach-msm/mdm_common.c
+++ b/arch/arm/mach-msm/mdm_common.c
@@ -351,6 +351,7 @@
 
 static int mdm_subsys_shutdown(const struct subsys_data *crashed_subsys)
 {
+	mdm_drv->mdm_ready = 0;
 	gpio_direction_output(mdm_drv->ap2mdm_errfatal_gpio, 1);
 	if (mdm_drv->pdata->ramdump_delay_ms > 0) {
 		/* Wait for the external modem to complete
diff --git a/arch/arm/mach-msm/msm_rq_stats.c b/arch/arm/mach-msm/msm_rq_stats.c
index 738819f..2ea7ed3 100644
--- a/arch/arm/mach-msm/msm_rq_stats.c
+++ b/arch/arm/mach-msm/msm_rq_stats.c
@@ -44,7 +44,7 @@
 	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
 }
 
-static ssize_t show_run_queue_avg(struct kobject *kobj,
+static ssize_t run_queue_avg_show(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
 	unsigned int val = 0;
@@ -59,6 +59,8 @@
 	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
 }
 
+static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
+
 static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
 				      struct kobj_attribute *attr, char *buf)
 {
@@ -93,6 +95,10 @@
 	return count;
 }
 
+static struct kobj_attribute run_queue_poll_ms_attr =
+	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
+			store_run_queue_poll_ms);
+
 static ssize_t show_def_timer_ms(struct kobject *kobj,
 		struct kobj_attribute *attr, char *buf)
 {
@@ -111,67 +117,33 @@
 	return count;
 }
 
-#define MSM_RQ_STATS_RO_ATTRIB(att) ({ \
-		struct attribute *attrib = NULL; \
-		struct kobj_attribute *ptr = NULL; \
-		ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
-		if (ptr) { \
-			ptr->attr.name = #att; \
-			ptr->attr.mode = S_IRUGO; \
-			ptr->show = show_##att; \
-			ptr->store = NULL; \
-			attrib = &ptr->attr; \
-		} \
-		attrib; })
+static struct kobj_attribute def_timer_ms_attr =
+	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
+			store_def_timer_ms);
 
-#define MSM_RQ_STATS_RW_ATTRIB(att) ({ \
-		struct attribute *attrib = NULL; \
-		struct kobj_attribute *ptr = NULL; \
-		ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
-		if (ptr) { \
-			ptr->attr.name = #att; \
-			ptr->attr.mode = S_IWUSR|S_IRUSR; \
-			ptr->show = show_##att; \
-			ptr->store = store_##att; \
-			attrib = &ptr->attr; \
-		} \
-		attrib; })
+static struct attribute *rq_attrs[] = {
+	&def_timer_ms_attr.attr,
+	&run_queue_avg_attr.attr,
+	&run_queue_poll_ms_attr.attr,
+	NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+	.attrs = rq_attrs,
+};
 
 static int init_rq_attribs(void)
 {
-	int i;
-	int err = 0;
-	const int attr_count = 4;
-
-	struct attribute **attribs =
-		kzalloc(sizeof(struct attribute *) * attr_count, GFP_KERNEL);
-
-	if (!attribs)
-		goto rel;
+	int err;
 
 	rq_info.rq_avg = 0;
-
-	attribs[0] = MSM_RQ_STATS_RW_ATTRIB(def_timer_ms);
-	attribs[1] = MSM_RQ_STATS_RO_ATTRIB(run_queue_avg);
-	attribs[2] = MSM_RQ_STATS_RW_ATTRIB(run_queue_poll_ms);
-	attribs[3] = NULL;
-
-	for (i = 0; i < attr_count - 1 ; i++) {
-		if (!attribs[i])
-			goto rel2;
-	}
-
-	rq_info.attr_group = kzalloc(sizeof(struct attribute_group),
-						GFP_KERNEL);
-	if (!rq_info.attr_group)
-		goto rel3;
-	rq_info.attr_group->attrs = attribs;
+	rq_info.attr_group = &rq_attr_group;
 
 	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
 	rq_info.kobj = kobject_create_and_add("rq-stats",
-	&get_cpu_device(0)->kobj);
+			&get_cpu_device(0)->kobj);
 	if (!rq_info.kobj)
-		goto rel3;
+		return -ENOMEM;
 
 	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
 	if (err)
@@ -179,19 +151,7 @@
 	else
 		kobject_uevent(rq_info.kobj, KOBJ_ADD);
 
-	if (!err)
-		return err;
-
-rel3:
-	kfree(rq_info.attr_group);
-	kfree(rq_info.kobj);
-rel2:
-	for (i = 0; i < attr_count - 1; i++)
-		kfree(attribs[i]);
-rel:
-	kfree(attribs);
-
-	return -ENOMEM;
+	return err;
 }
 
 static int __init msm_rq_stats_init(void)
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
index 68e0f41..44ef822 100644
--- a/arch/arm/mach-msm/smd_tty.c
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -144,14 +144,8 @@
 
 		avail = tty_prepare_flip_string(tty, &ptr, avail);
 		if (avail <= 0) {
-			if (!timer_pending(&info->buf_req_timer)) {
-				init_timer(&info->buf_req_timer);
-				info->buf_req_timer.expires = jiffies +
-							((30 * HZ)/1000);
-				info->buf_req_timer.function = buf_req_retry;
-				info->buf_req_timer.data = param;
-				add_timer(&info->buf_req_timer);
-			}
+			mod_timer(&info->buf_req_timer,
+					jiffies + msecs_to_jiffies(30));
 			return;
 		}
 
@@ -572,6 +566,8 @@
 		smd_tty[idx].driver.driver.owner = THIS_MODULE;
 		spin_lock_init(&smd_tty[idx].reset_lock);
 		smd_tty[idx].is_open = 0;
+		setup_timer(&smd_tty[idx].buf_req_timer, buf_req_retry,
+				(unsigned long)&smd_tty[idx]);
 		init_waitqueue_head(&smd_tty[idx].ch_opened_wait_queue);
 		ret = platform_driver_register(&smd_tty[idx].driver);
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index ad0ec48..41fd72f 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -111,6 +111,21 @@
 	.ib_check_level = 0,
 };
 
+/* This set of registers is used for hang detection.
+ * If the values of these registers are the same after
+ * KGSL_TIMEOUT_PART time, GPU hang is reported in
+ * kernel log.
+ */
+unsigned int hang_detect_regs[] = {
+	A3XX_RBBM_STATUS,
+	REG_CP_RB_RPTR,
+	REG_CP_IB1_BASE,
+	REG_CP_IB1_BUFSZ,
+	REG_CP_IB2_BASE,
+	REG_CP_IB2_BUFSZ,
+};
+
+const unsigned int hang_detect_regs_count = ARRAY_SIZE(hang_detect_regs);
 
 /*
  * This is the master list of all GPU cores that are supported by this
@@ -246,6 +261,7 @@
 }
 
 static void adreno_iommu_setstate(struct kgsl_device *device,
+					unsigned int context_id,
 					uint32_t flags)
 {
 	unsigned int pt_val, reg_pt_val;
@@ -256,17 +272,27 @@
 	struct kgsl_memdesc **reg_map_desc;
 	void *reg_map_array = NULL;
 	int num_iommu_units, i;
+	struct kgsl_context *context;
+	struct adreno_context *adreno_ctx = NULL;
 
 	if (!adreno_dev->drawctxt_active)
 		return kgsl_mmu_device_setstate(&device->mmu, flags);
 	num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
 							&reg_map_array);
+
+	context = idr_find(&device->context_idr, context_id);
+	adreno_ctx = context->devctxt;
+
 	reg_map_desc = reg_map_array;
 
 	if (kgsl_mmu_enable_clk(&device->mmu,
 				KGSL_IOMMU_CONTEXT_USER))
 		goto done;
 
+	cmds += __adreno_add_idle_indirect_cmds(cmds,
+		device->mmu.setstate_memory.gpuaddr +
+		KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
 	if (cpu_is_msm8960())
 		cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
 					device->mmu.setstate_memory.gpuaddr +
@@ -335,10 +361,9 @@
 		*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
 		*cmds++ = 0x7fff;
 
-		if (flags & KGSL_MMUFLAGS_TLBFLUSH)
-			cmds += __adreno_add_idle_indirect_cmds(cmds,
-				device->mmu.setstate_memory.gpuaddr +
-				KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+		cmds += __adreno_add_idle_indirect_cmds(cmds,
+			device->mmu.setstate_memory.gpuaddr +
+			KGSL_IOMMU_SETSTATE_NOP_OFFSET);
 	}
 	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
 		/*
@@ -375,7 +400,6 @@
 
 	sizedwords += (cmds - &link[0]);
 	if (sizedwords) {
-		unsigned int ts;
 		/*
 		 * add an interrupt at the end of commands so that the smmu
 		 * disable clock off function will get called
@@ -383,9 +407,13 @@
 		*cmds++ = cp_type3_packet(CP_INTERRUPT, 1);
 		*cmds++ = CP_INT_CNTL__RB_INT_MASK;
 		sizedwords += 2;
-		ts = adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		/* This returns the per context timestamp but we need to
+		 * use the global timestamp for iommu clock disablement */
+		adreno_ringbuffer_issuecmds(device, adreno_ctx,
+			KGSL_CMD_FLAGS_PMODE,
 			&link[0], sizedwords);
-		kgsl_mmu_disable_clk_on_ts(&device->mmu, ts, true);
+		kgsl_mmu_disable_clk_on_ts(&device->mmu,
+		adreno_dev->ringbuffer.timestamp[KGSL_MEMSTORE_GLOBAL], true);
 	}
 done:
 	if (num_iommu_units)
@@ -393,6 +421,7 @@
 }
 
 static void adreno_gpummu_setstate(struct kgsl_device *device,
+					unsigned int context_id,
 					uint32_t flags)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -400,6 +429,8 @@
 	unsigned int *cmds = &link[0];
 	int sizedwords = 0;
 	unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
+	struct kgsl_context *context;
+	struct adreno_context *adreno_ctx = NULL;
 
 	/*
 	 * Fix target freeze issue by adding TLB flush for each submit
@@ -414,6 +445,9 @@
 	 * easier to filter out the mmu accesses from the dump
 	 */
 	if (!kgsl_cff_dump_enable && adreno_dev->drawctxt_active) {
+		context = idr_find(&device->context_idr, context_id);
+		adreno_ctx = context->devctxt;
+
 		if (flags & KGSL_MMUFLAGS_PTUPDATE) {
 			/* wait for graphics pipe to be idle */
 			*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
@@ -486,7 +520,8 @@
 			sizedwords += 2;
 		}
 
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, adreno_ctx,
+					KGSL_CMD_FLAGS_PMODE,
 					&link[0], sizedwords);
 	} else {
 		kgsl_mmu_device_setstate(&device->mmu, flags);
@@ -494,13 +529,14 @@
 }
 
 static void adreno_setstate(struct kgsl_device *device,
+			unsigned int context_id,
 			uint32_t flags)
 {
 	/* call the mmu specific handler */
 	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
-		return adreno_gpummu_setstate(device, flags);
+		return adreno_gpummu_setstate(device, context_id, flags);
 	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
-		return adreno_iommu_setstate(device, flags);
+		return adreno_iommu_setstate(device, context_id, flags);
 }
 
 static unsigned int
@@ -730,6 +766,10 @@
 		kgsl_mh_start(device);
 	}
 
+	/* Assign correct RBBM status register to hang detect regs
+	 */
+	hang_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
+
 	status = kgsl_mmu_start(device);
 	if (status)
 		goto error_clk_off;
@@ -896,8 +936,7 @@
 	return ret;
 }
 
-static int
-adreno_dump_and_recover(struct kgsl_device *device)
+int adreno_dump_and_recover(struct kgsl_device *device)
 {
 	int result = -ETIMEDOUT;
 
@@ -937,6 +976,7 @@
 done:
 	return result;
 }
+EXPORT_SYMBOL(adreno_dump_and_recover);
 
 static int adreno_getproperty(struct kgsl_device *device,
 				enum kgsl_property_type type,
@@ -1096,7 +1136,10 @@
 	unsigned long wait_time_part;
 	unsigned int msecs;
 	unsigned int msecs_first;
-	unsigned int msecs_part;
+	unsigned int msecs_part = KGSL_TIMEOUT_PART;
+	unsigned int prev_reg_val[hang_detect_regs_count];
+
+	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
 	kgsl_cffdump_regpoll(device->id,
 		adreno_dev->gpudev->reg_rbbm_status << 2,
@@ -1108,7 +1151,6 @@
 	if (rb->flags & KGSL_FLAGS_STARTED) {
 		msecs = adreno_dev->wait_timeout;
 		msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100;
-		msecs_part = (msecs - msecs_first + 3) / 4;
 		wait_time = jiffies + wait_timeout;
 		wait_time_part = jiffies + msecs_to_jiffies(msecs_first);
 		adreno_poke(device);
@@ -1117,6 +1159,8 @@
 				adreno_poke(device);
 				wait_time_part = jiffies +
 					msecs_to_jiffies(msecs_part);
+				if ((adreno_hang_detect(device, prev_reg_val)))
+					goto err;
 			}
 			GSL_RB_GET_READPTR(rb, &rb->rptr);
 			if (time_after(jiffies, wait_time)) {
@@ -1129,6 +1173,7 @@
 
 	/* now, wait for the GPU to finish its operations */
 	wait_time = jiffies + wait_timeout;
+	wait_time_part = jiffies + msecs_to_jiffies(msecs_part);
 	while (time_before(jiffies, wait_time)) {
 		adreno_regread(device, adreno_dev->gpudev->reg_rbbm_status,
 			&rbbm_status);
@@ -1139,6 +1184,16 @@
 			if (!(rbbm_status & 0x80000000))
 				return 0;
 		}
+
+		/* Dont wait for timeout, detect hang faster.
+		 */
+		if (time_after(jiffies, wait_time_part)) {
+				wait_time_part = jiffies +
+					msecs_to_jiffies(msecs_part);
+				if ((adreno_hang_detect(device, prev_reg_val)))
+					goto err;
+		}
+
 	}
 
 err:
@@ -1325,6 +1380,7 @@
 	int status;
 	unsigned int ref_ts, enableflag;
 	unsigned int context_id;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
 	mutex_lock(&device->mutex);
 	context_id = _get_context_id(context);
@@ -1370,8 +1426,15 @@
 			* get an interrupt */
 			cmds[0] = cp_type3_packet(CP_NOP, 1);
 			cmds[1] = 0;
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-				&cmds[0], 2);
+
+			if (adreno_dev->drawctxt_active)
+				adreno_ringbuffer_issuecmds(device,
+					adreno_dev->drawctxt_active,
+					KGSL_CMD_FLAGS_NONE, &cmds[0], 2);
+			else
+				/* We would never call this function if there
+				 * was no active contexts running */
+				BUG();
 		}
 	}
 unlock:
@@ -1396,6 +1459,32 @@
 	__ret;								\
 })
 
+
+
+unsigned int adreno_hang_detect(struct kgsl_device *device,
+						unsigned int *prev_reg_val)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int curr_reg_val[hang_detect_regs_count];
+	unsigned int hang_detected = 1;
+	unsigned int i;
+
+	if (!adreno_dev->fast_hang_detect)
+		return 0;
+
+	for (i = 0; i < hang_detect_regs_count; i++) {
+		adreno_regread(device, hang_detect_regs[i],
+					   &curr_reg_val[i]);
+		if (curr_reg_val[i] != prev_reg_val[i]) {
+			prev_reg_val[i] = curr_reg_val[i];
+			hang_detected = 0;
+		}
+	}
+
+	return hang_detected;
+}
+
+
 /* MUST be called with the device mutex held */
 static int adreno_waittimestamp(struct kgsl_device *device,
 				struct kgsl_context *context,
@@ -1407,16 +1496,20 @@
 	static uint io_cnt;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
-	int retries;
+	int retries = 0;
 	unsigned int msecs_first;
-	unsigned int msecs_part;
+	unsigned int msecs_part = KGSL_TIMEOUT_PART;
 	unsigned int ts_issued;
 	unsigned int context_id = _get_context_id(context);
+	unsigned int time_elapsed = 0;
+	unsigned int prev_reg_val[hang_detect_regs_count];
+
+	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
 	ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
 
 	/* Don't wait forever, set a max value for now */
-	if (msecs == -1)
+	if (msecs == KGSL_TIMEOUT_DEFAULT)
 		msecs = adreno_dev->wait_timeout;
 
 	if (timestamp_cmp(timestamp, ts_issued) > 0) {
@@ -1432,8 +1525,7 @@
 	 * been updated properly.
 	 */
 	msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100;
-	msecs_part = (msecs - msecs_first + 3) / 4;
-	for (retries = 0; retries < 5; retries++) {
+	do {
 		/*
 		 * If the context ID is invalid, we are in a race with
 		 * the context being destroyed by userspace so bail.
@@ -1458,6 +1550,11 @@
 		if (io_cnt <
 		    pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
 			io = 0;
+
+		if ((retries > 0) &&
+			(adreno_hang_detect(device, prev_reg_val)))
+			goto hang_dump;
+
 		mutex_unlock(&device->mutex);
 		/* We need to make sure that the process is
 		 * placed in wait-q before its condition is called
@@ -1479,7 +1576,14 @@
 			goto done;
 		}
 		/*this wait timed out*/
-	}
+
+		time_elapsed = time_elapsed +
+				(retries ? msecs_part : msecs_first);
+		retries++;
+
+	} while (time_elapsed < msecs);
+
+hang_dump:
 	status = -ETIMEDOUT;
 	KGSL_DRV_ERR(device,
 		     "Device hang detected while waiting for timestamp: "
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index feaa36f..fcbf1d9 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -82,6 +82,7 @@
 	unsigned int pix_shader_start;
 	unsigned int instruction_size;
 	unsigned int ib_check_level;
+	unsigned int fast_hang_detect;
 };
 
 struct adreno_gpudev {
@@ -99,7 +100,8 @@
 	int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
 	void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
 	void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
-	void (*ctxt_draw_workaround)(struct adreno_device *);
+	void (*ctxt_draw_workaround)(struct adreno_device *,
+					struct adreno_context *);
 	irqreturn_t (*irq_handler)(struct adreno_device *);
 	void (*irq_control)(struct adreno_device *, int);
 	void * (*snapshot)(struct adreno_device *, void *, int *, int);
@@ -123,6 +125,10 @@
 extern const unsigned int a3xx_registers[];
 extern const unsigned int a3xx_registers_count;
 
+extern unsigned int hang_detect_regs[];
+extern const unsigned int hang_detect_regs_count;
+
+
 int adreno_idle(struct kgsl_device *device, unsigned int timeout);
 void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
 				unsigned int *value);
@@ -143,6 +149,11 @@
 void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
 		int hang);
 
+int adreno_dump_and_recover(struct kgsl_device *device);
+
+unsigned int adreno_hang_detect(struct kgsl_device *device,
+						unsigned int *prev_reg_val);
+
 static inline int adreno_is_a200(struct adreno_device *adreno_dev)
 {
 	return (adreno_dev->gpurev == ADRENO_REV_A200);
@@ -250,7 +261,6 @@
 {
 	unsigned int *start = cmds;
 
-	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
 	*cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
 	*cmds++ = new_phys_limit;
 	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
@@ -263,7 +273,6 @@
 {
 	unsigned int *start = cmds;
 
-	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
 	*cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
 	*cmds++ = (cur_ctx_bank ? 0 : 0x20);
 	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index d846d3d..3aa3be5 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1450,7 +1450,8 @@
 	return ret;
 }
 
-static void a2xx_drawctxt_workaround(struct adreno_device *adreno_dev)
+static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+					struct adreno_context *context)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 	unsigned int cmd[11];
@@ -1497,7 +1498,7 @@
 					| adreno_dev->pix_shader_start;
 	}
 
-	adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
 			&cmd[0], cmds - cmd);
 }
 
@@ -1516,12 +1517,13 @@
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 
 		/* save registers and constants. */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->reg_save, 3);
 
 		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
 			/* save shader partitioning and instructions. */
-			adreno_ringbuffer_issuecmds(device,
+			adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_PMODE,
 				context->shader_save, 3);
 
@@ -1529,7 +1531,8 @@
 			 * fixup shader partitioning parameter for
 			 *  SET_SHADER_BASES.
 			 */
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			adreno_ringbuffer_issuecmds(device, context,
+				KGSL_CMD_FLAGS_NONE,
 				context->shader_fixup, 3);
 
 			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
@@ -1541,19 +1544,21 @@
 		/* save gmem.
 		 * (note: changes shader. shader must already be saved.)
 		 */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_PMODE,
 			context->context_gmem_shadow.gmem_save, 3);
 
 		/* Restore TP0_CHICKEN */
 		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			adreno_ringbuffer_issuecmds(device, context,
+				KGSL_CMD_FLAGS_NONE,
 				context->chicken_restore, 3);
 		}
 		adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
 
 		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
 	} else if (adreno_is_a2xx(adreno_dev))
-		a2xx_drawctxt_workaround(adreno_dev);
+		a2xx_drawctxt_draw_workaround(adreno_dev, context);
 }
 
 static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
@@ -1564,7 +1569,8 @@
 
 	if (context == NULL) {
 		/* No context - set the default apgetable and thats it */
-		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable);
+		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+				adreno_dev->drawctxt_active->id);
 		return;
 	}
 
@@ -1576,8 +1582,9 @@
 	cmds[3] = device->memstore.gpuaddr +
 		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
 	cmds[4] = context->id;
-	adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
-	kgsl_mmu_setstate(&device->mmu, context->pagetable);
+	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+					cmds, 5);
+	kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
 
 #ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
 	kgsl_cffdump_syncmem(NULL, &context->gpustate,
@@ -1589,12 +1596,14 @@
 	 *  (note: changes shader. shader must not already be restored.)
 	 */
 	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_PMODE,
 			context->context_gmem_shadow.gmem_restore, 3);
 
 		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 			/* Restore TP0_CHICKEN */
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			adreno_ringbuffer_issuecmds(device, context,
+				KGSL_CMD_FLAGS_NONE,
 				context->chicken_restore, 3);
 		}
 
@@ -1604,12 +1613,12 @@
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 
 		/* restore registers and constants. */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-			context->reg_restore, 3);
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
 
 		/* restore shader instructions & partitioning. */
 		if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
-			adreno_ringbuffer_issuecmds(device,
+			adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_NONE,
 				context->shader_restore, 3);
 		}
@@ -1618,8 +1627,8 @@
 	if (adreno_is_a20x(adreno_dev)) {
 		cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
 		cmds[1] = context->bin_base_offset;
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-			cmds, 2);
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE, cmds, 2);
 	}
 }
 
@@ -2011,7 +2020,7 @@
 	.ctxt_create = a2xx_drawctxt_create,
 	.ctxt_save = a2xx_drawctxt_save,
 	.ctxt_restore = a2xx_drawctxt_restore,
-	.ctxt_draw_workaround = a2xx_drawctxt_workaround,
+	.ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
 	.irq_handler = a2xx_irq_handler,
 	.irq_control = a2xx_irq_control,
 	.snapshot = a2xx_snapshot,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index a6b4210..6eebeb8 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -2226,16 +2226,17 @@
 
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 		/* Fixup self modifying IBs for save operations */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-			context->save_fixup, 3);
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
 
 		/* save registers and constants. */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->regconstant_save, 3);
 
 		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
 			/* Save shader instructions */
-			adreno_ringbuffer_issuecmds(device,
+			adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
 
 			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
@@ -2249,7 +2250,8 @@
 		 * already be saved.)
 		 */
 
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, context,
+					KGSL_CMD_FLAGS_PMODE,
 					    context->context_gmem_shadow.
 					    gmem_save, 3);
 		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
@@ -2264,7 +2266,8 @@
 
 	if (context == NULL) {
 		/* No context - set the default pagetable and thats it */
-		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable);
+		kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+				adreno_dev->drawctxt_active->id);
 		return;
 	}
 
@@ -2276,8 +2279,9 @@
 	cmds[3] = device->memstore.gpuaddr +
 		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
 	cmds[4] = context->id;
-	adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
-	kgsl_mmu_setstate(&device->mmu, context->pagetable);
+	adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+					cmds, 5);
+	kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
 
 	/*
 	 * Restore GMEM.  (note: changes shader.
@@ -2285,29 +2289,34 @@
 	 */
 
 	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+		adreno_ringbuffer_issuecmds(device, context,
+					KGSL_CMD_FLAGS_PMODE,
 					    context->context_gmem_shadow.
 					    gmem_restore, 3);
 		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
 	}
 
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-			context->reg_restore, 3);
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
 
 		/* Fixup self modifying IBs for restore operations */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->restore_fixup, 3);
 
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->constant_restore, 3);
 
 		if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
-			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			adreno_ringbuffer_issuecmds(device, context,
+				KGSL_CMD_FLAGS_NONE,
 				context->shader_restore, 3);
 
 		/* Restore HLSQ_CONTROL_0 register */
-		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+		adreno_ringbuffer_issuecmds(device, context,
+			KGSL_CMD_FLAGS_NONE,
 			context->hlsqcontrol_restore, 3);
 	}
 }
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 822cf14..e3c9a18 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -117,6 +117,11 @@
 	debugfs_create_u32("ib_check", 0644, device->d_debugfs,
 			   &adreno_dev->ib_check_level);
 
+	/* By Default enable fast hang detection */
+	adreno_dev->fast_hang_detect = 1;
+	debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs,
+			   &adreno_dev->fast_hang_detect);
+
 	/* Create post mortem control files */
 
 	pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 267fd45..098c4f5 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -274,7 +274,7 @@
 		if (adreno_dev->gpudev->ctxt_draw_workaround &&
 			adreno_is_a225(adreno_dev))
 				adreno_dev->gpudev->ctxt_draw_workaround(
-					adreno_dev);
+					adreno_dev, drawctxt);
 		return;
 	}
 
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 347a57d..afcceee 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -53,6 +53,14 @@
 	unsigned int freecmds;
 	unsigned int *cmds;
 	uint cmds_gpu;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+	unsigned long wait_timeout = msecs_to_jiffies(adreno_dev->wait_timeout);
+	unsigned long wait_time;
+	unsigned long wait_time_part;
+	unsigned int msecs_part = KGSL_TIMEOUT_PART;
+	unsigned int prev_reg_val[hang_detect_regs_count];
+
+	memset(prev_reg_val, 0, sizeof(prev_reg_val));
 
 	/* if wptr ahead, fill the remaining with NOPs */
 	if (wptr_ahead) {
@@ -79,13 +87,48 @@
 		rb->wptr = 0;
 	}
 
+	wait_time = jiffies + wait_timeout;
+	wait_time_part = jiffies + msecs_to_jiffies(msecs_part);
 	/* wait for space in ringbuffer */
-	do {
+	while (1) {
 		GSL_RB_GET_READPTR(rb, &rb->rptr);
 
 		freecmds = rb->rptr - rb->wptr;
 
-	} while ((freecmds != 0) && (freecmds <= numcmds));
+		if (freecmds == 0 || freecmds > numcmds)
+			break;
+
+		/* Dont wait for timeout, detect hang faster.
+		 */
+		if (time_after(jiffies, wait_time_part)) {
+			wait_time_part = jiffies +
+				msecs_to_jiffies(msecs_part);
+			if ((adreno_hang_detect(rb->device,
+						prev_reg_val))){
+				KGSL_DRV_ERR(rb->device,
+				"Hang detected while waiting for freespace in"
+				"ringbuffer rptr: 0x%x, wptr: 0x%x\n",
+				rb->rptr, rb->wptr);
+				goto err;
+			}
+		}
+
+		if (time_after(jiffies, wait_time)) {
+			KGSL_DRV_ERR(rb->device,
+			"Timed out while waiting for freespace in ringbuffer "
+			"rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr);
+			goto err;
+		}
+
+		continue;
+
+err:
+		if (!adreno_dump_and_recover(rb->device))
+				wait_time = jiffies + wait_timeout;
+			else
+				/* GPU is hung and we cannot recover */
+				BUG();
+	}
 }
 
 unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
@@ -439,15 +480,13 @@
 	unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
 	unsigned int gpuaddr = rb->device->memstore.gpuaddr;
 
-	if (context != NULL) {
-		/*
-		 * if the context was not created with per context timestamp
-		 * support, we must use the global timestamp since issueibcmds
-		 * will be returning that one.
-		 */
-		if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
-			context_id = context->id;
-	}
+	/*
+	 * if the context was not created with per context timestamp
+	 * support, we must use the global timestamp since issueibcmds
+	 * will be returning that one.
+	 */
+	if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+		context_id = context->id;
 
 	/* reserve space to temporarily turn off protected mode
 	*  error checking if needed
@@ -460,7 +499,7 @@
 		total_sizedwords += 7;
 
 	total_sizedwords += 2; /* scratchpad ts for recovery */
-	if (context) {
+	if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
 		total_sizedwords += 3; /* sop timestamp */
 		total_sizedwords += 4; /* eop timestamp */
 		total_sizedwords += 3; /* global timestamp without cache
@@ -470,6 +509,15 @@
 	}
 
 	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+	/* GPU may hang during space allocation, if thats the case the current
+	 * context may have hung the GPU */
+	if (context->flags & CTXT_FLAGS_GPU_HANG) {
+		KGSL_CTXT_WARN(rb->device,
+		"Context %p caused a gpu hang. Will not accept commands for context %d\n",
+		context, context->id);
+		return rb->timestamp[context_id];
+	}
+
 	rcmd_gpu = rb->buffer_desc.gpuaddr
 		+ sizeof(uint)*(rb->wptr-total_sizedwords);
 
@@ -525,7 +573,7 @@
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
 	}
 
-	if (context) {
+	if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
 		/* start-of-pipeline timestamp */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_MEM_WRITE, 2));
@@ -593,6 +641,7 @@
 
 unsigned int
 adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+						struct adreno_context *drawctxt,
 						unsigned int flags,
 						unsigned int *cmds,
 						int sizedwords)
@@ -603,7 +652,7 @@
 	if (device->state & KGSL_STATE_HUNG)
 		return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
 					KGSL_TIMESTAMP_RETIRED);
-	return adreno_ringbuffer_addcmds(rb, NULL, flags, cmds, sizedwords);
+	return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds, sizedwords);
 }
 
 static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
@@ -870,7 +919,7 @@
 	*cmds++ = cp_nop_packet(1);
 	*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
 
-	kgsl_setstate(&device->mmu,
+	kgsl_setstate(&device->mmu, context->id,
 		      kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
 					device->id));
 
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index ebea4ed..6429f46 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -104,6 +104,7 @@
 void adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
 
 unsigned int adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+					struct adreno_context *drawctxt,
 					unsigned int flags,
 					unsigned int *cmdaddr,
 					int sizedwords);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index f367166..7ffa83b 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -220,7 +220,8 @@
 
 static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc)
 {
-	if (memdesc->hostptr == NULL && memdesc->ops->map_kernel_mem)
+	if (memdesc->hostptr == NULL && memdesc->ops &&
+		memdesc->ops->map_kernel_mem)
 		memdesc->ops->map_kernel_mem(memdesc);
 
 	return memdesc->hostptr;
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 4524668..75b688b 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -25,6 +25,7 @@
 
 #define KGSL_TIMEOUT_NONE       0
 #define KGSL_TIMEOUT_DEFAULT    0xFFFFFFFF
+#define KGSL_TIMEOUT_PART       2000 /* 2 sec */
 
 #define FIRST_TIMEOUT (HZ / 2)
 
@@ -97,7 +98,8 @@
 	/* Optional functions - these functions are not mandatory.  The
 	   driver will check that the function pointer is not NULL before
 	   calling the hook */
-	void (*setstate) (struct kgsl_device *device, uint32_t flags);
+	void (*setstate) (struct kgsl_device *device, unsigned int context_id,
+			uint32_t flags);
 	int (*drawctxt_create) (struct kgsl_device *device,
 		struct kgsl_pagetable *pagetable, struct kgsl_context *context,
 		uint32_t flags);
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index d9fe3c6..998eaab 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -485,7 +485,8 @@
 }
 
 static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
-				struct kgsl_pagetable *pagetable)
+				struct kgsl_pagetable *pagetable,
+				unsigned int context_id)
 {
 	if (mmu->flags & KGSL_FLAGS_STARTED) {
 		/* page table not current, then setup mmu to use new
@@ -499,7 +500,7 @@
 			kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
 
 			/* call device specific set page table */
-			kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH |
+			kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
 				KGSL_MMUFLAGS_PTUPDATE);
 		}
 	}
@@ -583,7 +584,7 @@
 	kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
 		      (KGSL_PAGETABLE_BASE |
 		      (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
-	kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH);
+	kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
 	mmu->flags |= KGSL_FLAGS_STARTED;
 
 	return 0;
@@ -591,7 +592,8 @@
 
 static int
 kgsl_gpummu_unmap(void *mmu_specific_pt,
-		struct kgsl_memdesc *memdesc)
+		struct kgsl_memdesc *memdesc,
+		unsigned int *tlb_flags)
 {
 	unsigned int numpages;
 	unsigned int pte, ptefirst, ptelast, superpte;
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 8d66eaa..25d0463 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -617,7 +617,8 @@
 }
 
 static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
-				struct kgsl_pagetable *pagetable)
+				struct kgsl_pagetable *pagetable,
+				unsigned int context_id)
 {
 	if (mmu->flags & KGSL_FLAGS_STARTED) {
 		struct kgsl_iommu *iommu = mmu->priv;
@@ -634,7 +635,8 @@
 				flags |= KGSL_MMUFLAGS_TLBFLUSH;
 			flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
 							mmu->device->id);
-			kgsl_setstate(mmu, KGSL_MMUFLAGS_PTUPDATE | flags);
+			kgsl_setstate(mmu, context_id,
+				KGSL_MMUFLAGS_PTUPDATE | flags);
 		}
 	}
 }
@@ -847,7 +849,8 @@
 
 static int
 kgsl_iommu_unmap(void *mmu_specific_pt,
-		struct kgsl_memdesc *memdesc)
+		struct kgsl_memdesc *memdesc,
+		unsigned int *tlb_flags)
 {
 	int ret;
 	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
@@ -868,6 +871,14 @@
 			"with err: %d\n", iommu_pt->domain, gpuaddr,
 			range, ret);
 
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+	/*
+	 * Flushing only required if per process pagetables are used. With
+	 * global case, flushing will happen inside iommu_map function
+	 */
+	if (!ret)
+		*tlb_flags = UINT_MAX;
+#endif
 	return 0;
 }
 
@@ -897,14 +908,6 @@
 		return ret;
 	}
 
-#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
-	/*
-	 * Flushing only required if per process pagetables are used. With
-	 * global case, flushing will happen inside iommu_map function
-	 */
-	if (!ret)
-		*tlb_flags = UINT_MAX;
-#endif
 	return ret;
 }
 
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index dfaadba..c02274d 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -543,13 +543,14 @@
 }
 EXPORT_SYMBOL(kgsl_mmu_putpagetable);
 
-void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags)
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+			uint32_t flags)
 {
 	struct kgsl_device *device = mmu->device;
 	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
 		return;
 	else if (device->ftbl->setstate)
-		device->ftbl->setstate(device, flags);
+		device->ftbl->setstate(device, context_id, flags);
 	else if (mmu->mmu_ops->mmu_device_setstate)
 		mmu->mmu_ops->mmu_device_setstate(mmu, flags);
 }
@@ -684,7 +685,8 @@
 
 	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
 		spin_lock(&pagetable->lock);
-	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
+	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
+					&pagetable->tlb_flags);
 	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
 		spin_lock(&pagetable->lock);
 	/* Remove the statistics */
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index de53946..d06ce45 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -125,7 +125,8 @@
 	int (*mmu_start) (struct kgsl_mmu *mmu);
 	void (*mmu_stop) (struct kgsl_mmu *mmu);
 	void (*mmu_setstate) (struct kgsl_mmu *mmu,
-		struct kgsl_pagetable *pagetable);
+		struct kgsl_pagetable *pagetable,
+		unsigned int context_id);
 	void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
 					uint32_t flags);
 	void (*mmu_pagefault) (struct kgsl_mmu *mmu);
@@ -149,7 +150,8 @@
 			unsigned int protflags,
 			unsigned int *tlb_flags);
 	int (*mmu_unmap) (void *mmu_pt,
-			struct kgsl_memdesc *memdesc);
+			struct kgsl_memdesc *memdesc,
+			unsigned int *tlb_flags);
 	void *(*mmu_create_pagetable) (void);
 	void (*mmu_destroy_pagetable) (void *pt);
 	int (*mmu_pt_equal) (struct kgsl_pagetable *pt,
@@ -193,7 +195,8 @@
 int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
 		    struct kgsl_memdesc *memdesc);
 unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
-void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags);
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+			uint32_t flags);
 int kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base);
 int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
 			enum kgsl_deviceid id);
@@ -219,10 +222,11 @@
 }
 
 static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
-			struct kgsl_pagetable *pagetable)
+			struct kgsl_pagetable *pagetable,
+			unsigned int context_id)
 {
 	if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
-		mmu->mmu_ops->mmu_setstate(mmu, pagetable);
+		mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
 }
 
 static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 325cd98..6df073a 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -439,6 +439,8 @@
 		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
 			&pwr->power_flags)) {
 			trace_kgsl_rail(device, state);
+			if (pwr->gpu_dig)
+				regulator_disable(pwr->gpu_dig);
 			if (pwr->gpu_reg)
 				regulator_disable(pwr->gpu_reg);
 		}
@@ -449,8 +451,18 @@
 			if (pwr->gpu_reg) {
 				int status = regulator_enable(pwr->gpu_reg);
 				if (status)
-					KGSL_DRV_ERR(device, "regulator_enable "
-							"failed: %d\n", status);
+					KGSL_DRV_ERR(device,
+							"core regulator_enable "
+							"failed: %d\n",
+							status);
+			}
+			if (pwr->gpu_dig) {
+				int status = regulator_enable(pwr->gpu_dig);
+				if (status)
+					KGSL_DRV_ERR(device,
+							"cx regulator_enable "
+							"failed: %d\n",
+							status);
 			}
 		}
 	}
@@ -534,6 +546,13 @@
 	if (IS_ERR(pwr->gpu_reg))
 		pwr->gpu_reg = NULL;
 
+	if (pwr->gpu_reg) {
+		pwr->gpu_dig = regulator_get(&pdev->dev, "vdd_dig");
+		if (IS_ERR(pwr->gpu_dig))
+			pwr->gpu_dig = NULL;
+	} else
+		pwr->gpu_dig = NULL;
+
 	pwr->power_flags = 0;
 
 	pwr->nap_allowed = pdata->nap_allowed;
@@ -596,6 +615,11 @@
 		pwr->gpu_reg = NULL;
 	}
 
+	if (pwr->gpu_dig) {
+		regulator_put(pwr->gpu_dig);
+		pwr->gpu_dig = NULL;
+	}
+
 	for (i = 1; i < KGSL_MAX_CLKS; i++)
 		if (pwr->grp_clks[i]) {
 			clk_put(pwr->grp_clks[i]);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 1e5c21c..954c818 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -50,6 +50,7 @@
 	unsigned int interval_timeout;
 	bool strtstp_sleepwake;
 	struct regulator *gpu_reg;
+	struct regulator *gpu_dig;
 	uint32_t pcl;
 	unsigned int nap_allowed;
 	unsigned int idle_needed;
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index bc2685c..6efba45 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -444,11 +444,13 @@
 	    (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
 		KGSL_CMD_INFO(device, "context switch %d -> %d\n",
 			context->id, z180_dev->ringbuffer.prevctx);
-		kgsl_mmu_setstate(&device->mmu, pagetable);
+		kgsl_mmu_setstate(&device->mmu, pagetable,
+				KGSL_MEMSTORE_GLOBAL);
 		cnt = PACKETSIZE_STATESTREAM;
 		ofs = 0;
 	}
 	kgsl_setstate(&device->mmu,
+			KGSL_MEMSTORE_GLOBAL,
 			kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
 			device->id));
 
@@ -861,7 +863,8 @@
 	if (z180_dev->ringbuffer.prevctx == context->id) {
 		z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
 		device->mmu.hwpagetable = device->mmu.defaultpagetable;
-		kgsl_setstate(&device->mmu, KGSL_MMUFLAGS_PTUPDATE);
+		kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
+				KGSL_MMUFLAGS_PTUPDATE);
 	}
 }
 
diff --git a/drivers/hwmon/epm_adc.c b/drivers/hwmon/epm_adc.c
index a8b99b9..fd4524e 100644
--- a/drivers/hwmon/epm_adc.c
+++ b/drivers/hwmon/epm_adc.c
@@ -71,8 +71,6 @@
 struct epm_adc_drv {
 	struct platform_device		*pdev;
 	struct device			*hwmon;
-	struct sensor_device_attribute	*sens_attr;
-	char				**fnames;
 	struct spi_device		*epm_spi_client;
 	struct mutex			conv_lock;
 	uint32_t			bus_id;
@@ -723,60 +721,57 @@
 	return snprintf(buf, 16, "Result: %d\n", conv.physical);
 }
 
-static struct sensor_device_attribute epm_adc_in_attr =
-	SENSOR_ATTR(NULL, S_IRUGO, epm_adc_show_in, NULL, 0);
+static struct sensor_device_attribute epm_adc_in_attrs[] = {
+	SENSOR_ATTR(ads0_chan0,  S_IRUGO, epm_adc_show_in, NULL, 0),
+	SENSOR_ATTR(ads0_chan1,  S_IRUGO, epm_adc_show_in, NULL, 1),
+	SENSOR_ATTR(ads0_chan2,  S_IRUGO, epm_adc_show_in, NULL, 2),
+	SENSOR_ATTR(ads0_chan3,  S_IRUGO, epm_adc_show_in, NULL, 3),
+	SENSOR_ATTR(ads0_chan4,  S_IRUGO, epm_adc_show_in, NULL, 4),
+	SENSOR_ATTR(ads0_chan5,  S_IRUGO, epm_adc_show_in, NULL, 5),
+	SENSOR_ATTR(ads0_chan6,  S_IRUGO, epm_adc_show_in, NULL, 6),
+	SENSOR_ATTR(ads0_chan7,  S_IRUGO, epm_adc_show_in, NULL, 7),
+	SENSOR_ATTR(ads0_chan8,  S_IRUGO, epm_adc_show_in, NULL, 8),
+	SENSOR_ATTR(ads0_chan9,  S_IRUGO, epm_adc_show_in, NULL, 9),
+	SENSOR_ATTR(ads0_chan10, S_IRUGO, epm_adc_show_in, NULL, 10),
+	SENSOR_ATTR(ads0_chan11, S_IRUGO, epm_adc_show_in, NULL, 11),
+	SENSOR_ATTR(ads0_chan12, S_IRUGO, epm_adc_show_in, NULL, 12),
+	SENSOR_ATTR(ads0_chan13, S_IRUGO, epm_adc_show_in, NULL, 13),
+	SENSOR_ATTR(ads0_chan14, S_IRUGO, epm_adc_show_in, NULL, 14),
+	SENSOR_ATTR(ads0_chan15, S_IRUGO, epm_adc_show_in, NULL, 15),
+	SENSOR_ATTR(ads1_chan0,  S_IRUGO, epm_adc_show_in, NULL, 16),
+	SENSOR_ATTR(ads1_chan1,  S_IRUGO, epm_adc_show_in, NULL, 17),
+	SENSOR_ATTR(ads1_chan2,  S_IRUGO, epm_adc_show_in, NULL, 18),
+	SENSOR_ATTR(ads1_chan3,  S_IRUGO, epm_adc_show_in, NULL, 19),
+	SENSOR_ATTR(ads1_chan4,  S_IRUGO, epm_adc_show_in, NULL, 20),
+	SENSOR_ATTR(ads1_chan5,  S_IRUGO, epm_adc_show_in, NULL, 21),
+	SENSOR_ATTR(ads1_chan6,  S_IRUGO, epm_adc_show_in, NULL, 22),
+	SENSOR_ATTR(ads1_chan7,  S_IRUGO, epm_adc_show_in, NULL, 23),
+	SENSOR_ATTR(ads1_chan8,  S_IRUGO, epm_adc_show_in, NULL, 24),
+	SENSOR_ATTR(ads1_chan9,  S_IRUGO, epm_adc_show_in, NULL, 25),
+	SENSOR_ATTR(ads1_chan10, S_IRUGO, epm_adc_show_in, NULL, 26),
+	SENSOR_ATTR(ads1_chan11, S_IRUGO, epm_adc_show_in, NULL, 27),
+	SENSOR_ATTR(ads1_chan12, S_IRUGO, epm_adc_show_in, NULL, 28),
+	SENSOR_ATTR(ads1_chan13, S_IRUGO, epm_adc_show_in, NULL, 29),
+	SENSOR_ATTR(ads1_chan14, S_IRUGO, epm_adc_show_in, NULL, 30),
+	SENSOR_ATTR(ads1_chan15, S_IRUGO, epm_adc_show_in, NULL, 31),
+};
 
 static int __devinit epm_adc_init_hwmon(struct platform_device *pdev,
 					       struct epm_adc_drv *epm_adc)
 {
 	struct epm_adc_platform_data *pdata = pdev->dev.platform_data;
-	int num_chans = pdata->num_channels, dev_idx = 0, chan_idx = 0;
-	int i = 0, rc = 0;
-	const char prefix[] = "ads", postfix[] = "_chan";
-	char tmpbuf[3];
+	int i, rc, num_chans = pdata->num_channels;
 
-	epm_adc->fnames = devm_kzalloc(&pdev->dev,
-				num_chans * EPM_ADC_MAX_FNAME +
-				num_chans * sizeof(char *), GFP_KERNEL);
-	if (!epm_adc->fnames) {
-		dev_err(&pdev->dev, "Unable to allocate memory\n");
-		return -ENOMEM;
-	}
-
-	epm_adc->sens_attr = devm_kzalloc(&pdev->dev, num_chans *
-			    sizeof(struct sensor_device_attribute), GFP_KERNEL);
-	if (!epm_adc->sens_attr) {
-		dev_err(&pdev->dev, "Unable to allocate memory\n");
-		rc = -ENOMEM;
-	}
-
-	for (i = 0; i < num_chans; i++, chan_idx++) {
-		epm_adc->fnames[i] = (char *)epm_adc->fnames +
-			(i * EPM_ADC_MAX_FNAME) + (num_chans *
-			sizeof(char *));
-		if (chan_idx == pdata->chan_per_adc) {
-			chan_idx = 0;
-			dev_idx++;
-		}
-		strlcpy(epm_adc->fnames[i], prefix, EPM_ADC_MAX_FNAME);
-		snprintf(tmpbuf, sizeof(tmpbuf), "%d", dev_idx);
-		strlcat(epm_adc->fnames[i], tmpbuf, EPM_ADC_MAX_FNAME);
-		strlcat(epm_adc->fnames[i], postfix, EPM_ADC_MAX_FNAME);
-		snprintf(tmpbuf, sizeof(tmpbuf), "%d", chan_idx);
-		strlcat(epm_adc->fnames[i], tmpbuf, EPM_ADC_MAX_FNAME);
-		epm_adc_in_attr.index = i;
-		epm_adc_in_attr.dev_attr.attr.name = epm_adc->fnames[i];
-		memcpy(&epm_adc->sens_attr[i], &epm_adc_in_attr,
-						sizeof(epm_adc_in_attr));
+	for (i = 0; i < num_chans; i++) {
 		rc = device_create_file(&pdev->dev,
-				&epm_adc->sens_attr[i].dev_attr);
+				&epm_adc_in_attrs[i].dev_attr);
 		if (rc) {
 			dev_err(&pdev->dev, "device_create_file failed\n");
 			return rc;
 		}
 	}
 
-	return rc;
+	return 0;
 }
 
 static int __devinit epm_adc_spi_probe(struct spi_device *spi)
@@ -866,10 +861,8 @@
 	int num_chans = pdata->num_channels;
 	int i = 0;
 
-	if (epm_adc->sens_attr)
-		for (i = 0; i < num_chans; i++)
-			device_remove_file(&pdev->dev,
-					&epm_adc->sens_attr[i].dev_attr);
+	for (i = 0; i < num_chans; i++)
+		device_remove_file(&pdev->dev, &epm_adc_in_attrs[i].dev_attr);
 	hwmon_device_unregister(epm_adc->hwmon);
 	misc_deregister(&epm_adc->misc);
 	epm_adc = NULL;
diff --git a/drivers/media/video/msm/msm_mctl.c b/drivers/media/video/msm/msm_mctl.c
index 0da5043..be6c543 100644
--- a/drivers/media/video/msm/msm_mctl.c
+++ b/drivers/media/video/msm/msm_mctl.c
@@ -897,6 +897,7 @@
 	pcam_inst->sensor_pxlcode = pcam->usr_fmts[0].pxlcode;
 	pcam_inst->my_index = i;
 	pcam_inst->pcam = pcam;
+	mutex_init(&pcam_inst->inst_lock);
 	pcam->mctl_node.dev_inst[i] = pcam_inst;
 
 	D("%s pcam_inst %p my_index = %d\n", __func__,
@@ -1006,6 +1007,7 @@
 	pcam->mctl_node.dev_inst[pcam_inst->my_index] = NULL;
 	v4l2_fh_del(&pcam_inst->eventHandle);
 	v4l2_fh_exit(&pcam_inst->eventHandle);
+	mutex_destroy(&pcam_inst->inst_lock);
 
 	kfree(pcam_inst);
 	if (NULL != pmctl) {
diff --git a/drivers/tty/n_smux.c b/drivers/tty/n_smux.c
index d03b4b4..0d2fdb8 100644
--- a/drivers/tty/n_smux.c
+++ b/drivers/tty/n_smux.c
@@ -33,8 +33,6 @@
 
 #define SMUX_NOTIFY_FIFO_SIZE	128
 #define SMUX_TX_QUEUE_SIZE	256
-#define SMUX_WM_LOW 2
-#define SMUX_WM_HIGH 4
 #define SMUX_PKT_LOG_SIZE 80
 
 /* Maximum size we can accept in a single RX buffer */
@@ -83,6 +81,30 @@
 			pr_info(x);  \
 } while (0)
 
+#define SMUX_PWR_PKT_RX(pkt) do { \
+	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
+			smux_log_pkt(pkt, 1); \
+} while (0)
+
+#define SMUX_PWR_PKT_TX(pkt) do { \
+	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
+			if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
+					pkt->hdr.flags == SMUX_WAKEUP_ACK) \
+				pr_info("smux: TX Wakeup ACK\n"); \
+			else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
+					pkt->hdr.flags == SMUX_WAKEUP_REQ) \
+				pr_info("smux: TX Wakeup REQ\n"); \
+			else \
+				smux_log_pkt(pkt, 0); \
+	} \
+} while (0)
+
+#define SMUX_PWR_BYTE_TX(pkt) do { \
+	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
+			smux_log_pkt(pkt, 0); \
+	} \
+} while (0)
+
 #define SMUX_LOG_PKT_RX(pkt) do { \
 	if (smux_debug_mask & MSM_SMUX_PKT) \
 			smux_log_pkt(pkt, 1); \
@@ -172,12 +194,15 @@
 	unsigned local_state;
 	unsigned local_mode;
 	uint8_t local_tiocm;
+	unsigned options;
 
 	unsigned remote_state;
 	unsigned remote_mode;
 	uint8_t remote_tiocm;
 
 	int tx_flow_control;
+	int rx_flow_control_auto;
+	int rx_flow_control_client;
 
 	/* client callbacks and private data */
 	void *priv;
@@ -330,6 +355,8 @@
 static int ssr_notifier_cb(struct notifier_block *this,
 				unsigned long code,
 				void *data);
+static void smux_uart_power_on_atomic(void);
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
 
 /**
  * Convert TTY Error Flags to string for logging purposes.
@@ -402,10 +429,13 @@
 		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
 		ch->local_mode = SMUX_LCH_MODE_NORMAL;
 		ch->local_tiocm = 0x0;
+		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
 		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
 		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
 		ch->remote_tiocm = 0x0;
 		ch->tx_flow_control = 0;
+		ch->rx_flow_control_auto = 0;
+		ch->rx_flow_control_client = 0;
 		ch->priv = 0;
 		ch->notify = 0;
 		ch->get_rx_buffer = 0;
@@ -486,6 +516,8 @@
 		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
 		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
 		ch->tx_flow_control = 0;
+		ch->rx_flow_control_auto = 0;
+		ch->rx_flow_control_client = 0;
 
 		/* Purge RX retry queue */
 		if (ch->rx_retry_queue_cnt)
@@ -537,67 +569,76 @@
 	char local_mode;
 	char remote_state;
 	char remote_mode;
-	struct smux_lch_t *ch;
+	struct smux_lch_t *ch = NULL;
 	unsigned char *data;
 
-	ch = &smux_lch[pkt->hdr.lcid];
+	if (!smux_assert_lch_id(pkt->hdr.lcid))
+		ch = &smux_lch[pkt->hdr.lcid];
 
-	switch (ch->local_state) {
-	case SMUX_LCH_LOCAL_CLOSED:
-		local_state = 'C';
-		break;
-	case SMUX_LCH_LOCAL_OPENING:
-		local_state = 'o';
-		break;
-	case SMUX_LCH_LOCAL_OPENED:
-		local_state = 'O';
-		break;
-	case SMUX_LCH_LOCAL_CLOSING:
-		local_state = 'c';
-		break;
-	default:
-		local_state = 'U';
-		break;
-	}
+	if (ch) {
+		switch (ch->local_state) {
+		case SMUX_LCH_LOCAL_CLOSED:
+			local_state = 'C';
+			break;
+		case SMUX_LCH_LOCAL_OPENING:
+			local_state = 'o';
+			break;
+		case SMUX_LCH_LOCAL_OPENED:
+			local_state = 'O';
+			break;
+		case SMUX_LCH_LOCAL_CLOSING:
+			local_state = 'c';
+			break;
+		default:
+			local_state = 'U';
+			break;
+		}
 
-	switch (ch->local_mode) {
-	case SMUX_LCH_MODE_LOCAL_LOOPBACK:
-		local_mode = 'L';
-		break;
-	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
-		local_mode = 'R';
-		break;
-	case SMUX_LCH_MODE_NORMAL:
-		local_mode = 'N';
-		break;
-	default:
-		local_mode = 'U';
-		break;
-	}
+		switch (ch->local_mode) {
+		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
+			local_mode = 'L';
+			break;
+		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+			local_mode = 'R';
+			break;
+		case SMUX_LCH_MODE_NORMAL:
+			local_mode = 'N';
+			break;
+		default:
+			local_mode = 'U';
+			break;
+		}
 
-	switch (ch->remote_state) {
-	case SMUX_LCH_REMOTE_CLOSED:
-		remote_state = 'C';
-		break;
-	case SMUX_LCH_REMOTE_OPENED:
-		remote_state = 'O';
-		break;
+		switch (ch->remote_state) {
+		case SMUX_LCH_REMOTE_CLOSED:
+			remote_state = 'C';
+			break;
+		case SMUX_LCH_REMOTE_OPENED:
+			remote_state = 'O';
+			break;
 
-	default:
-		remote_state = 'U';
-		break;
-	}
+		default:
+			remote_state = 'U';
+			break;
+		}
 
-	switch (ch->remote_mode) {
-	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
-		remote_mode = 'R';
-		break;
-	case SMUX_LCH_MODE_NORMAL:
-		remote_mode = 'N';
-		break;
-	default:
-		remote_mode = 'U';
-		break;
+		switch (ch->remote_mode) {
+		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
+			remote_mode = 'R';
+			break;
+		case SMUX_LCH_MODE_NORMAL:
+			remote_mode = 'N';
+			break;
+		default:
+			remote_mode = 'U';
+			break;
+		}
+	} else {
+		/* broadcast channel */
+		local_state = '-';
+		local_mode = '-';
+		remote_state = '-';
+		remote_mode = '-';
 	}
 
 	/* determine command type (ACK, etc) */
@@ -611,6 +652,11 @@
 		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
 			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
 		break;
+
+	case SMUX_CMD_PWR_CTL:
+		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
+			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
+		break;
 	};
 
 	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
@@ -1342,6 +1388,7 @@
 	uint8_t lcid;
 	int ret = 0;
 	int do_retry = 0;
+	int tx_ready = 0;
 	int tmp;
 	int rx_len;
 	struct smux_lch_t *ch;
@@ -1385,8 +1432,20 @@
 
 	if (!list_empty(&ch->rx_retry_queue)) {
 		do_retry = 1;
+
+		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+			!ch->rx_flow_control_auto &&
+			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
+			/* need to flow control RX */
+			ch->rx_flow_control_auto = 1;
+			tx_ready |= smux_rx_flow_control_updated(ch);
+			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
+					NULL);
+		}
 		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
 			/* retry queue full */
+			pr_err("%s: ch %d RX retry queue full\n",
+					__func__, lcid);
 			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
 			ret = -ENOMEM;
 			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -1410,7 +1469,7 @@
 			}
 			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
 			smux_tx_queue(ack_pkt, ch, 0);
-			list_channel(ch);
+			tx_ready = 1;
 		} else {
 			pr_err("%s: Remote loopack allocation failure\n",
 					__func__);
@@ -1436,6 +1495,8 @@
 			/* buffer allocation failed - add to retry queue */
 			do_retry = 1;
 		} else if (tmp < 0) {
+			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
+					__func__, lcid, tmp);
 			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
 			ret = -ENOMEM;
 		}
@@ -1482,6 +1543,8 @@
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 	}
 
+	if (tx_ready)
+		list_channel(ch);
 out:
 	return ret;
 }
@@ -1601,6 +1664,8 @@
 	struct smux_pkt_t *ack_pkt = NULL;
 	unsigned long flags;
 
+	SMUX_PWR_PKT_RX(pkt);
+
 	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
 		/* local sleep request ack */
@@ -1665,10 +1730,9 @@
 {
 	int ret = -ENXIO;
 
-	SMUX_LOG_PKT_RX(pkt);
-
 	switch (pkt->hdr.cmd) {
 	case SMUX_CMD_OPEN_LCH:
+		SMUX_LOG_PKT_RX(pkt);
 		if (smux_assert_lch_id(pkt->hdr.lcid)) {
 			pr_err("%s: invalid channel id %d\n",
 					__func__, pkt->hdr.lcid);
@@ -1678,6 +1742,7 @@
 		break;
 
 	case SMUX_CMD_DATA:
+		SMUX_LOG_PKT_RX(pkt);
 		if (smux_assert_lch_id(pkt->hdr.lcid)) {
 			pr_err("%s: invalid channel id %d\n",
 					__func__, pkt->hdr.lcid);
@@ -1687,6 +1752,7 @@
 		break;
 
 	case SMUX_CMD_CLOSE_LCH:
+		SMUX_LOG_PKT_RX(pkt);
 		if (smux_assert_lch_id(pkt->hdr.lcid)) {
 			pr_err("%s: invalid channel id %d\n",
 					__func__, pkt->hdr.lcid);
@@ -1696,6 +1762,7 @@
 		break;
 
 	case SMUX_CMD_STATUS:
+		SMUX_LOG_PKT_RX(pkt);
 		if (smux_assert_lch_id(pkt->hdr.lcid)) {
 			pr_err("%s: invalid channel id %d\n",
 					__func__, pkt->hdr.lcid);
@@ -1709,10 +1776,12 @@
 		break;
 
 	case SMUX_CMD_BYTE:
+		SMUX_LOG_PKT_RX(pkt);
 		ret = smux_handle_rx_byte_cmd(pkt);
 		break;
 
 	default:
+		SMUX_LOG_PKT_RX(pkt);
 		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
 		ret = -EINVAL;
 	}
@@ -1828,9 +1897,11 @@
 			smux.rx_state = SMUX_RX_MAGIC;
 			break;
 		case SMUX_WAKEUP_REQ:
+			SMUX_PWR("smux: RX Wakeup REQ\n");
 			smux_handle_wakeup_req();
 			break;
 		case SMUX_WAKEUP_ACK:
+			SMUX_PWR("smux: RX Wakeup ACK\n");
 			smux_handle_wakeup_ack();
 			break;
 		default:
@@ -2048,8 +2119,10 @@
  */
 static void smux_flush_tty(void)
 {
+	mutex_lock(&smux.mutex_lha0);
 	if (!smux.tty) {
 		pr_err("%s: ldisc not loaded\n", __func__);
+		mutex_unlock(&smux.mutex_lha0);
 		return;
 	}
 
@@ -2058,6 +2131,8 @@
 
 	if (tty_chars_in_buffer(smux.tty) > 0)
 		pr_err("%s: unable to flush UART queue\n", __func__);
+
+	mutex_unlock(&smux.mutex_lha0);
 }
 
 /**
@@ -2106,8 +2181,10 @@
 
 /**
  * Power-up the UART.
+ *
+ * Must be called with smux.mutex_lha0 already locked.
  */
-static void smux_uart_power_on(void)
+static void smux_uart_power_on_atomic(void)
 {
 	struct uart_state *state;
 
@@ -2121,19 +2198,32 @@
 }
 
 /**
+ * Power-up the UART.
+ */
+static void smux_uart_power_on(void)
+{
+	mutex_lock(&smux.mutex_lha0);
+	smux_uart_power_on_atomic();
+	mutex_unlock(&smux.mutex_lha0);
+}
+
+/**
  * Power down the UART.
  */
 static void smux_uart_power_off(void)
 {
 	struct uart_state *state;
 
+	mutex_lock(&smux.mutex_lha0);
 	if (!smux.tty || !smux.tty->driver_data) {
 		pr_err("%s: unable to find UART port for tty %p\n",
 				__func__, smux.tty);
+		mutex_unlock(&smux.mutex_lha0);
 		return;
 	}
 	state = smux.tty->driver_data;
 	msm_hs_request_clock_off(state->uart_port);
+	mutex_unlock(&smux.mutex_lha0);
 }
 
 /**
@@ -2148,44 +2238,16 @@
 {
 	unsigned long flags;
 	unsigned wakeup_delay;
-	int complete = 0;
 
-	while (!smux.in_reset) {
-		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
-		if (smux.power_state == SMUX_PWR_ON) {
-			/* wakeup complete */
-			complete = 1;
-			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
-			break;
-		} else {
-			/* retry */
-			wakeup_delay = smux.pwr_wakeup_delay_us;
-			smux.pwr_wakeup_delay_us <<= 1;
-			if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
-				smux.pwr_wakeup_delay_us =
-					SMUX_WAKEUP_DELAY_MAX;
-		}
+	if (smux.in_reset)
+		return;
+
+	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
+	if (smux.power_state == SMUX_PWR_ON) {
+		/* wakeup complete */
 		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
-		SMUX_DBG("%s: triggering wakeup\n", __func__);
-		smux_send_byte(SMUX_WAKEUP_REQ);
-
-		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
-			SMUX_DBG("%s: sleeping for %u us\n", __func__,
-					wakeup_delay);
-			usleep_range(wakeup_delay, 2*wakeup_delay);
-		} else {
-			/* schedule delayed work */
-			SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
-					__func__, wakeup_delay / 1000);
-			queue_delayed_work(smux_tx_wq,
-					&smux_wakeup_delayed_work,
-					msecs_to_jiffies(wakeup_delay / 1000));
-			break;
-		}
-	}
-
-	if (complete) {
 		SMUX_DBG("%s: wakeup complete\n", __func__);
+
 		/*
 		 * Cancel any pending retry.  This avoids a race condition with
 		 * a new power-up request because:
@@ -2194,6 +2256,31 @@
 		 *    workqueue as new TX wakeup requests
 		 */
 		cancel_delayed_work(&smux_wakeup_delayed_work);
+	} else {
+		/* retry wakeup */
+		wakeup_delay = smux.pwr_wakeup_delay_us;
+		smux.pwr_wakeup_delay_us <<= 1;
+		if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
+			smux.pwr_wakeup_delay_us =
+				SMUX_WAKEUP_DELAY_MAX;
+
+		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
+		SMUX_DBG("%s: triggering wakeup\n", __func__);
+		smux_send_byte(SMUX_WAKEUP_REQ);
+
+		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
+			SMUX_DBG("%s: sleeping for %u us\n", __func__,
+					wakeup_delay);
+			usleep_range(wakeup_delay, 2*wakeup_delay);
+			queue_work(smux_tx_wq, &smux_wakeup_work);
+		} else {
+			/* schedule delayed work */
+			SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
+					__func__, wakeup_delay / 1000);
+			queue_delayed_work(smux_tx_wq,
+					&smux_wakeup_delayed_work,
+					msecs_to_jiffies(wakeup_delay / 1000));
+		}
 	}
 }
 
@@ -2275,18 +2362,32 @@
 /**
  * Remove RX retry packet from channel and free it.
  *
- * Must be called with state_lock_lhb1 locked.
- *
  * @ch    Channel for retry packet
  * @retry Retry packet to remove
+ *
+ * @returns 1 if flow control updated; 0 otherwise
+ *
+ * Must be called with state_lock_lhb1 locked.
  */
-void smux_remove_rx_retry(struct smux_lch_t *ch,
+int smux_remove_rx_retry(struct smux_lch_t *ch,
 		struct smux_rx_pkt_retry *retry)
 {
+	int tx_ready = 0;
+
 	list_del(&retry->rx_retry_list);
 	--ch->rx_retry_queue_cnt;
 	smux_free_pkt(retry->pkt);
 	kfree(retry);
+
+	if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
+			(ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
+			ch->rx_flow_control_auto) {
+		ch->rx_flow_control_auto = 0;
+		smux_rx_flow_control_updated(ch);
+		schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
+		tx_ready = 1;
+	}
+	return tx_ready;
 }
 
 /**
@@ -2357,6 +2458,8 @@
 	union notifier_metadata metadata;
 	int tmp;
 	unsigned long flags;
+	int immediate_retry = 0;
+	int tx_ready = 0;
 
 	ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
 
@@ -2368,7 +2471,7 @@
 			retry = list_first_entry(&ch->rx_retry_queue,
 						struct smux_rx_pkt_retry,
 						rx_retry_list);
-			smux_remove_rx_retry(ch, retry);
+			(void)smux_remove_rx_retry(ch, retry);
 		}
 	}
 
@@ -2383,7 +2486,8 @@
 					rx_retry_list);
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 
-	SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
+	SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
+			__func__, ch->lcid, retry);
 	metadata.read.pkt_priv = 0;
 	metadata.read.buffer = 0;
 	tmp = ch->get_rx_buffer(ch->priv,
@@ -2392,33 +2496,44 @@
 			retry->pkt->hdr.payload_len);
 	if (tmp == 0 && metadata.read.buffer) {
 		/* have valid RX buffer */
+
 		memcpy(metadata.read.buffer, retry->pkt->payload,
 						retry->pkt->hdr.payload_len);
 		metadata.read.len = retry->pkt->hdr.payload_len;
 
 		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-		smux_remove_rx_retry(ch, retry);
+		tx_ready = smux_remove_rx_retry(ch, retry);
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
 		schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
+		if (tx_ready)
+			list_channel(ch);
+
+		immediate_retry = 1;
 	} else if (tmp == -EAGAIN ||
 			(tmp == 0 && !metadata.read.buffer)) {
 		/* retry again */
 		retry->timeout_in_ms <<= 1;
 		if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
 			/* timed out */
+			pr_err("%s: ch %d RX retry client timeout\n",
+					__func__, ch->lcid);
 			spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-			smux_remove_rx_retry(ch, retry);
-			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+			tx_ready = smux_remove_rx_retry(ch, retry);
 			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
+			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+			if (tx_ready)
+				list_channel(ch);
 		}
 	} else {
 		/* client error - drop packet */
+		pr_err("%s: ch %d RX retry client failed (%d)\n",
+				__func__, ch->lcid, tmp);
 		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
-		smux_remove_rx_retry(ch, retry);
+		tx_ready = smux_remove_rx_retry(ch, retry);
 		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
-
 		schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
+		if (tx_ready)
+			list_channel(ch);
 	}
 
 	/* schedule next retry */
@@ -2427,8 +2542,12 @@
 		retry = list_first_entry(&ch->rx_retry_queue,
 						struct smux_rx_pkt_retry,
 						rx_retry_list);
-		queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
-				msecs_to_jiffies(retry->timeout_in_ms));
+
+		if (immediate_retry)
+			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
+		else
+			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
+					msecs_to_jiffies(retry->timeout_in_ms));
 	}
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 }
@@ -2497,7 +2616,7 @@
 			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 
 			/* send the packet */
-			SMUX_LOG_PKT_TX(pkt);
+			SMUX_PWR_PKT_TX(pkt);
 			if (!smux_byte_loopback) {
 				smux_tx_tty(pkt);
 				smux_flush_tty();
@@ -2534,7 +2653,7 @@
 
 		if (smux.power_state != SMUX_PWR_ON) {
 			/* channel not ready to transmit */
-			SMUX_DBG("%s: can not tx with power state %d\n",
+			SMUX_DBG("%s: waiting for link up (state %d)\n",
 					__func__,
 					smux.power_state);
 			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
@@ -2577,7 +2696,7 @@
 				--ch->tx_pending_data_cnt;
 				if (ch->notify_lwm &&
 					ch->tx_pending_data_cnt
-						<= SMUX_WM_LOW) {
+						<= SMUX_TX_WM_LOW) {
 					ch->notify_lwm = 0;
 					low_wm_notif = 1;
 				}
@@ -2604,6 +2723,34 @@
 	}
 }
 
+/**
+ * Update the RX flow control (sent in the TIOCM Status command).
+ *
+ * @ch  Channel for update
+ *
+ * @returns 1 for updated, 0 for not updated
+ *
+ * Must be called with ch->state_lock_lhb1 locked.
+ */
+static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
+{
+	int updated = 0;
+	int prev_state;
+
+	prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
+
+	if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
+		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
+	else
+		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
+
+	if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
+		smux_send_status_cmd(ch);
+		updated = 1;
+	}
+
+	return updated;
+}
 
 /**********************************************************************/
 /* Kernel API                                                         */
@@ -2646,17 +2793,30 @@
 	if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
 		ch->local_mode = SMUX_LCH_MODE_NORMAL;
 
-	/* Flow control */
+	/* RX Flow control */
 	if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
-		ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
-		ret = smux_send_status_cmd(ch);
-		tx_ready = 1;
+		ch->rx_flow_control_client = 1;
+		tx_ready |= smux_rx_flow_control_updated(ch);
 	}
 
 	if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
-		ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
-		ret = smux_send_status_cmd(ch);
-		tx_ready = 1;
+		ch->rx_flow_control_client = 0;
+		tx_ready |= smux_rx_flow_control_updated(ch);
+	}
+
+	/* Auto RX Flow Control */
+	if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+		SMUX_DBG("%s: auto rx flow control option enabled\n",
+			__func__);
+		ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+	}
+
+	if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
+		SMUX_DBG("%s: auto rx flow control option disabled\n",
+			__func__);
+		ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
+		ch->rx_flow_control_auto = 0;
+		tx_ready |= smux_rx_flow_control_updated(ch);
 	}
 
 	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
@@ -2880,16 +3040,16 @@
 	/* verify high watermark */
 	SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
 
-	if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
+	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
 		pr_err("%s: ch %d high watermark %d exceeded %d\n",
-				__func__, lcid, SMUX_WM_HIGH,
+				__func__, lcid, SMUX_TX_WM_HIGH,
 				ch->tx_pending_data_cnt);
 		ret = -EAGAIN;
 		goto out_inner;
 	}
 
 	/* queue packet for transmit */
-	if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
+	if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
 		ch->notify_lwm = 1;
 		pr_err("%s: high watermark hit\n", __func__);
 		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
@@ -2936,7 +3096,7 @@
 	ch = &smux_lch[lcid];
 
 	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
-	if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
+	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
 		is_full = 1;
 	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
 
@@ -2964,7 +3124,7 @@
 	ch = &smux_lch[lcid];
 
 	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
-	if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
+	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
 		is_low = 1;
 	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
 
@@ -3264,7 +3424,7 @@
 	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 
 	if (power_up_uart)
-		smux_uart_power_on();
+		smux_uart_power_on_atomic();
 
 	/* Disconnect from TTY */
 	smux.tty = NULL;
diff --git a/drivers/tty/smux_private.h b/drivers/tty/smux_private.h
index f644ff0..353c762 100644
--- a/drivers/tty/smux_private.h
+++ b/drivers/tty/smux_private.h
@@ -32,6 +32,10 @@
 
 /* Maximum number of packets in retry queue */
 #define SMUX_RX_RETRY_MAX_PKTS 32
+#define SMUX_RX_WM_HIGH        16
+#define SMUX_RX_WM_LOW          4
+#define SMUX_TX_WM_LOW          2
+#define SMUX_TX_WM_HIGH         4
 
 struct tty_struct;
 
diff --git a/drivers/tty/smux_test.c b/drivers/tty/smux_test.c
index 62e9465..e488a63 100644
--- a/drivers/tty/smux_test.c
+++ b/drivers/tty/smux_test.c
@@ -43,37 +43,46 @@
  * @failed - set to true if test fails
  */
 #define UT_ASSERT_INT(a, cmp, b) \
-	if (!((a)cmp(b))) { \
+	{ \
+	int a_tmp = (a); \
+	int b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
 		i += scnprintf(buf + i, max - i, \
 			"%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
 				__func__, __LINE__, \
-				a, b); \
+				a_tmp, b_tmp); \
 		failed = 1; \
 		break; \
 	} \
-	do {} while (0)
+	}
 
 #define UT_ASSERT_PTR(a, cmp, b) \
-	if (!((a)cmp(b))) { \
+	{ \
+	void *a_tmp = (a); \
+	void *b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
 		i += scnprintf(buf + i, max - i, \
 			"%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
 				__func__, __LINE__, \
-				a, b); \
+				a_tmp, b_tmp); \
 		failed = 1; \
 		break; \
 	} \
-	do {} while (0)
+	}
 
 #define UT_ASSERT_UINT(a, cmp, b) \
-	if (!((a)cmp(b))) { \
+	{ \
+	unsigned a_tmp = (a); \
+	unsigned b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
 		i += scnprintf(buf + i, max - i, \
 			"%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
 				__func__, __LINE__, \
-				a, b); \
+				a_tmp, b_tmp); \
 		failed = 1; \
 		break; \
 	} \
-	do {} while (0)
+	}
 
 /**
  * In-range unit test assertion for test cases.
@@ -94,16 +103,20 @@
  * @failed - set to true if test fails
  */
 #define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
-	if (((a) < (minv)) || ((a) > (maxv))) { \
+	{ \
+	int a_tmp = (a); \
+	int minv_tmp = (minv); \
+	int maxv_tmp = (maxv); \
+	if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
 		i += scnprintf(buf + i, max - i, \
 			"%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
 				 #a "(%d) > " #maxv "(%d)\n", \
 				__func__, __LINE__, \
-				a, minv, a, maxv); \
+				a_tmp, minv_tmp, a_tmp, maxv_tmp); \
 		failed = 1; \
 		break; \
 	} \
-	do {} while (0)
+	}
 
 
 static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
@@ -172,6 +185,8 @@
 	int event_disconnected_ssr;
 	int event_low_wm;
 	int event_high_wm;
+	int event_rx_retry_high_wm;
+	int event_rx_retry_low_wm;
 
 	/* TIOCM changes */
 	int event_tiocm;
@@ -222,6 +237,8 @@
 	cb->event_disconnected_ssr = 0;
 	cb->event_low_wm = 0;
 	cb->event_high_wm = 0;
+	cb->event_rx_retry_high_wm = 0;
+	cb->event_rx_retry_low_wm = 0;
 	cb->event_tiocm = 0;
 	cb->tiocm_meta.tiocm_old = 0;
 	cb->tiocm_meta.tiocm_new = 0;
@@ -282,15 +299,17 @@
 		"\tevent_disconnected_ssr=%d\n"
 		"\tevent_low_wm=%d\n"
 		"\tevent_high_wm=%d\n"
+		"\tevent_rx_retry_high_wm=%d\n"
+		"\tevent_rx_retry_low_wm=%d\n"
 		"\tevent_tiocm=%d\n"
 		"\tevent_read_done=%d\n"
 		"\tevent_read_failed=%d\n"
-		"\tread_events=%d\n"
+		"\tread_events empty=%d\n"
 		"\tget_rx_retry=%d\n"
-		"\tget_rx_retry_events=%d\n"
+		"\tget_rx_retry_events empty=%d\n"
 		"\tevent_write_done=%d\n"
 		"\tevent_write_failed=%d\n"
-		"\twrite_events=%d\n",
+		"\twrite_events empty=%d\n",
 		cb->cb_count,
 		cb->cb_completion.done,
 		cb->event_connected,
@@ -298,12 +317,14 @@
 		cb->event_disconnected_ssr,
 		cb->event_low_wm,
 		cb->event_high_wm,
+		cb->event_rx_retry_high_wm,
+		cb->event_rx_retry_low_wm,
 		cb->event_tiocm,
 		cb->event_read_done,
 		cb->event_read_failed,
-		!list_empty(&cb->read_events),
+		list_empty(&cb->read_events),
 		cb->get_rx_buff_retry_count,
-		!list_empty(&cb->get_rx_buff_retry_events),
+		list_empty(&cb->get_rx_buff_retry_events),
 		cb->event_write_done,
 		cb->event_write_failed,
 		list_empty(&cb->write_events)
@@ -416,6 +437,19 @@
 		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
 		break;
 
+	case SMUX_RX_RETRY_HIGH_WM_HIT:
+		spin_lock_irqsave(&cb_data_ptr->lock, flags);
+		++cb_data_ptr->event_rx_retry_high_wm;
+		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+		break;
+
+	case SMUX_RX_RETRY_LOW_WM_HIT:
+		spin_lock_irqsave(&cb_data_ptr->lock, flags);
+		++cb_data_ptr->event_rx_retry_low_wm;
+		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+		break;
+
+
 	case SMUX_TIOCM_UPDATE:
 		spin_lock_irqsave(&cb_data_ptr->lock, flags);
 		++cb_data_ptr->event_tiocm;
@@ -1315,7 +1349,7 @@
 		/* open port for loopback */
 		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
 				SMUX_CH_OPTION_LOCAL_LOOPBACK,
-				0);
+				SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
 		UT_ASSERT_INT(ret, ==, 0);
 
 		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
@@ -1568,6 +1602,132 @@
 	return i;
 }
 
+/**
+ * Verify get_rx_buffer callback retry for auto-rx flow control.
+ *
+ * @buf  Buffer for status message
+ * @max  Size of buffer
+ *
+ * @returns Number of bytes written to @buf
+ */
+static int smux_ut_local_get_rx_buff_retry_auto(char *buf, int max)
+{
+	static struct smux_mock_callback cb_data;
+	static int cb_initialized;
+	int i = 0;
+	int failed = 0;
+	int ret;
+	int try;
+	int try_rx_retry_wm;
+
+	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
+	pr_err("%s", buf);
+
+	if (!cb_initialized)
+		mock_cb_data_init(&cb_data);
+
+	mock_cb_data_reset(&cb_data);
+	smux_byte_loopback = SMUX_TEST_LCID;
+	while (!failed) {
+		/* open port for loopback */
+		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
+				SMUX_CH_OPTION_LOCAL_LOOPBACK
+				| SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
+				0);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
+				smux_mock_cb, get_rx_buffer_mock);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* Test high rx-retry watermark */
+		get_rx_buffer_mock_fail = 1;
+		try_rx_retry_wm = 0;
+		for (try = 0; try < SMUX_RX_RETRY_MAX_PKTS; ++try) {
+			pr_err("%s: try %d\n", __func__, try);
+			ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
+						test_array, sizeof(test_array));
+			UT_ASSERT_INT(ret, ==, 0);
+			if (failed)
+				break;
+
+			if (!try_rx_retry_wm &&
+					cb_data.event_rx_retry_high_wm) {
+				/* RX high watermark hit */
+				try_rx_retry_wm = try + 1;
+				break;
+			}
+
+			while (cb_data.event_write_done <= try) {
+				UT_ASSERT_INT(
+					(int)wait_for_completion_timeout(
+						&cb_data.cb_completion, HZ),
+					>, 0);
+				INIT_COMPLETION(cb_data.cb_completion);
+			}
+			if (failed)
+				break;
+		}
+		if (failed)
+			break;
+
+		/* RX retry high watermark should have been set */
+		UT_ASSERT_INT(cb_data.event_rx_retry_high_wm, ==, 1);
+		UT_ASSERT_INT(try_rx_retry_wm, ==, SMUX_RX_WM_HIGH);
+
+		/*
+		 * Disable RX buffer allocation failures and wait for
+		 * SMUX_RX_WM_HIGH packets to be read successfully.
+		 */
+		get_rx_buffer_mock_fail = 0;
+		while (cb_data.event_read_done < SMUX_RX_WM_HIGH) {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, 2*HZ),
+				>, 0);
+			INIT_COMPLETION(cb_data.cb_completion);
+		}
+		if (failed)
+			break;
+
+		UT_ASSERT_INT(0, ==, cb_data.event_read_failed);
+		UT_ASSERT_INT(SMUX_RX_WM_HIGH, ==,
+				cb_data.event_read_done);
+		UT_ASSERT_INT(cb_data.event_rx_retry_low_wm, ==, 1);
+		mock_cb_data_reset(&cb_data);
+
+		/* close port */
+		ret = msm_smux_close(SMUX_TEST_LCID);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
+		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
+		break;
+	}
+
+	if (!failed) {
+		i += scnprintf(buf + i, max - i, "\tOK\n");
+	} else {
+		pr_err("%s: Failed\n", __func__);
+		i += scnprintf(buf + i, max - i, "\tFailed\n");
+		i += mock_cb_data_print(&cb_data, buf + i, max - i);
+		msm_smux_close(SMUX_TEST_LCID);
+	}
+	smux_byte_loopback = 0;
+	mock_cb_data_reset(&cb_data);
+	return i;
+}
+
 static char debug_buffer[DEBUG_BUFMAX];
 
 static ssize_t debug_read(struct file *file, char __user *buf,
@@ -1631,6 +1791,8 @@
 			smux_ut_local_smuxld_receive_buf);
 	debug_create("ut_local_get_rx_buff_retry", 0444, dent,
 			smux_ut_local_get_rx_buff_retry);
+	debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
+			smux_ut_local_get_rx_buff_retry_auto);
 
 	return 0;
 }
diff --git a/drivers/usb/dwc3/dwc3_otg.c b/drivers/usb/dwc3/dwc3_otg.c
index 5df030a..23b582d 100644
--- a/drivers/usb/dwc3/dwc3_otg.c
+++ b/drivers/usb/dwc3/dwc3_otg.c
@@ -579,9 +579,11 @@
 		return -ENOMEM;
 	}
 
-	dotg->irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+	/* DWC3 has a separate IRQ line for OTG events (ID/BSV etc.) */
+	dotg->irq = platform_get_irq_byname(to_platform_device(dwc->dev),
+								"otg_irq");
 	if (dotg->irq < 0) {
-		dev_err(dwc->dev, "%s: missing IRQ\n", __func__);
+		dev_err(dwc->dev, "%s: missing OTG IRQ\n", __func__);
 		ret = -ENODEV;
 		goto err1;
 	}
diff --git a/include/linux/smux.h b/include/linux/smux.h
index 308f969..24a6371 100644
--- a/include/linux/smux.h
+++ b/include/linux/smux.h
@@ -77,6 +77,8 @@
 	SMUX_TIOCM_UPDATE,
 	SMUX_LOW_WM_HIT,      /* @metadata is NULL */
 	SMUX_HIGH_WM_HIT,     /* @metadata is NULL */
+	SMUX_RX_RETRY_HIGH_WM_HIT,  /* @metadata is NULL */
+	SMUX_RX_RETRY_LOW_WM_HIT,   /* @metadata is NULL */
 };
 
 /**
@@ -86,6 +88,7 @@
 	SMUX_CH_OPTION_LOCAL_LOOPBACK = 1 << 0,
 	SMUX_CH_OPTION_REMOTE_LOOPBACK = 1 << 1,
 	SMUX_CH_OPTION_REMOTE_TX_STOP = 1 << 2,
+	SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP = 1 << 3,
 };
 
 /**
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index 20ac6e1..439d4b1 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -307,8 +307,8 @@
 	{"Ext Spk Left Neg", NULL, "LINEOUT2"},
 
 	/* Headset Mic */
-	{"AMIC2", NULL, "MIC BIAS2 Internal1"},
-	{"MIC BIAS2 Internal1", NULL, "Headset Mic"},
+	{"AMIC2", NULL, "MIC BIAS2 External"},
+	{"MIC BIAS2 External", NULL, "Headset Mic"},
 
 	/* Microphone path */
 	{"AMIC1", NULL, "MIC BIAS2 External"},