Merge "arm/dt: mpq8092: Add qcom,not-wakeup property for SPMI arbiter"
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
index 3b0426b..e784bfa 100644
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
@@ -34,10 +34,15 @@
 - reg : offset and length of the register set for the device.
 - interrupts : should contain the uart interrupt.
 
+Optional properties:
+- cell-index: An integer specifying the line number of the UART device that
+  represents this HSL hardware instance.
+
 Example:
 
 	serial@19c40000 {
 		compatible = "qcom,msm-lsuart-v14";
 		reg = <0x19c40000 0x1000>;
 		interrupts = <195>;
+		cell-index = <0>;	// this device will be named ttyHSL0
 	};
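As a quick illustration of the optional property documented above (not part of this change), a driver's probe path could resolve cell-index with the standard OF helper; the function name and fallback below are assumptions.

	/* Hypothetical lookup of the optional cell-index property */
	#include <linux/of.h>

	static u32 msm_hsl_get_line(struct device_node *np)
	{
		u32 line = 0;	/* left at 0 (ttyHSL0) if the property is absent */

		of_property_read_u32(np, "cell-index", &line);
		return line;	/* used as the uart_port line number */
	}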
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index cbcd546..e2524ba 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -404,6 +404,7 @@
 CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_INFO=y
diff --git a/arch/arm/mach-msm/board-9615-gpiomux.c b/arch/arm/mach-msm/board-9615-gpiomux.c
index 9339638..cef967c 100644
--- a/arch/arm/mach-msm/board-9615-gpiomux.c
+++ b/arch/arm/mach-msm/board-9615-gpiomux.c
@@ -201,6 +201,23 @@
 	},
 };
 
+static struct gpiomux_setting sd_card_det = {
+	.func = GPIOMUX_FUNC_GPIO,
+	.drv = GPIOMUX_DRV_2MA,
+	.pull = GPIOMUX_PULL_NONE,
+	.dir = GPIOMUX_IN,
+};
+
+struct msm_gpiomux_config sd_card_det_config[] __initdata = {
+	{
+		.gpio = 80,
+		.settings = {
+			[GPIOMUX_ACTIVE]    = &sd_card_det,
+			[GPIOMUX_SUSPENDED] = &sd_card_det,
+		},
+	},
+};
+
 #ifdef CONFIG_LTC4088_CHARGER
 static struct msm_gpiomux_config
 	msm9615_ltc4088_charger_config[] __initdata = {
@@ -362,6 +379,8 @@
 
 	msm_gpiomux_install(msm9615_ps_hold_config,
 			ARRAY_SIZE(msm9615_ps_hold_config));
+	msm_gpiomux_install(sd_card_det_config,
+			ARRAY_SIZE(sd_card_det_config));
 	msm_gpiomux_install(msm9615_sdcc2_configs,
 			ARRAY_SIZE(msm9615_sdcc2_configs));
 #ifdef CONFIG_LTC4088_CHARGER
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index d080a379..777e0bf 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -2829,8 +2829,8 @@
 	.div_src_val = BVAL(10, 8, dsipll0_byte_mm_source_val),
 };
 static struct clk_freq_tbl pixel_freq = {
-	.src_clk = &dsipll0_byte_clk_src,
-	.div_src_val = BVAL(10, 8, dsipll0_byte_mm_source_val),
+	.src_clk = &dsipll0_pixel_clk_src,
+	.div_src_val = BVAL(10, 8, dsipll0_pixel_mm_source_val),
 };
 static struct clk_ops clk_ops_byte;
 static struct clk_ops clk_ops_pixel;
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index b0ed79f..3b3425f 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -2401,16 +2401,28 @@
 };
 
 static struct msm_rpmstats_platform_data msm_rpm_stat_pdata = {
-	.phys_addr_base = 0x0010DD04,
-	.phys_size = SZ_256,
+	.version = 1,
 };
 
+
+static struct resource msm_rpm_stat_resource[] = {
+	{
+		.start	= 0x0010D204,
+		.end	= 0x0010D204 + SZ_8K,
+		.flags	= IORESOURCE_MEM,
+		.name	= "phys_addr_base"
+	},
+};
+
+
 struct platform_device apq8064_rpm_stat_device = {
 	.name = "msm_rpm_stat",
 	.id = -1,
-	.dev = {
+	.resource = msm_rpm_stat_resource,
+	.num_resources	= ARRAY_SIZE(msm_rpm_stat_resource),
+	.dev	= {
 		.platform_data = &msm_rpm_stat_pdata,
-	},
+	}
 };
 
 static struct resource resources_rpm_master_stats[] = {
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 322347b..6e305e0 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -547,16 +547,28 @@
 };
 
 static struct msm_rpmstats_platform_data msm_rpm_stat_pdata = {
-	.phys_addr_base = 0x0010DD04,
-	.phys_size = SZ_256,
+	.version = 1,
 };
 
+static struct resource msm_rpm_stat_resource[] = {
+	{
+		.start	= 0x0010D204,
+		.end	= 0x0010D204 + SZ_8K,
+		.flags	= IORESOURCE_MEM,
+		.name	= "phys_addr_base"
+
+	},
+};
+
+
 struct platform_device msm8930_rpm_stat_device = {
 	.name = "msm_rpm_stat",
 	.id = -1,
-	.dev = {
+	.resource = msm_rpm_stat_resource,
+	.num_resources	= ARRAY_SIZE(msm_rpm_stat_resource),
+	.dev	= {
 		.platform_data = &msm_rpm_stat_pdata,
-	},
+	}
 };
 
 static struct resource resources_rpm_master_stats[] = {
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index f62c2b0..50040a8 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -3773,16 +3773,28 @@
 };
 
 static struct msm_rpmstats_platform_data msm_rpm_stat_pdata = {
-	.phys_addr_base = 0x0010DD04,
-	.phys_size = SZ_256,
+	.version = 1,
 };
 
+static struct resource msm_rpm_stat_resource[] = {
+	{
+		.start	= 0x0010D204,
+		.end	= 0x0010D204 + SZ_8K,
+		.flags	= IORESOURCE_MEM,
+		.name	= "phys_addr_base"
+	},
+};
+
+
+
 struct platform_device msm8960_rpm_stat_device = {
 	.name = "msm_rpm_stat",
 	.id = -1,
-	.dev = {
+	.resource = msm_rpm_stat_resource,
+	.num_resources	= ARRAY_SIZE(msm_rpm_stat_resource),
+	.dev	= {
 		.platform_data = &msm_rpm_stat_pdata,
-	},
+	}
 };
 
 static struct resource resources_rpm_master_stats[] = {
diff --git a/arch/arm/mach-msm/devices-9615.c b/arch/arm/mach-msm/devices-9615.c
index 7307f62..af8687e 100644
--- a/arch/arm/mach-msm/devices-9615.c
+++ b/arch/arm/mach-msm/devices-9615.c
@@ -1361,16 +1361,29 @@
 };
 
 static struct msm_rpmstats_platform_data msm_rpm_stat_pdata = {
-	.phys_addr_base = 0x0010DD04,
-	.phys_size = SZ_256,
+	.version = 1,
 };
 
+
+static struct resource msm_rpm_stat_resource[] = {
+	{
+		.start	= 0x0010D204,
+		.end	= 0x0010D204 + SZ_8K,
+		.flags	= IORESOURCE_MEM,
+		.name	= "phys_addr_base"
+	},
+};
+
+
+
 struct platform_device msm9615_rpm_stat_device = {
 	.name = "msm_rpm_stat",
 	.id = -1,
-	.dev = {
+	.resource = msm_rpm_stat_resource,
+	.num_resources	= ARRAY_SIZE(msm_rpm_stat_resource),
+	.dev	= {
 		.platform_data = &msm_rpm_stat_pdata,
-	},
+	}
 };
 
 static struct resource resources_rpm_master_stats[] = {
diff --git a/arch/arm/mach-msm/devices-fsm9xxx.c b/arch/arm/mach-msm/devices-fsm9xxx.c
index 639eeae..9043223 100644
--- a/arch/arm/mach-msm/devices-fsm9xxx.c
+++ b/arch/arm/mach-msm/devices-fsm9xxx.c
@@ -228,6 +228,7 @@
 	.parts		= NULL,
 	.nr_parts	= 0,
 	.interleave     = 0,
+	.version	= VERSION_2,
 };
 
 struct platform_device msm_device_nand = {
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index ddb98b4..189a616 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -780,6 +780,16 @@
 		.notify_rpm = false,
 		.cmd = spm_pc_without_modem,
 	},
+	[2] = {
+		.mode = MSM_SPM_MODE_POWER_COLLAPSE,
+		.notify_rpm = false,
+		.cmd = spm_pc_without_modem,
+	},
+	[3] = {
+		.mode = MSM_SPM_MODE_POWER_COLLAPSE,
+		.notify_rpm = false,
+		.cmd = spm_pc_without_modem,
+	},
 };
 
 static struct msm_spm_platform_data msm_spm_data[] __initdata = {
@@ -797,6 +807,20 @@
 		.num_modes = ARRAY_SIZE(msm_spm_seq_list),
 		.modes = msm_spm_seq_list,
 	},
+	[2] = {
+		.reg_base_addr = MSM_SAW2_BASE,
+		.reg_init_values[MSM_SPM_REG_SAW2_CFG] = 0x0,
+		.reg_init_values[MSM_SPM_REG_SAW2_SPM_CTL] = 0x01,
+		.num_modes = ARRAY_SIZE(msm_spm_seq_list),
+		.modes = msm_spm_seq_list,
+	},
+	[3] = {
+		.reg_base_addr = MSM_SAW3_BASE,
+		.reg_init_values[MSM_SPM_REG_SAW2_CFG] = 0x0,
+		.reg_init_values[MSM_SPM_REG_SAW2_SPM_CTL] = 0x01,
+		.num_modes = ARRAY_SIZE(msm_spm_seq_list),
+		.modes = msm_spm_seq_list,
+	},
 };
 
 void __init msm8x25_spm_device_init(void)
diff --git a/arch/arm/mach-msm/include/mach/msm_tspp.h b/arch/arm/mach-msm/include/mach/msm_tspp.h
index 5395b88..a024a99 100644
--- a/arch/arm/mach-msm/include/mach/msm_tspp.h
+++ b/arch/arm/mach-msm/include/mach/msm_tspp.h
@@ -25,31 +25,34 @@
 struct tspp_data_descriptor {
 	void *virt_base;   /* logical address of the actual data */
 	u32 phys_base;     /* physical address of the actual data */
-	int size;			 /* size of buffer in bytes */
+	u32 size;          /* size of buffer in bytes */
 	int id;            /* unique identifier */
 	void *user;        /* user-defined data */
 };
 
-typedef void (tspp_notifier)(int channel, void *user);
-typedef void* (tspp_allocator)(int channel, int size,
+typedef void (tspp_notifier)(int channel_id, void *user);
+typedef void* (tspp_allocator)(int channel_id, u32 size,
 	u32 *phys_base, void *user);
+typedef void (tspp_memfree)(int channel_id, u32 size,
+	void *virt_base, u32 phys_base, void *user);
 
 /* Kernel API functions */
 int tspp_open_stream(u32 dev, u32 channel_id,
-	struct tspp_select_source *source);
+			struct tspp_select_source *source);
 int tspp_close_stream(u32 dev, u32 channel_id);
 int tspp_open_channel(u32 dev, u32 channel_id);
 int tspp_close_channel(u32 dev, u32 channel_id);
-int tspp_add_filter(u32 dev, u32 channel_id,	struct tspp_filter *filter);
+int tspp_add_filter(u32 dev, u32 channel_id, struct tspp_filter *filter);
 int tspp_remove_filter(u32 dev, u32 channel_id,	struct tspp_filter *filter);
 int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key);
-int tspp_register_notification(u32 dev, u32 channel, tspp_notifier *notify,
+int tspp_register_notification(u32 dev, u32 channel_id, tspp_notifier *notify,
 	void *data, u32 timer_ms);
-int tspp_unregister_notification(u32 dev, u32 channel);
-const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel);
-int tspp_release_buffer(u32 dev, u32 channel, u32 descriptor_id);
+int tspp_unregister_notification(u32 dev, u32 channel_id);
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id);
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id);
 int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count,
-	u32 size, u32 int_freq, tspp_allocator *alloc, void *user);
+	u32 size, u32 int_freq, tspp_allocator *alloc,
+	tspp_memfree *memfree, void *user);
 
 #endif /* _MSM_TSPP_H_ */
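For context only (not part of the patch), a minimal caller sketch against the prototypes above, showing where the new tspp_memfree callback slots in; my_alloc/my_free and the buffer geometry are illustrative assumptions.

	/* Illustrative use of the extended tspp_allocate_buffers() signature */
	static void *my_alloc(int channel_id, u32 size, u32 *phys_base, void *user)
	{
		/* hand back the next pre-reserved buffer for this channel */
		return NULL;	/* sketch only */
	}

	static void my_free(int channel_id, u32 size, void *virt_base,
			u32 phys_base, void *user)
	{
		/* undo my_alloc()'s bookkeeping; the heap itself is freed later */
	}

	static int my_setup(void)
	{
		/* dev 0, channel 0: 16 buffers of 4096 bytes, notify per buffer */
		return tspp_allocate_buffers(0, 0, 16, 4096, 4096,
					my_alloc, my_free, NULL);
	}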
 
diff --git a/arch/arm/mach-msm/pm-boot.c b/arch/arm/mach-msm/pm-boot.c
index 7bc4fe0..f32e149 100644
--- a/arch/arm/mach-msm/pm-boot.c
+++ b/arch/arm/mach-msm/pm-boot.c
@@ -158,13 +158,17 @@
 			msm_pm_boot_after_pc
 				= msm_pm_config_rst_vector_after_pc;
 		} else {
+			uint32_t mpa5_boot_remap_addr[2] = {0x34, 0x4C};
+			uint32_t mpa5_cfg_ctl[2] = {0x30, 0x48};
+
 			warm_boot_ptr = ioremap_nocache(
 						MSM8625_WARM_BOOT_PHYS, SZ_64);
 			ret = msm_pm_boot_reset_vector_init(warm_boot_ptr);
 
 			entry = virt_to_phys(msm_pm_boot_entry);
 
-			/* Below sequence is a work around for cores
+			/*
+			 * The sequence below is a workaround for cores
 			 * to come out of GDFS properly on 8625 target.
 			 * On 8625 while cores coming out of GDFS observed
 			 * the memory corruption at very first memory read.
@@ -176,7 +180,8 @@
 			msm_pm_reset_vector[4] = 0xE12FFF10; /* bx  r0 */
 			msm_pm_reset_vector[5] = entry; /* 0x14 */
 
-			/* Here upper 16bits[16:31] used by CORE1
+			/*
+			 * Here upper 16bits[16:31] used by CORE1
 			 * lower 16bits[0:15] used by CORE0
 			 */
 			entry = (MSM8625_WARM_BOOT_PHYS |
@@ -184,17 +189,30 @@
 
 			/* write 'entry' to boot remapper register */
 			__raw_writel(entry, (pdata->v_addr +
-						MPA5_BOOT_REMAP_ADDR));
+						mpa5_boot_remap_addr[0]));
 
-			/* Enable boot remapper for C0 [bit:25th] */
+			/*
+			 * Enable boot remapper for C0 [bit:25th]
+			 * Enable boot remapper for C1 [bit:26th]
+			 */
 			__raw_writel(readl_relaxed(pdata->v_addr +
-					MPA5_CFG_CTL_REG) | BIT(25),
-					pdata->v_addr + MPA5_CFG_CTL_REG);
+					mpa5_cfg_ctl[0]) | (0x3 << 25),
+					pdata->v_addr + mpa5_cfg_ctl[0]);
 
-			/* Enable boot remapper for C1 [bit:26th] */
-			__raw_writel(readl_relaxed(pdata->v_addr +
-					MPA5_CFG_CTL_REG) | BIT(26),
-					pdata->v_addr + MPA5_CFG_CTL_REG);
+			/* 8x25Q changes */
+			if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) {
+				/* write 'entry' to boot remapper register */
+				__raw_writel(entry, (pdata->v_addr +
+						mpa5_boot_remap_addr[1]));
+
+				/*
+				 * Enable boot remapper for C2 [bit:25th]
+				 * Enable boot remapper for C3 [bit:26th]
+				 */
+				__raw_writel(readl_relaxed(pdata->v_addr +
+					mpa5_cfg_ctl[1]) | (0x3 << 25),
+					pdata->v_addr + mpa5_cfg_ctl[1]);
+			}
 			msm_pm_boot_before_pc = msm_pm_write_boot_vector;
 		}
 		break;
diff --git a/arch/arm/mach-msm/pm-boot.h b/arch/arm/mach-msm/pm-boot.h
index 30b67c21..e39ca75 100644
--- a/arch/arm/mach-msm/pm-boot.h
+++ b/arch/arm/mach-msm/pm-boot.h
@@ -15,7 +15,6 @@
 
 /* 8x25 specific macros */
 #define MPA5_CFG_CTL_REG	0x30
-#define MPA5_BOOT_REMAP_ADDR	0x34
 /* end */
 
 enum {
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index 427e39f..ae2a4bc 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -482,66 +482,71 @@
  * Program the top csr from core0 context to put the
  * core1 into GDFS, as core1 is not running yet.
  */
-static void configure_top_csr(void)
+static void msm_pm_configure_top_csr(void)
 {
+	/*
+	 * Enable TCSR for core
+	 * Set reset bit for SPM
+	 * Set CLK_OFF bit
+	 * Set clamps bit
+	 * Set power_up bit
+	 * Disable TCSR for core
+	 */
+	uint32_t bit_pos[][6] = {
+		/* c2 */
+		{17, 15, 13, 16, 14, 17},
+		/* c1 & c3*/
+		/* c1 & c3 */
+	};
+	uint32_t mpa5_cfg_ctl[2] = {0x30, 0x48};
 	void __iomem *base_ptr;
 	unsigned int value = 0;
+	unsigned int cpu;
+	int i;
 
-	base_ptr = core_reset_base(1);
-	if (!base_ptr)
-		return;
-
-	/* bring the core1 out of reset */
-	__raw_writel(0x3, base_ptr);
-	mb();
-	/*
-	 * override DBGNOPOWERDN and program the GDFS
-	 * count val
-	 */
-
-	 __raw_writel(0x00030002, (MSM_CFG_CTL_BASE + 0x38));
-	mb();
-
-	/* Initialize the SPM0 and SPM1 registers */
+	/* Initialize all the SPM registers */
 	msm_spm_reinit();
 
-	/* enable TCSR for core1 */
-	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
-	value |= BIT(22);
-	__raw_writel(value,  MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
-	mb();
+	for_each_possible_cpu(cpu) {
+		/* skip for C0 */
+		if (!cpu)
+			continue;
 
-	/* set reset bit for SPM1 */
-	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
-	value |= BIT(20);
-	__raw_writel(value,  MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
-	mb();
+		base_ptr = core_reset_base(cpu);
+		if (!base_ptr)
+			return;
 
-	/* set CLK_OFF bit */
-	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
-	value |= BIT(18);
-	__raw_writel(value,  MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
-	mb();
+		/* bring the core out of reset */
+		__raw_writel(0x3, base_ptr);
+		mb();
 
-	/* set clamps bit */
-	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
-	value |= BIT(21);
-	__raw_writel(value,  MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
-	mb();
+		/*
+		 * i == 0, Enable TCSR for core
+		 * i == 1, Set reset bit for SPM
+		 * i == 2, Set CLK_OFF bit
+		 * i == 3, Set clamps bit
+		 * i == 4, Set power_up bit
+		 */
+		for (i = 0; i < 5; i++) {
+			value = __raw_readl(MSM_CFG_CTL_BASE +
+							mpa5_cfg_ctl[cpu/2]);
+			value |= BIT(bit_pos[cpu%2][i]);
+			__raw_writel(value,  MSM_CFG_CTL_BASE +
+							mpa5_cfg_ctl[cpu/2]);
+			mb();
+		}
 
-	/* set power_up bit */
-	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
-	value |= BIT(19);
-	__raw_writel(value,  MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
-	mb();
+		/* i == 5, Disable TCSR for core */
+		value = __raw_readl(MSM_CFG_CTL_BASE +
+						mpa5_cfg_ctl[cpu/2]);
+		value &= ~BIT(bit_pos[cpu%2][i]);
+		__raw_writel(value,  MSM_CFG_CTL_BASE +
+						mpa5_cfg_ctl[cpu/2]);
+		mb();
 
-	/* Disable TSCR for core0 */
-	value = __raw_readl((MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG));
-	value &= ~BIT(22);
-	__raw_writel(value,  MSM_CFG_CTL_BASE + MPA5_CFG_CTL_REG);
-	mb();
-	__raw_writel(0x0, base_ptr);
-	mb();
+		__raw_writel(0x0, base_ptr);
+		mb();
+	}
 }
 
 /*
@@ -569,7 +574,7 @@
 			/*
 			 * Program the top csr to put the core1 into GDFS.
 			 */
-			configure_top_csr();
+			msm_pm_configure_top_csr();
 		}
 	} else {
 		__raw_writel(0, APPS_PWRDOWN);
@@ -981,17 +986,38 @@
 		/*
 		 * on system reset, default value of MPA5_GDFS_CNT_VAL
 		 * is = 0x0, later modem reprogram this value to
-		 * 0x00030004. Once APPS did a power collapse and
-		 * coming out of it expected value of this register
-		 * always be 0x00030004. Incase if APPS sees the value
-		 * as 0x00030002 consider this case as a modem early
-		 * exit.
+		 * 0x00030004/0x000F0004(8x25Q). Once APPS has done
+		 * a power collapse and come out of it, the expected value
+		 * of this register is always 0x00030004/0x000F0004(8x25Q).
+		 * In case APPS sees the value as 0x00030002/0x000F0002(8x25Q),
+		 * consider this case as a modem early exit.
 		 */
 		val = __raw_readl(MSM_CFG_CTL_BASE + 0x38);
-		if (val != 0x00030002)
-			power_collapsed = 1;
-		else
-			modem_early_exit = 1;
+
+		/* 8x25Q */
+		if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3) {
+			if (val != 0x000F0002) {
+				power_collapsed = 1;
+				/*
+				 * override DBGNOPOWERDN and program the GDFS
+				 * count val
+				 */
+				 __raw_writel(0x000F0002,
+						 (MSM_CFG_CTL_BASE + 0x38));
+			} else
+				modem_early_exit = 1;
+		} else {
+			if (val != 0x00030002) {
+				power_collapsed = 1;
+				/*
+				 * override DBGNOPOWERDN and program the GDFS
+				 * count val
+				 */
+				 __raw_writel(0x00030002,
+						 (MSM_CFG_CTL_BASE + 0x38));
+			} else
+				modem_early_exit = 1;
+		}
 	}
 
 #ifdef CONFIG_CACHE_L2X0
@@ -1684,12 +1710,16 @@
 
 		/*
 		 * Configure the MPA5_GDFS_CNT_VAL register for
-		 * DBGPWRUPEREQ_OVERRIDE[17:16] = Override the
+		 * DBGPWRUPEREQ_OVERRIDE[19:16] = Override the
 		 * DBGNOPOWERDN for each cpu.
 		 * MPA5_GDFS_CNT_VAL[9:0] = Delay counter for
 		 * GDFS control.
 		 */
-		val = 0x00030002;
+		if (SOCINFO_VERSION_MAJOR(socinfo_get_version()) >= 3)
+			val = 0x000F0002;
+		else
+			val = 0x00030002;
+
 		__raw_writel(val, (MSM_CFG_CTL_BASE + 0x38));
 
 		l2x0_base_addr = MSM_L2CC_BASE;
diff --git a/arch/arm/mach-msm/rpm_stats.c b/arch/arm/mach-msm/rpm_stats.c
index a831bd5..032633c 100644
--- a/arch/arm/mach-msm/rpm_stats.c
+++ b/arch/arm/mach-msm/rpm_stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/types.h>
 #include <asm/uaccess.h>
 
 #include <mach/msm_iomap.h>
@@ -44,7 +45,6 @@
 	uint32_t	id;
 	uint32_t	val;
 };
-
 struct msm_rpmstats_private_data{
 	void __iomem *reg_base;
 	u32 num_records;
@@ -53,7 +53,6 @@
 	char buf[128];
 	struct msm_rpmstats_platform_data *platform_data;
 };
-
 static inline unsigned long  msm_rpmstats_read_register(void __iomem *regbase,
 		int index, int offset)
 {
@@ -120,7 +119,6 @@
 			msm_rpmstats_id_labels[record.id],
 			usec);
 }
-
 static int msm_rpmstats_file_read(struct file *file, char __user *bufu,
 				  size_t count, loff_t *ppos)
 {
@@ -133,13 +131,16 @@
 	if (!bufu || count < 0)
 		return -EINVAL;
 
-	if (!prvdata->num_records)
-		prvdata->num_records = readl_relaxed(prvdata->reg_base);
+	if (prvdata->platform_data->version == 1) {
+		if (!prvdata->num_records)
+			prvdata->num_records = readl_relaxed(prvdata->reg_base);
+	}
 
 	if ((*ppos >= prvdata->len)
-			&& (prvdata->read_idx < prvdata->num_records)) {
-		prvdata->len = msm_rpmstats_copy_stats(prvdata);
-		*ppos = 0;
+		&& (prvdata->read_idx < prvdata->num_records)) {
+			if (prvdata->platform_data->version == 1)
+				prvdata->len = msm_rpmstats_copy_stats(prvdata);
+			*ppos = 0;
 	}
 
 	return simple_read_from_buffer(bufu, count, ppos,
@@ -198,15 +199,37 @@
 {
 	struct dentry *dent;
 	struct msm_rpmstats_platform_data *pdata;
+	struct msm_rpmstats_platform_data *pd;
+	struct resource *res = NULL;
 
-	pdata = pdev->dev.platform_data;
-	if (!pdata)
+	if (!pdev)
 		return -EINVAL;
+
+	pdata = kzalloc(sizeof(struct msm_rpmstats_platform_data), GFP_KERNEL);
+
+	if (!pdata)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		kfree(pdata);
+		return -EINVAL;
+	}
+
+	pdata->phys_addr_base  = res->start;
+
+	pdata->phys_size = resource_size(res);
+
+	if (pdev->dev.platform_data) {
+		pd = pdev->dev.platform_data;
+		pdata->version = pd->version;
+	}
+
 	dent = debugfs_create_file("rpm_stats", S_IRUGO, NULL,
-			pdev->dev.platform_data, &msm_rpmstats_fops);
+			pdata, &msm_rpmstats_fops);
 
 	if (!dent) {
 		pr_err("%s: ERROR debugfs_create_file failed\n", __func__);
+		kfree(pdata);
 		return -ENOMEM;
 	}
 	platform_set_drvdata(pdev, dent);
diff --git a/arch/arm/mach-msm/rpm_stats.h b/arch/arm/mach-msm/rpm_stats.h
index a3beaa4..c1dfe34 100644
--- a/arch/arm/mach-msm/rpm_stats.h
+++ b/arch/arm/mach-msm/rpm_stats.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
 struct msm_rpmstats_platform_data {
 	phys_addr_t phys_addr_base;
 	u32 phys_size;
+	u32 version;
 };
 
 struct msm_rpm_master_stats_platform_data {
diff --git a/arch/arm/mach-msm/spm.c b/arch/arm/mach-msm/spm.c
index 8337fd1..ea0b56c 100644
--- a/arch/arm/mach-msm/spm.c
+++ b/arch/arm/mach-msm/spm.c
@@ -132,6 +132,11 @@
 /******************************************************************************
  * Public functions
  *****************************************************************************/
+/**
+ * msm_spm_set_low_power_mode() - Configure SPM start address for low power mode
+ * @mode: SPM LPM mode to enter
+ * @notify_rpm: Notify RPM in this mode
+ */
 int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
 {
 	struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices);
@@ -185,6 +190,11 @@
 	return 0;
 }
 
+/**
+ * msm_spm_set_vdd(): Set core voltage
+ * @cpu: core id
+ * @vlevel: Encoded PMIC data.
+ */
 int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
 {
 	struct msm_spm_device *dev;
@@ -235,6 +245,11 @@
 	return -EIO;
 }
 
+/**
+ * msm_spm_get_vdd(): Get core voltage
+ * @cpu: core id
+ * @return: Returns encoded PMIC data.
+ */
 unsigned int msm_spm_get_vdd(unsigned int cpu)
 {
 	struct msm_spm_device *dev = &per_cpu(msm_spm_devices, cpu);
@@ -253,6 +268,11 @@
 	mb();
 }
 
+/**
+ * msm_spm_init(): Board initialization function
+ * @data: platform specific SPM register configuration data
+ * @nr_devs: Number of SPM devices being initialized
+ */
 int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
 {
 	unsigned int cpu;
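As illustration of the kerneldoc comments added above (not part of this change), an idle path could bracket a power collapse with msm_spm_set_low_power_mode(); MSM_SPM_MODE_POWER_COLLAPSE appears elsewhere in this patch, while MSM_SPM_MODE_CLOCK_GATING and the function name are assumptions.

	/* Hypothetical per-cpu idle sequence built on the SPM API above */
	static void example_power_collapse(void)
	{
		msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE, false);
		/* ... WFI / actual power collapse would happen here ... */
		msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
	}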
diff --git a/arch/arm/mach-msm/spm.h b/arch/arm/mach-msm/spm.h
index 4cdfcf8..a353ce0 100644
--- a/arch/arm/mach-msm/spm.h
+++ b/arch/arm/mach-msm/spm.h
@@ -127,93 +127,29 @@
 
 /* Public functions */
 
-/**
- * msm_spm_set_low_power_mode() - Configure SPM start address for low power mode
- * @mode: SPM LPM mode to enter
- * @notify_rpm: Notify RPM in this mode
- */
 int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm);
-
-/**
- * msm_spm_set_vdd(): Set core voltage
- * @cpu: core id
- * @vlevel: Encoded PMIC data.
- */
 int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel);
-
-/**
- * msm_spm_get_vdd(): Get core voltage
- * @cpu: core id
- * @return: Returns encoded PMIC data.
- */
 unsigned int msm_spm_get_vdd(unsigned int cpu);
-
-/**
- * msm_spm_turn_on_cpu_rail(): Power on cpu rail before turning on core
- * @cpu: core id
- */
 int msm_spm_turn_on_cpu_rail(unsigned int cpu);
 
 /* Internal low power management specific functions */
 
-/**
- * msm_spm_reinit(): Reinitialize SPM registers
- */
 void msm_spm_reinit(void);
-
-/**
- * msm_spm_init(): Board initalization function
- * @data: platform specific SPM register configuration data
- * @nr_devs: Number of SPM devices being initialized
- */
 int msm_spm_init(struct msm_spm_platform_data *data, int nr_devs);
-
-/**
- * msm_spm_device_init(): Device tree initialization function
- */
 int msm_spm_device_init(void);
 
 #if defined(CONFIG_MSM_L2_SPM)
 
 /* Public functions */
 
-/**
- * msm_spm_l2_set_low_power_mode(): Configure L2 SPM start address
- *                                  for low power mode
- * @mode: SPM LPM mode to enter
- * @notify_rpm: Notify RPM in this mode
- */
 int msm_spm_l2_set_low_power_mode(unsigned int mode, bool notify_rpm);
-
-/**
- * msm_spm_apcs_set_vdd(): Set Apps processor core sub-system voltage
- * @vlevel: Encoded PMIC data.
- */
 int msm_spm_apcs_set_vdd(unsigned int vlevel);
-
-/**
- * msm_spm_apcs_set_phase(): Set number of SMPS phases.
- * phase_cnt: Number of phases to be set active
- */
 int msm_spm_apcs_set_phase(unsigned int phase_cnt);
-
-/** msm_spm_enable_fts_lpm() : Enable FTS to switch to low power
- *                             when the cores are in low power modes
- * @mode: The mode configuration for FTS
- */
 int msm_spm_enable_fts_lpm(uint32_t mode);
 
 /* Internal low power management specific functions */
 
-/**
- * msm_spm_l2_init(): Board initialization function
- * @data: SPM target specific register configuration
- */
 int msm_spm_l2_init(struct msm_spm_platform_data *data);
-
-/**
- * msm_spm_l2_reinit(): Reinitialize L2 SPM registers
- */
 void msm_spm_l2_reinit(void);
 
 #else
diff --git a/arch/arm/mach-msm/spm_devices.c b/arch/arm/mach-msm/spm_devices.c
index 3fe3bd7..b378d3b 100644
--- a/arch/arm/mach-msm/spm_devices.c
+++ b/arch/arm/mach-msm/spm_devices.c
@@ -57,6 +57,11 @@
 	info->err = msm_spm_drv_set_vdd(&dev->reg_data, info->vlevel);
 }
 
+/**
+ * msm_spm_set_vdd(): Set core voltage
+ * @cpu: core id
+ * @vlevel: Encoded PMIC data.
+ */
 int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
 {
 	struct msm_spm_vdd_info info;
@@ -91,6 +96,11 @@
 }
 EXPORT_SYMBOL(msm_spm_set_vdd);
 
+/**
+ * msm_spm_get_vdd(): Get core voltage
+ * @cpu: core id
+ * @return: Returns encoded PMIC data.
+ */
 unsigned int msm_spm_get_vdd(unsigned int cpu)
 {
 	struct msm_spm_device *dev;
@@ -166,6 +176,10 @@
 	return ret;
 }
 
+/**
+ * msm_spm_turn_on_cpu_rail(): Power on cpu rail before turning on core
+ * @cpu: core id
+ */
 int msm_spm_turn_on_cpu_rail(unsigned int cpu)
 {
 	uint32_t val = 0;
@@ -208,6 +222,11 @@
 }
 EXPORT_SYMBOL(msm_spm_reinit);
 
+/**
+ * msm_spm_set_low_power_mode() - Configure SPM start address for low power mode
+ * @mode: SPM LPM mode to enter
+ * @notify_rpm: Notify RPM in this mode
+ */
 int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
 {
 	struct msm_spm_device *dev = &__get_cpu_var(msm_cpu_spm_device);
@@ -215,7 +234,11 @@
 }
 EXPORT_SYMBOL(msm_spm_set_low_power_mode);
 
-/* Board file init function */
+/**
+ * msm_spm_init(): Board initialization function
+ * @data: platform specific SPM register configuration data
+ * @nr_devs: Number of SPM devices being initialized
+ */
 int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
 {
 	unsigned int cpu;
@@ -238,6 +261,12 @@
 
 #ifdef CONFIG_MSM_L2_SPM
 
+/**
+ * msm_spm_l2_set_low_power_mode(): Configure L2 SPM start address
+ *                                  for low power mode
+ * @mode: SPM LPM mode to enter
+ * @notify_rpm: Notify RPM in this mode
+ */
 int msm_spm_l2_set_low_power_mode(unsigned int mode, bool notify_rpm)
 {
 	return msm_spm_dev_set_low_power_mode(
@@ -251,12 +280,20 @@
 }
 EXPORT_SYMBOL(msm_spm_l2_reinit);
 
+/**
+ * msm_spm_apcs_set_vdd(): Set Apps processor core sub-system voltage
+ * @vlevel: Encoded PMIC data.
+ */
 int msm_spm_apcs_set_vdd(unsigned int vlevel)
 {
 	return msm_spm_drv_set_vdd(&msm_spm_l2_device.reg_data, vlevel);
 }
 EXPORT_SYMBOL(msm_spm_apcs_set_vdd);
 
+/**
+ * msm_spm_apcs_set_phase(): Set number of SMPS phases.
+ * phase_cnt: Number of phases to be set active
+ */
 int msm_spm_apcs_set_phase(unsigned int phase_cnt)
 {
 	return msm_spm_drv_set_pmic_data(&msm_spm_l2_device.reg_data,
@@ -264,6 +301,10 @@
 }
 EXPORT_SYMBOL(msm_spm_apcs_set_phase);
 
+/** msm_spm_enable_fts_lpm() : Enable FTS to switch to low power
+ *                             when the cores are in low power modes
+ * @mode: The mode configuration for FTS
+ */
 int msm_spm_enable_fts_lpm(uint32_t mode)
 {
 	return msm_spm_drv_set_pmic_data(&msm_spm_l2_device.reg_data,
@@ -271,7 +312,10 @@
 }
 EXPORT_SYMBOL(msm_spm_enable_fts_lpm);
 
-/* Board file init function */
+/**
+ * msm_spm_l2_init(): Board initialization function
+ * @data: SPM target specific register configuration
+ */
 int __init msm_spm_l2_init(struct msm_spm_platform_data *data)
 {
 	return msm_spm_dev_init(&msm_spm_l2_device, data);
@@ -453,6 +497,9 @@
 	},
 };
 
+/**
+ * msm_spm_device_init(): Device tree initialization function
+ */
 int __init msm_spm_device_init(void)
 {
 	return platform_driver_register(&msm_spm_device_driver);
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index f9a9212..aa3469c 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -399,14 +399,16 @@
 		return ION_CP_ALLOCATE_FAIL;
 	}
 
-	if (secure_allocation &&
-	    (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) {
-		mutex_unlock(&cp_heap->lock);
-		pr_err("ION cannot allocate secure memory from heap with "
-			"outstanding mappings: User space: %lu, kernel space "
-			"(cached): %lu\n", cp_heap->umap_count,
-					   cp_heap->kmap_cached_count);
-		return ION_CP_ALLOCATE_FAIL;
+	/*
+	 * The check above already rejects non-secure allocations when the
+	 * heap is protected, so HEAP_PROTECTED implies this must be a secure
+	 * allocation. If the heap is protected and there are userspace or
+	 * cached kernel mappings, something has gone wrong in the security
+	 * model.
+	 */
+	if (cp_heap->heap_protected == HEAP_PROTECTED) {
+		BUG_ON(cp_heap->umap_count != 0);
+		BUG_ON(cp_heap->kmap_cached_count != 0);
 	}
 
 	/*
@@ -569,18 +571,11 @@
 	if (!table)
 		return ERR_PTR(-ENOMEM);
 
-	if (buf->is_secure) {
+	if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M)) {
 		int n_chunks;
 		int i;
 		struct scatterlist *sg;
 
-		if (!IS_ALIGNED(buffer->size, SZ_1M)) {
-			pr_err("%s: buffer is marked as secure but buffer size %x is not aligned to 1MB\n",
-				__func__, buffer->size);
-
-			return ERR_PTR(-EINVAL);
-		}
-
 		/* Count number of 1MB chunks. Alignment is already checked. */
 		n_chunks = buffer->size >> 20;
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index a70647a..be51c11 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -541,27 +541,8 @@
 	struct page **pages = NULL;
 	pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
 	void *ptr;
-	struct sysinfo si;
 	unsigned int align;
 
-	/*
-	 * Get the current memory information to be used in deciding if we
-	 * should go ahead with this allocation
-	 */
-
-	si_meminfo(&si);
-
-	/*
-	 * Limit the size of the allocation to the amount of free memory minus
-	 * 32MB. Why 32MB?  Because thats the buffer that page_alloc uses and
-	 * it just seems like a reasonable limit that won't make the OOM killer
-	 * go all serial on us.  Of course, if we are down this low all bets
-	 * are off but above all do no harm.
-	 */
-
-	if (size >= ((si.freeram << PAGE_SHIFT) - SZ_32M))
-		return -ENOMEM;
-
 	align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
 
 	page_size = (align >= ilog2(SZ_64K) && size >= SZ_64K)
@@ -623,7 +604,7 @@
 	while (len > 0) {
 		struct page *page;
 		unsigned int gfp_mask = GFP_KERNEL | __GFP_HIGHMEM |
-			__GFP_NOWARN;
+			__GFP_NOWARN | __GFP_NORETRY;
 		int j;
 
 		/* don't waste space at the end of the allocation*/
@@ -640,6 +621,13 @@
 				page_size = PAGE_SIZE;
 				continue;
 			}
+
+			KGSL_CORE_ERR(
+				"Out of memory: only allocated %dKB of %dKB requested\n",
+				(size - len) >> 10, size >> 10);
+
+			ret = -ENOMEM;
+			goto done;
 		}
 
 		for (j = 0; j < page_size >> PAGE_SHIFT; j++)
diff --git a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
index 191a0c4..9fb6004 100644
--- a/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
+++ b/drivers/media/dvb/mpq/demux/mpq_dmx_plugin_tspp_v1.c
@@ -18,15 +18,15 @@
 #include "mpq_dmx_plugin_common.h"
 
 
-#define TSIF_COUNT					2
+#define TSIF_COUNT			2
 
 #define TSPP_MAX_PID_FILTER_NUM		16
 
 /* Max number of section filters */
-#define TSPP_MAX_SECTION_FILTER_NUM		64
+#define TSPP_MAX_SECTION_FILTER_NUM	64
 
 /* For each TSIF we allocate two pipes, one for PES and one for sections */
-#define TSPP_PES_CHANNEL				0
+#define TSPP_PES_CHANNEL			0
 #define TSPP_SECTION_CHANNEL			1
 
 /* the channel_id set to TSPP driver based on TSIF number and channel type */
@@ -35,7 +35,7 @@
 #define TSPP_GET_TSIF_NUM(ch_id)		(ch_id >> 1)
 
 /* mask that set to care for all bits in pid filter */
-#define TSPP_PID_MASK					0x1FFF
+#define TSPP_PID_MASK				0x1FFF
 
 /* dvb-demux defines pid 0x2000 as full capture pid */
 #define TSPP_PASS_THROUGH_PID			0x2000
@@ -53,6 +53,7 @@
  * Meaning about 82 notifications per second.
  */
 #define MAX_BAM_DESCRIPTOR_SIZE		(32*1024 - 1)
+
 #define TSPP_BUFFER_SIZE			\
 	((MAX_BAM_DESCRIPTOR_SIZE / TSPP_RAW_TTS_SIZE) * TSPP_RAW_TTS_SIZE)
 
@@ -65,11 +66,18 @@
 /* Channel timeout in msec */
 #define TSPP_CHANNEL_TIMEOUT			16
 
+enum mem_buffer_allocation_mode {
+	MPQ_DMX_TSPP_INTERNAL_ALLOC = 0,
+	MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC = 1
+};
+
 /* module parameters for load time configuration */
 static int clock_inv;
 static int tsif_mode = 2;
+static int allocation_mode = MPQ_DMX_TSPP_INTERNAL_ALLOC;
 module_param(tsif_mode, int, S_IRUGO);
 module_param(clock_inv, int, S_IRUGO);
+module_param(allocation_mode, int, S_IRUGO);
 
 /*
  * Work scheduled each time TSPP notifies dmx
@@ -97,6 +105,15 @@
 		/* work used to submit to workqueue to process pes channel */
 		struct tspp_work pes_work;
 
+		/* ION handle used for TSPP data buffer allocation */
+		struct ion_handle *pes_mem_heap_handle;
+		/* TSPP data buffer heap virtual base address */
+		void *pes_mem_heap_virt_base;
+		/* TSPP data buffer heap physical base address */
+		ion_phys_addr_t pes_mem_heap_phys_base;
+		/* buffer allocation index */
+		int pes_index;
+
 		/*
 		 * TSPP pipe holding all TS packets with section data.
 		 * The following is reference count for number of feeds
@@ -107,6 +124,15 @@
 		/* work used to submit to workqueue to process pes channel */
 		struct tspp_work section_work;
 
+		/* ION handle used for TSPP data buffer allocation */
+		struct ion_handle *section_mem_heap_handle;
+		/* TSPP data buffer heap virtual base address */
+		void *section_mem_heap_virt_base;
+		/* TSPP data buffer heap physical base address */
+		ion_phys_addr_t section_mem_heap_phys_base;
+		/* buffer allocation index */
+		int section_index;
+
 		/*
 		 * Holds PIDs of allocated TSPP filters along with
 		 * how many feeds are opened on same PID.
@@ -128,8 +154,65 @@
 		/* mutex protecting the data-structure */
 		struct mutex mutex;
 	} tsif[TSIF_COUNT];
+
+	/* ION client used for TSPP data buffer allocation */
+	struct ion_client *ion_client;
 } mpq_dmx_tspp_info;
 
+static void *tspp_mem_allocator(int channel_id, u32 size,
+				u32 *phys_base, void *user)
+{
+	void *virt_addr = NULL;
+	int i = TSPP_GET_TSIF_NUM(channel_id);
+
+	if (TSPP_IS_PES_CHANNEL(channel_id)) {
+		if (mpq_dmx_tspp_info.tsif[i].pes_index == TSPP_BUFFER_COUNT)
+			return NULL;
+		virt_addr =
+			(mpq_dmx_tspp_info.tsif[i].pes_mem_heap_virt_base +
+			(mpq_dmx_tspp_info.tsif[i].pes_index * size));
+		*phys_base =
+			(mpq_dmx_tspp_info.tsif[i].pes_mem_heap_phys_base +
+			(mpq_dmx_tspp_info.tsif[i].pes_index * size));
+		mpq_dmx_tspp_info.tsif[i].pes_index++;
+	} else {
+		if (mpq_dmx_tspp_info.tsif[i].section_index ==
+						TSPP_BUFFER_COUNT)
+			return NULL;
+		virt_addr =
+			(mpq_dmx_tspp_info.tsif[i].section_mem_heap_virt_base +
+			(mpq_dmx_tspp_info.tsif[i].section_index * size));
+		*phys_base =
+			(mpq_dmx_tspp_info.tsif[i].section_mem_heap_phys_base +
+			(mpq_dmx_tspp_info.tsif[i].section_index * size));
+		mpq_dmx_tspp_info.tsif[i].section_index++;
+	}
+
+	return virt_addr;
+}
+
+static void tspp_mem_free(int channel_id, u32 size,
+			void *virt_base, u32 phys_base, void *user)
+{
+	int i = TSPP_GET_TSIF_NUM(channel_id);
+
+	/*
+	 * actual buffer heap free is done in mpq_dmx_tspp_plugin_exit().
+	 * we only update the index here, so once this function has been
+	 * called for all the buffers, tspp_mem_allocator() can be called
+	 * again.
+	 * Note: it would be incorrect to call tspp_mem_allocator()
+	 * a few times, then call tspp_mem_free(), then call
+	 * tspp_mem_allocator() again.
+	 */
+	if (TSPP_IS_PES_CHANNEL(channel_id)) {
+		if (mpq_dmx_tspp_info.tsif[i].pes_index > 0)
+			mpq_dmx_tspp_info.tsif[i].pes_index--;
+	} else {
+		if (mpq_dmx_tspp_info.tsif[i].section_index > 0)
+			mpq_dmx_tspp_info.tsif[i].section_index--;
+	}
+}
 
 /**
  * Returns a free filter slot that can be used.
@@ -374,20 +457,34 @@
 					   (void *)tsif,
 					   TSPP_CHANNEL_TIMEOUT);
 
-		/* TODO: register allocater and provide allocation function
-		 * that allocate from continous memory so that we can have
+		/* register an allocator and provide an allocation function
+		 * that allocates from contiguous memory so that we can have
 		 * big notification size, smallest descriptor, and still provide
 		 * TZ with single big buffer based on notification size.
 		 */
 
-		/* set buffer/descriptor size and count */
-		ret = tspp_allocate_buffers(0,
-					    channel_id,
-					    TSPP_BUFFER_COUNT,
-					    TSPP_BUFFER_SIZE,
-					    TSPP_NOTIFICATION_SIZE,
-					    NULL,
-					    NULL);
+		/* set buffer/descriptor size and count,
+		 * allocate TSPP data buffers
+		 */
+		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+			ret = tspp_allocate_buffers(0,
+						    channel_id,
+						    TSPP_BUFFER_COUNT,
+						    TSPP_BUFFER_SIZE,
+						    TSPP_NOTIFICATION_SIZE,
+						    tspp_mem_allocator,
+						    tspp_mem_free,
+						    NULL);
+		} else {
+			ret = tspp_allocate_buffers(0,
+						    channel_id,
+						    TSPP_BUFFER_COUNT,
+						    TSPP_BUFFER_SIZE,
+						    TSPP_NOTIFICATION_SIZE,
+						    NULL,
+						    NULL,
+						    NULL);
+		}
 		if (ret < 0) {
 			MPQ_DVB_ERR_PRINT(
 				"%s: tspp_allocate_buffers(%d) failed (%d)\n",
@@ -745,20 +842,152 @@
 	return 0;
 }
 
+static void mpq_dmx_tsif_ion_cleanup(int i)
+{
+	mpq_dmx_tspp_info.tsif[i].pes_mem_heap_phys_base = 0;
+	mpq_dmx_tspp_info.tsif[i].section_mem_heap_phys_base = 0;
+
+	if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[i].pes_mem_heap_handle)) {
+		if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[i].
+				pes_mem_heap_virt_base))
+			ion_unmap_kernel(mpq_dmx_tspp_info.ion_client,
+				mpq_dmx_tspp_info.tsif[i].pes_mem_heap_handle);
+
+		ion_free(mpq_dmx_tspp_info.ion_client,
+			mpq_dmx_tspp_info.tsif[i].pes_mem_heap_handle);
+	}
+
+	if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[i].
+			section_mem_heap_handle)) {
+		if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[i].
+					section_mem_heap_virt_base))
+			ion_unmap_kernel(mpq_dmx_tspp_info.ion_client,
+					mpq_dmx_tspp_info.tsif[i].
+						section_mem_heap_handle);
+
+		ion_free(mpq_dmx_tspp_info.ion_client,
+			mpq_dmx_tspp_info.tsif[i].section_mem_heap_handle);
+	}
+
+	mpq_dmx_tspp_info.tsif[i].pes_mem_heap_virt_base = NULL;
+	mpq_dmx_tspp_info.tsif[i].section_mem_heap_virt_base = NULL;
+	mpq_dmx_tspp_info.tsif[i].pes_mem_heap_handle = NULL;
+	mpq_dmx_tspp_info.tsif[i].section_mem_heap_handle = NULL;
+}
+
+static void mpq_dmx_tspp_ion_cleanup(void)
+{
+	int i;
+
+	for (i = 0; i < TSIF_COUNT; i++)
+		mpq_dmx_tsif_ion_cleanup(i);
+}
+
 static int mpq_tspp_dmx_init(
 			struct dvb_adapter *mpq_adapter,
 			struct mpq_demux *mpq_demux)
 {
-	int result;
+	int i, result;
+	size_t len;
 
 	MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
 
+	if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+		/*
+		 * Save ION client, used to allocate memory
+		 * for TSPP's buffers.
+		 */
+		mpq_dmx_tspp_info.ion_client = mpq_demux->ion_client;
+
+		if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.ion_client))
+			return -EINVAL;
+
+		for (i = 0; i < TSIF_COUNT; i++) {
+			mpq_dmx_tspp_info.tsif[i].pes_mem_heap_handle =
+				ion_alloc(mpq_dmx_tspp_info.ion_client,
+					(TSPP_BUFFER_COUNT * TSPP_BUFFER_SIZE),
+					TSPP_RAW_TTS_SIZE,
+					ION_HEAP(ION_CP_MM_HEAP_ID),
+					0); /* non-cached */
+			if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[i].
+						pes_mem_heap_handle)) {
+				MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n",
+						__func__);
+				mpq_dmx_tspp_ion_cleanup();
+				return -ENOMEM;
+			}
+			/* save virtual base address of heap */
+			mpq_dmx_tspp_info.tsif[i].pes_mem_heap_virt_base =
+				ion_map_kernel(mpq_dmx_tspp_info.ion_client,
+					mpq_dmx_tspp_info.tsif[i].
+						pes_mem_heap_handle);
+			if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[i].
+						pes_mem_heap_virt_base)) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: ion_map_kernel() failed\n",
+					__func__);
+				mpq_dmx_tspp_ion_cleanup();
+				return -ENOMEM;
+			}
+			/* save physical base address of heap */
+			result = ion_phys(mpq_dmx_tspp_info.ion_client,
+				mpq_dmx_tspp_info.tsif[i].pes_mem_heap_handle,
+				&(mpq_dmx_tspp_info.tsif[i].
+					pes_mem_heap_phys_base), &len);
+			if (result < 0) {
+				MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n",
+						__func__);
+				mpq_dmx_tspp_ion_cleanup();
+				return -ENOMEM;
+			}
+
+			mpq_dmx_tspp_info.tsif[i].section_mem_heap_handle =
+				ion_alloc(mpq_dmx_tspp_info.ion_client,
+					(TSPP_BUFFER_COUNT * TSPP_BUFFER_SIZE),
+					TSPP_RAW_TTS_SIZE,
+					ION_HEAP(ION_CP_MM_HEAP_ID),
+					0); /* non-cached */
+			if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[i].
+						section_mem_heap_handle)) {
+				MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n",
+						__func__);
+				mpq_dmx_tspp_ion_cleanup();
+				return -ENOMEM;
+			}
+			/* save virtual base address of heap */
+			mpq_dmx_tspp_info.tsif[i].section_mem_heap_virt_base =
+				ion_map_kernel(mpq_dmx_tspp_info.ion_client,
+				mpq_dmx_tspp_info.tsif[i].
+					section_mem_heap_handle);
+			if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[i].
+					section_mem_heap_virt_base)) {
+				MPQ_DVB_ERR_PRINT(
+					"%s: ion_map_kernel() failed\n",
+					__func__);
+				mpq_dmx_tspp_ion_cleanup();
+				return -ENOMEM;
+			}
+			/* save physical base address of heap */
+			result = ion_phys(mpq_dmx_tspp_info.ion_client,
+				mpq_dmx_tspp_info.tsif[i].
+					section_mem_heap_handle,
+				&(mpq_dmx_tspp_info.tsif[i].
+					section_mem_heap_phys_base), &len);
+			if (result < 0) {
+				MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n",
+						__func__);
+				mpq_dmx_tspp_ion_cleanup();
+				return -ENOMEM;
+			}
+		}
+	}
+
 	/* Set the kernel-demux object capabilities */
 	mpq_demux->demux.dmx.capabilities =
 		DMX_TS_FILTERING			|
 		DMX_PES_FILTERING			|
-		DMX_SECTION_FILTERING		|
-		DMX_MEMORY_BASED_FILTERING	|
+		DMX_SECTION_FILTERING			|
+		DMX_MEMORY_BASED_FILTERING		|
 		DMX_CRC_CHECKING			|
 		DMX_TS_DESCRAMBLING;
 
@@ -818,6 +1047,7 @@
 init_failed_dmx_release:
 	dvb_dmx_release(&mpq_demux->demux);
 init_failed:
+	mpq_dmx_tspp_ion_cleanup();
 	return result;
 }
 
@@ -831,6 +1061,10 @@
 
 	for (i = 0; i < TSIF_COUNT; i++) {
 		mpq_dmx_tspp_info.tsif[i].pes_channel_ref = 0;
+		mpq_dmx_tspp_info.tsif[i].pes_index = 0;
+		mpq_dmx_tspp_info.tsif[i].pes_mem_heap_handle = NULL;
+		mpq_dmx_tspp_info.tsif[i].pes_mem_heap_virt_base = NULL;
+		mpq_dmx_tspp_info.tsif[i].pes_mem_heap_phys_base = 0;
 
 		mpq_dmx_tspp_info.tsif[i].pes_work.channel_id =
 			TSPP_CHANNEL_ID(i, TSPP_PES_CHANNEL);
@@ -839,6 +1073,10 @@
 				  mpq_dmx_tspp_work);
 
 		mpq_dmx_tspp_info.tsif[i].section_channel_ref = 0;
+		mpq_dmx_tspp_info.tsif[i].section_index = 0;
+		mpq_dmx_tspp_info.tsif[i].section_mem_heap_handle = NULL;
+		mpq_dmx_tspp_info.tsif[i].section_mem_heap_virt_base = NULL;
+		mpq_dmx_tspp_info.tsif[i].section_mem_heap_phys_base = 0;
 
 		mpq_dmx_tspp_info.tsif[i].section_work.channel_id =
 			TSPP_CHANNEL_ID(i, TSPP_SECTION_CHANNEL);
@@ -861,14 +1099,12 @@
 				mpq_dmx_tspp_info.tsif[i].name);
 
 		if (mpq_dmx_tspp_info.tsif[i].workqueue == NULL) {
-
 			for (j = 0; j < i; j++) {
 				destroy_workqueue(
 					mpq_dmx_tspp_info.tsif[j].workqueue);
 
 				mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
 			}
-
 			MPQ_DVB_ERR_PRINT(
 				"%s: create_singlethread_workqueue failed\n",
 				__func__);
@@ -905,6 +1141,11 @@
 	for (i = 0; i < TSIF_COUNT; i++) {
 		mutex_lock(&mpq_dmx_tspp_info.tsif[i].mutex);
 
+		/*
+		 * Note: tspp_close_channel will also free the TSPP buffers
+		 * even if we allocated them ourselves,
+		 * using our free function.
+		 */
 		if (mpq_dmx_tspp_info.tsif[i].pes_channel_ref) {
 			tspp_unregister_notification(0, TSPP_PES_CHANNEL);
 			tspp_close_channel(0,
@@ -917,9 +1158,11 @@
 				TSPP_CHANNEL_ID(i, TSPP_SECTION_CHANNEL));
 		}
 
-		/* TODO: if we allocate buffer
-		 * to TSPP ourself, need to free those as well
+		/* if we allocated buffer pools
+		 * to TSPP, we need to free those as well
 		 */
+		if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC)
+			mpq_dmx_tsif_ion_cleanup(i);
 
 		mutex_unlock(&mpq_dmx_tspp_info.tsif[i].mutex);
 		flush_workqueue(mpq_dmx_tspp_info.tsif[i].workqueue);
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index 16f7e2c..772f0de 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -520,6 +520,71 @@
 	}
 }
 
+static void handle_session_error(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst = NULL;
+	struct v4l2_event dqevent;
+	if (response) {
+		inst = (struct msm_vidc_inst *)response->session_id;
+		if (inst) {
+			dprintk(VIDC_WARN,
+				"Session error received for session %p\n", inst);
+			mutex_lock(&inst->sync_lock);
+			inst->state = MSM_VIDC_CORE_INVALID;
+			mutex_unlock(&inst->sync_lock);
+			dqevent.type = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
+			dqevent.id = 0;
+			v4l2_event_queue_fh(&inst->event_handler, &dqevent);
+			wake_up(&inst->kernel_event_queue);
+		}
+	} else {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for session error\n");
+	}
+}
+static void handle_sys_error(enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_inst *inst = NULL;
+	struct msm_vidc_core *core = NULL;
+	struct v4l2_event dqevent;
+	unsigned long flags;
+	if (response) {
+		inst = (struct msm_vidc_inst *)response->session_id;
+		dprintk(VIDC_WARN,
+				"Sys error received for session %p\n", inst);
+		if (inst) {
+			core = inst->core;
+			if (core) {
+				spin_lock_irqsave(&core->lock, flags);
+				core->state = VIDC_CORE_INVALID;
+				spin_unlock_irqrestore(&core->lock, flags);
+				dqevent.type = V4L2_EVENT_MSM_VIDC_SYS_ERROR;
+				dqevent.id = 0;
+				list_for_each_entry(inst, &core->instances,
+					list) {
+					if (inst) {
+						v4l2_event_queue_fh(
+							&inst->event_handler,
+								&dqevent);
+						spin_lock_irqsave(&inst->lock,
+							flags);
+						inst->state =
+							MSM_VIDC_CORE_INVALID;
+						spin_unlock_irqrestore(
+							&inst->lock, flags);
+					}
+				}
+			wake_up(&inst->kernel_event_queue);
+			}
+		}
+	} else {
+		dprintk(VIDC_ERR,
+			"Failed to get valid response for sys error\n");
+	}
+}
+
 static void handle_sys_watchdog_timeout(enum command_response cmd, void *data)
 {
 	subsystem_restart("msm_vidc");
@@ -766,6 +831,12 @@
 	case SYS_WATCHDOG_TIMEOUT:
 		handle_sys_watchdog_timeout(cmd, data);
 		break;
+	case SYS_ERROR:
+		handle_sys_error(cmd, data);
+		break;
+	case SESSION_ERROR:
+		handle_session_error(cmd, data);
+		break;
 	default:
 		dprintk(VIDC_ERR, "response unhandled\n");
 		break;
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 190e132..55f9a07 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -613,11 +613,9 @@
 	write_register(device->hal_data->register_base_addr,
 				   VIDC_VENUS_VBIF_CLK_ON, 1, 0);
 	write_register(device->hal_data->register_base_addr,
-			VIDC_VBIF_OUT_AXI_AOOO_EN, 0x00000FFF, 0);
+			VIDC_VBIF_OUT_AXI_AOOO_EN, 0x00001FFF, 0);
 	write_register(device->hal_data->register_base_addr,
-			VIDC_VBIF_OUT_AXI_AOOO, 0x0FFF0FFF, 0);
-	write_register(device->hal_data->register_base_addr,
-			VIDC_VENUS_VBIF_CLK_ON, 1, 0);
+			VIDC_VBIF_OUT_AXI_AOOO, 0x1FFF1FFF, 0);
 	write_register(device->hal_data->register_base_addr,
 			VIDC_VBIF_IN_RD_LIM_CONF0, 0x10101001, 0);
 	write_register(device->hal_data->register_base_addr,
@@ -641,7 +639,15 @@
 	write_register(device->hal_data->register_base_addr,
 			VIDC_VBIF_ARB_CTL, 0x00000030, 0);
 	write_register(device->hal_data->register_base_addr,
+			VIDC_VENUS_VBIF_DDR_OUT_MAX_BURST, 0x00000707, 0);
+	write_register(device->hal_data->register_base_addr,
+			VIDC_VENUS_VBIF_OCMEM_OUT_MAX_BURST, 0x00000707, 0);
+	write_register(device->hal_data->register_base_addr,
+			VIDC_VENUS_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000001, 0);
+	write_register(device->hal_data->register_base_addr,
 			VIDC_VENUS0_WRAPPER_VBIF_REQ_PRIORITY, 0x5555556, 0);
+	write_register(device->hal_data->register_base_addr,
+			VIDC_VENUS0_WRAPPER_VBIF_PRIORITY_LEVEL, 0, 0);
 }
 
 static int vidc_hal_sys_set_debug(struct hal_device *device, int debug)
diff --git a/drivers/media/video/msm_vidc/vidc_hal_api.h b/drivers/media/video/msm_vidc/vidc_hal_api.h
index d3fa1d0..8aff5af 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_api.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_api.h
@@ -814,6 +814,7 @@
 	SYS_IDLE,
 	SYS_DEBUG,
 	SYS_WATCHDOG_TIMEOUT,
+	SYS_ERROR,
 /* SESSION COMMANDS_DONE */
 	SESSION_LOAD_RESOURCE_DONE,
 	SESSION_INIT_DONE,
@@ -833,6 +834,7 @@
 	SESSION_RELEASE_BUFFER_DONE,
 	SESSION_RELEASE_RESOURCE_DONE,
 	SESSION_PROPERTY_INFO,
+	SESSION_ERROR,
 	RESPONSE_UNUSED = 0x10000000,
 };
 
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index e15edbe..ba599ec 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -146,6 +146,21 @@
 	cmd_done.device_id = device->device_id;
 	device->callback(SYS_WATCHDOG_TIMEOUT, &cmd_done);
 }
+static void hal_process_sys_error(struct hal_device *device)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+	disable_irq_nosync(device->hal_data->irq);
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	cmd_done.device_id = device->device_id;
+	device->callback(SYS_ERROR, &cmd_done);
+}
+static void hal_process_session_error(struct hal_device *device)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	cmd_done.device_id = device->device_id;
+	device->callback(SESSION_ERROR, &cmd_done);
+}
 static void hal_process_event_notify(struct hal_device *device,
 	struct hfi_msg_event_notify_packet *pkt)
 {
@@ -160,9 +175,11 @@
 	switch (pkt->event_id) {
 	case HFI_EVENT_SYS_ERROR:
 		dprintk(VIDC_INFO, "HFI_EVENT_SYS_ERROR");
+		hal_process_sys_error(device);
 		break;
 	case HFI_EVENT_SESSION_ERROR:
 		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_ERROR");
+		hal_process_session_error(device);
 		break;
 	case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
 		dprintk(VIDC_INFO, "HFI_EVENT_SESSION_SEQUENCE_CHANGED");
diff --git a/drivers/media/video/msm_vidc/vidc_hal_io.h b/drivers/media/video/msm_vidc/vidc_hal_io.h
index b85e015..6845ac5 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_io.h
+++ b/drivers/media/video/msm_vidc/vidc_hal_io.h
@@ -22,6 +22,12 @@
 
 #define VIDC_VBIF_BASE_OFFS			0x00080000
 #define VIDC_VBIF_VERSION			(VIDC_VBIF_BASE_OFFS + 0x00)
+#define VIDC_VENUS_VBIF_DDR_OUT_MAX_BURST		\
+			(VIDC_VBIF_BASE_OFFS + 0xD8)
+#define VIDC_VENUS_VBIF_OCMEM_OUT_MAX_BURST		\
+			(VIDC_VBIF_BASE_OFFS + 0xDC)
+#define VIDC_VENUS_VBIF_ROUND_ROBIN_QOS_ARB		\
+			(VIDC_VBIF_BASE_OFFS + 0x124)
 
 #define VIDC_CPU_BASE_OFFS			0x000C0000
 #define VIDC_CPU_CS_BASE_OFFS		(VIDC_CPU_BASE_OFFS + 0x00012000)
@@ -128,7 +134,5 @@
 	(VIDC_WRAPPER_BASE_OFFS + 0x20)
 #define VIDC_VENUS0_WRAPPER_VBIF_PRIORITY_LEVEL \
 	(VIDC_WRAPPER_BASE_OFFS + 0x24)
-#define VIDC_VENUS_VBIF_REQ_PRIORITY    (VIDC_WRAPPER_BASE_OFFS + 0x20)
-#define VIDC_VENUS_VBIF_PRIORITY_LEVEL  (VIDC_WRAPPER_BASE_OFFS + 0x24)
 
 #endif
diff --git a/drivers/media/video/msm_wfd/enc-mfc-subdev.c b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
index 09a5e32..6db2ad1 100644
--- a/drivers/media/video/msm_wfd/enc-mfc-subdev.c
+++ b/drivers/media/video/msm_wfd/enc-mfc-subdev.c
@@ -1975,6 +1975,7 @@
 	unsigned long phy_addr;
 	int i = 0;
 	int heap_mask = 0;
+	u32 ion_flags = 0;
 	u32 len;
 	control.width = inst->width;
 	control.height = inst->height;
@@ -1988,7 +1989,8 @@
 		goto err;
 	}
 	heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
-	heap_mask |= inst->secure ? ION_SECURE : ION_HEAP(ION_IOMMU_HEAP_ID);
+	heap_mask |= inst->secure ? 0 : ION_HEAP(ION_IOMMU_HEAP_ID);
+	ion_flags |= inst->secure ? ION_SECURE : 0;
 
 	if (vcd_get_ion_status()) {
 		for (i = 0; i < 4; ++i) {
@@ -1999,7 +2001,7 @@
 			ctrl->user_virtual_addr = (void *)i;
 			client_ctx->recon_buffer_ion_handle[i]
 				= ion_alloc(client_ctx->user_ion_client,
-			control.size, SZ_8K, heap_mask, 0);
+			control.size, SZ_8K, heap_mask, ion_flags);
 
 			ctrl->kernel_virtual_addr = ion_map_kernel(
 				client_ctx->user_ion_client,
diff --git a/drivers/media/video/msm_wfd/wfd-ioctl.c b/drivers/media/video/msm_wfd/wfd-ioctl.c
index d8080dd..371ae3c 100644
--- a/drivers/media/video/msm_wfd/wfd-ioctl.c
+++ b/drivers/media/video/msm_wfd/wfd-ioctl.c
@@ -155,13 +155,15 @@
 	struct ion_handle *handle = NULL;
 	void *kvaddr = NULL;
 	unsigned int alloc_regions = 0;
+	unsigned int ion_flags = 0;
 	int rc = 0;
 
 	alloc_regions = ION_HEAP(ION_CP_MM_HEAP_ID);
-	alloc_regions |= secure ? ION_SECURE :
+	alloc_regions |= secure ? 0 :
 				ION_HEAP(ION_IOMMU_HEAP_ID);
+	ion_flags |= secure ? ION_SECURE : 0;
 	handle = ion_alloc(client,
-			mregion->size, SZ_4K, alloc_regions, 0);
+			mregion->size, SZ_4K, alloc_regions, ion_flags);
 
 	if (IS_ERR_OR_NULL(handle)) {
 		WFD_MSG_ERR("Failed to allocate input buffer\n");
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 51a798f..2b73b11 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -2267,7 +2267,7 @@
 
 
 	ret = request_irq(dev->vcirq->start, vcap_vc_handler,
-		IRQF_TRIGGER_RISING, "vc_irq", 0);
+		IRQF_TRIGGER_HIGH, "vc_irq", 0);
 	if (ret < 0) {
 		pr_err("%s: vc irq request fail\n", __func__);
 		ret = -EBUSY;
diff --git a/drivers/media/video/vcap_vc.c b/drivers/media/video/vcap_vc.c
index f3c9362..08dcdf3 100644
--- a/drivers/media/video/vcap_vc.c
+++ b/drivers/media/video/vcap_vc.c
@@ -126,28 +126,9 @@
 	return tv;
 }
 
-irqreturn_t vc_handler(struct vcap_dev *dev)
+inline void vc_isr_error_checking(struct vcap_dev *dev,
+		struct v4l2_event v4l2_evt, uint32_t irq)
 {
-	uint32_t irq, timestamp;
-	struct vcap_buffer *buf;
-	struct vb2_buffer *vb = NULL;
-	struct vcap_client_data *c_data;
-	struct v4l2_event v4l2_evt;
-	uint8_t i, idx, buf_num, tot, done_count = 0;
-	bool work_todo = false;
-
-	irq = readl_relaxed(VCAP_VC_INT_STATUS);
-
-	pr_debug("%s: irq=0x%08x\n", __func__, irq);
-
-	c_data = dev->vc_client;
-	if (!c_data->streaming) {
-		writel_iowmb(irq, VCAP_VC_INT_CLEAR);
-		pr_err("VC no longer streaming\n");
-		return IRQ_HANDLED;
-	}
-
-	v4l2_evt.id = 0;
 	if (irq & 0x8000200) {
 		writel_iowmb(0x00000102, VCAP_VC_NPL_CTRL);
 		v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
@@ -166,6 +147,12 @@
 			VCAP_VC_VSYNC_ERR_EVENT;
 		v4l2_event_queue(dev->vfd, &v4l2_evt);
 	}
+	if (irq & 0x00001000) {
+		writel_iowmb(0x00000102, VCAP_VC_NPL_CTRL);
+		v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
+			VCAP_VC_VSYNC_SEQ_ERR;
+		v4l2_event_queue(dev->vfd, &v4l2_evt);
+	}
 	if (irq & 0x00000800) {
 		writel_iowmb(0x00000102, VCAP_VC_NPL_CTRL);
 		v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
@@ -178,33 +165,26 @@
 			VCAP_VC_LBUF_OFLOW_ERR_EVENT;
 		v4l2_event_queue(dev->vfd, &v4l2_evt);
 	}
+}
 
-	if (!(irq & VC_BUFFER_MASK)) {
-		writel_relaxed(irq, VCAP_VC_INT_CLEAR);
-		pr_err("VC IRQ shows some error\n");
-		return IRQ_HANDLED;
-	}
-
-	if (dev->vc_client == NULL) {
-		/* This should never happen */
-		writel_relaxed(irq, VCAP_VC_INT_CLEAR);
-		pr_err("VC: There is no active vc client\n");
-		return IRQ_HANDLED;
-	}
-	c_data = dev->vc_client;
-
+inline uint8_t vc_isr_buffer_done_count(struct vcap_dev *dev,
+		struct vcap_client_data *c_data, uint32_t irq)
+{
+	int i;
+	uint8_t done_count = 0;
 	for (i = 0; i < VCAP_VC_MAX_BUF; i++) {
 		if (0x2 & (irq >> i))
 			done_count++;
 	}
+	return done_count;
+}
 
-	/* Assign field value in case somehow got out of sync */
-	if (c_data->vc_format.mode == HAL_VCAP_MODE_INT && done_count == 1)
-		c_data->vc_action.top_field = !(irq & 0x1);
-
+inline bool vc_isr_verify_expect_buf_rdy(struct vcap_dev *dev,
+		struct vcap_client_data *c_data, struct v4l2_event v4l2_evt,
+		uint32_t irq, uint8_t done_count, uint8_t tot, uint8_t buf_num)
+{
+	int i;
 	/* Double check expected buffers are done */
-	buf_num = c_data->vc_action.buf_num;
-	tot = c_data->vc_action.tot_buf;
 	for (i = 0; i < done_count; i++) {
 		if (!(irq & (0x1 << (((buf_num + i) % tot) + 1)))) {
 			v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
@@ -213,12 +193,17 @@
 			pr_debug("Unexpected buffer done\n");
 			c_data->vc_action.buf_num =
 				correct_buf_num(irq) % tot;
-			writel_relaxed(irq, VCAP_VC_INT_CLEAR);
-			return IRQ_HANDLED;
+			return true;
 		}
 	}
+	return false;
+}
 
-	/* If here we know which buffers are done */
+inline void vc_isr_update_timestamp(struct vcap_dev *dev,
+		struct vcap_client_data *c_data)
+{
+	uint32_t timestamp;
+
 	timestamp = readl_relaxed(VCAP_VC_TIMESTAMP);
 	if (timestamp < c_data->vc_action.last_ts) {
 		c_data->vc_action.vc_ts.tv_usec +=
@@ -234,65 +219,142 @@
 	c_data->vc_action.vc_ts.tv_usec =
 		c_data->vc_action.vc_ts.tv_usec % VCAP_USEC;
 	c_data->vc_action.last_ts = timestamp;
+}
 
-	c_data->vc_action.buf_num = (buf_num + done_count) % tot;
+inline void vc_isr_no_new_buffer(struct vcap_dev *dev,
+		struct vcap_client_data *c_data, struct v4l2_event v4l2_evt)
+{
+	v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
+		VCAP_VC_BUF_OVERWRITE_EVENT;
+	v4l2_event_queue(dev->vfd, &v4l2_evt);
+
+	c_data->vc_action.field_dropped =
+		!c_data->vc_action.field_dropped;
+
+	c_data->vc_action.field1 =
+		!c_data->vc_action.field1;
+	atomic_inc(&dev->dbg_p.vc_drop_count);
+}
+
+inline void vc_isr_switch_buffers(struct vcap_dev *dev,
+		struct vcap_client_data *c_data, struct vcap_buffer *buf,
+		struct vb2_buffer *vb, uint8_t idx, int done_count, int i)
+{
+	/* Config vc with this new buffer */
+	config_buffer(c_data, buf, VCAP_VC_Y_ADDR_1 + 0x8 * idx,
+			VCAP_VC_C_ADDR_1 + 0x8 * idx);
+	vb->v4l2_buf.timestamp = interpolate_ts(
+		c_data->vc_action.vc_ts,
+		1000000 / c_data->vc_format.frame_rate *
+		(done_count - 1 - i));
+	if (c_data->vc_format.mode == HAL_VCAP_MODE_INT) {
+		if (c_data->vc_action.field1)
+			vb->v4l2_buf.field = V4L2_FIELD_TOP;
+		else
+			vb->v4l2_buf.field = V4L2_FIELD_BOTTOM;
+
+		c_data->vc_action.field1 =
+			!c_data->vc_action.field1;
+	}
+	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+	c_data->vc_action.buf[idx] = buf;
+}
+
+inline bool vc_isr_change_buffers(struct vcap_dev *dev,
+		struct vcap_client_data *c_data, struct v4l2_event v4l2_evt,
+		int done_count, uint8_t tot, uint8_t buf_num)
+{
+	struct vb2_buffer *vb = NULL;
+	struct vcap_buffer *buf;
+	bool schedule_work = false;
+	uint8_t idx;
+	int i;
+
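+	/* For each completed slot, return the finished vb2 buffer to user
+	 * space and program the next queued buffer into the hardware; if
+	 * none is queued the frame is overwritten and counted as a drop.
+	 */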
 	for (i = 0; i < done_count; i++) {
 		idx = (buf_num + i) % tot;
 		vb = &c_data->vc_action.buf[idx]->vb;
 		spin_lock(&c_data->cap_slock);
 		if (list_empty(&c_data->vc_action.active)) {
 			spin_unlock(&c_data->cap_slock);
-			v4l2_evt.type = V4L2_EVENT_PRIVATE_START +
-				VCAP_VC_BUF_OVERWRITE_EVENT;
-			v4l2_event_queue(dev->vfd, &v4l2_evt);
-			c_data->vc_action.top_field =
-				!c_data->vc_action.top_field;
-
-			if (c_data->vc_format.mode == HAL_VCAP_MODE_INT)
-				c_data->vc_action.field_dropped =
-					!c_data->vc_action.field_dropped;
-
-			atomic_inc(&dev->dbg_p.vc_drop_count);
+			vc_isr_no_new_buffer(dev, c_data, v4l2_evt);
 			continue;
 		}
 		if (c_data->vc_format.mode == HAL_VCAP_MODE_INT &&
 				c_data->vc_action.field_dropped) {
 			spin_unlock(&c_data->cap_slock);
-			c_data->vc_action.field_dropped =
-				!c_data->vc_action.field_dropped;
-			c_data->vc_action.top_field =
-				!c_data->vc_action.top_field;
-			atomic_inc(&dev->dbg_p.vc_drop_count);
+			vc_isr_no_new_buffer(dev, c_data, v4l2_evt);
 			continue;
 		}
 		buf = list_entry(c_data->vc_action.active.next,
 				struct vcap_buffer, list);
 		list_del(&buf->list);
 		spin_unlock(&c_data->cap_slock);
-		/* Config vc with this new buffer */
-		config_buffer(c_data, buf, VCAP_VC_Y_ADDR_1 + 0x8 * idx,
-				VCAP_VC_C_ADDR_1 + 0x8 * idx);
-		vb->v4l2_buf.timestamp = interpolate_ts(
-			c_data->vc_action.vc_ts,
-			1000000 / c_data->vc_format.frame_rate *
-			(done_count - 1 - i));
-		if (c_data->vc_format.mode == HAL_VCAP_MODE_INT) {
-			if (c_data->vc_action.top_field)
-				vb->v4l2_buf.field = V4L2_FIELD_TOP;
-			else
-				vb->v4l2_buf.field = V4L2_FIELD_BOTTOM;
-			c_data->vc_action.top_field =
-				!c_data->vc_action.top_field;
-		}
-		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
-		work_todo = true;
-		c_data->vc_action.buf[idx] = buf;
+		vc_isr_switch_buffers(dev, c_data, buf, vb, idx, done_count, i);
+		schedule_work = true;
+	}
+	return schedule_work;
+}
+
+irqreturn_t vc_handler(struct vcap_dev *dev)
+{
+	uint32_t irq;
+	struct vcap_client_data *c_data;
+	struct v4l2_event v4l2_evt;
+	uint8_t done_count = 0, buf_num, tot;
+	bool schedule_work = false;
+
+	v4l2_evt.id = 0;
+	irq = readl_relaxed(VCAP_VC_INT_STATUS);
+	writel_relaxed(irq, VCAP_VC_INT_CLEAR);
+
+	pr_debug("%s: irq=0x%08x\n", __func__, irq);
+
+	if (dev->vc_client == NULL) {
+		/* This should never happen */
+		pr_err("VC: There is no active vc client\n");
+		return IRQ_HANDLED;
 	}
 
-	if (work_todo && c_data->op_mode == VC_AND_VP_VCAP_OP)
+	c_data = dev->vc_client;
+	if (!c_data->streaming) {
+		pr_err("VC no longer streaming\n");
+		return IRQ_HANDLED;
+	}
+
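+	/* A VSYNC-only interrupt carries no buffer-done or error bits; just
+	 * resync the field tracking for interlaced capture and return.
+	 */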
+	if (irq == VC_VSYNC_MASK) {
+		if (c_data->vc_format.mode == HAL_VCAP_MODE_INT)
+			c_data->vc_action.field1 = irq & 0x1;
+		return IRQ_HANDLED;
+	}
+
+	if (irq & VC_ERR_MASK) {
+		vc_isr_error_checking(dev, v4l2_evt, irq);
+		return IRQ_HANDLED;
+	}
+
+	if (!(irq & VC_BUFFER_MASK)) {
+		pr_debug("No frames done\n");
+		return IRQ_HANDLED;
+	}
+
+	done_count = vc_isr_buffer_done_count(dev, c_data, irq);
+	buf_num = c_data->vc_action.buf_num;
+	tot = c_data->vc_action.tot_buf;
+
+	if (vc_isr_verify_expect_buf_rdy(dev, c_data,
+			v4l2_evt, irq, done_count, tot, buf_num))
+		return IRQ_HANDLED;
+
+	vc_isr_update_timestamp(dev, c_data);
+
+	c_data->vc_action.buf_num = (buf_num + done_count) % tot;
+
+	schedule_work = vc_isr_change_buffers(dev, c_data, v4l2_evt,
+		done_count, tot, buf_num);
+
+	if (schedule_work && c_data->op_mode == VC_AND_VP_VCAP_OP)
 		queue_work(dev->vcap_wq, &dev->vc_to_vp_work.work);
 
-	writel_relaxed(irq, VCAP_VC_INT_CLEAR);
 	return IRQ_HANDLED;
 }
 
@@ -362,6 +424,8 @@
 	rc = 0;
 	for (i = 0; i < c_data->vc_action.tot_buf; i++)
 		rc = rc << 1 | 0x2;
+	rc |= VC_ERR_MASK;
+	rc |= VC_VSYNC_MASK;
 	writel_relaxed(rc, VCAP_VC_INT_MASK);
 
 	enable_irq(dev->vcirq->start);
diff --git a/drivers/media/video/vcap_vc.h b/drivers/media/video/vcap_vc.h
index 7f42c7f..9c3f5a7 100644
--- a/drivers/media/video/vcap_vc.h
+++ b/drivers/media/video/vcap_vc.h
@@ -63,6 +63,8 @@
 
 #define VC_BUFFER_WRITTEN (0x3 << 1)
 #define VC_BUFFER_MASK 0x7E
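+/* Error and VSYNC status bits of VCAP_VC_INT_STATUS handled by the VC ISR */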
+#define VC_ERR_MASK 0xE0001E00
+#define VC_VSYNC_MASK 0x1
 
 int vc_start_capture(struct vcap_client_data *c_data);
 int vc_hw_kick_off(struct vcap_client_data *c_data);
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index 57813f5..82f9e58 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -267,7 +267,7 @@
 	}
 
 	/* Config VP */
-	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
+	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_BOTTOM)
 		top_field = 1;
 
 	writel_iowmb(0x00000000 | top_field, VCAP_VP_CTRL);
@@ -832,7 +832,7 @@
 			chroma_fmt << 11 | 0x1 << 4, VCAP_VP_OUT_CONFIG);
 
 	/* Enable Interrupt */
-	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
+	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_BOTTOM)
 		top_field = 1;
 	vp_act->vp_state = VP_FRAME2;
 	writel_relaxed(0x01100001, VCAP_VP_INTERRUPT_ENABLE);
@@ -875,7 +875,7 @@
 	if (rc < 0)
 		return rc;
 
-	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_TOP)
+	if (vp_act->bufT2->vb.v4l2_buf.field == V4L2_FIELD_BOTTOM)
 		top_field = 1;
 
 	/* Config VP & Enable Interrupt */
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 26ba379..e71c95d 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -47,7 +47,9 @@
 #define QSEECOM_DEV			"qseecom"
 #define QSEOS_VERSION_13		0x13
 #define QSEOS_VERSION_14		0x14
-#define QSEOS_CHECK_VERSION_CMD		0x00001803;
+#define QSEEE_VERSION_00		0x400000
+
+#define QSEOS_CHECK_VERSION_CMD		0x00001803
 
 enum qseecom_command_scm_resp_type {
 	QSEOS_APP_ID = 0xEE01,
@@ -64,6 +66,9 @@
 	QSEOS_LISTENER_DATA_RSP_COMMAND,
 	QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
 	QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_GET_APP_STATE_COMMAND,
+	QSEOS_LOAD_SERV_IMAGE_COMMAND,
+	QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
 	QSEOS_CMD_MAX     = 0xEFFFFFFF
 };
 
@@ -96,6 +101,17 @@
 	uint32_t  app_id;
 };
 
+__packed struct qseecom_load_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint32_t phy_addr;
+};
+
+__packed struct qseecom_unload_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+};
+
 __packed struct qseecom_register_listener_ireq {
 	uint32_t qsee_cmd_id;
 	uint32_t listener_id;
@@ -191,7 +207,9 @@
 	int               send_resp_flag;
 
 	uint32_t          qseos_version;
+	uint32_t          qsee_version;
 	struct device *pdev;
+	bool  commonlib_loaded;
 };
 
 struct qseecom_client_handle {
@@ -1352,10 +1370,92 @@
 	return ret;
 }
 
+static int qseecom_load_commonlib_image(void)
+{
+	int32_t ret = 0;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+
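+	/* Copy the "commonlib" firmware into kernel memory, then ask the
+	 * secure environment (via an SCM call) to load it as the common
+	 * services library.
+	 */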
+	if (__qseecom_get_fw_size("commonlib", &fw_size))
+		return -EIO;
+
+	img_data = kzalloc(fw_size, GFP_KERNEL);
+	if (!img_data) {
+		pr_err("Mem allocation for lib image data failed\n");
+		return -ENOMEM;
+	}
+	ret = __qseecom_get_fw_data("commonlib", img_data, &load_req);
+	if (ret) {
+		kzfree(img_data);
+		return -EIO;
+	}
+	/* Populate the remaining parameters */
+	load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+	/* SCM_CALL to load the image */
+	ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &load_req,
+				sizeof(struct qseecom_load_lib_image_ireq),
+							&resp, sizeof(resp));
+	kzfree(img_data);
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+	} else {
+		switch (resp.result) {
+		case QSEOS_RESULT_SUCCESS:
+			break;
+		case QSEOS_RESULT_FAILURE:
+			pr_err("scm call failed w/response result%d\n",
+						resp.result);
+			ret = -EINVAL;
+			break;
+		default:
+			pr_err("scm call return unknown response %d\n",
+						resp.result);
+			ret = -EINVAL;
+			break;
+		}
+	}
+	return ret;
+}
+
+static int qseecom_unload_commonlib_image(void)
+{
+	int ret = -EINVAL;
+	struct qseecom_unload_lib_image_ireq unload_req = {0};
+	struct qseecom_command_scm_resp resp;
+
+	/* Populate the unload request */
+	unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
+	/* SCM_CALL to unload the image */
+	ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
+			sizeof(struct qseecom_unload_lib_image_ireq),
+						&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload lib failed : ret %d\n", ret);
+		ret = -EIO;
+	} else {
+		switch (resp.result) {
+		case QSEOS_RESULT_SUCCESS:
+			break;
+		case QSEOS_RESULT_FAILURE:
+			pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
+			break;
+		default:
+			pr_err("scm call return unknown response %d\n",
+					resp.result);
+			ret = -EINVAL;
+			break;
+		}
+	}
+	return ret;
+}
+
 int qseecom_start_app(struct qseecom_handle **handle,
 						char *app_name, uint32_t size)
 {
-	int32_t ret;
+	int32_t ret = 0;
 	unsigned long flags = 0;
 	struct qseecom_dev_handle *data = NULL;
 	struct qseecom_check_app_ireq app_ireq;
@@ -1370,6 +1470,19 @@
 		return -EINVAL;
 	}
 
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
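+		/*
+		 * The common services library only needs to be loaded once,
+		 * before the first application; app_access_lock prevents two
+		 * callers from loading it concurrently.
+		 */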
+		mutex_lock(&app_access_lock);
+		if (qseecom.commonlib_loaded == false) {
+			ret = qseecom_load_commonlib_image();
+			if (ret == 0)
+				qseecom.commonlib_loaded = true;
+		}
+		mutex_unlock(&app_access_lock);
+	}
+
+	if (ret)
+		return -EIO;
+
 	*handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
 	if (!(*handle)) {
 		pr_err("failed to allocate memory for kernel client handle\n");
@@ -1984,7 +2097,15 @@
 	case QSEECOM_IOCTL_LOAD_APP_REQ: {
 		mutex_lock(&app_access_lock);
 		atomic_inc(&data->ioctl_count);
-		ret = qseecom_load_app(data, argp);
+		if (qseecom.qsee_version > QSEEE_VERSION_00) {
+			if (qseecom.commonlib_loaded == false) {
+				ret = qseecom_load_commonlib_image();
+				if (ret == 0)
+					qseecom.commonlib_loaded = true;
+			}
+		}
+		if (ret == 0)
+			ret = qseecom_load_app(data, argp);
 		atomic_dec(&data->ioctl_count);
 		mutex_unlock(&app_access_lock);
 		if (ret)
@@ -2315,17 +2436,27 @@
 	rc = scm_call(6, 1, &system_call_id, sizeof(system_call_id),
 				&qsee_not_legacy, sizeof(qsee_not_legacy));
 	if (rc) {
-		pr_err("Failed to retrieve QSEE version information %d\n", rc);
+		pr_err("Failed to retrieve QSEOS version information %d\n", rc);
 		goto err;
 	}
-	if (qsee_not_legacy)
+	if (qsee_not_legacy) {
+		uint32_t feature = 10;
+
+		qseecom.qsee_version = QSEEE_VERSION_00;
+		rc = scm_call(6, 3, &feature, sizeof(feature),
+			&qseecom.qsee_version, sizeof(qseecom.qsee_version));
+		if (rc) {
+			pr_err("Failed to get QSEE version info %d\n", rc);
+			goto err;
+		}
 		qseecom.qseos_version = QSEOS_VERSION_14;
-	else {
+	} else {
 		qseecom.qseos_version = QSEOS_VERSION_13;
+		qseecom.qsee_version = 0;
 		pil = NULL;
 		pil_ref_cnt = 0;
 	}
-
+	qseecom.commonlib_loaded = false;
 	qseecom.pdev = class_dev;
 	/* Create ION msm client */
 	qseecom.ion_clnt = msm_ion_client_create(-1, "qseecom-kernel");
@@ -2417,6 +2548,8 @@
 			break;
 		}
 	}
+	if (qseecom.qsee_version > QSEEE_VERSION_00)
+		qseecom_unload_commonlib_image();
 	return ret;
 };
 
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
index 8a1e0da..3b678c5 100644
--- a/drivers/misc/tspp.c
+++ b/drivers/misc/tspp.c
@@ -103,29 +103,29 @@
 /*
  * TSPP register offsets
  */
-#define TSPP_RST					0x00
+#define TSPP_RST			0x00
 #define TSPP_CLK_CONTROL		0x04
-#define TSPP_CONFIG				0x08
-#define TSPP_CONTROL				0x0C
+#define TSPP_CONFIG			0x08
+#define TSPP_CONTROL			0x0C
 #define TSPP_PS_DISABLE			0x10
-#define TSPP_MSG_IRQ_STATUS	0x14
+#define TSPP_MSG_IRQ_STATUS		0x14
 #define TSPP_MSG_IRQ_MASK		0x18
 #define TSPP_IRQ_STATUS			0x1C
 #define TSPP_IRQ_MASK			0x20
 #define TSPP_IRQ_CLEAR			0x24
 #define TSPP_PIPE_ERROR_STATUS(_n)	(0x28 + (_n << 2))
-#define TSPP_STATUS				0x68
-#define TSPP_CURR_TSP_HEADER	0x6C
-#define TSPP_CURR_PID_FILTER	0x70
-#define TSPP_SYSTEM_KEY(_n)	(0x74 + (_n << 2))
-#define TSPP_CBC_INIT_VAL(_n)	(0x94 + (_n << 2))
-#define TSPP_DATA_KEY_RESET	0x9C
+#define TSPP_STATUS			0x68
+#define TSPP_CURR_TSP_HEADER		0x6C
+#define TSPP_CURR_PID_FILTER		0x70
+#define TSPP_SYSTEM_KEY(_n)		(0x74 + (_n << 2))
+#define TSPP_CBC_INIT_VAL(_n)		(0x94 + (_n << 2))
+#define TSPP_DATA_KEY_RESET		0x9C
 #define TSPP_KEY_VALID			0xA0
 #define TSPP_KEY_ERROR			0xA4
 #define TSPP_TEST_CTRL			0xA8
-#define TSPP_VERSION				0xAC
+#define TSPP_VERSION			0xAC
 #define TSPP_GENERICS			0xB0
-#define TSPP_NOP					0xB4
+#define TSPP_NOP			0xB4
 
 /*
  * Register bit definitions
@@ -172,30 +172,30 @@
 #define TSPP_MSG_TSIF_0_IRQ               BIT(0)
 
 /* TSPP_IRQ_STATUS + TSPP_IRQ_MASK + TSPP_IRQ_CLEAR */
-#define TSPP_IRQ_STATUS_TSP_RD_CMPL			BIT(19)
-#define TSPP_IRQ_STATUS_KEY_ERROR			BIT(18)
+#define TSPP_IRQ_STATUS_TSP_RD_CMPL		BIT(19)
+#define TSPP_IRQ_STATUS_KEY_ERROR		BIT(18)
 #define TSPP_IRQ_STATUS_KEY_SWITCHED_BAD	BIT(17)
 #define TSPP_IRQ_STATUS_KEY_SWITCHED		BIT(16)
 #define TSPP_IRQ_STATUS_PS_BROKEN(_n)		BIT((_n))
 
 /* TSPP_PIPE_ERROR_STATUS */
-#define TSPP_PIPE_PES_SYNC_ERROR				BIT(3)
-#define TSPP_PIPE_PS_LENGTH_ERROR			BIT(2)
+#define TSPP_PIPE_PES_SYNC_ERROR		BIT(3)
+#define TSPP_PIPE_PS_LENGTH_ERROR		BIT(2)
 #define TSPP_PIPE_PS_CONTINUITY_ERROR		BIT(1)
-#define TSPP_PIP_PS_LOST_START				BIT(0)
+#define TSPP_PIP_PS_LOST_START			BIT(0)
 
 /* TSPP_STATUS			*/
-#define TSPP_STATUS_TSP_PKT_AVAIL			BIT(10)
-#define TSPP_STATUS_TSIF1_DM_REQ				BIT(6)
-#define TSPP_STATUS_TSIF0_DM_REQ				BIT(2)
-#define TSPP_CURR_FILTER_TABLE				BIT(0)
+#define TSPP_STATUS_TSP_PKT_AVAIL		BIT(10)
+#define TSPP_STATUS_TSIF1_DM_REQ		BIT(6)
+#define TSPP_STATUS_TSIF0_DM_REQ		BIT(2)
+#define TSPP_CURR_FILTER_TABLE			BIT(0)
 
 /* TSPP_GENERICS		*/
-#define TSPP_GENERICS_CRYPTO_GEN				BIT(12)
+#define TSPP_GENERICS_CRYPTO_GEN		BIT(12)
 #define TSPP_GENERICS_MAX_CONS_PIPES		BIT(7)
-#define TSPP_GENERICS_MAX_PIPES				BIT(2)
-#define TSPP_GENERICS_TSIF_1_GEN				BIT(1)
-#define TSPP_GENERICS_TSIF_0_GEN				BIT(0)
+#define TSPP_GENERICS_MAX_PIPES			BIT(2)
+#define TSPP_GENERICS_TSIF_1_GEN		BIT(1)
+#define TSPP_GENERICS_TSIF_0_GEN		BIT(0)
 
 /*
  * TSPP memory regions
@@ -375,6 +375,8 @@
 	tspp_notifier *notifier; /* used only with kernel api */
 	void *notify_data;       /* data to be passed with the notifier */
 	u32 notify_timer;        /* notification for partially filled buffers */
+	tspp_memfree *memfree;   /* user defined memory free function */
+	void *user_info; /* user cookie passed to memory alloc/free function */
 };
 
 struct tspp_pid_filter_table {
@@ -584,8 +586,7 @@
 		g = table + i;
 		tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
 		if (tmp) {
-			pr_err("tspp_gpios_disable(0x%08x, GPIO_CFG_DISABLE)"
-			       " <%s> failed: %d\n",
+			pr_err("tspp_gpios_disable(0x%08x, GPIO_CFG_DISABLE) <%s> failed: %d\n",
 			       g->gpio_cfg, g->label ?: "?", rc);
 			pr_err("tspp: pin %d func %d dir %d pull %d drvstr %d\n",
 			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
@@ -608,8 +609,7 @@
 		g = table + i;
 		rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
 		if (rc) {
-			pr_err("tspp: gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
-			       " <%s> failed: %d\n",
+			pr_err("tspp: gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE) <%s> failed: %d\n",
 			       g->gpio_cfg, g->label ?: "?", rc);
 			pr_err("tspp: pin %d func %d dir %d pull %d drvstr %d\n",
 			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
@@ -820,12 +820,12 @@
 		desc->virt_base = alloc(channel_id, size,
 			&desc->phys_base, user);
 	} else {
-	desc->virt_base = dma_alloc_coherent(NULL, size,
-		&desc->phys_base, GFP_KERNEL);
-	if (desc->virt_base == 0) {
-		pr_err("tspp dma alloc coherent failed %i", size);
-		return -ENOMEM;
-	}
+		desc->virt_base = dma_alloc_coherent(NULL, size,
+			&desc->phys_base, GFP_KERNEL);
+		if (desc->virt_base == 0) {
+			pr_err("tspp dma alloc coherent failed %i", size);
+			return -ENOMEM;
+		}
 	}
 
 	desc->size = size;
@@ -977,9 +977,13 @@
 	channel->buffer_count = 0;
 	channel->filter_count = 0;
 	channel->int_freq = 1;
+	channel->src = TSPP_SOURCE_NONE;
+	channel->mode = TSPP_MODE_DISABLED;
 	channel->notifier = NULL;
 	channel->notify_data = NULL;
 	channel->notify_timer = 0;
+	channel->memfree = NULL;
+	channel->user_info = NULL;
 	init_waitqueue_head(&channel->in_queue);
 
 	if (cdev_add(&channel->cdev, tspp_minor++, 1) != 0) {
@@ -1002,6 +1006,11 @@
 static int tspp_set_buffer_size(struct tspp_channel *channel,
 	struct tspp_buffer *buf)
 {
+	if (channel->buffer_count > 0) {
+		pr_err("tspp: cannot set buffer size - buffers already allocated\n");
+		return -EPERM;
+	}
+
 	if (buf->size < TSPP_MIN_BUFFER_SIZE)
 		channel->buffer_size = TSPP_MIN_BUFFER_SIZE;
 	else if (buf->size > TSPP_MAX_BUFFER_SIZE)
@@ -1033,8 +1042,8 @@
 }
 
 static void tspp_set_signal_inversion(struct tspp_channel *channel,
-	int clock_inverse, int data_inverse,
-	int sync_inverse, int enable_inverse)
+					int clock_inverse, int data_inverse,
+					int sync_inverse, int enable_inverse)
 {
 	int index;
 
@@ -1054,8 +1063,106 @@
 	channel->pdev->tsif[index].enable_inverse = enable_inverse;
 }
 
+static int tspp_is_buffer_size_aligned(u32 size, enum tspp_mode mode)
+{
+	u32 alignment;
+
+	switch (mode) {
+	case TSPP_MODE_RAW:
+		/* must be a multiple of 192 */
+		alignment = (TSPP_PACKET_LENGTH + 4);
+		if (size % alignment)
+			return 0;
+		return 1;
+
+	case TSPP_MODE_RAW_NO_SUFFIX:
+		/* must be a multiple of 188 */
+		alignment = TSPP_PACKET_LENGTH;
+		if (size % alignment)
+			return 0;
+		return 1;
+
+	case TSPP_MODE_DISABLED:
+	case TSPP_MODE_PES:
+	default:
+		/* no alignment requirement */
+		return 1;
+	}
+
+}
+
+static u32 tspp_align_buffer_size_by_mode(u32 size, enum tspp_mode mode)
+{
+	u32 new_size;
+	u32 alignment;
+
+	switch (mode) {
+	case TSPP_MODE_RAW:
+		/* must be a multiple of 192 */
+		alignment = (TSPP_PACKET_LENGTH + 4);
+		break;
+
+	case TSPP_MODE_RAW_NO_SUFFIX:
+		/* must be a multiple of 188 */
+		alignment = TSPP_PACKET_LENGTH;
+		break;
+
+	case TSPP_MODE_DISABLED:
+	case TSPP_MODE_PES:
+	default:
+		/* no alignment requirement - give the user what he asks for */
+		alignment = 1;
+		break;
+	}
+	/* align up */
+	new_size = (((size + alignment - 1) / alignment) * alignment);
+	return new_size;
+}
+
+static void tspp_destroy_buffers(u32 channel_id, struct tspp_channel *channel)
+{
+	int i;
+	struct tspp_mem_buffer *pbuf, *temp;
+
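+	/* Free each allocated buffer through the user-supplied memfree
+	 * callback when one was registered, otherwise dma_free_coherent().
+	 */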
+	pbuf = channel->data;
+	for (i = 0; i < channel->buffer_count; i++) {
+		if (pbuf->desc.phys_base) {
+			if (channel->memfree) {
+				channel->memfree(channel_id,
+					pbuf->desc.size,
+					pbuf->desc.virt_base,
+					pbuf->desc.phys_base,
+					channel->user_info);
+			} else {
+				dma_free_coherent(NULL,
+					pbuf->desc.size,
+					pbuf->desc.virt_base,
+					pbuf->desc.phys_base);
+			}
+			pbuf->desc.phys_base = 0;
+		}
+		pbuf->desc.virt_base = 0;
+		pbuf->state = TSPP_BUF_STATE_EMPTY;
+		temp = pbuf;
+		pbuf = pbuf->next;
+		kfree(temp);
+	}
+}
+
 /*** TSPP API functions ***/
-int tspp_open_stream(u32 dev, u32 channel_id, struct tspp_select_source *source)
+
+/**
+ * tspp_open_stream - open a TSPP stream for use.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @source: stream source parameters.
+ *
+ * Return  error status
+ *
+ */
+int tspp_open_stream(u32 dev, u32 channel_id,
+			struct tspp_select_source *source)
 {
 	u32 val;
 	struct tspp_device *pdev;
@@ -1063,6 +1170,7 @@
 
 	TSPP_DEBUG("tspp_open_stream %i %i %i %i",
 		dev, channel_id, source->source, source->mode);
+
 	if (dev >= TSPP_MAX_DEVICES) {
 		pr_err("tspp: device id out of range");
 		return -ENODEV;
@@ -1082,8 +1190,8 @@
 	channel->src = source->source;
 	tspp_set_tsif_mode(channel, source->mode);
 	tspp_set_signal_inversion(channel, source->clk_inverse,
-		source->data_inverse, source->sync_inverse,
-		source->enable_inverse);
+			source->data_inverse, source->sync_inverse,
+			source->enable_inverse);
 
 	switch (source->source) {
 	case TSPP_SOURCE_TSIF0:
@@ -1120,6 +1228,15 @@
 }
 EXPORT_SYMBOL(tspp_open_stream);
 
+/**
+ * tspp_close_stream - close a TSPP stream.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
 int tspp_close_stream(u32 dev, u32 channel_id)
 {
 	u32 val;
@@ -1162,6 +1279,15 @@
 }
 EXPORT_SYMBOL(tspp_close_stream);
 
+/**
+ * tspp_open_channel - open a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
 int tspp_open_channel(u32 dev, u32 channel_id)
 {
 	int rc = 0;
@@ -1269,6 +1395,15 @@
 }
 EXPORT_SYMBOL(tspp_open_channel);
 
+/**
+ * tspp_close_channel - close a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
 int tspp_close_channel(u32 dev, u32 channel_id)
 {
 	int i;
@@ -1278,7 +1413,6 @@
 	struct sps_connect *config;
 	struct tspp_device *pdev;
 	struct tspp_channel *channel;
-	struct tspp_mem_buffer *pbuf, *temp;
 
 	if (channel_id >= TSPP_NUM_CHANNELS) {
 		pr_err("tspp: channel id out of range");
@@ -1333,21 +1467,12 @@
 	dma_free_coherent(NULL, config->desc.size, config->desc.base,
 		config->desc.phys_base);
 
-	pbuf = channel->data;
-	for (i = 0; i < channel->buffer_count; i++) {
-		if (pbuf->desc.phys_base) {
-			dma_free_coherent(NULL,
-				pbuf->desc.size,
-				pbuf->desc.virt_base,
-				pbuf->desc.phys_base);
-			pbuf->desc.phys_base = 0;
-		}
-		pbuf->desc.virt_base = 0;
-		pbuf->state = TSPP_BUF_STATE_EMPTY;
-		temp = pbuf;
-		pbuf = pbuf->next;
-		kfree(temp);
-	}
+	tspp_destroy_buffers(channel_id, channel);
+
+	channel->src = TSPP_SOURCE_NONE;
+	channel->mode = TSPP_MODE_DISABLED;
+	channel->memfree = NULL;
+	channel->user_info = NULL;
 	channel->buffer_count = 0;
 	channel->data = NULL;
 	channel->read = NULL;
@@ -1363,10 +1488,20 @@
 }
 EXPORT_SYMBOL(tspp_close_channel);
 
+/**
+ * tspp_add_filter - add a TSPP filter to a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return  error status
+ *
+ */
 int tspp_add_filter(u32 dev, u32 channel_id,
 	struct tspp_filter *filter)
 {
-	int i;
+	int i, rc;
 	int other_channel;
 	int entry;
 	u32 val, pid, enabled;
@@ -1397,19 +1532,14 @@
 		return -ENOSR;
 	}
 
-	/* make sure this filter mode matches the channel mode */
-	switch (channel->mode) {
-	case TSPP_MODE_DISABLED:
-		channel->mode = filter->mode;
-		break;
-	case TSPP_MODE_RAW:
-	case TSPP_MODE_PES:
-	case TSPP_MODE_RAW_NO_SUFFIX:
-		if (filter->mode != channel->mode) {
-			pr_err("tspp: wrong filter mode");
-			return -EBADSLT;
-		}
-	}
+	channel->mode = filter->mode;
+	/*
+	 * if buffers are already allocated, verify they fulfil
+	 * the alignment requirements.
+	 */
+	if ((channel->buffer_count > 0) &&
+	   (!tspp_is_buffer_size_aligned(channel->buffer_size, channel->mode)))
+		pr_warn("tspp: buffers allocated with incorrect alignment\n");
 
 	if (filter->mode == TSPP_MODE_PES) {
 		for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
@@ -1468,13 +1598,22 @@
 	pdev->filters[channel->src]->
 		filter[filter->priority].filter = p.filter;
 
-	/* allocate buffers if needed */
-	tspp_allocate_buffers(dev, channel->id, channel->max_buffers,
-		channel->buffer_size, channel->int_freq, 0, 0);
-	if (channel->buffer_count < MIN_ACCEPTABLE_BUFFER_COUNT) {
-		pr_err("tspp: failed to allocate at least %i buffers",
-			MIN_ACCEPTABLE_BUFFER_COUNT);
-		return -ENOMEM;
+	/*
+	 * allocate buffers if needed (i.e. if the user has not already called
+	 * tspp_allocate_buffers() explicitly).
+	 */
+	if (channel->buffer_count == 0) {
+		channel->buffer_size =
+			tspp_align_buffer_size_by_mode(channel->buffer_size,
+							channel->mode);
+		rc = tspp_allocate_buffers(dev, channel->id,
+					channel->max_buffers,
+					channel->buffer_size,
+					channel->int_freq, NULL, NULL, NULL);
+		if (rc != 0) {
+			pr_err("tspp: tspp_allocate_buffers failed\n");
+			return rc;
+		}
 	}
 
 	/* reenable pipe */
@@ -1489,6 +1628,16 @@
 }
 EXPORT_SYMBOL(tspp_add_filter);
 
+/**
+ * tspp_remove_filter - remove a TSPP filter from a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return  error status
+ *
+ */
 int tspp_remove_filter(u32 dev, u32 channel_id,
 	struct tspp_filter *filter)
 {
@@ -1541,6 +1690,16 @@
 }
 EXPORT_SYMBOL(tspp_remove_filter);
 
+/**
+ * tspp_set_key - set TSPP key in key table.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @key: TSPP key parameters
+ *
+ * Return  error status
+ *
+ */
 int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key)
 {
 	int i;
@@ -1591,6 +1750,18 @@
 }
 EXPORT_SYMBOL(tspp_set_key);
 
+/**
+ * tspp_register_notification - register TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @pNotify: notification function
+ * @userdata: user data to pass to notification function
+ * @timer_ms: notification for partially filled buffers
+ *
+ * Return  error status
+ *
+ */
 int tspp_register_notification(u32 dev, u32 channel_id,
 	tspp_notifier *pNotify, void *userdata, u32 timer_ms)
 {
@@ -1614,6 +1785,15 @@
 }
 EXPORT_SYMBOL(tspp_register_notification);
 
+/**
+ * tspp_unregister_notification - unregister TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
 int tspp_unregister_notification(u32 dev, u32 channel_id)
 {
 	struct tspp_channel *channel;
@@ -1635,6 +1815,15 @@
 }
 EXPORT_SYMBOL(tspp_unregister_notification);
 
+/**
+ * tspp_get_buffer - get TSPP data buffer.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return  error status
+ *
+ */
 const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id)
 {
 	struct tspp_mem_buffer *buffer;
@@ -1675,6 +1864,16 @@
 }
 EXPORT_SYMBOL(tspp_get_buffer);
 
+/**
+ * tspp_release_buffer - release TSPP data buffer back to TSPP.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @descriptor_id: buffer descriptor ID
+ *
+ * Return  error status
+ *
+ */
 int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id)
 {
 	int i, found = 0;
@@ -1726,8 +1925,27 @@
 }
 EXPORT_SYMBOL(tspp_release_buffer);
 
-int tspp_allocate_buffers(u32 dev, u32 channel_id,	u32 count,
-	u32 size, u32 int_freq, tspp_allocator *alloc, void *user)
+/**
+ * tspp_allocate_buffers - allocate TSPP data buffers.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @count: number of buffers to allocate
+ * @size: size of each buffer to allocate
+ * @int_freq: interrupt frequency
+ * @alloc: user defined memory allocator function. Pass NULL for default.
+ * @memfree: user defined memory free function. Pass NULL for default.
+ * @user: user data to pass to the memory allocator/free function
+ *
+ * Return  error status
+ *
+ * The user can optionally call this function explicitly to allocate the TSPP
+ * data buffers. Alternatively, if the user did not call this function, it
+ * is called implicitly by tspp_add_filter().
+ */
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count, u32 size,
+			u32 int_freq, tspp_allocator *alloc,
+			tspp_memfree *memfree, void *user)
 {
 	struct tspp_channel *channel;
 	struct tspp_device *pdev;
@@ -1736,56 +1954,62 @@
 	TSPP_DEBUG("tspp_allocate_buffers");
 
 	if (channel_id >= TSPP_NUM_CHANNELS) {
-		pr_err("tspp: channel id out of range");
+		pr_err("%s: channel id out of range", __func__);
 		return -ECHRNG;
 	}
+
 	pdev = tspp_find_by_id(dev);
 	if (!pdev) {
-		pr_err("tspp_alloc: can't find device %i", dev);
+		pr_err("%s: can't find device %i", __func__, dev);
 		return -ENODEV;
 	}
+
+	if (count < MIN_ACCEPTABLE_BUFFER_COUNT) {
+		pr_err("%s: tspp requires a minimum of %i buffers\n",
+			__func__, MIN_ACCEPTABLE_BUFFER_COUNT);
+		return -EINVAL;
+	}
+
 	channel = &pdev->channels[channel_id];
+	/* allow buffer allocation only if there was no previous buffer
+	 * allocation for this channel.
+	 */
+	if (channel->buffer_count > 0) {
+		pr_err("%s: buffers already allocated for channel %u",
+			__func__, channel_id);
+		return -EINVAL;
+	}
 
 	channel->max_buffers = count;
 
 	/* set up interrupt frequency */
-	if (int_freq > channel->max_buffers)
+	if (int_freq > channel->max_buffers) {
 		int_freq = channel->max_buffers;
-	channel->int_freq = int_freq;
-
-	switch (channel->mode) {
-	case TSPP_MODE_DISABLED:
-	case TSPP_MODE_PES:
-		/* give the user what he asks for */
-		channel->buffer_size = size;
-		break;
-
-	case TSPP_MODE_RAW:
-		/* must be a multiple of 192 */
-		if (size < (TSPP_PACKET_LENGTH+4))
-			channel->buffer_size = (TSPP_PACKET_LENGTH+4);
-		else
-			channel->buffer_size = (size /
-				(TSPP_PACKET_LENGTH+4)) *
-				(TSPP_PACKET_LENGTH+4);
-		break;
-
-	case TSPP_MODE_RAW_NO_SUFFIX:
-		/* must be a multiple of 188 */
-		channel->buffer_size = (size / TSPP_PACKET_LENGTH) *
-			TSPP_PACKET_LENGTH;
-		break;
+		pr_warn("%s: setting interrupt frequency to %u\n",
+			__func__, int_freq);
 	}
+	channel->int_freq = int_freq;
+	/*
+	 * it is the responsibility of the caller of tspp_allocate_buffers(),
+	 * whether it's the user or the driver, to make sure the size parameter
+	 * is compatible with the channel mode.
+	 */
+	channel->buffer_size = size;
 
-	for (; channel->buffer_count < channel->max_buffers;
+	/* save user defined memory free function for later use */
+	channel->memfree = memfree;
+	channel->user_info = user;
+
+	for (channel->buffer_count = 0;
+		channel->buffer_count < channel->max_buffers;
 		channel->buffer_count++) {
 
 		/* allocate the descriptor */
 		struct tspp_mem_buffer *desc = (struct tspp_mem_buffer *)
 			kmalloc(sizeof(struct tspp_mem_buffer), GFP_KERNEL);
 		if (!desc) {
-			pr_warn("tspp: Can't allocate desc %i",
-			channel->buffer_count);
+			pr_warn("%s: Can't allocate desc %i",
+				__func__, channel->buffer_count);
 			break;
 		}
 
@@ -1794,8 +2018,8 @@
 		if (tspp_alloc_buffer(channel_id, &desc->desc,
 			channel->buffer_size, alloc, user) != 0) {
 			kfree(desc);
-			pr_warn("tspp: Can't allocate buffer %i",
-				channel->buffer_count);
+			pr_warn("%s: Can't allocate buffer %i",
+				__func__, channel->buffer_count);
 			break;
 		}
 
@@ -1818,12 +2042,24 @@
 
 		/* start the transfer */
 		if (tspp_queue_buffer(channel, desc))
-			pr_err("tspp: can't queue buffer %i", desc->desc.id);
+			pr_err("%s: can't queue buffer %i",
+				__func__, desc->desc.id);
+	}
+
+	if (channel->buffer_count < channel->max_buffers) {
+		/*
+		 * we failed to allocate the requested number of buffers.
+		 * we don't allow a partial success, so need to clean up here.
+		 */
+		tspp_destroy_buffers(channel_id, channel);
+		channel->buffer_count = 0;
+		return -ENOMEM;
 	}
 
 	channel->waiting = channel->data;
 	channel->read = channel->data;
 	channel->locked = channel->data;
+
 	return 0;
 }
 EXPORT_SYMBOL(tspp_allocate_buffers);
@@ -1942,8 +2178,10 @@
 		transferred += size;
 		buffer->read_index += size;
 
-		/* after reading the end of the buffer, requeue it,
-			and set up for reading the next one */
+		/*
+		 * after reading the end of the buffer, requeue it,
+		 * and set up for reading the next one
+		 */
 		if (buffer->read_index == buffer->filled) {
 			buffer->state = TSPP_BUF_STATE_WAITING;
 			if (tspp_queue_buffer(channel, buffer))
@@ -2042,8 +2280,10 @@
 		pr_err("tspp: Unknown ioctl %i", param0);
 	}
 
-	/* normalize the return code in case one of the subfunctions does
-		something weird */
+	/*
+	 * normalize the return code in case one of the subfunctions does
+	 * something weird
+	 */
 	if (rc != 0)
 		rc = -ENOIOCTLCMD;
 
@@ -2136,13 +2376,14 @@
 {
 	int rc = -ENODEV;
 	u32 version;
-	u32 i;
+	u32 i, j;
 	struct msm_tspp_platform_data *data;
 	struct tspp_device *device;
 	struct resource *mem_tsif0;
 	struct resource *mem_tsif1;
 	struct resource *mem_tspp;
 	struct resource *mem_bam;
+	struct tspp_channel *channel;
 
 	/* must have platform data */
 	data = pdev->dev.platform_data;
@@ -2338,6 +2579,12 @@
 	return 0;
 
 err_channel:
+	/* uninitialize channels */
+	for (j = 0; j < i; j++) {
+		channel = &(device->channels[i]);
+		device_destroy(tspp_class, channel->cdev.dev);
+		cdev_del(&channel->cdev);
+	}
 err_clock:
 	sps_deregister_bam_device(device->bam_handle);
 err_bam:
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index fe41cdd..ade2b97 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1472,64 +1472,6 @@
 }
 EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
 
-void print_mmc_packing_stats(struct mmc_card *card)
-{
-	int i;
-	int max_num_of_packed_reqs = 0;
-
-	if ((!card) || (!card->wr_pack_stats.packing_events))
-		return;
-
-	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
-
-	spin_lock(&card->wr_pack_stats.lock);
-
-	pr_info("%s: write packing statistics:\n",
-		mmc_hostname(card->host));
-
-	for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
-		if (card->wr_pack_stats.packing_events[i] != 0)
-			pr_info("%s: Packed %d reqs - %d times\n",
-				mmc_hostname(card->host), i,
-				card->wr_pack_stats.packing_events[i]);
-	}
-
-	pr_info("%s: stopped packing due to the following reasons:\n",
-		mmc_hostname(card->host));
-
-	if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
-		pr_info("%s: %d times: exceedmax num of segments\n",
-			mmc_hostname(card->host),
-			card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
-	if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
-		pr_info("%s: %d times: exceeding the max num of sectors\n",
-			mmc_hostname(card->host),
-			card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
-	if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
-		pr_info("%s: %d times: wrong data direction\n",
-			mmc_hostname(card->host),
-			card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
-	if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
-		pr_info("%s: %d times: flush or discard\n",
-			mmc_hostname(card->host),
-			card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
-	if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
-		pr_info("%s: %d times: empty queue\n",
-			mmc_hostname(card->host),
-			card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
-	if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
-		pr_info("%s: %d times: rel write\n",
-			mmc_hostname(card->host),
-			card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
-	if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
-		pr_info("%s: %d times: Threshold\n",
-			mmc_hostname(card->host),
-			card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
-
-	spin_unlock(&card->wr_pack_stats.lock);
-}
-EXPORT_SYMBOL(print_mmc_packing_stats);
-
 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
 {
 	struct request_queue *q = mq->queue;
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
index 2307d7a..c5551b8 100644
--- a/drivers/mmc/card/mmc_block_test.c
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -221,6 +221,63 @@
 
 static struct mmc_block_test_data *mbtd;
 
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+	int i;
+	int max_num_of_packed_reqs = 0;
+
+	if ((!card) || (!card->wr_pack_stats.packing_events))
+		return;
+
+	max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+	spin_lock(&card->wr_pack_stats.lock);
+
+	pr_info("%s: write packing statistics:\n",
+		mmc_hostname(card->host));
+
+	for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
+		if (card->wr_pack_stats.packing_events[i] != 0)
+			pr_info("%s: Packed %d reqs - %d times\n",
+				mmc_hostname(card->host), i,
+				card->wr_pack_stats.packing_events[i]);
+	}
+
+	pr_info("%s: stopped packing due to the following reasons:\n",
+		mmc_hostname(card->host));
+
+	if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+		pr_info("%s: %d times: exceedmax num of segments\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+	if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+		pr_info("%s: %d times: exceeding the max num of sectors\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+	if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+		pr_info("%s: %d times: wrong data direction\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+	if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+		pr_info("%s: %d times: flush or discard\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+	if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+		pr_info("%s: %d times: empty queue\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+	if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
+		pr_info("%s: %d times: rel write\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
+	if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
+		pr_info("%s: %d times: Threshold\n",
+			mmc_hostname(card->host),
+			card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+	spin_unlock(&card->wr_pack_stats.lock);
+}
+
 /*
  * A callback assigned to the packed_test_fn field.
  * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
@@ -499,11 +556,11 @@
 
 	switch (td->test_info.testcase) {
 	case TEST_STOP_DUE_TO_FLUSH:
-		return "Test stop due to flush";
+		return " stop due to flush";
 	case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
-		return "Test stop due to flush after max-1 reqs";
+		return " stop due to flush after max-1 reqs";
 	case TEST_STOP_DUE_TO_READ:
-		return "Test stop due to read";
+		return " stop due to read";
 	case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
 		return "Test stop due to read after max-1 reqs";
 	case TEST_STOP_DUE_TO_EMPTY_QUEUE:
diff --git a/drivers/power/bq28400_battery.c b/drivers/power/bq28400_battery.c
index 7b82b32..39eac60 100644
--- a/drivers/power/bq28400_battery.c
+++ b/drivers/power/bq28400_battery.c
@@ -757,6 +757,12 @@
 static void bq28400_external_power_changed(struct power_supply *psy)
 {
 	pr_debug("Notify power_supply_changed.\n");
+
+	/* The battery gauge samples the current and voltage once per second,
+	 * so there is a delay between the time the charger starts/stops
+	 * charging and the time the battery gauge detects the change.
+	 */
+	msleep(1000);
 	/* Update LEDs and notify uevents */
 	power_supply_changed(&bq28400_dev->batt_psy);
 }
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 91f4964..432abe5 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -62,6 +62,65 @@
 			printk(x);			\
 	} while (0)
 
+static int nr_free_zone_mtype_pages(struct zone *zone, int mtype)
+{
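+	/* Walk the free list of every order for this migrate type; a block
+	 * of a given order accounts for 2^order pages (hence the shift).
+	 */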
+	int order;
+	int sum = 0;
+
+	for (order = 0; order < MAX_ORDER; ++order) {
+		unsigned long freecount = 0;
+		struct free_area *area;
+		struct list_head *curr;
+
+		area = &(zone->free_area[order]);
+
+		list_for_each(curr, &area->free_list[mtype])
+			freecount++;
+
+		sum += freecount << order;
+	}
+	return sum;
+}
+
+static int nr_free_zone_pages(struct zone *zone, gfp_t gfp_mask)
+{
+	int sum = 0;
+	int mtype = allocflags_to_migratetype(gfp_mask);
+	int i = 0;
+	int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
+
+	sum = nr_free_zone_mtype_pages(zone, mtype);
+
+	/*
+	 * Also count the fallback pages
+	 */
+	for (i = 0;; i++) {
+		int fallbacktype = mtype_fallbacks[i];
+		sum += nr_free_zone_mtype_pages(zone, fallbacktype);
+
+		if (fallbacktype == MIGRATE_RESERVE)
+			break;
+	}
+
+	return sum;
+}
+
+static int nr_free_pages(gfp_t gfp_mask)
+{
+	struct zoneref *z;
+	struct zone *zone;
+	int sum = 0;
+
+	struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
+
+	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+		sum += nr_free_zone_pages(zone, gfp_mask);
+	}
+
+	return sum;
+}
+
+
 static int test_task_flag(struct task_struct *p, int flag)
 {
 	struct task_struct *t = p;
@@ -93,6 +152,15 @@
 	int other_file = global_page_state(NR_FILE_PAGES) -
 						global_page_state(NR_SHMEM);
 
+	if (sc->nr_to_scan > 0 && other_free > other_file) {
+		/*
+		 * If the number of free pages is going to affect the decision
+		 * of which process is selected then ensure only free pages
+		 * which can satisfy the request are considered.
+		 */
+		other_free = nr_free_pages(sc->gfp_mask);
+	}
+
 	if (lowmem_adj_size < array_size)
 		array_size = lowmem_adj_size;
 	if (lowmem_minfree_size < array_size)
diff --git a/drivers/tty/serial/msm_serial_hs_lite.c b/drivers/tty/serial/msm_serial_hs_lite.c
index f065eaa..2f3f83d 100644
--- a/drivers/tty/serial/msm_serial_hs_lite.c
+++ b/drivers/tty/serial/msm_serial_hs_lite.c
@@ -149,12 +149,9 @@
 
 static int get_line(struct platform_device *pdev)
 {
-	const struct msm_serial_hslite_platform_data *pdata =
-					pdev->dev.platform_data;
-	if (pdata)
-		return pdata->line;
+	struct msm_hsl_port *msm_hsl_port = platform_get_drvdata(pdev);
 
-	return pdev->id;
+	return msm_hsl_port->uart.line;
 }
 
 static int clk_en(struct uart_port *port, int enable)
@@ -1357,18 +1354,32 @@
 	struct resource *uart_resource;
 	struct resource *gsbi_resource;
 	struct uart_port *port;
+	const struct msm_serial_hslite_platform_data *pdata;
 	const struct of_device_id *match;
+	u32 line;
 	int ret;
 
 	if (pdev->id == -1)
 		pdev->id = atomic_inc_return(&msm_serial_hsl_next_id) - 1;
 
-	if (unlikely(get_line(pdev) < 0 || get_line(pdev) >= UART_NR))
+	/* Use line (ttyHSLx) number from pdata or device tree if specified */
+	pdata = pdev->dev.platform_data;
+	if (pdata)
+		line = pdata->line;
+	else
+		line = pdev->id;
+
+	/* Use line number from device tree if present */
+	if (pdev->dev.of_node)
+		of_property_read_u32(pdev->dev.of_node, "cell-index", &line);
+
+	if (unlikely(line < 0 || line >= UART_NR))
 		return -ENXIO;
 
-	printk(KERN_INFO "msm_serial_hsl: detected port #%d\n", pdev->id);
+	printk(KERN_INFO "msm_serial_hsl: detected port #%d (ttyHSL%d)\n",
+	       pdev->id, line);
 
-	port = get_port_from_line(get_line(pdev));
+	port = get_port_from_line(line);
 	port->dev = &pdev->dev;
 	msm_hsl_port = UART_TO_MSM(port);
 
diff --git a/drivers/usb/gadget/f_accessory.c b/drivers/usb/gadget/f_accessory.c
index 42a6c43..5659c79 100644
--- a/drivers/usb/gadget/f_accessory.c
+++ b/drivers/usb/gadget/f_accessory.c
@@ -132,6 +132,41 @@
 	.bInterfaceProtocol     = 0,
 };
 
+static struct usb_endpoint_descriptor acc_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_in_comp_desc = {
+	.bLength =		sizeof acc_superspeed_in_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor acc_superspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_out_comp_desc = {
+	.bLength =		sizeof acc_superspeed_out_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+
 static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
 	.bLength                = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType        = USB_DT_ENDPOINT,
@@ -176,6 +211,15 @@
 	NULL,
 };
 
+static struct usb_descriptor_header *ss_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_out_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_out_comp_desc,
+	NULL,
+};
+
 static struct usb_string acc_string_defs[] = {
 	[INTERFACE_STRING_INDEX].s	= "Android Accessory Interface",
 	{  },	/* end of list */
@@ -907,6 +951,14 @@
 			acc_fullspeed_out_desc.bEndpointAddress;
 	}
 
+	/* support super speed hardware */
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		acc_superspeed_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_superspeed_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+
 	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
 			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
 			f->name, dev->ep_in->name, dev->ep_out->name);
@@ -1135,6 +1187,8 @@
 	dev->function.strings = acc_strings,
 	dev->function.descriptors = fs_acc_descs;
 	dev->function.hs_descriptors = hs_acc_descs;
+	if (gadget_is_superspeed(c->cdev->gadget))
+		dev->function.ss_descriptors = ss_acc_descs;
 	dev->function.bind = acc_function_bind;
 	dev->function.unbind = acc_function_unbind;
 	dev->function.set_alt = acc_function_set_alt;
diff --git a/drivers/usb/gadget/f_adb.c b/drivers/usb/gadget/f_adb.c
index 14e5b60..68c99a3 100644
--- a/drivers/usb/gadget/f_adb.c
+++ b/drivers/usb/gadget/f_adb.c
@@ -69,6 +69,40 @@
 	.bInterfaceProtocol     = 1,
 };
 
+static struct usb_endpoint_descriptor adb_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor adb_superspeed_in_comp_desc = {
+	.bLength =		sizeof adb_superspeed_in_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor adb_superspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor adb_superspeed_out_comp_desc = {
+	.bLength =		sizeof adb_superspeed_out_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
 static struct usb_endpoint_descriptor adb_highspeed_in_desc = {
 	.bLength                = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType        = USB_DT_ENDPOINT,
@@ -113,6 +147,15 @@
 	NULL,
 };
 
+static struct usb_descriptor_header *ss_adb_descs[] = {
+	(struct usb_descriptor_header *) &adb_interface_desc,
+	(struct usb_descriptor_header *) &adb_superspeed_in_desc,
+	(struct usb_descriptor_header *) &adb_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &adb_superspeed_out_desc,
+	(struct usb_descriptor_header *) &adb_superspeed_out_comp_desc,
+	NULL,
+};
+
 static void adb_ready_callback(void);
 static void adb_closed_callback(void);
 
@@ -503,6 +546,13 @@
 		adb_highspeed_out_desc.bEndpointAddress =
 			adb_fullspeed_out_desc.bEndpointAddress;
 	}
+	/* support super speed hardware */
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		adb_superspeed_in_desc.bEndpointAddress =
+			adb_fullspeed_in_desc.bEndpointAddress;
+		adb_superspeed_out_desc.bEndpointAddress =
+			adb_fullspeed_out_desc.bEndpointAddress;
+	}
 
 	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
 			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@@ -605,6 +655,8 @@
 	dev->function.name = "adb";
 	dev->function.descriptors = fs_adb_descs;
 	dev->function.hs_descriptors = hs_adb_descs;
+	if (gadget_is_superspeed(c->cdev->gadget))
+		dev->function.ss_descriptors = ss_adb_descs;
 	dev->function.bind = adb_function_bind;
 	dev->function.unbind = adb_function_unbind;
 	dev->function.set_alt = adb_function_set_alt;
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index 87597d5..8f68234 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -73,6 +73,40 @@
 	.bInterval        =	0,
 };
 
+static struct usb_endpoint_descriptor ss_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_in_comp_desc = {
+	.bLength =		sizeof ss_bulk_in_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor ss_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_out_comp_desc = {
+	.bLength =		sizeof ss_bulk_out_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
 static struct usb_descriptor_header *fs_diag_desc[] = {
 	(struct usb_descriptor_header *) &intf_desc,
 	(struct usb_descriptor_header *) &fs_bulk_in_desc,
@@ -86,6 +120,15 @@
 	NULL,
 };
 
+static struct usb_descriptor_header *ss_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &ss_bulk_in_desc,
+	(struct usb_descriptor_header *) &ss_bulk_in_comp_desc,
+	(struct usb_descriptor_header *) &ss_bulk_out_desc,
+	(struct usb_descriptor_header *) &ss_bulk_out_comp_desc,
+	NULL,
+};
+
 /**
  * struct diag_context - USB diag function driver private structure
  * @function: function structure for USB interface
@@ -551,6 +594,8 @@
 {
 	struct diag_context *ctxt = func_to_diag(f);
 
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
 	if (gadget_is_dualspeed(c->cdev->gadget))
 		usb_free_descriptors(f->hs_descriptors);
 
@@ -580,6 +625,7 @@
 	ctxt->out = ep;
 	ep->driver_data = ctxt;
 
+	status = -ENOMEM;
 	/* copy descriptors, and track endpoint copies */
 	f->descriptors = usb_copy_descriptors(fs_diag_desc);
 	if (!f->descriptors)
@@ -593,9 +639,29 @@
 
 		/* copy descriptors, and track endpoint copies */
 		f->hs_descriptors = usb_copy_descriptors(hs_diag_desc);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_bulk_in_desc.bEndpointAddress =
+				fs_bulk_in_desc.bEndpointAddress;
+		ss_bulk_out_desc.bEndpointAddress =
+				fs_bulk_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(ss_diag_desc);
+		if (!f->ss_descriptors)
+			goto fail;
 	}
 	return 0;
 fail:
+	if (f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
 	if (ctxt->out)
 		ctxt->out->driver_data = NULL;
 	if (ctxt->in)
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
index ccbc330..82ffbba 100644
--- a/drivers/usb/gadget/f_mtp.c
+++ b/drivers/usb/gadget/f_mtp.c
@@ -129,6 +129,40 @@
 	.bInterfaceProtocol     = 1,
 };
 
+static struct usb_endpoint_descriptor mtp_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_superspeed_in_comp_desc = {
+	.bLength =		sizeof mtp_superspeed_in_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor mtp_superspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_superspeed_out_comp_desc = {
+	.bLength =		sizeof mtp_superspeed_out_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
 static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
 	.bLength                = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType        = USB_DT_ENDPOINT,
@@ -168,6 +202,16 @@
 	.bInterval              = 6,
 };
 
+static struct usb_ss_ep_comp_descriptor mtp_superspeed_intr_comp_desc = {
+	.bLength =		sizeof mtp_superspeed_intr_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(INTR_BUFFER_SIZE),
+};
+
 static struct usb_descriptor_header *fs_mtp_descs[] = {
 	(struct usb_descriptor_header *) &mtp_interface_desc,
 	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
@@ -184,6 +228,17 @@
 	NULL,
 };
 
+static struct usb_descriptor_header *ss_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
+	NULL,
+};
+
 static struct usb_descriptor_header *fs_ptp_descs[] = {
 	(struct usb_descriptor_header *) &ptp_interface_desc,
 	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
@@ -200,6 +255,17 @@
 	NULL,
 };
 
+static struct usb_descriptor_header *ss_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_in_comp_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_out_comp_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	(struct usb_descriptor_header *) &mtp_superspeed_intr_comp_desc,
+	NULL,
+};
+
 static struct usb_string mtp_string_defs[] = {
 	/* Naming interface "MTP" so libmtp will recognize us */
 	[INTERFACE_STRING_INDEX].s	= "MTP",
@@ -1126,6 +1192,14 @@
 			mtp_fullspeed_out_desc.bEndpointAddress;
 	}
 
+	/* support super speed hardware */
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		mtp_superspeed_in_desc.bEndpointAddress =
+			mtp_fullspeed_in_desc.bEndpointAddress;
+		mtp_superspeed_out_desc.bEndpointAddress =
+			mtp_fullspeed_out_desc.bEndpointAddress;
+	}
+
 	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
 			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
 			f->name, dev->ep_in->name, dev->ep_out->name);
@@ -1239,9 +1313,13 @@
 	if (ptp_config) {
 		dev->function.descriptors = fs_ptp_descs;
 		dev->function.hs_descriptors = hs_ptp_descs;
+		if (gadget_is_superspeed(c->cdev->gadget))
+			dev->function.ss_descriptors = ss_ptp_descs;
 	} else {
 		dev->function.descriptors = fs_mtp_descs;
 		dev->function.hs_descriptors = hs_mtp_descs;
+		if (gadget_is_superspeed(c->cdev->gadget))
+			dev->function.ss_descriptors = ss_mtp_descs;
 	}
 	dev->function.bind = mtp_function_bind;
 	dev->function.unbind = mtp_function_unbind;
diff --git a/drivers/usb/gadget/f_rmnet.c b/drivers/usb/gadget/f_rmnet.c
index aa9daf3..4357e0d 100644
--- a/drivers/usb/gadget/f_rmnet.c
+++ b/drivers/usb/gadget/f_rmnet.c
@@ -147,6 +147,71 @@
 	NULL,
 };
 
+/* Super speed support */
+static struct usb_endpoint_descriptor rmnet_ss_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+	.bInterval =		RMNET_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_notify_comp_desc = {
+	.bLength =		sizeof rmnet_ss_notify_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(RMNET_MAX_NOTIFY_SIZE),
+};
+
+static struct usb_endpoint_descriptor rmnet_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_in_comp_desc = {
+	.bLength =		sizeof rmnet_ss_in_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor rmnet_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	__constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor rmnet_ss_out_comp_desc = {
+	.bLength =		sizeof rmnet_ss_out_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
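+/* interface, then notify/IN/OUT endpoints, each with its companion */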
+static struct usb_descriptor_header *rmnet_ss_function[] = {
+	(struct usb_descriptor_header *) &rmnet_interface_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_notify_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_notify_comp_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_in_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_out_desc,
+	(struct usb_descriptor_header *) &rmnet_ss_out_comp_desc,
+	NULL,
+};
+
 /* String descriptors */
 
 static struct usb_string rmnet_string_defs[] = {
@@ -460,6 +525,8 @@
 
 	pr_debug("%s: portno:%d\n", __func__, dev->port_num);
 
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
 	if (gadget_is_dualspeed(c->cdev->gadget))
 		usb_free_descriptors(f->hs_descriptors);
 	usb_free_descriptors(f->descriptors);
@@ -964,6 +1031,7 @@
 	dev->notify_req->complete = frmnet_notify_complete;
 	dev->notify_req->context = dev;
 
+	ret = -ENOMEM;
 	f->descriptors = usb_copy_descriptors(rmnet_fs_function);
 
 	if (!f->descriptors)
@@ -984,6 +1052,21 @@
 			goto fail;
 	}
 
+	if (gadget_is_superspeed(cdev->gadget)) {
+		rmnet_ss_in_desc.bEndpointAddress =
+				rmnet_fs_in_desc.bEndpointAddress;
+		rmnet_ss_out_desc.bEndpointAddress =
+				rmnet_fs_out_desc.bEndpointAddress;
+		rmnet_ss_notify_desc.bEndpointAddress =
+				rmnet_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(rmnet_ss_function);
+
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
 	pr_info("%s: RmNet(%d) %s Speed, IN:%s OUT:%s\n",
 			__func__, dev->port_num,
 			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
@@ -992,8 +1075,14 @@
 	return 0;
 
 fail:
+	if (f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
 	if (f->descriptors)
 		usb_free_descriptors(f->descriptors);
+	if (dev->notify_req)
+		frmnet_free_req(dev->notify, dev->notify_req);
 ep_notify_alloc_fail:
 	dev->notify->driver_data = NULL;
 	dev->notify = NULL;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 649fe14..43347b3 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -244,8 +244,37 @@
 	.bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
 };
 
+#ifdef CONFIG_MODEM_SUPPORT
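+/* interrupt IN endpoint for CDC-style modem notifications */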
+static struct usb_endpoint_descriptor gser_ss_notify_desc  = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	__constant_cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_ss_ep_comp_descriptor gser_ss_notify_comp_desc = {
+	.bLength =		sizeof gser_ss_notify_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(GS_NOTIFY_MAXPACKET),
+};
+#endif
+
 static struct usb_descriptor_header *gser_ss_function[] = {
 	(struct usb_descriptor_header *) &gser_interface_desc,
+#ifdef CONFIG_MODEM_SUPPORT
+	(struct usb_descriptor_header *) &gser_header_desc,
+	(struct usb_descriptor_header *) &gser_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &gser_descriptor,
+	(struct usb_descriptor_header *) &gser_union_desc,
+	(struct usb_descriptor_header *) &gser_ss_notify_desc,
+	(struct usb_descriptor_header *) &gser_ss_notify_comp_desc,
+#endif
 	(struct usb_descriptor_header *) &gser_ss_in_desc,
 	(struct usb_descriptor_header *) &gser_ss_bulk_comp_desc,
 	(struct usb_descriptor_header *) &gser_ss_out_desc,
@@ -304,7 +333,6 @@
 		ret = ghsic_ctrl_setup(no_hsic_sports, USB_GADGET_SERIAL);
 		if (ret < 0)
 			return ret;
-		return 0;
 	}
 	if (no_hsuart_sports) {
 		port_idx = ghsuart_data_setup(no_hsuart_sports,
@@ -319,8 +347,6 @@
 				port_idx++;
 			}
 		}
-
-		return 0;
 	}
 	return ret;
 }
@@ -824,6 +850,10 @@
 			gser_fs_in_desc.bEndpointAddress;
 		gser_ss_out_desc.bEndpointAddress =
 			gser_fs_out_desc.bEndpointAddress;
+#ifdef CONFIG_MODEM_SUPPORT
+		gser_ss_notify_desc.bEndpointAddress =
+				gser_fs_notify_desc.bEndpointAddress;
+#endif
 
 		/* copy descriptors, and track endpoint copies */
 		f->ss_descriptors = usb_copy_descriptors(gser_ss_function);
@@ -839,6 +869,10 @@
 	return 0;
 
 fail:
+	if (f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
 	if (f->descriptors)
 		usb_free_descriptors(f->descriptors);
 #ifdef CONFIG_MODEM_SUPPORT
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
index 9d5368b..4b5fbf5 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
@@ -70,7 +70,7 @@
 		alloc_size = (alloc_size+4095) & ~4095;
 		addr->alloc_handle = ion_alloc(
 		ddl_context->video_ion_client, alloc_size, SZ_4K,
-			res_trk_get_mem_type(), 0);
+			res_trk_get_mem_type(), res_trk_get_ion_flags());
 		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
 			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
 						 __func__);
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index 75856b2a..d9cadef 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -209,7 +209,8 @@
 			addr->alloc_handle = ion_alloc(
 					ddl_context->video_ion_client,
 					 alloc_size, SZ_4K,
-					res_trk_get_mem_type(), 0);
+					res_trk_get_mem_type(),
+					res_trk_get_ion_flags());
 			if (IS_ERR_OR_NULL(addr->alloc_handle)) {
 				DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
 						__func__);
@@ -830,17 +831,31 @@
 	if (resource_context.vidc_platform_data->enable_ion) {
 		if (res_trk_check_for_sec_session()) {
 			mem_type = ION_HEAP(mem_type);
-	if (resource_context.res_mem_type != DDL_FW_MEM)
-		mem_type |= ION_SECURE;
-	else if (res_trk_is_cp_enabled())
-		mem_type |= ION_SECURE;
 	} else
 		mem_type = (ION_HEAP(mem_type) |
 			ION_HEAP(ION_IOMMU_HEAP_ID));
 	}
+
 	return mem_type;
 }
 
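+/*
+ * ION flags to pass alongside res_trk_get_mem_type() when calling
+ * ion_alloc(): secure sessions request ION_SECURE, while firmware
+ * buffers (DDL_FW_MEM) get no extra flags.
+ */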
+unsigned int res_trk_get_ion_flags(void)
+{
+	unsigned int flags = 0;
+	if (resource_context.res_mem_type == DDL_FW_MEM)
+		return flags;
+
+	if (resource_context.vidc_platform_data->enable_ion) {
+		if (res_trk_check_for_sec_session()) {
+			if (resource_context.res_mem_type != DDL_FW_MEM)
+				flags |= ION_SECURE;
+			else if (res_trk_is_cp_enabled())
+				flags |= ION_SECURE;
+		}
+	}
+	return flags;
+}
+
 u32 res_trk_is_cp_enabled(void)
 {
 	if (resource_context.vidc_platform_data->cp_enabled)
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
index 2ae2512..ee876f4 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker_api.h
@@ -30,6 +30,7 @@
 u32 res_trk_get_core_type(void);
 u32 res_trk_get_firmware_addr(struct ddl_buf_addr *firm_addr);
 int res_trk_get_mem_type(void);
+unsigned int res_trk_get_ion_flags(void);
 u32 res_trk_get_enable_ion(void);
 u32 res_trk_is_cp_enabled(void);
 u32 res_trk_get_disable_fullhd(void);
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
index d8b8f18..c83faa6 100644
--- a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
@@ -764,6 +764,11 @@
 	return 0;
 }
 
+u32 res_trk_get_ion_flags(void)
+{
+	return 0;
+}
+
 int res_trk_check_for_sec_session()
 {
 	return 0;
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker_api.h b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker_api.h
index 75fdb3e..a20d9f2 100644
--- a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker_api.h
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker_api.h
@@ -29,6 +29,7 @@
 u32 res_trk_get_core_type(void);
 u32 res_trk_get_mem_type(void);
 u32 res_trk_get_disable_fullhd(void);
+u32 res_trk_get_ion_flags(void);
 u32 res_trk_get_enable_ion(void);
 u32 res_trk_is_cp_enabled(void);
 struct ion_client *res_trk_get_ion_client(void);
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index 8d28fd5..a9709fb 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -92,7 +92,7 @@
 	} else {
 		map_buffer->alloc_handle = ion_alloc(
 			    cctxt->vcd_ion_client, sz, SZ_4K,
-			    memtype, 0);
+			    memtype, res_trk_get_ion_flags());
 		if (IS_ERR_OR_NULL(map_buffer->alloc_handle)) {
 			pr_err("%s() ION alloc failed", __func__);
 			goto bailout;
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 85e5002..73fb995 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -418,7 +418,7 @@
 }
 
 static inline void *ion_map_kernel(struct ion_client *client,
-	struct ion_handle *handle, unsigned long flags)
+	struct ion_handle *handle)
 {
 	return ERR_PTR(-ENODEV);
 }
@@ -453,6 +453,12 @@
 	return -ENODEV;
 }
 
+static inline int ion_handle_get_size(struct ion_client *client,
+				struct ion_handle *handle, unsigned long *size)
+{
+	return -ENODEV;
+}
+
 static inline void ion_unmap_iommu(struct ion_client *client,
 			struct ion_handle *handle, int domain_num,
 			int partition_num)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f8a3a10..08f74e6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -61,6 +61,14 @@
 	MIGRATE_TYPES
 };
 
+/*
+ * Returns the ordered list of migrate types to which an allocation
+ * falls back when the free list for migrate type @mtype is depleted.
+ * The list is terminated by MIGRATE_RESERVE.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
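+/*
+ * Illustrative (hypothetical) caller: walk the returned list until the
+ * MIGRATE_RESERVE terminator, e.g.
+ *
+ *	fallback = get_migratetype_fallbacks(MIGRATE_MOVABLE);
+ *	for (i = 0; fallback[i] != MIGRATE_RESERVE; i++)
+ *		check_fallback(zone, fallback[i]);
+ *
+ * (check_fallback() stands in for whatever per-type work the caller does.)
+ */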
+
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
 #  define cma_wmark_pages(zone)	zone->min_cma_pages
diff --git a/include/media/vcap_fmt.h b/include/media/vcap_fmt.h
index 3b1bd7c2..13b3f05 100644
--- a/include/media/vcap_fmt.h
+++ b/include/media/vcap_fmt.h
@@ -24,12 +24,13 @@
 #define VCAP_VC_NPL_OFLOW_ERR_EVENT 4
 #define VCAP_VC_LBUF_OFLOW_ERR_EVENT 5
 #define VCAP_VC_BUF_OVERWRITE_EVENT 6
-#define VCAP_VP_REG_R_ERR_EVENT 7
-#define VCAP_VP_REG_W_ERR_EVENT 8
-#define VCAP_VP_IN_HEIGHT_ERR_EVENT 9
-#define VCAP_VP_IN_WIDTH_ERR_EVENT 10
-#define VCAP_VC_UNEXPECT_BUF_DONE 11
-#define VCAP_MAX_NOTIFY_EVENT 12
+#define VCAP_VC_VSYNC_SEQ_ERR 7
+#define VCAP_VP_REG_R_ERR_EVENT 8
+#define VCAP_VP_REG_W_ERR_EVENT 9
+#define VCAP_VP_IN_HEIGHT_ERR_EVENT 10
+#define VCAP_VP_IN_WIDTH_ERR_EVENT 11
+#define VCAP_VC_UNEXPECT_BUF_DONE 12
+#define VCAP_MAX_NOTIFY_EVENT 13
 
 enum hal_vcap_mode {
 	HAL_VCAP_MODE_PRO = 0,
diff --git a/include/media/vcap_v4l2.h b/include/media/vcap_v4l2.h
index 3db949c..55d67bf 100644
--- a/include/media/vcap_v4l2.h
+++ b/include/media/vcap_v4l2.h
@@ -109,7 +109,7 @@
 	uint8_t					tot_buf;
 	uint8_t					buf_num;
 
-	bool					top_field;
+	bool					field1;
 	bool					field_dropped;
 
 	struct timeval			vc_ts;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 831509c..c3142e8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -913,6 +913,11 @@
 	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 };
 
+int *get_migratetype_fallbacks(int mtype)
+{
+	return fallbacks[mtype];
+}
+
 /*
  * Move the free pages in a range to the free lists of the requested type.
  * Note that start_page and end_pages are not aligned on a pageblock
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index 4e41c80..338cfe3 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -83,6 +83,10 @@
 static int32_t qdsp_cvs_callback(struct apr_client_data *data, void *priv);
 static int32_t qdsp_cvp_callback(struct apr_client_data *data, void *priv);
 
+static int voice_send_set_widevoice_enable_cmd(struct voice_data *v);
+static int voice_send_set_pp_enable_cmd(struct voice_data *v,
+					uint32_t module_id, int enable);
+
 static u16 voice_get_mvm_handle(struct voice_data *v)
 {
 	if (v == NULL) {
@@ -764,6 +768,114 @@
 	return -EINVAL;
 }
 
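+/*
+ * Send VSS_IWIDEVOICE_CMD_SET_WIDEVOICE to the MVM with the session's
+ * current wv_enable value and wait (up to TIMEOUT_MS) for the response.
+ */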
+static int voice_send_set_widevoice_enable_cmd(struct voice_data *v)
+{
+	struct mvm_set_widevoice_enable_cmd mvm_set_wv_cmd;
+	int ret = 0;
+	void *apr_mvm;
+	u16 mvm_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_mvm = common.apr_q6_mvm;
+
+	if (!apr_mvm) {
+		pr_err("%s: apr_mvm is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	mvm_handle = voice_get_mvm_handle(v);
+
+	mvm_set_wv_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						     APR_HDR_LEN(APR_HDR_SIZE),
+						     APR_PKT_VER);
+	mvm_set_wv_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						   sizeof(mvm_set_wv_cmd) -
+						   APR_HDR_SIZE);
+	mvm_set_wv_cmd.hdr.src_port = v->session_id;
+	mvm_set_wv_cmd.hdr.dest_port = mvm_handle;
+	mvm_set_wv_cmd.hdr.token = 0;
+	mvm_set_wv_cmd.hdr.opcode = VSS_IWIDEVOICE_CMD_SET_WIDEVOICE;
+
+	mvm_set_wv_cmd.vss_set_wv.enable = v->wv_enable;
+
+	v->mvm_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_mvm, (uint32_t *) &mvm_set_wv_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending mvm set widevoice enable\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->mvm_wait,
+				 (v->mvm_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+}
+
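+/*
+ * Enable or disable a CVS post-processing module (e.g. slow talk or FENS)
+ * via VSS_ICOMMON_CMD_SET_UI_PROPERTY and wait for the CVS acknowledgement.
+ */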
+static int voice_send_set_pp_enable_cmd(struct voice_data *v,
+					uint32_t module_id, int enable)
+{
+	struct cvs_set_pp_enable_cmd cvs_set_pp_cmd;
+	int ret = 0;
+	void *apr_cvs;
+	u16 cvs_handle;
+
+	if (v == NULL) {
+		pr_err("%s: v is NULL\n", __func__);
+		return -EINVAL;
+	}
+	apr_cvs = common.apr_q6_cvs;
+
+	if (!apr_cvs) {
+		pr_err("%s: apr_cvs is NULL.\n", __func__);
+		return -EINVAL;
+	}
+	cvs_handle = voice_get_cvs_handle(v);
+
+	cvs_set_pp_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+						     APR_HDR_LEN(APR_HDR_SIZE),
+						     APR_PKT_VER);
+	cvs_set_pp_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+						   sizeof(cvs_set_pp_cmd) -
+						   APR_HDR_SIZE);
+	cvs_set_pp_cmd.hdr.src_port = v->session_id;
+	cvs_set_pp_cmd.hdr.dest_port = cvs_handle;
+	cvs_set_pp_cmd.hdr.token = 0;
+	cvs_set_pp_cmd.hdr.opcode = VSS_ICOMMON_CMD_SET_UI_PROPERTY;
+
+	cvs_set_pp_cmd.vss_set_pp.module_id = module_id;
+	cvs_set_pp_cmd.vss_set_pp.param_id = VOICE_PARAM_MOD_ENABLE;
+	cvs_set_pp_cmd.vss_set_pp.param_size = MOD_ENABLE_PARAM_LEN;
+	cvs_set_pp_cmd.vss_set_pp.reserved = 0;
+	cvs_set_pp_cmd.vss_set_pp.enable = enable;
+	cvs_set_pp_cmd.vss_set_pp.reserved_field = 0;
+	pr_debug("voice_send_set_pp_enable_cmd, module_id=%d, enable=%d\n",
+		module_id, enable);
+
+	v->cvs_state = CMD_STATUS_FAIL;
+	ret = apr_send_pkt(apr_cvs, (uint32_t *) &cvs_set_pp_cmd);
+	if (ret < 0) {
+		pr_err("Fail: sending cvs set pp enable\n");
+		goto fail;
+	}
+	ret = wait_event_timeout(v->cvs_wait,
+				 (v->cvs_state == CMD_STATUS_SUCCESS),
+				 msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout\n", __func__);
+		goto fail;
+	}
+	return 0;
+fail:
+	return -EINVAL;
+}
+
 static int voice_set_dtx(struct voice_data *v)
 {
 	int ret = 0;
@@ -2042,6 +2154,20 @@
 		voice_send_netid_timing_cmd(v);
 	}
 
+	/* enable widevoice if wv_enable is set */
+	if (v->wv_enable)
+		voice_send_set_widevoice_enable_cmd(v);
+
+	/* enable slowtalk if st_enable is set */
+	if (v->st_enable)
+		voice_send_set_pp_enable_cmd(v,
+					     MODULE_ID_VOICE_MODULE_ST,
+					     v->st_enable);
+
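+	/* sync the FENS module state unconditionally (enable or disable) */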
+	voice_send_set_pp_enable_cmd(v,
+				     MODULE_ID_VOICE_MODULE_FENS,
+				     v->fens_enable);
+
 	/* Start in-call music delivery if this feature is enabled */
 	if (v->music_info.play_enable)
 		voice_cvs_start_playback(v);
@@ -3207,6 +3333,22 @@
 	/* Send tty mode if tty device is used */
 	voice_send_tty_mode_cmd(v);
 
+	/* enable widevoice if wv_enable is set */
+	if (v->wv_enable)
+		voice_send_set_widevoice_enable_cmd(v);
+
+	/* enable slowtalk */
+	if (v->st_enable)
+		voice_send_set_pp_enable_cmd(v,
+					     MODULE_ID_VOICE_MODULE_ST,
+					     v->st_enable);
+
+	/* enable FENS */
+	if (v->fens_enable)
+		voice_send_set_pp_enable_cmd(v,
+					     MODULE_ID_VOICE_MODULE_FENS,
+					     v->fens_enable);
+
 	v->voc_state = VOC_RUN;
 	}
 
@@ -3370,7 +3512,8 @@
 	v->wv_enable = wv_enable;
 
 	mvm_handle = voice_get_mvm_handle(v);
-
+	if (mvm_handle != 0)
+		voice_send_set_widevoice_enable_cmd(v);
 
 	mutex_unlock(&v->lock);
 
@@ -3414,6 +3557,17 @@
 	else if (module_id == MODULE_ID_VOICE_MODULE_FENS)
 		v->fens_enable = enable;
 
+	if (v->voc_state == VOC_RUN) {
+		if (module_id == MODULE_ID_VOICE_MODULE_ST)
+			ret = voice_send_set_pp_enable_cmd(v,
+						MODULE_ID_VOICE_MODULE_ST,
+						enable);
+		else if (module_id == MODULE_ID_VOICE_MODULE_FENS)
+			ret = voice_send_set_pp_enable_cmd(v,
+						MODULE_ID_VOICE_MODULE_FENS,
+						enable);
+	}
+
 	mutex_unlock(&v->lock);
 
 	return ret;