Merge "bluetooth: Add support for LE conn param" into msm-3.4
diff --git a/Documentation/devicetree/bindings/cache/msm_cache_erp.txt b/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
new file mode 100644
index 0000000..400b299
--- /dev/null
+++ b/Documentation/devicetree/bindings/cache/msm_cache_erp.txt
@@ -0,0 +1,16 @@
+* Qualcomm Krait L1 / L2 cache error reporting driver
+
+Required properties:
+- compatible: Should be "qcom,cache_erp"
+- interrupts: Should contain the L1/CPU error interrupt number and
+  the L2 cache interrupt number
+- interrupt-names: Should contain the interrupt names "l1_irq" and
+  "l2_irq"
+
+Example:
+	qcom,cache_erp {
+		compatible = "qcom,cache_erp";
+		interrupts = <1 9 0>, <0 2 0>;
+		interrupt-names = "l1_irq", "l2_irq";
+	};
+
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index e83730f..edb86be 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -479,7 +479,7 @@
 
 		partition@E0000 {
 			reg = <0x120000 0x20000>;
-			qcom,ocmem-part-name = "blast";
+			qcom,ocmem-part-name = "other_os";
 			qcom,ocmem-part-min = <0x20000>;
 		};
 
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 88c4862..bbd6c63 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -47,6 +47,8 @@
 #include <asm/hardware/gic.h>
 #include <asm/system.h>
 
+#include <mach/socinfo.h>
+
 union gic_base {
 	void __iomem *common_base;
 	void __percpu __iomem **percpu_base;
@@ -56,6 +58,7 @@
 	unsigned int irq_offset;
 	union gic_base dist_base;
 	union gic_base cpu_base;
+	bool need_access_lock;
 #ifdef CONFIG_CPU_PM
 	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
 	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
@@ -207,14 +210,8 @@
 {
 	unsigned int i;
 	void __iomem *base = gic_data_dist_base(gic);
-#ifdef CONFIG_ARCH_MSM8625
-	unsigned long flags;
-#endif
 
 	for (i = 0; i * 32 < gic->max_irq; i++) {
-#ifdef CONFIG_ARCH_MSM8625
-		raw_spin_lock_irqsave(&irq_controller_lock, flags);
-#endif
 		gic->enabled_irqs[i]
 			= readl_relaxed(base + GIC_DIST_ENABLE_SET + i * 4);
 		/* disable all of them */
@@ -222,9 +219,6 @@
 		/* enable the wakeup set */
 		writel_relaxed(gic->wakeup_irqs[i],
 			base + GIC_DIST_ENABLE_SET + i * 4);
-#ifdef CONFIG_ARCH_MSM8625
-		raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
-#endif
 	}
 	mb();
 	return 0;
@@ -305,18 +299,19 @@
 
 static void gic_eoi_irq(struct irq_data *d)
 {
+	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
+
 	if (gic_arch_extn.irq_eoi) {
 		raw_spin_lock(&irq_controller_lock);
 		gic_arch_extn.irq_eoi(d);
 		raw_spin_unlock(&irq_controller_lock);
 	}
-#ifdef CONFIG_ARCH_MSM8625
-	raw_spin_lock(&irq_controller_lock);
-#endif
+
+	if (gic->need_access_lock)
+		raw_spin_lock(&irq_controller_lock);
 	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
-#ifdef CONFIG_ARCH_MSM8625
-	raw_spin_unlock(&irq_controller_lock);
-#endif
+	if (gic->need_access_lock)
+		raw_spin_unlock(&irq_controller_lock);
 }
 
 static int gic_set_type(struct irq_data *d, unsigned int type)
@@ -435,13 +430,11 @@
 	void __iomem *cpu_base = gic_data_cpu_base(gic);
 
 	do {
-#ifdef CONFIG_ARCH_MSM8625
-		raw_spin_lock(&irq_controller_lock);
-#endif
+		if (gic->need_access_lock)
+			raw_spin_lock(&irq_controller_lock);
 		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
-#ifdef CONFIG_ARCH_MSM8625
-		raw_spin_unlock(&irq_controller_lock);
-#endif
+		if (gic->need_access_lock)
+			raw_spin_unlock(&irq_controller_lock);
 		irqnr = irqstat & ~0x1c00;
 
 		if (likely(irqnr > 15 && irqnr < 1021)) {
@@ -450,13 +443,11 @@
 			continue;
 		}
 		if (irqnr < 16) {
-#ifdef CONFIG_ARCH_MSM8625
-			raw_spin_lock(&irq_controller_lock);
-#endif
+			if (gic->need_access_lock)
+				raw_spin_lock(&irq_controller_lock);
 			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
-#ifdef CONFIG_ARCH_MSM8625
-			raw_spin_unlock(&irq_controller_lock);
-#endif
+			if (gic->need_access_lock)
+				raw_spin_unlock(&irq_controller_lock);
 #ifdef CONFIG_SMP
 			handle_IPI(irqnr, regs);
 #endif
@@ -583,9 +574,8 @@
 	 * Deal with the banked PPI and SGI interrupts - disable all
 	 * PPI interrupts, ensure all SGI interrupts are enabled.
 	 */
-#ifdef CONFIG_ARCH_MSM8625
-	raw_spin_lock(&irq_controller_lock);
-#endif
+	if (gic->need_access_lock)
+		raw_spin_lock(&irq_controller_lock);
 	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
 	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
 
@@ -605,9 +595,8 @@
 		writel_relaxed(0xF, base + GIC_CPU_CTRL);
 	else
 		writel_relaxed(1, base + GIC_CPU_CTRL);
-#ifdef CONFIG_ARCH_MSM8625
-	raw_spin_unlock(&irq_controller_lock);
-#endif
+	if (gic->need_access_lock)
+		raw_spin_unlock(&irq_controller_lock);
     mb();
 }
 
@@ -883,6 +872,10 @@
 	BUG_ON(gic_nr >= MAX_GIC_NR);
 
 	gic = &gic_data[gic_nr];
+	if (cpu_is_msm8625() &&
+			(SOCINFO_VERSION_MAJOR(socinfo_get_version()) <= 1))
+		gic->need_access_lock = true;
+
 #ifdef CONFIG_GIC_NON_BANKED
 	if (percpu_offset) { /* Frankein-GIC without banked registers... */
 		unsigned int cpu;
@@ -967,9 +960,8 @@
 	int cpu;
 	unsigned long sgir;
 	unsigned long map = 0;
-#ifdef CONFIG_ARCH_MSM8625
-	unsigned long flags;
-#endif
+	unsigned long flags = 0;
+	struct gic_chip_data *gic = &gic_data[0];
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -985,16 +977,12 @@
 	 */
 	dsb();
 
-#ifdef CONFIG_ARCH_MSM8625
-	raw_spin_lock_irqsave(&irq_controller_lock, flags);
-#endif
+	if (gic->need_access_lock)
+		raw_spin_lock_irqsave(&irq_controller_lock, flags);
 	/* this always happens on GIC0 */
-
-	writel_relaxed(sgir,
-			gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
-#ifdef CONFIG_ARCH_MSM8625
-	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
-#endif
+	writel_relaxed(sgir, gic_data_dist_base(gic) + GIC_DIST_SOFTINT);
+	if (gic->need_access_lock)
+		raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 	mb();
 }
 #endif
@@ -1151,7 +1139,7 @@
 }
 #endif
 
-void msm_gic_save(bool modem_wake, int from_idle)
+void msm_gic_save(void)
 {
 	unsigned int i;
 	struct gic_chip_data *gic = &gic_data[0];
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 5078148..ad12bcd 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -60,7 +60,7 @@
 }
 void gic_set_irq_secure(unsigned int irq);
 
-void msm_gic_save(bool modem_wake, int from_idle);
+void msm_gic_save(void);
 void msm_gic_restore(void);
 void core1_gic_configure_and_raise(void);
 #endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 59e285b..4c020e3 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -339,7 +339,7 @@
 obj-$(CONFIG_ARCH_MSM8X60) += board-msm8x60-vcm.o
 endif
 obj-$(CONFIG_MSM_OCMEM) += ocmem.o ocmem_allocator.o ocmem_notifier.o
-obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o
+obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o ocmem_rdm.o
 
 obj-$(CONFIG_ARCH_MSM7X27) += gpiomux-7x27.o gpiomux-v1.o gpiomux.o
 obj-$(CONFIG_ARCH_MSM7X30) += gpiomux-7x30.o gpiomux-v1.o gpiomux.o
diff --git a/arch/arm/mach-msm/cache_erp.c b/arch/arm/mach-msm/cache_erp.c
index c3302ec..8a73c84 100644
--- a/arch/arm/mach-msm/cache_erp.c
+++ b/arch/arm/mach-msm/cache_erp.c
@@ -253,22 +253,11 @@
 	int print_regs = cesr & CESR_PRINT_MASK;
 	int log_event = cesr & CESR_LOG_EVENT_MASK;
 
-	void *const saw_bases[] = {
-		MSM_SAW0_BASE,
-		MSM_SAW1_BASE,
-		MSM_SAW2_BASE,
-		MSM_SAW3_BASE,
-	};
-
 	if (print_regs) {
 		pr_alert("L1 / TLB Error detected on CPU %d!\n", cpu);
 		pr_alert("\tCESR      = 0x%08x\n", cesr);
 		pr_alert("\tCPU speed = %lu\n", acpuclk_get_rate(cpu));
 		pr_alert("\tMIDR      = 0x%08x\n", read_cpuid_id());
-		pr_alert("\tPTE fuses = 0x%08x\n",
-					readl_relaxed(MSM_QFPROM_BASE + 0xC0));
-		pr_alert("\tPMIC_VREG = 0x%08x\n",
-					readl_relaxed(saw_bases[cpu] + 0x14));
 	}
 
 	if (cesr & CESR_DCTPE) {
@@ -550,12 +539,18 @@
 	return 0;
 }
 
+static struct of_device_id cache_erp_match_table[] = {
+	{	.compatible = "qcom,cache_erp",	},
+	{}
+};
+
 static struct platform_driver msm_cache_erp_driver = {
 	.probe = msm_cache_erp_probe,
 	.remove = msm_cache_erp_remove,
 	.driver = {
 		.name = MODULE_NAME,
 		.owner = THIS_MODULE,
+		.of_match_table = cache_erp_match_table,
 	},
 };
 
diff --git a/arch/arm/mach-msm/include/mach/msm_memory_dump.h b/arch/arm/mach-msm/include/mach/msm_memory_dump.h
new file mode 100644
index 0000000..3b8c9fd
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_memory_dump.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_MEMORY_DUMP_H
+#define __MSM_MEMORY_DUMP_H
+
+#include <linux/types.h>
+
+enum dump_client_type {
+	MSM_CPU_CTXT = 0,
+	MSM_CACHE,
+	MSM_OCMEM,
+	MSM_ETB,
+	MSM_ETM,
+	MSM_TMC,
+	MAX_NUM_CLIENTS,
+};
+
+struct msm_client_dump {
+	enum dump_client_type id;
+	unsigned long start_addr;
+	unsigned long end_addr;
+};
+
+struct msm_dump_table {
+	u32 version;
+	u32 num_entries;
+	struct msm_client_dump client_entries[MAX_NUM_CLIENTS];
+};
+
+struct msm_memory_dump {
+	unsigned long dump_table_phys;
+	struct msm_dump_table *dump_table_ptr;
+};
+
+#define TABLE_MAJOR(val)	(val >> 20)
+#define TABLE_MINOR(val)	(val & 0xFFFFF)
+#define MK_TABLE(ma, mi)	((ma << 20) | mi)
+
+#ifndef CONFIG_MSM_MEMORY_DUMP
+static inline int msm_dump_table_register(struct msm_client_dump *entry)
+{
+	return -EIO;
+}
+#else
+int msm_dump_table_register(struct msm_client_dump *client_entry);
+#endif
+#endif
diff --git a/arch/arm/mach-msm/include/mach/ocmem.h b/arch/arm/mach-msm/include/mach/ocmem.h
index 927624d..415f8ed 100644
--- a/arch/arm/mach-msm/include/mach/ocmem.h
+++ b/arch/arm/mach-msm/include/mach/ocmem.h
@@ -22,7 +22,7 @@
 
 /* Maximum number of slots in DM */
 #define OCMEM_MAX_CHUNKS 32
-#define MIN_CHUNK_SIZE (SZ_1K/8)
+#define MIN_CHUNK_SIZE 128
 
 struct ocmem_notifier;
 
@@ -61,7 +61,7 @@
 	/* IMEM Clients */
 	OCMEM_LP_AUDIO,
 	OCMEM_SENSORS,
-	OCMEM_BLAST,
+	OCMEM_OTHER_OS,
 	OCMEM_CLIENT_MAX,
 };
 
@@ -108,8 +108,13 @@
 int ocmem_shrink(int client_id, struct ocmem_buf *buf,
 			unsigned long new_size);
 
-int ocmem_expand(int client_id, struct ocmem_buf *buf,
-			unsigned long new_size);
+/* Transfer APIs */
+int ocmem_map(int client_id, struct ocmem_buf *buffer,
+			struct ocmem_map_list *list);
+
+
+int ocmem_unmap(int client_id, struct ocmem_buf *buffer,
+			struct ocmem_map_list *list);
 
 /* Priority Enforcement APIs */
 int ocmem_evict(int client_id);
diff --git a/arch/arm/mach-msm/include/mach/ocmem_priv.h b/arch/arm/mach-msm/include/mach/ocmem_priv.h
index 64c9ffe..70b5a45 100644
--- a/arch/arm/mach-msm/include/mach/ocmem_priv.h
+++ b/arch/arm/mach-msm/include/mach/ocmem_priv.h
@@ -20,12 +20,13 @@
 #include "ocmem.h"
 #include <mach/msm_iomap.h>
 #include <asm/io.h>
+#include <linux/platform_device.h>
 
 #define OCMEM_PHYS_BASE 0xFEC00000
 #define OCMEM_PHYS_SIZE 0x180000
 
-#define TO_OCMEM 0x1
-#define TO_DDR 0x2
+#define TO_OCMEM 0x0
+#define TO_DDR 0x1
 
 struct ocmem_zone;
 
@@ -38,7 +39,7 @@
 	int owner;
 	int active_regions;
 	int max_regions;
-	struct list_head region_list;
+	struct list_head req_list;
 	unsigned long z_start;
 	unsigned long z_end;
 	unsigned long z_head;
@@ -78,12 +79,24 @@
 	bool interleaved;
 };
 
+struct ocmem_eviction_data {
+	struct completion completion;
+	struct list_head victim_list;
+	struct list_head req_list;
+	struct work_struct work;
+	int prio;
+	int pending;
+	bool passive;
+};
+
 struct ocmem_req {
 	struct rw_semaphore rw_sem;
 	/* Chain in sched queue */
 	struct list_head sched_list;
 	/* Chain in zone list */
 	struct list_head zone_list;
+	/* Chain in eviction list */
+	struct list_head eviction_list;
 	int owner;
 	int prio;
 	uint32_t req_id;
@@ -100,6 +113,7 @@
 	unsigned long req_start;
 	unsigned long req_end;
 	unsigned long req_sz;
+	struct ocmem_eviction_data *edata;
 };
 
 struct ocmem_handle {
@@ -150,13 +164,20 @@
 
 int ocmem_notifier_init(void);
 int check_notifier(int);
+const char *get_name(int);
+int check_id(int);
 int dispatch_notification(int, enum ocmem_notif_type, struct ocmem_buf *);
 
 int ocmem_sched_init(void);
+int ocmem_rdm_init(struct platform_device *);
 int process_allocate(int, struct ocmem_handle *, unsigned long, unsigned long,
 			unsigned long, bool, bool);
 int process_free(int, struct ocmem_handle *);
-int process_xfer(int, struct ocmem_handle *, struct ocmem_map_list *,
-			int direction);
+int process_xfer(int, struct ocmem_handle *, struct ocmem_map_list *, int);
+int process_evict(int);
+int process_restore(int);
+int process_shrink(int, struct ocmem_handle *, unsigned long);
+int ocmem_rdm_transfer(int, struct ocmem_map_list *,
+				unsigned long, int);
 unsigned long process_quota(int);
 #endif
diff --git a/arch/arm/mach-msm/mpm-8625.c b/arch/arm/mach-msm/mpm-8625.c
index 954e5cc..3c219be 100644
--- a/arch/arm/mach-msm/mpm-8625.c
+++ b/arch/arm/mach-msm/mpm-8625.c
@@ -221,7 +221,7 @@
 		/* save the contents of GIC CPU interface and Distributor
 		 * Disable all the Interrupts, if we enter from idle pc
 		 */
-		msm_gic_save(modem_wake, from_idle);
+		msm_gic_save();
 		irq_set_irq_type(MSM8625_INT_A9_M2A_6, IRQF_TRIGGER_RISING);
 		enable_irq(MSM8625_INT_A9_M2A_6);
 		pr_debug("%s going for sleep now\n", __func__);
diff --git a/arch/arm/mach-msm/msm_memory_dump.c b/arch/arm/mach-msm/msm_memory_dump.c
new file mode 100644
index 0000000..4f48a0d
--- /dev/null
+++ b/arch/arm/mach-msm/msm_memory_dump.c
@@ -0,0 +1,78 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_memory_dump.h>
+
+
+/*TODO: Needs to be set to correct value */
+#define DUMP_TABLE_OFFSET	0x20
+#define MSM_DUMP_TABLE_VERSION	MK_TABLE(1, 0)
+
+static struct msm_memory_dump mem_dump_data;
+
+static int msm_memory_dump_panic(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	writel_relaxed(0, MSM_IMEM_BASE + DUMP_TABLE_OFFSET);
+	return 0;
+}
+
+static struct notifier_block msm_memory_dump_blk = {
+	.notifier_call  = msm_memory_dump_panic,
+};
+
+int msm_dump_table_register(struct msm_client_dump *client_entry)
+{
+	struct msm_client_dump *entry;
+	struct msm_dump_table *table = mem_dump_data.dump_table_ptr;
+
+	if (!table || table->num_entries >= MAX_NUM_CLIENTS)
+		return -EINVAL;
+	entry = &table->client_entries[table->num_entries];
+	entry->id = client_entry->id;
+	entry->start_addr = client_entry->start_addr;
+	entry->end_addr = client_entry->end_addr;
+	table->num_entries++;
+	/* dmac_flush_range() takes [start, end); "table + 1" spans the struct */
+	dmac_flush_range(table, table + 1);
+	return 0;
+}
+EXPORT_SYMBOL(msm_dump_table_register);
+
+static int __init init_memory_dump(void)
+{
+	struct msm_dump_table *table;
+
+	mem_dump_data.dump_table_ptr = kzalloc(sizeof(struct msm_dump_table),
+						GFP_KERNEL);
+	if (!mem_dump_data.dump_table_ptr) {
+		printk(KERN_ERR "unable to allocate memory for dump table\n");
+		return -ENOMEM;
+	}
+	table = mem_dump_data.dump_table_ptr;
+	table->version = MSM_DUMP_TABLE_VERSION;
+	mem_dump_data.dump_table_phys = virt_to_phys(table);
+	/* TODO: Need to write physical address of table to IMEM */
+	atomic_notifier_chain_register(&panic_notifier_list,
+						&msm_memory_dump_blk);
+	printk(KERN_INFO "MSM Memory Dump table set up\n");
+	return 0;
+}
+
+early_initcall(init_memory_dump);
+
diff --git a/arch/arm/mach-msm/msm_smem_iface.c b/arch/arm/mach-msm/msm_smem_iface.c
new file mode 100644
index 0000000..b09fda5
--- /dev/null
+++ b/arch/arm/mach-msm/msm_smem_iface.c
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_smem_iface.h"
+
+/**
+ * msm_smem_get_cpr_info() - Copy Core Power Reduction (CPR) driver specific
+ *			data from Shared Memory (SMEM).
+ * @cpr_info: Pointer to CPR data. Memory is to be allocated and freed by
+ *            the calling function.
+ *
+ * Copy CPR specific data from SMEM to cpr_info.
+ */
+
+void msm_smem_get_cpr_info(struct cpr_info_type *cpr_info)
+{
+	struct boot_info_for_apps *boot_info;
+	struct cpr_info_type *temp_cpr_info;
+	uint32_t smem_boot_info_size;
+
+	boot_info = smem_get_entry(SMEM_BOOT_INFO_FOR_APPS,
+					&smem_boot_info_size);
+	BUG_ON(!boot_info);
+	if (smem_boot_info_size < sizeof(struct boot_info_for_apps)) {
+		pr_err("%s: Shared boot info data structure too small!\n",
+			__func__);
+		BUG();
+	} else {
+		pr_debug("%s: Shared boot info available.\n", __func__);
+	}
+	temp_cpr_info = (struct cpr_info_type *) &(boot_info->cpr_info);
+	cpr_info->ring_osc = temp_cpr_info->ring_osc;
+	cpr_info->turbo_quot = temp_cpr_info->turbo_quot;
+	cpr_info->pvs_fuse = temp_cpr_info->pvs_fuse;
+}
diff --git a/arch/arm/mach-msm/msm_smem_iface.h b/arch/arm/mach-msm/msm_smem_iface.h
new file mode 100644
index 0000000..2da0232
--- /dev/null
+++ b/arch/arm/mach-msm/msm_smem_iface.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_SMEM_IFACE_H
+#define __ARCH_ARM_MACH_MSM_SMEM_IFACE_H
+
+#include <mach/msm_smsm.h>
+#include "smd_private.h"
+
+#define MAX_KEY_EVENTS 10
+#define MAX_SEC_KEY_PAYLOAD 32
+
+struct boot_shared_ssd_status_info {
+	uint32_t update_status;  /* To check if process is successful or not */
+	uint32_t bl_error_code;  /* To indicate error code in bootloader */
+};
+
+struct boot_symmetric_key_info {
+	uint32_t key_len; /* Encrypted Symmetric Key Length */
+	uint32_t iv_len;  /* Initialization Vector Length */
+	uint8_t  key[MAX_SEC_KEY_PAYLOAD]; /* Encrypted Symmetric Key */
+	uint8_t  iv[MAX_SEC_KEY_PAYLOAD]; /* Initialization Vector */
+};
+
+struct cpr_info_type {
+	uint8_t ring_osc;         /* CPR FUSE [0]: TURBO RO SEL BIT */
+	uint8_t turbo_quot;        /* CPRFUSE[1:7] : TURBO QUOT*/
+	uint8_t pvs_fuse;         /* TURBO PVS FUSE */
+};
+
+struct boot_info_for_apps {
+	uint32_t apps_image_start_addr; /* apps image start address */
+	uint32_t boot_flags; /* bit mask of upto 32 flags */
+	struct boot_shared_ssd_status_info ssd_status_info; /* SSD status */
+	struct boot_symmetric_key_info key_info;
+	uint16_t boot_keys_pressed[MAX_KEY_EVENTS]; /* Log of key presses */
+	uint32_t timetick; /* Modem tick timer value before apps out of reset */
+	struct cpr_info_type cpr_info;
+	uint8_t PAD[25];
+};
+
+void msm_smem_get_cpr_info(struct cpr_info_type *cpr_info);
+
+#endif /* __ARCH_ARM_MACH_MSM_SMEM_IFACE_H */
diff --git a/arch/arm/mach-msm/ocmem.c b/arch/arm/mach-msm/ocmem.c
index a233080..753f6fb 100644
--- a/arch/arm/mach-msm/ocmem.c
+++ b/arch/arm/mach-msm/ocmem.c
@@ -30,8 +30,9 @@
 */
 
 #define OCMEM_REGION_CTL_BASE 0xFDD0003C
-#define OCMEM_REGION_CTL_SIZE 0xC
+#define OCMEM_REGION_CTL_SIZE 0xFD0
 #define REGION_ENABLE 0x00003333
+#define GRAPHICS_REGION_CTL (0x17F000)
 
 struct ocmem_partition {
 	const char *name;
@@ -55,6 +56,7 @@
 static struct ocmem_plat_data *ocmem_pdata;
 
 #define CLIENT_NAME_MAX 10
+
 /* Must be in sync with enum ocmem_client */
 static const char *client_names[OCMEM_CLIENT_MAX] = {
 	"graphics",
@@ -64,7 +66,7 @@
 	"voice",
 	"lp_audio",
 	"sensors",
-	"blast",
+	"other_os",
 };
 
 struct ocmem_quota_table {
@@ -85,7 +87,7 @@
 	{ "voice", OCMEM_VOICE,  0x0, 0x0, 0x0, 0 },
 	{ "hp_audio", OCMEM_HP_AUDIO, 0x0, 0x0, 0x0, 0},
 	{ "lp_audio", OCMEM_LP_AUDIO, 0x80000, 0xA0000, 0xA0000, 0},
-	{ "blast", OCMEM_BLAST, 0x120000, 0x20000, 0x20000, 0},
+	{ "other_os", OCMEM_OTHER_OS, 0x120000, 0x20000, 0x20000, 0},
 	{ "sensors", OCMEM_SENSORS, 0x140000, 0x40000, 0x40000, 0},
 };
 
@@ -99,6 +101,18 @@
 	return -EINVAL;
 }
 
+int check_id(int id)
+{
+	return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
+}
+
+const char *get_name(int id)
+{
+	if (!check_id(id))
+		return NULL;
+	return client_names[id];
+}
+
 inline unsigned long phys_to_offset(unsigned long addr)
 {
 	if (!ocmem_pdata)
@@ -455,7 +469,7 @@
 		zone->owner = part->id;
 		zone->active_regions = 0;
 		zone->max_regions = 0;
-		INIT_LIST_HEAD(&zone->region_list);
+		INIT_LIST_HEAD(&zone->req_list);
 		zone->z_ops = z_ops;
 		if (part->p_tail) {
 			z_ops->allocate = allocate_tail;
@@ -519,6 +533,13 @@
 	writel_relaxed(REGION_ENABLE, ocmem_region_vbase);
 	writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 4);
 	writel_relaxed(REGION_ENABLE, ocmem_region_vbase + 8);
+	/* Enable the ocmem graphics mpU as a workaround in Virtio */
+	/* This will be programmed by TZ after TZ support is integrated */
+	writel_relaxed(GRAPHICS_REGION_CTL, ocmem_region_vbase + 0xFCC);
+
+	if (ocmem_rdm_init(pdev))
+		return -EBUSY;
+
 	dev_dbg(dev, "initialized successfully\n");
 	return 0;
 }
diff --git a/arch/arm/mach-msm/ocmem_api.c b/arch/arm/mach-msm/ocmem_api.c
index bed13de..bb32fca 100644
--- a/arch/arm/mach-msm/ocmem_api.c
+++ b/arch/arm/mach-msm/ocmem_api.c
@@ -13,10 +13,8 @@
 #include <linux/slab.h>
 #include <mach/ocmem_priv.h>
 
-static inline int check_id(int id)
-{
-	return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
-}
+static DEFINE_MUTEX(ocmem_eviction_lock);
+static DECLARE_BITMAP(evicted, OCMEM_CLIENT_MAX);
 
 static struct ocmem_handle *generate_handle(void)
 {
@@ -61,6 +59,24 @@
 	return 0;
 }
 
+static int __ocmem_shrink(int id, struct ocmem_buf *buf, unsigned long len)
+{
+	int ret = 0;
+	struct ocmem_handle *handle = buffer_to_handle(buf);
+
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_mutex);
+	ret = process_shrink(id, handle, len);
+	mutex_unlock(&handle->handle_mutex);
+
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
 static struct ocmem_buf *__ocmem_allocate_range(int id, unsigned long min,
 		unsigned long max, unsigned long step, bool block, bool wait)
 {
@@ -218,6 +234,15 @@
 	return __ocmem_free(client_id, buffer);
 }
 
+int ocmem_shrink(int client_id, struct ocmem_buf *buffer, unsigned long len)
+{
+	if (!buffer)
+		return -EINVAL;
+	if (len >= buffer->len)
+		return -EINVAL;
+	return __ocmem_shrink(client_id, buffer, len);
+}
+
 int pre_validate_chunk_list(struct ocmem_map_list *list)
 {
 	int i = 0;
@@ -236,8 +261,12 @@
 
 	for (i = 0; i < list->num_chunks; i++) {
 		if (!chunks[i].ddr_paddr ||
-			chunks[i].size < MIN_CHUNK_SIZE)
+			chunks[i].size < MIN_CHUNK_SIZE ||
+			!IS_ALIGNED(chunks[i].size, MIN_CHUNK_SIZE)) {
+			pr_err("Invalid ocmem chunk at index %d (p: %lx, size %lx)\n",
+					i, chunks[i].ddr_paddr, chunks[i].size);
 			return -EINVAL;
+		}
 	}
 	return 0;
 }
@@ -265,7 +294,7 @@
 		return -EINVAL;
 	}
 
-	if (!pre_validate_chunk_list(list))
+	if (pre_validate_chunk_list(list) != 0)
 		return -EINVAL;
 
 	handle = buffer_to_handle(buffer);
@@ -303,14 +332,10 @@
 		return -EINVAL;
 	}
 
-	if (!pre_validate_chunk_list(list))
+	if (pre_validate_chunk_list(list) != 0)
 		return -EINVAL;
 
 	handle = buffer_to_handle(buffer);
-
-	if (!handle)
-		return -EINVAL;
-
 	mutex_lock(&handle->handle_mutex);
 	ret = process_xfer(client_id, handle, list, TO_DDR);
 	mutex_unlock(&handle->handle_mutex);
@@ -325,3 +350,52 @@
 	}
 	return process_quota(client_id);
 }
+
+/* Synchronous eviction/restore calls */
+/* Only a single eviction or restoration is allowed */
+/* Evictions/Restorations cannot be concurrent with other maps */
+int ocmem_evict(int client_id)
+{
+	int ret = 0;
+
+	if (!check_id(client_id)) {
+		pr_err("ocmem: Invalid client id: %d\n", client_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ocmem_eviction_lock);
+	if (test_bit(client_id, evicted)) {
+		pr_err("ocmem: Previous eviction was not restored by %d\n",
+			client_id);
+		mutex_unlock(&ocmem_eviction_lock);
+		return -EINVAL;
+	}
+
+	ret = process_evict(client_id);
+	if (ret == 0)
+		set_bit(client_id, evicted);
+
+	mutex_unlock(&ocmem_eviction_lock);
+	return ret;
+}
+
+int ocmem_restore(int client_id)
+{
+	int ret = 0;
+
+	if (!check_id(client_id)) {
+		pr_err("ocmem: Invalid client id: %d\n", client_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ocmem_eviction_lock);
+	if (!test_bit(client_id, evicted)) {
+		pr_err("ocmem: No previous eviction by %d\n", client_id);
+		mutex_unlock(&ocmem_eviction_lock);
+		return -EINVAL;
+	}
+	ret = process_restore(client_id);
+	clear_bit(client_id, evicted);
+	mutex_unlock(&ocmem_eviction_lock);
+	return ret;
+}
diff --git a/arch/arm/mach-msm/ocmem_notifier.c b/arch/arm/mach-msm/ocmem_notifier.c
index 4754f44..9fbcd73 100644
--- a/arch/arm/mach-msm/ocmem_notifier.c
+++ b/arch/arm/mach-msm/ocmem_notifier.c
@@ -24,11 +24,6 @@
 	unsigned listeners;
 } notifiers[OCMEM_CLIENT_MAX];
 
-static int check_id(int id)
-{
-	return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
-}
-
 int check_notifier(int id)
 {
 	int ret = 0;
diff --git a/arch/arm/mach-msm/ocmem_rdm.c b/arch/arm/mach-msm/ocmem_rdm.c
new file mode 100644
index 0000000..6b93d04
--- /dev/null
+++ b/arch/arm/mach-msm/ocmem_rdm.c
@@ -0,0 +1,238 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/rbtree.h>
+#include <linux/genalloc.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <mach/ocmem_priv.h>
+
+#define RDM_MAX_ENTRIES 32
+#define RDM_MAX_CLIENTS 2
+
+/* Data Mover Parameters */
+#define DM_BLOCK_128 0x0
+#define DM_BLOCK_256 0x1
+#define DM_BR_ID_LPASS 0x0
+#define DM_BR_ID_GPS 0x1
+
+#define DM_INTR_CLR (0x8)
+#define DM_INTR_MASK (0xC)
+#define DM_GEN_STATUS (0x10)
+#define DM_STATUS (0x14)
+#define DM_CTRL (0x1000)
+#define DM_TBL_BASE (0x1010)
+#define DM_TBL_IDX(x) ((x) * 0x18)
+#define DM_TBL_n(x) (DM_TBL_BASE + (DM_TBL_IDX(x)))
+#define DM_TBL_n_offset(x) DM_TBL_n(x)
+#define DM_TBL_n_size(x) (DM_TBL_n(x)+0x4)
+#define DM_TBL_n_paddr(x) (DM_TBL_n(x)+0x8)
+#define DM_TBL_n_ctrl(x) (DM_TBL_n(x)+0x10)
+
+#define BR_CTRL (0x0)
+#define BR_CLIENT_BASE (0x4)
+#define BR_CLIENT_n_IDX(x) ((x) * 0x4)
+#define BR_CLIENT_n_ctrl(x) (BR_CLIENT_BASE + (BR_CLIENT_n_IDX(x)))
+#define BR_STATUS (0x14)
+/* 16 entries per client are supported */
+/* Use entries 0 - 15 for client0 */
+#define BR_CLIENT0_MASK	(0x1000)
+/* Use entries 16- 31 for client1 */
+#define BR_CLIENT1_MASK	(0x2010)
+
+#define BR_TBL_BASE (0x40)
+#define BR_TBL_IDX(x) ((x) * 0x18)
+#define BR_TBL_n(x) (BR_TBL_BASE + (BR_TBL_IDX(x)))
+#define BR_TBL_n_offset(x) BR_TBL_n(x)
+#define BR_TBL_n_size(x) (BR_TBL_n(x)+0x4)
+#define BR_TBL_n_paddr(x) (BR_TBL_n(x)+0x8)
+#define BR_TBL_n_ctrl(x) (BR_TBL_n(x)+0x10)
+
+/* Constants and Shifts */
+#define BR_TBL_ENTRY_ENABLE 0x1
+#define BR_TBL_START 0x0
+#define BR_TBL_END 0x8
+#define BR_RW_SHIFT 0x2
+
+#define DM_TBL_START 0x10
+#define DM_TBL_END 0x18
+#define DM_CLIENT_SHIFT 0x8
+#define DM_BR_ID_SHIFT 0x4
+#define DM_BR_BLK_SHIFT 0x1
+#define DM_DIR_SHIFT 0x0
+
+#define DM_DONE 0x1
+#define DM_INTR_ENABLE 0x0
+#define DM_INTR_DISABLE 0x1
+
+static void *br_base;
+static void *dm_base;
+
+static atomic_t dm_pending;
+static wait_queue_head_t dm_wq;
+/* Shadow tables for debug purposes */
+struct ocmem_br_table {
+	unsigned int offset;
+	unsigned int size;
+	unsigned int ddr_low;
+	unsigned int ddr_high;
+	unsigned int ctrl;
+} br_table[RDM_MAX_ENTRIES];
+
+/* DM Table replicates an entire BR table */
+/* Note: There are more than 1 BRs in the system */
+struct ocmem_dm_table {
+	unsigned int offset;
+	unsigned int size;
+	unsigned int ddr_low;
+	unsigned int ddr_high;
+	unsigned int ctrl;
+} dm_table[RDM_MAX_ENTRIES];
+
+/* Wrapper that will shadow these values later */
+static int ocmem_read(void *at)
+{
+	return readl_relaxed(at);
+}
+
+/* Wrapper that will shadow these values later */
+static int ocmem_write(unsigned long val, void *at)
+{
+	writel_relaxed(val, at);
+	return 0;
+}
+
+static inline int client_ctrl_id(int id)
+{
+	return (id == OCMEM_SENSORS) ? 1 : 0;
+}
+
+static inline int client_slot_start(int id)
+{
+
+	return client_ctrl_id(id) * 16;
+}
+
+static irqreturn_t ocmem_dm_irq_handler(int irq, void *dev_id)
+{
+	atomic_set(&dm_pending, 0);
+	ocmem_write(DM_INTR_DISABLE, dm_base + DM_INTR_CLR);
+	wake_up_interruptible(&dm_wq);
+	return IRQ_HANDLED;
+}
+
+/* Lock during transfers */
+int ocmem_rdm_transfer(int id, struct ocmem_map_list *clist,
+			unsigned long start, int direction)
+{
+	int num_chunks = clist->num_chunks;
+	int slot = client_slot_start(id);
+	int table_start = 0;
+	int table_end = 0;
+	int br_ctrl = 0;
+	int br_id = 0;
+	int dm_ctrl = 0;
+	int i = 0;
+	int j = 0;
+	int status = 0;
+
+	for (i = 0, j = slot; i < num_chunks; i++, j++) {
+
+		struct ocmem_chunk *chunk = &clist->chunks[i];
+		int sz = chunk->size;
+		int paddr = chunk->ddr_paddr;
+		int tbl_n_ctrl = 0;
+
+		tbl_n_ctrl |= BR_TBL_ENTRY_ENABLE;
+		if (chunk->ro)
+			tbl_n_ctrl |= (1 << BR_RW_SHIFT);
+
+		/* Table Entry n of BR and DM */
+		ocmem_write(start, br_base + BR_TBL_n_offset(j));
+		ocmem_write(sz, br_base + BR_TBL_n_size(j));
+		ocmem_write(paddr, br_base + BR_TBL_n_paddr(j));
+		ocmem_write(tbl_n_ctrl, br_base + BR_TBL_n_ctrl(j));
+
+		ocmem_write(start, dm_base + DM_TBL_n_offset(j));
+		ocmem_write(sz, dm_base + DM_TBL_n_size(j));
+		ocmem_write(paddr, dm_base + DM_TBL_n_paddr(j));
+		ocmem_write(tbl_n_ctrl, dm_base + DM_TBL_n_ctrl(j));
+
+		start += sz;
+	}
+
+	br_id = client_ctrl_id(id);
+	table_start = slot;
+	table_end = slot + num_chunks - 1;
+	br_ctrl |= (table_start << BR_TBL_START);
+	br_ctrl |= (table_end << BR_TBL_END);
+
+	ocmem_write(br_ctrl, (br_base + BR_CLIENT_n_ctrl(br_id)));
+	/* Enable BR */
+	ocmem_write(0x1, br_base + BR_CTRL);
+
+	/* Compute DM Control Value */
+	dm_ctrl |= (table_start << DM_TBL_START);
+	dm_ctrl |= (table_end << DM_TBL_END);
+
+	dm_ctrl |= (DM_BR_ID_LPASS << DM_BR_ID_SHIFT);
+	dm_ctrl |= (DM_BLOCK_256 << DM_BR_BLK_SHIFT);
+	dm_ctrl |= (direction << DM_DIR_SHIFT);
+
+	status = ocmem_read(dm_base + DM_STATUS);
+	pr_debug("Transfer status before %x\n", status);
+	atomic_set(&dm_pending, 1);
+	/* Trigger DM */
+	ocmem_write(dm_ctrl, dm_base + DM_CTRL);
+	pr_debug("ocmem: rdm: dm_ctrl %x br_ctrl %x\n", dm_ctrl, br_ctrl);
+
+	wait_event_interruptible(dm_wq,
+		atomic_read(&dm_pending) == 0);
+
+	return 0;
+}
+
+int ocmem_rdm_init(struct platform_device *pdev)
+{
+
+	struct ocmem_plat_data *pdata = NULL;
+	int rc = 0;
+
+	pdata = platform_get_drvdata(pdev);
+
+	br_base = pdata->br_base;
+	dm_base = pdata->dm_base;
+
+	rc = devm_request_irq(&pdev->dev, pdata->dm_irq, ocmem_dm_irq_handler,
+				IRQF_TRIGGER_RISING, "ocmem_dm_irq", pdata);
+
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to request dm irq");
+		return -EINVAL;
+	}
+
+	init_waitqueue_head(&dm_wq);
+	/* enable dm interrupts */
+	ocmem_write(DM_INTR_ENABLE, dm_base + DM_INTR_MASK);
+	return 0;
+}
diff --git a/arch/arm/mach-msm/ocmem_sched.c b/arch/arm/mach-msm/ocmem_sched.c
index 10a267c..f6d066d 100644
--- a/arch/arm/mach-msm/ocmem_sched.c
+++ b/arch/arm/mach-msm/ocmem_sched.c
@@ -54,7 +54,7 @@
 	MIN_PRIO = 0x0,
 	NO_PRIO = MIN_PRIO,
 	PRIO_SENSORS = 0x1,
-	PRIO_BLAST = 0x1,
+	PRIO_OTHER_OS = 0x1,
 	PRIO_LP_AUDIO = 0x1,
 	PRIO_HP_AUDIO = 0x2,
 	PRIO_VOICE = 0x3,
@@ -75,6 +75,21 @@
 */
 #define SCHED_DELAY 10
 
+static struct list_head rdm_queue;
+static struct mutex rdm_mutex;
+static struct workqueue_struct *ocmem_rdm_wq;
+static struct workqueue_struct *ocmem_eviction_wq;
+
+static struct ocmem_eviction_data *evictions[OCMEM_CLIENT_MAX];
+
+struct ocmem_rdm_work {
+	int id;
+	struct ocmem_map_list *list;
+	struct ocmem_handle *handle;
+	int direction;
+	struct work_struct work;
+};
+
 /* OCMEM Operational modes */
 enum ocmem_client_modes {
 	OCMEM_PERFORMANCE = 1,
@@ -107,7 +122,7 @@
 	{OCMEM_VOICE, PRIO_VOICE, OCMEM_PASSIVE, OCMEM_BLOCKED},
 	{OCMEM_LP_AUDIO, PRIO_LP_AUDIO, OCMEM_LOW_POWER, OCMEM_SYSNOC},
 	{OCMEM_SENSORS, PRIO_SENSORS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
-	{OCMEM_BLAST, PRIO_BLAST, OCMEM_LOW_POWER, OCMEM_SYSNOC},
+	{OCMEM_OTHER_OS, PRIO_OTHER_OS, OCMEM_LOW_POWER, OCMEM_SYSNOC},
 };
 
 static struct rb_root sched_tree;
@@ -119,6 +134,8 @@
 	struct rb_node region_rb;
 	/* Hash map of requests */
 	struct idr region_idr;
+	/* Chain in eviction list */
+	struct list_head eviction_list;
 	unsigned long r_start;
 	unsigned long r_end;
 	unsigned long r_sz;
@@ -244,6 +261,7 @@
 	if (!p)
 		return NULL;
 	idr_init(&p->region_idr);
+	INIT_LIST_HEAD(&p->eviction_list);
 	p->r_start = p->r_end = p->r_sz = 0x0;
 	p->max_prio = NO_PRIO;
 	return p;
@@ -461,10 +479,14 @@
 {
 	int rc = 0;
 
+	down_write(&req->rw_sem);
+
 	mutex_lock(&sched_mutex);
 	rc = __sched_map(req);
 	mutex_unlock(&sched_mutex);
 
+	up_write(&req->rw_sem);
+
 	if (rc == OP_FAIL)
 		return -EINVAL;
 
@@ -475,10 +497,14 @@
 {
 	int rc = 0;
 
+	down_write(&req->rw_sem);
+
 	mutex_lock(&sched_mutex);
 	rc = __sched_unmap(req);
 	mutex_unlock(&sched_mutex);
 
+	up_write(&req->rw_sem);
+
 	if (rc == OP_FAIL)
 		return -EINVAL;
 
@@ -713,6 +739,104 @@
 }
 
 /* Must be called with sched_mutex held */
+static int __sched_shrink(struct ocmem_req *req, unsigned long new_sz)
+{
+	int owner = req->owner;
+	int ret = 0;
+
+	struct ocmem_req *matched_req = NULL;
+	struct ocmem_region *matched_region = NULL;
+	struct ocmem_region *region = NULL;
+	/* Signed so an error return from the zone allocator is
+	 * detectable below; with an unsigned type the < 0 check was
+	 * always false.
+	 */
+	long alloc_addr = 0x0;
+
+	struct ocmem_zone *zone = get_zone(owner);
+
+	BUG_ON(!zone);
+
+	/* The shrink should not be called for zero size */
+	BUG_ON(new_sz == 0);
+
+	matched_region = find_region_match(req->req_start, req->req_end);
+	matched_req = find_req_match(req->req_id, matched_region);
+
+	if (!matched_region || !matched_req)
+		goto invalid_op_error;
+	if (matched_req != req)
+		goto invalid_op_error;
+
+	/* Give back the whole allocation, then grab a (possibly
+	 * relocated) smaller one.
+	 */
+	ret = zone->z_ops->free(zone,
+		matched_req->req_start, matched_req->req_sz);
+
+	if (ret < 0) {
+		pr_err("Zone Free operation failed\n");
+		goto internal_error;
+	}
+
+	alloc_addr = zone->z_ops->allocate(zone, new_sz);
+
+	if (alloc_addr < 0) {
+		pr_err("Zone Allocation operation failed\n");
+		goto internal_error;
+	}
+
+	/* Detach the region from the interval tree */
+	/* This is to guarantee that the change in size
+	 * causes the tree to be rebalanced if required */
+
+	detach_req(matched_region, req);
+	if (req_count(matched_region) == 0) {
+		/* Region became empty: take it out of the tree and
+		 * reuse it for the shrunk request.
+		 */
+		remove_region(matched_region);
+		region = matched_region;
+	} else {
+		region = create_region();
+		if (!region) {
+			pr_err("ocmem: Unable to create region\n");
+			goto internal_error;
+		}
+	}
+	/* update the request */
+	req->req_start = alloc_addr;
+	req->req_sz = new_sz;
+	req->req_end = alloc_addr + req->req_sz;
+
+	/* Note: the original code destroyed matched_region here whenever
+	 * req_count(region) == 0 — which is always true at this point,
+	 * since req was just detached and is not yet re-attached. That
+	 * either freed the very region reused above (use-after-free) or
+	 * destroyed a region still holding other requests. matched_region
+	 * is correctly disposed of through the two branches above, so no
+	 * extra destroy is needed.
+	 */
+
+	/* update request state */
+	SET_STATE(req, R_MUST_GROW);
+	SET_STATE(req, R_MUST_MAP);
+	req->op = SCHED_MAP;
+
+	/* attach the request to the region */
+	attach_req(region, req);
+	populate_region(region, req);
+	update_region_prio(region);
+
+	/* update the tree with new region */
+	if (insert_region(region)) {
+		pr_err("ocmem: Failed to insert the region\n");
+		zone->z_ops->free(zone, alloc_addr, new_sz);
+		detach_req(region, req);
+		update_region_prio(region);
+		/* req will be destroyed by the caller */
+		goto region_error;
+	}
+	return OP_COMPLETE;
+
+region_error:
+	destroy_region(region);
+internal_error:
+	pr_err("ocmem: shrink: Failed\n");
+	return OP_FAIL;
+invalid_op_error:
+	pr_err("ocmem: shrink: Failed to find matching region\n");
+	return OP_FAIL;
+}
+
+/* Must be called with sched_mutex held */
 static int __sched_allocate(struct ocmem_req *req, bool can_block,
 				bool can_wait)
 {
@@ -906,12 +1030,6 @@
 	return req;
 }
 
-int process_xfer(int id, struct ocmem_handle *handle,
-		struct ocmem_map_list *list, int direction)
-{
-
-	return 0;
-}
 
 unsigned long process_quota(int id)
 {
@@ -989,6 +1107,35 @@
 	return 0;
 }
 
+/* Shrink an existing request to shrink_size under the scheduler lock
+ * and republish the client-visible buffer address/length. Returns 0
+ * on success, -EINVAL if the scheduler rejected the shrink.
+ */
+static int do_shrink(struct ocmem_req *req, unsigned long shrink_size)
+{
+
+	int rc = 0;
+	struct ocmem_buf *buffer = NULL;
+
+	/* Exclude concurrent map/unmap of this request while its
+	 * extents change.
+	 */
+	down_write(&req->rw_sem);
+	buffer = req->buffer;
+
+	/* Take the scheduler mutex */
+	mutex_lock(&sched_mutex);
+	rc = __sched_shrink(req, shrink_size);
+	mutex_unlock(&sched_mutex);
+
+	if (rc == OP_FAIL)
+		goto err_op_fail;
+
+	else if (rc == OP_COMPLETE) {
+		/* The allocation may have been relocated by the zone
+		 * allocator; report the new address and size.
+		 */
+		buffer->addr = device_address(req->owner, req->req_start);
+		buffer->len = req->req_sz;
+	}
+
+	up_write(&req->rw_sem);
+	return 0;
+err_op_fail:
+	up_write(&req->rw_sem);
+	return -EINVAL;
+}
+
 static void ocmem_sched_wk_func(struct work_struct *work);
 DECLARE_DELAYED_WORK(ocmem_sched_thread, ocmem_sched_wk_func);
 
@@ -1076,6 +1223,250 @@
 	return 0;
 }
 
+/* Workqueue body for a queued RDM transfer: performs the DMA between
+ * DDR and OCMEM and notifies the owning client of the outcome.
+ */
+static void ocmem_rdm_worker(struct work_struct *work)
+{
+	int offset = 0;
+	int rc = 0;
+	int event;
+	struct ocmem_rdm_work *work_data = container_of(work,
+				struct ocmem_rdm_work, work);
+	int id = work_data->id;
+	struct ocmem_map_list *list = work_data->list;
+	int direction = work_data->direction;
+	struct ocmem_handle *handle = work_data->handle;
+	struct ocmem_req *req = handle_to_req(handle);
+	struct ocmem_buf *buffer = handle_to_buffer(handle);
+
+	/* Keep the request stable for the duration of the transfer */
+	down_write(&req->rw_sem);
+	offset = phys_to_offset(req->req_start);
+	rc = ocmem_rdm_transfer(id, list, offset, direction);
+	/* The completion event mirrors the transfer direction:
+	 * into OCMEM => MAP event, out to DDR => UNMAP event.
+	 */
+	if (work_data->direction == TO_OCMEM)
+		event = (rc == 0) ? OCMEM_MAP_DONE : OCMEM_MAP_FAIL;
+	else
+		event = (rc == 0) ? OCMEM_UNMAP_DONE : OCMEM_UNMAP_FAIL;
+
+	up_write(&req->rw_sem);
+	/* work_data was allocated in queue_transfer(); release it here */
+	kfree(work_data);
+	dispatch_notification(id, event, buffer);
+}
+
+/* Package an RDM transfer request and hand it to the RDM workqueue.
+ * Returns 0 on success, -ENOMEM if the work item cannot be allocated.
+ */
+int queue_transfer(struct ocmem_req *req, struct ocmem_handle *handle,
+			struct ocmem_map_list *list, int direction)
+{
+	struct ocmem_rdm_work *work_data = NULL;
+
+	down_write(&req->rw_sem);
+
+	work_data = kzalloc(sizeof(struct ocmem_rdm_work), GFP_ATOMIC);
+	/* An allocation failure here is recoverable — every caller
+	 * checks the return value — so fail gracefully rather than
+	 * BUG() and bring the system down.
+	 */
+	if (!work_data) {
+		up_write(&req->rw_sem);
+		return -ENOMEM;
+	}
+
+	work_data->handle = handle;
+	work_data->list = list;
+	work_data->id = req->owner;
+	work_data->direction = direction;
+	INIT_WORK(&work_data->work, ocmem_rdm_worker);
+	up_write(&req->rw_sem);
+	queue_work(ocmem_rdm_wq, &work_data->work);
+	return 0;
+}
+
+/* Evacuate a mapped buffer from OCMEM to DDR: unmap it, then queue an
+ * RDM transfer of its contents out of OCMEM. Returns 0 on success.
+ */
+int process_xfer_out(int id, struct ocmem_handle *handle,
+			struct ocmem_map_list *list)
+{
+	struct ocmem_req *req = NULL;
+	int rc = 0;
+
+	req = handle_to_req(handle);
+
+	if (!req)
+		return -EINVAL;
+
+	if (!is_mapped(req)) {
+		pr_err("Buffer is not already mapped\n");
+		goto transfer_out_error;
+	}
+
+	rc = process_unmap(req, req->req_start, req->req_end);
+	if (rc < 0) {
+		pr_err("Unmapping the buffer failed\n");
+		goto transfer_out_error;
+	}
+
+	rc = queue_transfer(req, handle, list, TO_DDR);
+
+	if (rc < 0) {
+		pr_err("Failed to queue rdm transfer to DDR\n");
+		/* NOTE(review): the buffer is left unmapped on this path —
+		 * confirm callers tolerate a failed transfer after unmap.
+		 */
+		goto transfer_out_error;
+	}
+
+	return 0;
+
+transfer_out_error:
+	return -EINVAL;
+}
+
+/* Populate an unmapped buffer: map it, then queue an RDM transfer of
+ * its contents from DDR into OCMEM. Returns 0 on success.
+ */
+int process_xfer_in(int id, struct ocmem_handle *handle,
+			struct ocmem_map_list *list)
+{
+	struct ocmem_req *req = NULL;
+	int rc = 0;
+
+	req = handle_to_req(handle);
+
+	if (!req)
+		return -EINVAL;
+
+	/* A transfer into OCMEM is only valid for a not-yet-mapped
+	 * buffer.
+	 */
+	if (is_mapped(req)) {
+		pr_err("Buffer is already mapped\n");
+		goto transfer_in_error;
+	}
+
+	rc = process_map(req, req->req_start, req->req_end);
+	if (rc < 0) {
+		pr_err("Mapping the buffer failed\n");
+		goto transfer_in_error;
+	}
+
+	rc = queue_transfer(req, handle, list, TO_OCMEM);
+
+	if (rc < 0) {
+		pr_err("Failed to queue rdm transfer to OCMEM\n");
+		goto transfer_in_error;
+	}
+
+	return 0;
+transfer_in_error:
+	return -EINVAL;
+}
+
+/* Handle a client's response to an OCMEM_ALLOC_SHRINK notification:
+ * free the buffer (size == 0) or shrink it to the requested size, and
+ * signal the waiting evictor when the last pending shrink completes.
+ */
+int process_shrink(int id, struct ocmem_handle *handle, unsigned long size)
+{
+	struct ocmem_req *req = NULL;
+	struct ocmem_buf *buffer = NULL;
+	struct ocmem_eviction_data *edata = NULL;
+	int rc = 0;
+
+	if (is_blocked(id)) {
+		pr_err("Client %d cannot request free\n", id);
+		return -EINVAL;
+	}
+
+	req = handle_to_req(handle);
+	buffer = handle_to_buffer(handle);
+
+	/* buffer is dereferenced just below; guard both lookups */
+	if (!req || !buffer)
+		return -EINVAL;
+
+	if (req->req_start != core_address(id, buffer->addr)) {
+		pr_err("Invalid buffer handle passed for shrink\n");
+		return -EINVAL;
+	}
+
+	edata = req->edata;
+
+	/* A shrink is only meaningful as a response to a pending
+	 * eviction; without eviction data there is nothing to signal.
+	 */
+	if (!edata) {
+		pr_err("Unable to find eviction data for request %p\n", req);
+		return -EINVAL;
+	}
+
+	if (is_tcm(req->owner))
+		do_unmap(req);
+
+	if (size == 0) {
+		pr_info("req %p being shrunk to zero\n", req);
+		rc = do_free(req);
+		if (rc < 0)
+			return -EINVAL;
+	} else {
+		rc = do_shrink(req, size);
+		if (rc < 0)
+			return -EINVAL;
+	}
+
+	edata->pending--;
+	if (edata->pending == 0) {
+		pr_debug("All regions evicted");
+		complete(&edata->completion);
+	}
+
+	return 0;
+}
+
+/* Dispatch a map-list transfer in the requested direction. TCM-backed
+ * clients never transfer. Returns 0 on success, -EINVAL otherwise.
+ */
+int process_xfer(int id, struct ocmem_handle *handle,
+		struct ocmem_map_list *list, int direction)
+{
+	int rc = 0;
+
+	if (is_tcm(id)) {
+		WARN(1, "Mapping operation is invalid for client\n");
+		return -EINVAL;
+	}
+
+	if (direction == TO_DDR)
+		rc = process_xfer_out(id, handle, list);
+	else if (direction == TO_OCMEM)
+		rc = process_xfer_in(id, handle, list);
+	else
+		/* Reject an unknown direction instead of silently
+		 * reporting success.
+		 */
+		rc = -EINVAL;
+	return rc;
+}
+
+/* Placeholder work function for ocmem_eviction_wq; eviction is
+ * presently driven synchronously from process_evict().
+ */
+int ocmem_eviction_thread(struct work_struct *work)
+{
+	return 0;
+}
+
+/* Evict every region of lower priority than client @id: send each
+ * victim an OCMEM_ALLOC_SHRINK notification, then wait until all of
+ * them have shrunk (process_shrink() completes the last one).
+ */
+int process_evict(int id)
+{
+	struct ocmem_eviction_data *edata = NULL;
+	int prio = ocmem_client_table[id].priority;
+	struct rb_node *rb_node = NULL;
+	struct ocmem_req *req = NULL;
+	struct ocmem_buf buffer;
+	int j = 0;
+
+	edata = kzalloc(sizeof(struct ocmem_eviction_data), GFP_ATOMIC);
+	/* The original code dereferenced the allocation unchecked */
+	if (!edata)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&edata->victim_list);
+	INIT_LIST_HEAD(&edata->req_list);
+	/* The completion must be initialized before any SHRINK
+	 * notification is dispatched: a fast client can reach
+	 * process_shrink() — and thus complete() — before this
+	 * function gets to the wait below.
+	 */
+	init_completion(&edata->completion);
+	edata->prio = prio;
+	edata->pending = 0;
+	edata->passive = 1;
+	evictions[id] = edata;
+
+	mutex_lock(&sched_mutex);
+
+	for (rb_node = rb_first(&sched_tree); rb_node;
+				rb_node = rb_next(rb_node)) {
+		struct ocmem_region *tmp_region = NULL;
+		tmp_region = rb_entry(rb_node, struct ocmem_region, region_rb);
+		if (tmp_region->max_prio < prio) {
+			/* NOTE(review): find_req_match() is given a client
+			 * id here but a req_id in __sched_shrink() — confirm
+			 * which key it actually expects.
+			 */
+			for (j = id - 1; j > NO_PRIO; j--) {
+				req = find_req_match(j, tmp_region);
+				if (req) {
+					pr_info("adding %p to eviction list\n",
+							tmp_region);
+					list_add_tail(
+						&tmp_region->eviction_list,
+						&edata->victim_list);
+					list_add_tail(
+						&req->eviction_list,
+						&edata->req_list);
+					edata->pending++;
+					req->edata = edata;
+					buffer.addr = req->req_start;
+					buffer.len = 0x0;
+					dispatch_notification(req->owner,
+						OCMEM_ALLOC_SHRINK, &buffer);
+				}
+			}
+		} else {
+			pr_info("skipping %p from eviction\n", tmp_region);
+		}
+	}
+	mutex_unlock(&sched_mutex);
+	pr_debug("Waiting for all regions to be shrunk\n");
+	if (edata->pending > 0)
+		wait_for_completion(&edata->completion);
+	return 0;
+}
+
 static int do_allocate(struct ocmem_req *req, bool can_block, bool can_wait)
 {
 	int rc = 0;
@@ -1113,6 +1504,31 @@
 	return -EINVAL;
 }
 
+/* Re-queue every request that was evicted on behalf of client @id for
+ * allocation, release the eviction bookkeeping and kick the scheduler.
+ */
+int process_restore(int id)
+{
+	struct ocmem_req *req = NULL;
+	struct ocmem_req *next = NULL;
+	struct ocmem_eviction_data *edata = evictions[id];
+
+	/* Nothing was evicted for this client */
+	if (!edata)
+		return 0;
+
+	list_for_each_entry_safe(req, next, &edata->req_list, eviction_list)
+	{
+		/* NOTE(review): req is never NULL inside
+		 * list_for_each_entry_safe, so this check is dead; also the
+		 * entry is deleted from req->sched_list rather than from the
+		 * eviction_list being walked, yet edata (that list's head)
+		 * is freed below — confirm the stale eviction_list links on
+		 * each req are never traversed afterwards.
+		 */
+		if (req) {
+			pr_debug("ocmem: Fetched evicted request %p\n",
+								req);
+			list_del(&req->sched_list);
+			req->op = SCHED_ALLOCATE;
+			sched_enqueue(req);
+		}
+	}
+	kfree(edata);
+	evictions[id] = NULL;
+	pr_debug("Restore all evicted regions\n");
+	ocmem_schedule_pending();
+	return 0;
+}
 
 int process_allocate(int id, struct ocmem_handle *handle,
 			unsigned long min, unsigned long max,
@@ -1185,6 +1601,7 @@
 
 	rc = do_allocate(req, true, false);
 
+
 	if (rc < 0)
 		goto do_allocate_error;
 
@@ -1251,5 +1668,13 @@
 	for (i = MIN_PRIO; i < MAX_OCMEM_PRIO; i++)
 		INIT_LIST_HEAD(&sched_queue[i]);
 
+	mutex_init(&rdm_mutex);
+	INIT_LIST_HEAD(&rdm_queue);
+	ocmem_rdm_wq = alloc_workqueue("ocmem_rdm_wq", 0, 0);
+	if (!ocmem_rdm_wq)
+		return -ENOMEM;
+	ocmem_eviction_wq = alloc_workqueue("ocmem_eviction_wq", 0, 0);
+	if (!ocmem_eviction_wq)
+		return -ENOMEM;
 	return 0;
 }
diff --git a/drivers/media/dvb/dvb-core/demux.h b/drivers/media/dvb/dvb-core/demux.h
index 9ad79e9..071d209 100644
--- a/drivers/media/dvb/dvb-core/demux.h
+++ b/drivers/media/dvb/dvb-core/demux.h
@@ -168,6 +168,7 @@
 struct dmx_ts_feed {
 	int is_filtering; /* Set to non-zero when filtering in progress */
 	struct dmx_demux *parent; /* Back-pointer */
+	const struct dvb_ringbuffer *buffer;
 	void *priv; /* Pointer to private data of the API client */
 	int (*set) (struct dmx_ts_feed *feed,
 		    u16 pid,
@@ -184,6 +185,8 @@
 			struct dmx_buffer_status *dmx_buffer_status);
 	int (*data_ready_cb)(struct dmx_ts_feed *feed,
 			dmx_ts_data_ready_cb callback);
+	int (*notify_data_read)(struct dmx_ts_feed *feed,
+			u32 bytes_num);
 };
 
 /*--------------------------------------------------------------------------*/
@@ -195,6 +198,7 @@
 	u8 filter_mask [DMX_MAX_FILTER_SIZE];
 	u8 filter_mode [DMX_MAX_FILTER_SIZE];
 	struct dmx_section_feed* parent; /* Back-pointer */
+	const struct dvb_ringbuffer *buffer;
 	void* priv; /* Pointer to private data of the API client */
 };
 
@@ -227,6 +231,8 @@
 	int (*stop_filtering) (struct dmx_section_feed* feed);
 	int (*data_ready_cb)(struct dmx_section_feed *feed,
 			dmx_section_data_ready_cb callback);
+	int (*notify_data_read)(struct dmx_section_filter *filter,
+			u32 bytes_num);
 };
 
 /*--------------------------------------------------------------------------*/
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 433e796..8353f6f 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -60,6 +60,35 @@
 	return dvb_ringbuffer_write(buf, src, len);
 }
 
+/* Propagate a "bytes consumed" notification from demux-dev down to the
+ * underlying demux driver, for either a section or a TS filter. A NULL
+ * filter (no active DVR feed) is a no-op.
+ */
+static inline void dvb_dmxdev_notify_data_read(struct dmxdev_filter *filter,
+					int bytes_read)
+{
+	if (!filter)
+		return;
+
+	if (filter->type == DMXDEV_TYPE_SEC) {
+		if (filter->feed.sec->notify_data_read)
+			filter->feed.sec->notify_data_read(
+						filter->filter.sec,
+						bytes_read);
+	} else {
+		struct dmxdev_feed *feed;
+
+		/*
+		 * All feeds of same demux-handle share the same output
+		 * buffer, it is enough to notify on the buffer status
+		 * on one of the feeds
+		 */
+		/* NOTE(review): assumes filter->feed.ts is non-empty for
+		 * non-SEC filters — confirm no caller reaches this with an
+		 * empty feed list.
+		 */
+		feed = list_first_entry(&filter->feed.ts,
+					struct dmxdev_feed, next);
+
+		if (feed->ts->notify_data_read)
+			feed->ts->notify_data_read(
+						feed->ts,
+						bytes_read);
+	}
+}
+
 static inline u32 dvb_dmxdev_advance_event_idx(u32 index)
 {
 	index++;
@@ -460,6 +489,7 @@
 		}
 		dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE);
 		dvb_dmxdev_flush_events(&dmxdev->dvr_output_events);
+		dmxdev->dvr_feeds_count = 0;
 
 		dvbdev->readers--;
 	} else if (!dvbdev->writers) {
@@ -719,9 +749,22 @@
 				      buf, count, ppos);
 
 	if (res > 0) {
+		dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, res);
 		spin_lock_irq(&dmxdev->lock);
 		dvb_dmxdev_update_events(&dmxdev->dvr_output_events, res);
 		spin_unlock_irq(&dmxdev->lock);
+	} else if (res == -EOVERFLOW) {
+		/*
+		 * When buffer overflowed, demux-dev flushed the
+		 * buffer and marked the buffer in error state.
+		 * Data from underlying driver is discarded until
+		 * user gets notified that buffer has overflowed.
+		 * Now that the user is notified, notify underlying
+		 * driver that data was flushed from output buffer.
+		 */
+		dvb_dmxdev_notify_data_read(dmxdev->dvr_feed,
+			dmxdev->dvr_flush_data_len);
+		dmxdev->dvr_flush_data_len = 0;
 	}
 
 	return res;
@@ -873,8 +916,20 @@
 
 	res = dvb_dmxdev_remove_event(&dmxdev->dvr_output_events, event);
 
-	if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+	if (event->type == DMX_EVENT_BUFFER_OVERFLOW) {
+		/*
+		 * When buffer overflowed, demux-dev flushed the
+		 * buffer and marked the buffer in error state.
+		 * Data from underlying driver is discarded until
+		 * user gets notified that buffer has overflowed.
+		 * Now that the user is notified, notify underlying
+		 * driver that data was flushed from output buffer.
+		 */
+		dvb_dmxdev_notify_data_read(dmxdev->dvr_feed,
+			dmxdev->dvr_flush_data_len);
+		dmxdev->dvr_flush_data_len = 0;
 		dmxdev->dvr_buffer.error = 0;
+	}
 
 	spin_unlock_irq(&dmxdev->lock);
 
@@ -899,7 +954,23 @@
 	spin_lock_irq(lock);
 
 	dmx_buffer_status->error = buf->error;
-	buf->error = 0;
+	if (buf->error) {
+		if (buf->error == -EOVERFLOW) {
+			/*
+			 * When buffer overflowed, demux-dev flushed the
+			 * buffer and marked the buffer in error state.
+			 * Data from underlying driver is discarded until
+			 * user gets notified that buffer has overflowed.
+			 * Now that the user is notified, notify underlying
+			 * driver that data was flushed from output buffer.
+			 */
+			dvb_dmxdev_notify_data_read(dmxdev->dvr_feed,
+				dmxdev->dvr_flush_data_len);
+			dmxdev->dvr_flush_data_len = 0;
+		}
+
+		buf->error = 0;
+	}
 
 	dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
 	dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
@@ -931,6 +1002,7 @@
 
 	DVB_RINGBUFFER_SKIP(&dmxdev->dvr_buffer, bytes_count);
 
+	dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, bytes_count);
 	spin_lock_irq(&dmxdev->lock);
 	dvb_dmxdev_update_events(&dmxdev->dvr_output_events, bytes_count);
 	spin_unlock_irq(&dmxdev->lock);
@@ -1183,27 +1255,39 @@
 		struct dmxdev_feed *feed;
 		int ret;
 
-		/* Ask for status of decoder's buffer from underlying HW */
-		list_for_each_entry(feed, &dmxdevfilter->feed.ts,
-							next) {
-			if (feed->ts->get_decoder_buff_status)
-				ret = feed->ts->get_decoder_buff_status(
-						feed->ts,
-						dmx_buffer_status);
-			else
-				ret = -ENODEV;
+		/* Only one feed should be in the list in case of decoder */
+		feed = list_first_entry(&dmxdevfilter->feed.ts,
+					struct dmxdev_feed, next);
 
-			/*
-			 * There should not be more than one ts feed
-			 * in the list as this is DECODER feed.
-			 */
-			spin_unlock_irq(&dmxdevfilter->dev->lock);
-			return ret;
-		}
+		/* Ask for status of decoder's buffer from underlying HW */
+		if (feed->ts->get_decoder_buff_status)
+			ret = feed->ts->get_decoder_buff_status(
+					feed->ts,
+					dmx_buffer_status);
+		else
+			ret = -ENODEV;
+
+		spin_unlock_irq(&dmxdevfilter->dev->lock);
+		return ret;
 	}
 
 	dmx_buffer_status->error = buf->error;
-	buf->error = 0;
+	if (buf->error) {
+		if (buf->error == -EOVERFLOW) {
+			/*
+			 * When buffer overflowed, demux-dev flushed the
+			 * buffer and marked the buffer in error state.
+			 * Data from underlying driver is discarded until
+			 * user gets notified that buffer has overflowed.
+			 * Now that the user is notified, notify underlying
+			 * driver that data was flushed from output buffer.
+			 */
+			dvb_dmxdev_notify_data_read(dmxdevfilter,
+				dmxdevfilter->flush_data_len);
+			dmxdevfilter->flush_data_len = 0;
+		}
+		buf->error = 0;
+	}
 
 	dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf);
 	dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf);
@@ -1234,6 +1318,7 @@
 
 	DVB_RINGBUFFER_SKIP(&dmxdevfilter->buffer, bytes_count);
 
+	dvb_dmxdev_notify_data_read(dmxdevfilter, bytes_count);
 	spin_lock_irq(&dmxdevfilter->dev->lock);
 	dvb_dmxdev_update_events(&dmxdevfilter->events, bytes_count);
 	spin_unlock_irq(&dmxdevfilter->dev->lock);
@@ -1252,8 +1337,20 @@
 
 	res = dvb_dmxdev_remove_event(&dmxdevfilter->events, event);
 
-	if (event->type == DMX_EVENT_BUFFER_OVERFLOW)
+	if (event->type == DMX_EVENT_BUFFER_OVERFLOW) {
+		/*
+		 * When buffer overflowed, demux-dev flushed the
+		 * buffer and marked the buffer in error state.
+		 * Data from underlying driver is discarded until
+		 * user gets notified that buffer has overflowed.
+		 * Now that the user is notified, notify underlying
+		 * driver that data was flushed from output buffer.
+		 */
+		dvb_dmxdev_notify_data_read(dmxdevfilter,
+			dmxdevfilter->flush_data_len);
+		dmxdevfilter->flush_data_len = 0;
 		dmxdevfilter->buffer.error = 0;
+	}
 
 	spin_unlock_irq(&dmxdevfilter->dev->lock);
 
@@ -1334,6 +1431,8 @@
 					      buffer2_len);
 
 	if (ret < 0) {
+		dmxdevfilter->flush_data_len =
+			dvb_ringbuffer_avail(&dmxdevfilter->buffer);
 		dvb_dmxdev_flush_output(&dmxdevfilter->buffer,
 			&dmxdevfilter->events);
 		dmxdevfilter->buffer.error = ret;
@@ -1372,6 +1471,7 @@
 	struct dmxdev_events_queue *events;
 	struct dmx_filter_event event;
 	int ret;
+	u32 *flush_data_len;
 
 	spin_lock(&dmxdevfilter->dev->lock);
 	if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
@@ -1383,9 +1483,11 @@
 	    || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) {
 		buffer = &dmxdevfilter->buffer;
 		events = &dmxdevfilter->events;
+		flush_data_len = &dmxdevfilter->flush_data_len;
 	} else {
 		buffer = &dmxdevfilter->dev->dvr_buffer;
 		events = &dmxdevfilter->dev->dvr_output_events;
+		flush_data_len = &dmxdevfilter->dev->dvr_flush_data_len;
 	}
 
 	if (buffer->error) {
@@ -1435,6 +1537,8 @@
 			ret = dvb_dmxdev_buffer_write(buffer, buffer2,
 								buffer2_len);
 		if (ret < 0) {
+			*flush_data_len =
+				dvb_ringbuffer_avail(&dmxdevfilter->buffer);
 			dvb_dmxdev_flush_output(buffer, events);
 			buffer->error = ret;
 
@@ -1507,6 +1611,8 @@
 
 	if ((DMX_OVERRUN_ERROR == dmx_data_ready->status) ||
 		(dmx_data_ready->data_length > free)) {
+		dmxdevfilter->flush_data_len =
+			dvb_ringbuffer_avail(&dmxdevfilter->buffer);
 		dvb_dmxdev_flush_output(&dmxdevfilter->buffer,
 				&dmxdevfilter->events);
 
@@ -1548,6 +1654,7 @@
 	struct dvb_ringbuffer *buffer;
 	struct dmxdev_events_queue *events;
 	struct dmx_filter_event event;
+	u32 *flush_data_len;
 	int free;
 
 	spin_lock(&dmxdevfilter->dev->lock);
@@ -1560,9 +1667,11 @@
 	if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) {
 		buffer = &dmxdevfilter->buffer;
 		events = &dmxdevfilter->events;
+		flush_data_len = &dmxdevfilter->flush_data_len;
 	} else {
 		buffer = &dmxdevfilter->dev->dvr_buffer;
 		events = &dmxdevfilter->dev->dvr_output_events;
+		flush_data_len = &dmxdevfilter->dev->dvr_flush_data_len;
 	}
 
 	if (dmx_data_ready->status == DMX_OK_PCR) {
@@ -1592,6 +1701,8 @@
 
 	if ((DMX_OVERRUN_ERROR == dmx_data_ready->status) ||
 		(dmx_data_ready->data_length > free)) {
+		*flush_data_len =
+				dvb_ringbuffer_avail(&dmxdevfilter->buffer);
 		dvb_dmxdev_flush_output(buffer, events);
 
 		dprintk("dmxdev: buffer overflow\n");
@@ -1761,6 +1872,12 @@
 	case DMXDEV_TYPE_PES:
 		dvb_dmxdev_feed_stop(dmxdevfilter);
 		demux = dmxdevfilter->dev->demux;
+		if (dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) {
+			dmxdevfilter->dev->dvr_feeds_count--;
+			if (!dmxdevfilter->dev->dvr_feeds_count)
+				dmxdevfilter->dev->dvr_feed = NULL;
+		}
+
 		list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) {
 			demux->release_ts_feed(demux, feed->ts);
 			feed->ts = NULL;
@@ -1844,6 +1961,15 @@
 	tsfeed = feed->ts;
 	tsfeed->priv = filter;
 
+	if (filter->params.pes.output == DMX_OUT_TS_TAP) {
+		tsfeed->buffer = &dmxdev->dvr_buffer;
+		if (!dmxdev->dvr_feeds_count)
+			dmxdev->dvr_feed = filter;
+		dmxdev->dvr_feeds_count++;
+	} else {
+		tsfeed->buffer = &filter->buffer;
+	}
+
 	if (tsfeed->data_ready_cb) {
 		ret = tsfeed->data_ready_cb(tsfeed, dvb_dmxdev_ts_event_cb);
 
@@ -1979,6 +2105,7 @@
 		}
 
 		(*secfilter)->priv = filter;
+		(*secfilter)->buffer = &filter->buffer;
 
 		memcpy(&((*secfilter)->filter_value[3]),
 		       &(para->filter.filter[1]), DMX_FILTER_SIZE - 1);
@@ -2278,9 +2405,22 @@
 					    buf, count, ppos);
 
 	if (ret > 0) {
+		dvb_dmxdev_notify_data_read(dmxdevfilter, ret);
 		spin_lock_irq(&dmxdevfilter->dev->lock);
 		dvb_dmxdev_update_events(&dmxdevfilter->events, ret);
 		spin_unlock_irq(&dmxdevfilter->dev->lock);
+	} else if (ret == -EOVERFLOW) {
+		/*
+		 * When buffer overflowed, demux-dev flushed the
+		 * buffer and marked the buffer in error state.
+		 * Data from underlying driver is discarded until
+		 * user gets notified that buffer has overflowed.
+		 * Now that the user is notified, notify underlying
+		 * driver that data was flushed from output buffer.
+		 */
+		dvb_dmxdev_notify_data_read(dmxdevfilter->dev->dvr_feed,
+			dmxdevfilter->flush_data_len);
+		dmxdevfilter->flush_data_len = 0;
 	}
 
 	mutex_unlock(&dmxdevfilter->mutex);
diff --git a/drivers/media/dvb/dvb-core/dmxdev.h b/drivers/media/dvb/dvb-core/dmxdev.h
index 6fa7054..9fd900e 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.h
+++ b/drivers/media/dvb/dvb-core/dmxdev.h
@@ -113,6 +113,7 @@
 	enum dmxdev_state state;
 	struct dmxdev *dev;
 	struct dvb_ringbuffer buffer;
+	u32 flush_data_len;
 
 	struct mutex mutex;
 
@@ -153,6 +154,9 @@
 
 	struct dvb_ringbuffer dvr_buffer;
 	struct dmxdev_events_queue dvr_output_events;
+	struct dmxdev_filter *dvr_feed;
+	u32 dvr_flush_data_len;
+	int dvr_feeds_count;
 
 	struct dvb_ringbuffer dvr_input_buffer;
 	struct workqueue_struct *dvr_input_workqueue;
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 0be6a22..0bdf6cb 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -1137,6 +1137,7 @@
 	(*ts_feed)->set_indexing_params = dmx_ts_set_indexing_params;
 	(*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status;
 	(*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb;
+	(*ts_feed)->notify_data_read = NULL;
 
 	if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
 		feed->state = DMX_STATE_FREE;
@@ -1434,6 +1435,7 @@
 	(*feed)->stop_filtering = dmx_section_feed_stop_filtering;
 	(*feed)->release_filter = dmx_section_feed_release_filter;
 	(*feed)->data_ready_cb = dmx_section_feed_data_ready_cb;
+	(*feed)->notify_data_read = NULL;
 
 	mutex_unlock(&dvbdmx->mutex);
 	return 0;
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index d045e69..703d65d 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -2558,17 +2558,19 @@
 	struct msm_fb_data_type *mfd, uint32 perf_level)
 {
 	u32 clk_rate = mfd->panel_info.clk_rate;
-	u32 pull_mode = 0, use_blt = 0;
+	u32 blt_chq_req  = 0, use_blt = 0;
 
-	if (mfd->panel_info.type == MIPI_VIDEO_PANEL)
+	if ((mfd->panel_info.type == MIPI_VIDEO_PANEL) ||
+		 (mfd->panel_info.type == MIPI_CMD_PANEL))
 		clk_rate = (&mfd->panel_info.mipi)->dsi_pclk_rate;
 
 	if ((mfd->panel_info.type == LCDC_PANEL) ||
 	    (mfd->panel_info.type == MIPI_VIDEO_PANEL) ||
-	    (mfd->panel_info.type == DTV_PANEL))
-		pull_mode = 1;
+	    (mfd->panel_info.type == DTV_PANEL) ||
+	    (mfd->panel_info.type == MIPI_CMD_PANEL))
+		blt_chq_req = 1;
 
-	if (pull_mode && (req->src_rect.h > req->dst_rect.h ||
+	if (blt_chq_req && (req->src_rect.h > req->dst_rect.h ||
 		req->src_rect.w > req->dst_rect.w)) {
 		if (mdp4_overlay_validate_downscale(req, mfd, perf_level,
 			clk_rate))
@@ -2798,10 +2800,15 @@
 		mdp4_mixer_stage_down(pipe);
 
 		if (pipe->mixer_num == MDP4_MIXER0) {
+			mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
+			mdp4_overlay_update_blt_mode(mfd);
 #ifdef CONFIG_FB_MSM_MIPI_DSI
 			if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
-				if (mfd->panel_power_on)
+				if (mfd->panel_power_on) {
 					mdp4_dsi_cmd_overlay_restore();
+					mdp4_dsi_cmd_dma_busy_wait(mfd);
+					mdp4_dsi_blt_dmap_busy_wait(mfd);
+				}
 			} else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
 				pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
 				if (mfd->panel_power_on)
@@ -2810,8 +2817,11 @@
 			}
 #else
 			if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-				if (mfd->panel_power_on)
+				if (mfd->panel_power_on) {
 					mdp4_mddi_overlay_restore();
+					mdp4_mddi_dma_busy_wait(mfd);
+					mdp4_mddi_blt_dmap_busy_wait(mfd);
+				}
 			}
 #endif
 			else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
@@ -2819,8 +2829,6 @@
 				if (mfd->panel_power_on)
 					mdp4_overlay_lcdc_vsync_push(mfd, pipe);
 			}
-			mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
-			mdp4_overlay_update_blt_mode(mfd);
 			if (!mfd->use_ov0_blt)
 				mdp4_free_writeback_buf(mfd, MDP4_MIXER0);
 		} else {	/* mixer1, DTV, ATV */
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index a0cad55..374e875 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -877,7 +877,7 @@
 		.name = "MSM8930 Media2",
 		.stream_name = "MultiMedia2",
 		.cpu_dai_name	= "MultiMedia2",
-		.platform_name  = "msm-pcm-dsp",
+		.platform_name  = "msm-multi-ch-pcm-dsp",
 		.dynamic = 1,
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",