Merge "clk: qcom: Remove GPU_CC_GX_CXO_CLK control for SDM845"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 6451b34..76b6141 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -157,6 +157,8 @@
 - RUMI device:
   compatible = "qcom,rumi"
 
+- VR device:
+  compatible = "qcom,qvr"
 
 
 Boards (SoC type + board variant):
@@ -267,6 +269,7 @@
 compatible = "qcom,sdm845-mtp"
 compatible = "qcom,sdm845-mtp"
 compatible = "qcom,sdm845-qrd"
+compatible = "qcom,sdm845-qvr"
 compatible = "qcom,sdm670-rumi"
 compatible = "qcom,sdm670-cdp"
 compatible = "qcom,sdm670-mtp"
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 0f8dc27..a014dac 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -96,6 +96,11 @@
 		  retention. No cache invalidation operations involving asid
 		  may be used.
 
+- qcom,actlr:
+		  An array of <sid mask actlr-setting>.
+		  Any sid X for which (X & ~mask) == sid will be programmed with the
+		  given actlr-setting.
+
 - qcom,deferred-regulator-disable-delay : The time delay for deferred regulator
                   disable in ms. In case of unmap call, regulator is
                   enabled/disabled. This may introduce additional delay. For
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 771896f..cf61b80 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -62,6 +62,17 @@
 	      8 - SIGSEGV faults
 	     16 - SIGBUS faults
 
+config FORCE_PAGES
+	bool "Force lowmem to be mapped with 4K pages"
+        help
+          There are some advanced debug features that can only be done when
+          memory is mapped with pages instead of sections. Enable this option
+          to always map lowmem with pages. This may have a performance
+          cost due to increased TLB pressure.
+
+          If unsure say N.
+
+
 # These options are only for real kernel hackers who want to get their hands dirty.
 config DEBUG_LL
 	bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 66003a8..d1df9cc 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -363,6 +363,7 @@
 #define writel_relaxed(v, c)	__raw_writel((__force u32) cpu_to_le32(v), c)
 #define writeq_relaxed(v, c)	__raw_writeq((__force u64) cpu_to_le64(v), c)
 #define writeb_relaxed_no_log(v, c)	((void)__raw_writeb_no_log((v), (c)))
+#define writew_relaxed_no_log(v, c) __raw_writew_no_log((__force u16) cpu_to_le16(v), c)
 #define writel_relaxed_no_log(v, c) __raw_writel_no_log((__force u32) cpu_to_le32(v), c)
 #define writeq_relaxed_no_log(v, c) __raw_writeq_no_log((__force u64) cpu_to_le64(v), c)
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cb2c9f4..baf63ea 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -505,6 +505,15 @@
 		struct map_desc map;
 		unsigned long addr;
 
+		/*
+		 * Make start and end PMD_SIZE aligned, observing memory
+		 * boundaries
+		 */
+		if (memblock_is_memory(start & PMD_MASK))
+			start = start & PMD_MASK;
+		if (memblock_is_memory(ALIGN(end, PMD_SIZE)))
+			end = ALIGN(end, PMD_SIZE);
+
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
 		if (start >= end)
@@ -525,8 +534,13 @@
 		 * and ensures that this code is architecturally compliant.
 		 */
 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
-		     addr += PMD_SIZE)
-			pmd_clear(pmd_off_k(addr));
+		     addr += PMD_SIZE) {
+			pmd_t *pmd;
+
+			pmd = pmd_off_k(addr);
+			if (pmd_bad(*pmd))
+				pmd_clear(pmd);
+		}
 
 		flush_tlb_kernel_range(__phys_to_virt(start),
 				       __phys_to_virt(end));
@@ -697,9 +711,14 @@
 
 static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
 {
-	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
-			pgprot_writecombine(prot) :
-			pgprot_dmacoherent(prot);
+	if (attrs & DMA_ATTR_WRITE_COMBINE)
+		prot = pgprot_writecombine(prot);
+	else if (attrs & DMA_ATTR_STRONGLY_ORDERED)
+		prot = pgprot_stronglyordered(prot);
+	/* if non-consistent just pass back what was given */
+	else if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
+		prot = pgprot_dmacoherent(prot);
+
 	return prot;
 }
 
@@ -955,6 +974,7 @@
 	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
 	struct vm_struct *area;
 
+	size = PAGE_ALIGN(size);
 	remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);
 
 	area = find_vm_area(remapped_addr);
@@ -965,6 +985,8 @@
 	}
 
 	vunmap(remapped_addr);
+	flush_tlb_kernel_range((unsigned long)remapped_addr,
+			(unsigned long)(remapped_addr + size));
 }
 /*
  * Create userspace mapping for the DMA-coherent memory.
@@ -1912,7 +1934,31 @@
 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
+	struct scatterlist *s;
+	int i;
+	size_t ret;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = 0, current_offset = 0;
+	dma_addr_t iova;
+	int prot = __dma_direction_to_prot(dir);
+
+	for_each_sg(sg, s, nents, i)
+		total_length += s->length;
+
+	iova = __alloc_iova(mapping, total_length);
+	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
+	if (ret != total_length) {
+		__free_iova(mapping, iova, total_length);
+		return 0;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = iova + current_offset;
+		s->dma_length = total_length - current_offset;
+		current_offset += s->length;
+	}
+
+	return nents;
 }
 
 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
@@ -1963,7 +2009,15 @@
 			enum dma_data_direction dir,
 			unsigned long attrs)
 {
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = sg_dma_len(sg);
+	dma_addr_t iova = sg_dma_address(sg);
+
+	total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, total_length);
+	__free_iova(mapping, iova, total_length);
 }
 
 /**
@@ -2078,9 +2132,6 @@
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
 
-	if (!iova)
-		return;
-
 	iommu_unmap(mapping->domain, iova, len);
 	__free_iova(mapping, iova, len);
 }
@@ -2178,9 +2229,6 @@
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
-		return;
-
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -2192,9 +2240,6 @@
 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
-		return;
-
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 51496dd..b46d914 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -630,6 +630,9 @@
 	pmdval_t mask;
 	pmdval_t prot;
 	pmdval_t clear;
+	pteval_t ptemask;
+	pteval_t pteprot;
+	pteval_t pteclear;
 };
 
 /* First section-aligned location at or after __start_rodata. */
@@ -643,6 +646,8 @@
 		.end	= (unsigned long)_stext,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 	/* Make init RW (set NX). */
 	{
@@ -651,6 +656,8 @@
 		.end	= (unsigned long)_sdata,
 		.mask	= ~PMD_SECT_XN,
 		.prot	= PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 	/* Make rodata NX (set RO in ro_perms below). */
 	{
@@ -659,6 +666,8 @@
 		.end    = (unsigned long)__init_begin,
 		.mask   = ~PMD_SECT_XN,
 		.prot   = PMD_SECT_XN,
+		.ptemask = ~L_PTE_XN,
+		.pteprot = L_PTE_XN,
 	},
 };
 
@@ -676,6 +685,8 @@
 		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
 		.clear  = PMD_SECT_AP_WRITE,
 #endif
+		.ptemask = ~L_PTE_RDONLY,
+		.pteprot = L_PTE_RDONLY,
 	},
 };
 
@@ -684,6 +695,35 @@
  * copied into each mm). During startup, this is the init_mm. Is only
  * safe to be called with preemption disabled, as under stop_machine().
  */
+struct pte_data {
+	pteval_t mask;
+	pteval_t val;
+};
+
+static int __pte_update(pte_t *ptep, pgtable_t token, unsigned long addr,
+			void *d)
+{
+	struct pte_data *data = d;
+	pte_t pte = *ptep;
+
+	pte = __pte((pte_val(*ptep) & data->mask) | data->val);
+	set_pte_ext(ptep, pte, 0);
+
+	return 0;
+}
+
+static inline void pte_update(unsigned long addr, pteval_t mask,
+				  pteval_t prot, struct mm_struct *mm)
+{
+	struct pte_data data;
+
+	data.mask = mask;
+	data.val = prot;
+
+	apply_to_page_range(mm, addr, SECTION_SIZE, __pte_update, &data);
+	flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
 static inline void section_update(unsigned long addr, pmdval_t mask,
 				  pmdval_t prot, struct mm_struct *mm)
 {
@@ -732,11 +772,21 @@
 
 		for (addr = perms[i].start;
 		     addr < perms[i].end;
-		     addr += SECTION_SIZE)
-			section_update(addr, perms[i].mask,
-				set ? perms[i].prot : perms[i].clear, mm);
-	}
+		     addr += SECTION_SIZE) {
+			pmd_t *pmd;
 
+			pmd = pmd_offset(pud_offset(pgd_offset(mm, addr),
+						addr), addr);
+			if (pmd_bad(*pmd))
+				section_update(addr, perms[i].mask,
+					       set ? perms[i].prot : perms[i].clear,
+					       mm);
+			else
+				pte_update(addr, perms[i].ptemask,
+					       set ? perms[i].pteprot : perms[i].pteclear,
+					       mm);
+		}
+	}
 }
 
 static void update_sections_early(struct section_perm perms[], int n)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ddc72dc..219aa9c 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1633,6 +1633,119 @@
 
 #endif
 
+#ifdef CONFIG_FORCE_PAGES
+/*
+ * remap a PMD into pages
+ * We split a single pmd here; none of this two-pmd nonsense.
+ */
+static noinline void __init split_pmd(pmd_t *pmd, unsigned long addr,
+				unsigned long end, unsigned long pfn,
+				const struct mem_type *type)
+{
+	pte_t *pte, *start_pte;
+	pmd_t *base_pmd;
+
+	base_pmd = pmd_offset(
+			pud_offset(pgd_offset(&init_mm, addr), addr), addr);
+
+	if (pmd_none(*base_pmd) || pmd_bad(*base_pmd)) {
+		start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Following is needed when new pte is allocated for pmd[1]
+		 * cases, which may happen when base (start) address falls
+		 * under pmd[1].
+		 */
+		if (addr & SECTION_SIZE)
+			start_pte += pte_index(addr);
+#endif
+	} else {
+		start_pte = pte_offset_kernel(base_pmd, addr);
+	}
+
+	pte = start_pte;
+
+	do {
+		set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
+		pfn++;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	*pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
+	mb(); /* let pmd be programmed */
+	flush_pmd_entry(pmd);
+	flush_tlb_all();
+}
+
+/*
+ * It's significantly easier to remap as pages later after all memory is
+ * mapped. Everything is sections so all we have to do is split
+ */
+static void __init remap_pages(void)
+{
+	struct memblock_region *reg;
+
+	for_each_memblock(memory, reg) {
+		phys_addr_t phys_start = reg->base;
+		phys_addr_t phys_end = reg->base + reg->size;
+		unsigned long addr = (unsigned long)__va(phys_start);
+		unsigned long end = (unsigned long)__va(phys_end);
+		pmd_t *pmd = NULL;
+		unsigned long next;
+		unsigned long pfn = __phys_to_pfn(phys_start);
+		bool fixup = false;
+		unsigned long saved_start = addr;
+
+		if (phys_start > arm_lowmem_limit)
+			break;
+		if (phys_end > arm_lowmem_limit)
+			end = (unsigned long)__va(arm_lowmem_limit);
+		if (phys_start >= phys_end)
+			break;
+
+		pmd = pmd_offset(
+			pud_offset(pgd_offset(&init_mm, addr), addr), addr);
+
+#ifndef	CONFIG_ARM_LPAE
+		if (addr & SECTION_SIZE) {
+			fixup = true;
+			pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
+			pmd++;
+		}
+
+		if (end & SECTION_SIZE)
+			pmd_empty_section_gap(end);
+#endif
+
+		do {
+			next = addr + SECTION_SIZE;
+
+			if (pmd_none(*pmd) || pmd_bad(*pmd))
+				split_pmd(pmd, addr, next, pfn,
+						&mem_types[MT_MEMORY_RWX]);
+			pmd++;
+			pfn += SECTION_SIZE >> PAGE_SHIFT;
+
+		} while (addr = next, addr < end);
+
+		if (fixup) {
+			/*
+			 * Put a faulting page table here to avoid detecting no
+			 * pmd when accessing an odd section boundary. This
+			 * needs to be faulting to help catch errors and avoid
+			 * speculation
+			 */
+			pmd = pmd_off_k(saved_start);
+			pmd[0] = pmd[1] & ~1;
+		}
+	}
+}
+#else
+static void __init remap_pages(void)
+{
+
+}
+#endif
+
 static void __init early_fixmap_shutdown(void)
 {
 	int i;
@@ -1676,6 +1789,7 @@
 	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
 	early_fixmap_shutdown();
+	remap_pages();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index d19b1ad..0b26a84 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -52,11 +52,13 @@
 	if (!numpages)
 		return 0;
 
-	if (start < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
+	if (!IS_ENABLED(CONFIG_FORCE_PAGES)) {
+		if (start < MODULES_VADDR || start >= MODULES_END)
+			return -EINVAL;
 
-	if (end < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
+		if (end < MODULES_VADDR || start >= MODULES_END)
+			return -EINVAL;
+	}
 
 	data.set_mask = set_mask;
 	data.clear_mask = clear_mask;
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index c18ea56..b37c377 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -7,6 +7,7 @@
 		sdm845-cdp-overlay.dtbo \
 		sdm845-mtp-overlay.dtbo \
 		sdm845-qrd-overlay.dtbo \
+		sdm845-qvr-overlay.dtbo \
 		sdm845-v2-cdp-overlay.dtbo \
 		sdm845-v2-mtp-overlay.dtbo \
 		sdm845-v2-qrd-overlay.dtbo \
@@ -19,6 +20,7 @@
 sdm845-cdp-overlay.dtbo-base := sdm845.dtb
 sdm845-mtp-overlay.dtbo-base := sdm845.dtb
 sdm845-qrd-overlay.dtbo-base := sdm845.dtb
+sdm845-qvr-overlay.dtbo-base := sdm845.dtb
 sdm845-v2-cdp-overlay.dtbo-base := sdm845-v2.dtb
 sdm845-v2-mtp-overlay.dtbo-base := sdm845-v2.dtb
 sdm845-v2-qrd-overlay.dtbo-base := sdm845-v2.dtb
@@ -37,6 +39,7 @@
 	sdm845-v2-cdp.dtb \
 	sdm845-qrd.dtb \
 	sdm845-v2-qrd.dtb \
+	sdm845-qvr.dtb \
 	sdm845-4k-panel-mtp.dtb \
 	sdm845-4k-panel-cdp.dtb \
 	sdm845-4k-panel-qrd.dtb \
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 56e74be..5de0e44 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -343,3 +343,14 @@
 		dma-coherent;
 	};
 };
+
+&apps_smmu {
+	qcom,actlr =	<0x0000 0x3ff 0x3>,
+			<0x0400 0x3ff 0x3>,
+			<0x0800 0x3ff 0x103>,
+			<0x0c00 0x3ff 0x103>,
+			<0x1000 0x3ff 0x103>,
+			<0x1400 0x3ff 0x3>,
+			<0x1800 0x3ff 0x3>,
+			<0x1c00 0x3ff 0x3>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index f6cd37b..42c072b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -303,6 +303,23 @@
 		compatible = "simple-bus";
 	};
 
+	firmware: firmware {
+		android {
+			compatible = "android,firmware";
+
+			fstab {
+				compatible = "android,fstab";
+				vendor {
+					compatible = "android,vendor";
+					dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
+					type = "ext4";
+					mnt_flags = "ro,barrier=1,discard";
+					fsmgr_flags = "wait,slotselect";
+				};
+			};
+		};
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
index 9208302..9bd1d54 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
@@ -51,8 +51,9 @@
 	qcom,hph-en1-gpio = <&tavil_hph_en1>;
 	qcom,tavil-mclk-clk-freq = <9600000>;
 
-	asoc-codec = <&stub_codec>;
-	asoc-codec-names = "msm-stub-codec.1";
+	asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+	asoc-codec-names = "msm-stub-codec.1",
+			   "msm-ext-disp-audio-codec-rx";
 
 	qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
 	qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index dd82ad7..a5c6d84 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -31,6 +31,7 @@
 	snd_934x: sound-tavil {
 		compatible = "qcom,sdm845-asoc-snd-tavil";
 		qcom,model = "sdm845-tavil-snd-card";
+		qcom,ext-disp-audio-rx;
 		qcom,wcn-btfm;
 		qcom,mi2s-audio-intf;
 		qcom,auxpcm-audio-intf;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index b34751b..19d69e9 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -264,8 +264,14 @@
 		label = "kgsl-gmu";
 		compatible = "qcom,gpu-gmu";
 
-		reg = <0x506a000 0x30000>, <0xb200000 0x300000>;
-		reg-names = "kgsl_gmu_reg", "kgsl_gmu_pdc_reg";
+		reg =
+			<0x506a000 0x31000>,
+			<0xb200000 0x300000>,
+			<0xc200000 0x10000>;
+		reg-names =
+			"kgsl_gmu_reg",
+			"kgsl_gmu_pdc_reg",
+			"kgsl_gmu_cpr_reg";
 
 		interrupts = <0 304 0>, <0 305 0>;
 		interrupt-names = "kgsl_hfi_irq", "kgsl_gmu_irq";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
new file mode 100644
index 0000000..a47219d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr-overlay.dts
@@ -0,0 +1,29 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm845-qvr.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 v1 QVR";
+	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
+	qcom,msm-id = <321 0x10000>;
+	qcom,board-id = <0x01000B 0x20>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dts b/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
new file mode 100644
index 0000000..f344803
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-qvr.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 QVR";
+	compatible = "qcom,sdm845-qvr", "qcom,sdm845", "qcom,qvr";
+	qcom,board-id = <0x01000B 0x20>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
new file mode 100644
index 0000000..6ea92ee
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -0,0 +1,11 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 88f2e05d..71e49fe 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -536,24 +536,6 @@
 			reg = <0 0x8ab00000 0 0x500000>;
 		};
 
-		pil_ipa_fw_mem: pil_ipa_fw_region@8b000000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x8b000000 0 0x10000>;
-		};
-
-		pil_ipa_gsi_mem: pil_ipa_gsi_region@8b010000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x8b010000 0 0x5000>;
-		};
-
-		pil_gpu_mem: pil_gpu_region@8b015000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x8b015000 0 0x1000>;
-		};
-
 		pil_adsp_mem: pil_adsp_region@8b100000 {
 			compatible = "removed-dma-pool";
 			no-map;
@@ -3691,6 +3673,7 @@
 			 <&clock_rpmh RPMH_RF_CLK3_A>;
 		clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
 		qcom,smmu-support;
+		qcom,keep-radio-on-during-sleep;
 		status = "disabled";
 	};
 };
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index ff696dd..a78b67d 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -278,6 +278,7 @@
 # CONFIG_WIL6210_TRACING is not set
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index a341dd3..5ae3e2c 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -285,6 +285,7 @@
 CONFIG_WIL6210=m
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
 CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -553,6 +554,7 @@
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MSM_REMOTEQDSS=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 8bf3471..48e6c8f 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -905,7 +905,6 @@
 	u8 data;
 	int rlen;
 	u32 const param_len = 0x1;
-	u8 buf[4];
 
 	/**
 	 * Read the device service IRQ vector (Byte 0x201) to determine
@@ -969,10 +968,6 @@
 		ret = dp_link_parse_audio_pattern_params(link);
 	}
 end:
-	/* clear the link request IRQ */
-	buf[0] = 1;
-	drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_REQUEST, buf, 1);
-
 	/**
 	 * Send a DP_TEST_ACK if all link parameters are valid, otherwise send
 	 * a DP_TEST_NAK.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 837fdef..3a8df99 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -933,6 +933,13 @@
 		host->dma_cmd_trigger = DSI_TRIGGER_SW;
 	}
 
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-te-pin-select",
+			&host->te_mode);
+	if (rc) {
+		pr_warn("[%s] fallback to default te-pin-select\n", name);
+		host->te_mode = 1;
+		rc = 0;
+	}
 
 	return rc;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index c715eae..8397e1c 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1372,9 +1372,6 @@
 					mixer, &cstate->dim_layer[i]);
 	}
 
-	if (ctl->ops.setup_sbuf_cfg)
-		ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
-
 	_sde_crtc_program_lm_output_roi(crtc);
 }
 
@@ -2699,7 +2696,8 @@
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
 	struct sde_crtc_state *cstate;
-	int ret;
+	struct sde_hw_ctl *ctl;
+	int ret, i;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
@@ -2767,6 +2765,12 @@
 		drm_atomic_crtc_for_each_plane(plane, crtc)
 			sde_plane_kickoff(plane);
 
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		ctl = sde_crtc->mixers[i].hw_ctl;
+		if (ctl && ctl->ops.setup_sbuf_cfg)
+			ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
+	}
+
 	sde_vbif_clear_errors(sde_kms);
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index a313e34..78c22c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -809,10 +809,6 @@
 
 	sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
 
-	if (atomic_read(&phys_enc->vblank_refcount))
-		SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n",
-				atomic_read(&phys_enc->vblank_refcount));
-
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
index 5307464..bf48271 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -16,7 +16,7 @@
 #include "sde_hw_lm.h"
 #include "sde_ad4.h"
 
-#define IDLE_2_RUN(x) ((x) == (ad4_init | ad4_cfg | ad4_mode | ad4_input))
+#define AD_STATE_READY(x) ((x) == (ad4_init | ad4_cfg | ad4_mode | ad4_input))
 #define MERGE_WIDTH_RIGHT 6
 #define MERGE_WIDTH_LEFT 5
 #define AD_IPC_FRAME_COUNT 2
@@ -31,7 +31,10 @@
 
 enum ad4_state {
 	ad4_state_idle,
+	ad4_state_startup,
 	ad4_state_run,
+	/* idle power collapse suspend state */
+	ad4_state_ipcs,
 	/* idle power collapse resume state */
 	ad4_state_ipcr,
 	ad4_state_max,
@@ -43,8 +46,8 @@
 static int ad4_params_check(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg);
 
-static int ad4_no_op_setup(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
+static int ad4_no_op_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
+static int ad4_setup_debug(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
 static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode);
 static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg);
@@ -72,23 +75,23 @@
 		struct sde_ad_hw_cfg *cfg);
 static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg);
+static int ad4_assertive_setup_ipcr(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
 static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg);
+static int ad4_backlight_setup_ipcr(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
 
 static int ad4_ipc_suspend_setup_run(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg);
-static int ad4_ipc_resume_setup_run(struct sde_hw_dspp *dspp,
+static int ad4_ipc_suspend_setup_ipcr(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg);
-static int ad4_ipc_resume_setup_ipcr(struct sde_hw_dspp *dspp,
+static int ad4_ipc_resume_setup_ipcs(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_ipc_reset_setup_startup(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg);
 static int ad4_ipc_reset_setup_ipcr(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg);
-static int ad4_mem_init_enable(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_mem_init_disable(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
-static int ad4_cfg_ipc_resume(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg);
 static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg);
 
@@ -103,6 +106,18 @@
 	[ad4_state_idle][AD_IPC_SUSPEND] = ad4_no_op_setup,
 	[ad4_state_idle][AD_IPC_RESUME] = ad4_no_op_setup,
 	[ad4_state_idle][AD_IPC_RESET] = ad4_no_op_setup,
+
+	[ad4_state_startup][AD_MODE] = ad4_mode_setup_common,
+	[ad4_state_startup][AD_INIT] = ad4_init_setup,
+	[ad4_state_startup][AD_CFG] = ad4_cfg_setup,
+	[ad4_state_startup][AD_INPUT] = ad4_input_setup,
+	[ad4_state_startup][AD_SUSPEND] = ad4_suspend_setup,
+	[ad4_state_startup][AD_ASSERTIVE] = ad4_assertive_setup,
+	[ad4_state_startup][AD_BACKLIGHT] = ad4_backlight_setup,
+	[ad4_state_startup][AD_IPC_SUSPEND] = ad4_no_op_setup,
+	[ad4_state_startup][AD_IPC_RESUME] = ad4_no_op_setup,
+	[ad4_state_startup][AD_IPC_RESET] = ad4_ipc_reset_setup_startup,
+
 	[ad4_state_run][AD_MODE] = ad4_mode_setup_common,
 	[ad4_state_run][AD_INIT] = ad4_init_setup_run,
 	[ad4_state_run][AD_CFG] = ad4_cfg_setup_run,
@@ -111,17 +126,29 @@
 	[ad4_state_run][AD_ASSERTIVE] = ad4_assertive_setup,
 	[ad4_state_run][AD_BACKLIGHT] = ad4_backlight_setup,
 	[ad4_state_run][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_run,
-	[ad4_state_run][AD_IPC_RESUME] = ad4_ipc_resume_setup_run,
-	[ad4_state_run][AD_IPC_RESET] = ad4_no_op_setup,
+	[ad4_state_run][AD_IPC_RESUME] = ad4_no_op_setup,
+	[ad4_state_run][AD_IPC_RESET] = ad4_setup_debug,
+
+	[ad4_state_ipcs][AD_MODE] = ad4_no_op_setup,
+	[ad4_state_ipcs][AD_INIT] = ad4_no_op_setup,
+	[ad4_state_ipcs][AD_CFG] = ad4_no_op_setup,
+	[ad4_state_ipcs][AD_INPUT] = ad4_no_op_setup,
+	[ad4_state_ipcs][AD_SUSPEND] = ad4_no_op_setup,
+	[ad4_state_ipcs][AD_ASSERTIVE] = ad4_no_op_setup,
+	[ad4_state_ipcs][AD_BACKLIGHT] = ad4_no_op_setup,
+	[ad4_state_ipcs][AD_IPC_SUSPEND] = ad4_no_op_setup,
+	[ad4_state_ipcs][AD_IPC_RESUME] = ad4_ipc_resume_setup_ipcs,
+	[ad4_state_ipcs][AD_IPC_RESET] = ad4_no_op_setup,
+
 	[ad4_state_ipcr][AD_MODE] = ad4_mode_setup_common,
 	[ad4_state_ipcr][AD_INIT] = ad4_init_setup_ipcr,
 	[ad4_state_ipcr][AD_CFG] = ad4_cfg_setup_ipcr,
 	[ad4_state_ipcr][AD_INPUT] = ad4_input_setup_ipcr,
 	[ad4_state_ipcr][AD_SUSPEND] = ad4_suspend_setup,
-	[ad4_state_ipcr][AD_ASSERTIVE] = ad4_assertive_setup,
-	[ad4_state_ipcr][AD_BACKLIGHT] = ad4_backlight_setup,
-	[ad4_state_ipcr][AD_IPC_SUSPEND] = ad4_no_op_setup,
-	[ad4_state_ipcr][AD_IPC_RESUME] = ad4_ipc_resume_setup_ipcr,
+	[ad4_state_ipcr][AD_ASSERTIVE] = ad4_assertive_setup_ipcr,
+	[ad4_state_ipcr][AD_BACKLIGHT] = ad4_backlight_setup_ipcr,
+	[ad4_state_ipcr][AD_IPC_SUSPEND] = ad4_ipc_suspend_setup_ipcr,
+	[ad4_state_ipcr][AD_IPC_RESUME] = ad4_no_op_setup,
 	[ad4_state_ipcr][AD_IPC_RESET] = ad4_ipc_reset_setup_ipcr,
 };
 
@@ -129,20 +156,27 @@
 	enum ad4_state state;
 	u32 completed_ops_mask;
 	bool ad4_support;
-	enum ad4_modes cached_mode;
+	enum ad4_modes mode;
 	bool is_master;
+	u32 last_assertive;
+	u32 cached_assertive;
+	u64 last_als;
+	u64 cached_als;
+	u64 last_bl;
+	u64 cached_bl;
+	u32 last_str;
 	u32 frame_count;
+	u32 frmt_mode;
+	u32 irdx_control_0;
 	u32 tf_ctrl;
 	u32 vc_control_0;
-	u32 last_str;
-	u32 cached_als;
 };
 
 static struct ad4_info info[DSPP_MAX] = {
-	[DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF, false},
-	[DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF, false},
-	[DSPP_2] = {ad4_state_max, 0, false, AD4_OFF, false},
-	[DSPP_3] = {ad4_state_max, 0, false, AD4_OFF, false},
+	[DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF, false, 0x80, 0x80},
+	[DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF, false, 0x80, 0x80},
+	[DSPP_2] = {ad4_state_max, 0, false, AD4_OFF, false, 0x80, 0x80},
+	[DSPP_3] = {ad4_state_max, 0, false, AD4_OFF, false, 0x80, 0x80},
 };
 
 void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *ad_cfg)
@@ -252,6 +286,29 @@
 	return 0;
 }
 
+static int ad4_setup_debug(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
+{
+	u32 strength = 0, i = 0;
+	struct sde_hw_mixer *hw_lm;
+
+	hw_lm = cfg->hw_cfg->mixer_info;
+	if ((cfg->hw_cfg->num_of_mixers == 2) && hw_lm->cfg.right_mixer) {
+		/* this AD core is the salve core */
+		for (i = DSPP_0; i < DSPP_MAX; i++) {
+			if (info[i].is_master) {
+				strength = info[i].last_str;
+				break;
+			}
+		}
+	} else {
+		strength = SDE_REG_READ(&dspp->hw,
+				dspp->cap->sblk->ad.base + 0x4c);
+		pr_debug("%s(): AD strength = %d\n", __func__, strength);
+	}
+
+	return 0;
+}
+
 static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode)
 {
 	u32 blk_offset;
@@ -261,10 +318,21 @@
 		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
 				0x101);
 		info[dspp->idx].state = ad4_state_idle;
+		pr_debug("%s(): AD state move to idle\n", __func__);
 		info[dspp->idx].completed_ops_mask = 0;
+		/* reset last values to register default */
+		info[dspp->idx].last_assertive = 0x80;
+		info[dspp->idx].cached_assertive = U8_MAX;
+		info[dspp->idx].last_bl = 0xFFFF;
+		info[dspp->idx].cached_bl = U64_MAX;
+		info[dspp->idx].last_als = 0x0;
+		info[dspp->idx].cached_als = U64_MAX;
 	} else {
-		if (info[dspp->idx].state == ad4_state_idle)
-			info[dspp->idx].state = ad4_state_run;
+		if (info[dspp->idx].state == ad4_state_idle) {
+			info[dspp->idx].frame_count = 0;
+			info[dspp->idx].state = ad4_state_startup;
+			pr_debug("%s(): AD state move to startup\n", __func__);
+		}
 		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
 				0x100);
 	}
@@ -323,6 +391,8 @@
 
 	init = cfg->hw_cfg->payload;
 
+	info[dspp->idx].frmt_mode = (init->init_param_009 & (BIT(14) - 1));
+
 	blk_offset = 0xc;
 	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
 			init->init_param_010);
@@ -442,9 +512,8 @@
 	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
 			(init->init_param_047 & (BIT(8) - 1)));
 
-	blk_offset = 0x13c;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_048 & (BIT(5) - 1)));
+	info[dspp->idx].irdx_control_0 = (init->init_param_048 & (BIT(5) - 1));
+
 	blk_offset = 0x140;
 	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
 			(init->init_param_049 & (BIT(8) - 1)));
@@ -655,10 +724,6 @@
 	val = (ad_cfg->cfg_param_006 & (BIT(7) - 1));
 	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
 
-	blk_offset = 0x30;
-	val = (ad_cfg->cfg_param_007 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
 	info[dspp->idx].tf_ctrl = (ad_cfg->cfg_param_008 & (BIT(8) - 1));
 
 	blk_offset = 0x38;
@@ -801,11 +866,10 @@
 		als = 0;
 		val = &als;
 	}
-	info[dspp->idx].cached_als = *val;
+	info[dspp->idx].last_als = (*val & (BIT(16) - 1));
 	info[dspp->idx].completed_ops_mask |= ad4_input;
 	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(*val & (BIT(16) - 1)));
-
+			info[dspp->idx].last_als);
 	return 0;
 }
 
@@ -813,6 +877,7 @@
 		struct sde_ad_hw_cfg *cfg)
 {
 	info[dspp->idx].state = ad4_state_idle;
+	pr_debug("%s(): AD state move to idle\n", __func__);
 	info[dspp->idx].completed_ops_mask = 0;
 	return 0;
 }
@@ -827,13 +892,13 @@
 		return -EINVAL;
 	}
 
-	info[dspp->idx].cached_mode = *((enum ad4_modes *)
+	info[dspp->idx].mode = *((enum ad4_modes *)
 					(cfg->hw_cfg->payload));
 	info[dspp->idx].completed_ops_mask |= ad4_mode;
 
-	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask) ||
-					info[dspp->idx].cached_mode == AD4_OFF)
-		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask) ||
+					info[dspp->idx].mode == AD4_OFF)
+		ad4_mode_setup(dspp, info[dspp->idx].mode);
 
 	return 0;
 }
@@ -842,6 +907,7 @@
 		struct sde_ad_hw_cfg *cfg)
 {
 	int ret;
+	u32 blk_offset;
 
 	if (!cfg->hw_cfg->payload) {
 		info[dspp->idx].completed_ops_mask &= ~ad4_init;
@@ -852,14 +918,24 @@
 	if (ret)
 		return ret;
 
-	ret = ad4_mem_init_enable(dspp, cfg);
-	if (ret)
-		return ret;
+	/* enable memory initialization */
+	/* frmt mode */
+	blk_offset = 0x8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(info[dspp->idx].frmt_mode & 0x1fff));
+	/* memory init */
+	blk_offset = 0x450;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0x1);
+
+	/* enforce 0 initial strength when powering up AD config */
+	/* irdx_control_0 */
+	blk_offset = 0x13c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0x6);
 
 	info[dspp->idx].completed_ops_mask |= ad4_init;
 
-	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].mode);
 
 	return 0;
 }
@@ -868,6 +944,7 @@
 		struct sde_ad_hw_cfg *cfg)
 {
 	int ret;
+	u32 blk_offset;
 
 	if (!cfg->hw_cfg->payload) {
 		info[dspp->idx].completed_ops_mask &= ~ad4_init;
@@ -877,9 +954,20 @@
 	ret = ad4_init_setup(dspp, cfg);
 	if (ret)
 		return ret;
-	ret = ad4_mem_init_disable(dspp, cfg);
-	if (ret)
-		return ret;
+
+	/* disable memory initialization */
+	/* frmt mode */
+	blk_offset = 0x8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(info[dspp->idx].frmt_mode | 0x2000));
+	/* no need to explicitly set memory initialization sequence,
+	 * since AD hw was not powered off.
+	 */
+
+	/* irdx_control_0 */
+	blk_offset = 0x13c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			info[dspp->idx].irdx_control_0);
 
 	return 0;
 }
@@ -888,6 +976,7 @@
 		struct sde_ad_hw_cfg *cfg)
 {
 	int ret;
+	u32 blk_offset;
 
 	if (!cfg->hw_cfg->payload) {
 		info[dspp->idx].completed_ops_mask &= ~ad4_init;
@@ -897,11 +986,21 @@
 	ret = ad4_init_setup(dspp, cfg);
 	if (ret)
 		return ret;
+	/* no need to explicitly set memory initialization sequence,
+	 * since register reset values are the correct configuration
+	 */
+	/* frmt mode */
+	blk_offset = 0x8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(info[dspp->idx].frmt_mode | 0x2000));
+	/* irdx_control_0 */
+	blk_offset = 0x13c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			info[dspp->idx].irdx_control_0);
 
 	info[dspp->idx].completed_ops_mask |= ad4_init;
-
-	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].mode);
 
 	return 0;
 }
@@ -910,6 +1009,7 @@
 		struct sde_ad_hw_cfg *cfg)
 {
 	int ret;
+	u32 blk_offset;
 
 	if (!cfg->hw_cfg->payload) {
 		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
@@ -919,13 +1019,23 @@
 	ret = ad4_cfg_setup(dspp, cfg);
 	if (ret)
 		return ret;
-	ret = ad4_cfg_ipc_reset(dspp, cfg);
-	if (ret)
-		return ret;
+
+	/* enforce 0 initial strength when powering up AD config */
+	/* assertiveness */
+	blk_offset = 0x30;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0x0);
+	/* tf control */
+	blk_offset = 0x34;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0x55);
+
+	/* vc_control_0 */
+	blk_offset = 0x138;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		info[dspp->idx].vc_control_0);
 
 	info[dspp->idx].completed_ops_mask |= ad4_cfg;
-	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].mode);
 	return 0;
 }
 
@@ -933,6 +1043,7 @@
 		struct sde_ad_hw_cfg *cfg)
 {
 	int ret;
+	u32 blk_offset;
 
 	if (!cfg->hw_cfg->payload) {
 		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
@@ -942,9 +1053,19 @@
 	ret = ad4_cfg_setup(dspp, cfg);
 	if (ret)
 		return ret;
-	ret = ad4_cfg_ipc_reset(dspp, cfg);
-	if (ret)
-		return ret;
+
+	/* assertiveness */
+	blk_offset = 0x30;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			info[dspp->idx].last_assertive);
+	/* tf control */
+	blk_offset = 0x34;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		info[dspp->idx].tf_ctrl);
+	/* vc_control_0 */
+	blk_offset = 0x138;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		info[dspp->idx].vc_control_0);
 
 	return 0;
 }
@@ -953,6 +1074,7 @@
 		struct sde_ad_hw_cfg *cfg)
 {
 	int ret;
+	u32 blk_offset;
 
 	if (!cfg->hw_cfg->payload) {
 		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
@@ -962,13 +1084,15 @@
 	ret = ad4_cfg_setup(dspp, cfg);
 	if (ret)
 		return ret;
-	ret = ad4_cfg_ipc_resume(dspp, cfg);
-	if (ret)
-		return ret;
+
+	/* assertiveness */
+	blk_offset = 0x30;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			info[dspp->idx].last_assertive);
 
 	info[dspp->idx].completed_ops_mask |= ad4_cfg;
-	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].mode);
 	return 0;
 }
 
@@ -982,8 +1106,8 @@
 		return ret;
 
 	info[dspp->idx].completed_ops_mask |= ad4_input;
-	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].mode);
 
 	return 0;
 }
@@ -991,15 +1115,29 @@
 static int ad4_input_setup_ipcr(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg)
 {
-	int ret;
+	u64 *val, als;
+	u32 blk_offset;
 
-	ret = ad4_input_setup(dspp, cfg);
-	if (ret)
-		return ret;
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
 
+	blk_offset = 0x28;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		als = 0;
+		val = &als;
+	}
+	info[dspp->idx].cached_als = *val & (BIT(16) - 1);
 	info[dspp->idx].completed_ops_mask |= ad4_input;
-	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
-		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			info[dspp->idx].last_als);
+
+	if (AD_STATE_READY(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].mode);
 
 	return 0;
 }
@@ -1023,8 +1161,37 @@
 		assertive = 0;
 		val = &assertive;
 	}
+
+	info[dspp->idx].last_assertive = *val & (BIT(8) - 1);
 	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(*val & (BIT(8) - 1)));
+			(info[dspp->idx].last_assertive));
+	return 0;
+}
+
+static int ad4_assertive_setup_ipcr(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, assertive;
+	u32 blk_offset;
+
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x30;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		assertive = 0;
+		val = &assertive;
+	}
+
+	info[dspp->idx].cached_assertive = *val & (BIT(8) - 1);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			info[dspp->idx].last_assertive);
+
 	return 0;
 }
 
@@ -1048,8 +1215,36 @@
 		val = &bl;
 	}
 
+	info[dspp->idx].last_bl = *val & (BIT(16) - 1);
 	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(*val & (BIT(16) - 1)));
+			info[dspp->idx].last_bl);
+	return 0;
+}
+
+static int ad4_backlight_setup_ipcr(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, bl;
+	u32 blk_offset;
+
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x2c;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		bl = 0;
+		val = &bl;
+	}
+
+	info[dspp->idx].cached_bl = *val & (BIT(16) - 1);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			info[dspp->idx].last_bl);
+
 	return 0;
 }
 
@@ -1088,29 +1283,50 @@
 	} else {
 		strength = SDE_REG_READ(&dspp->hw,
 				dspp->cap->sblk->ad.base + 0x4c);
+		pr_debug("%s(): AD strength = %d\n", __func__, strength);
 	}
 	info[dspp->idx].last_str = strength;
+	info[dspp->idx].state = ad4_state_ipcs;
+	pr_debug("%s(): AD state move to ipcs\n", __func__);
 
 	return 0;
 }
 
-static int ad4_ipc_resume_setup_run(struct sde_hw_dspp *dspp,
+static int ad4_ipc_resume_setup_ipcs(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg)
 {
-	int ret;
-
-	info[dspp->idx].state = ad4_state_ipcr;
+	u32 blk_offset, val;
 
 	info[dspp->idx].frame_count = 0;
-	ret = ad4_cfg_ipc_resume(dspp, cfg);
+	info[dspp->idx].state = ad4_state_ipcr;
+	pr_debug("%s(): AD state move to ipcr\n", __func__);
 
-	return ret;
+	/* no need to rewrite frmt_mode bit 13 and mem_init,
+	 * since the default register values are exactly what
+	 * we wanted.
+	 */
+
+	/* ipc resume with manual strength */
+	/* tf control */
+	blk_offset = 0x34;
+	val = (0x55 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	/* set manual strength */
+	blk_offset = 0x15c;
+	val = (info[dspp->idx].last_str & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	/* enable manual mode */
+	blk_offset = 0x138;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0);
+
+	return 0;
 }
 
-static int ad4_ipc_resume_setup_ipcr(struct sde_hw_dspp *dspp,
+static int ad4_ipc_suspend_setup_ipcr(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg)
 {
-	info[dspp->idx].frame_count = 0;
+	info[dspp->idx].state = ad4_state_ipcs;
+	pr_debug("%s(): AD state move to ipcs\n", __func__);
 	return 0;
 }
 
@@ -1138,10 +1354,12 @@
 	} else {
 		strength = SDE_REG_READ(&dspp->hw,
 				dspp->cap->sblk->ad.base + 0x4c);
+		pr_debug("%s(): AD strength = %d\n", __func__, strength);
 	}
 
 	if (info[dspp->idx].frame_count == AD_IPC_FRAME_COUNT) {
 		info[dspp->idx].state = ad4_state_run;
+		pr_debug("%s(): AD state move to run\n", __func__);
 		info[dspp->idx].last_str = strength;
 		ret = ad4_cfg_ipc_reset(dspp, cfg);
 		if (ret)
@@ -1153,98 +1371,73 @@
 	return 0;
 }
 
-static int ad4_mem_init_enable(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u32 blk_offset;
-	struct drm_msm_ad4_init *init;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_init;
-		return 0;
-	}
-
-	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
-			cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	init = cfg->hw_cfg->payload;
-	blk_offset = 0x8;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_009 & 0xdfff));
-	blk_offset = 0x450;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 1);
-
-	return 0;
-}
-
-static int ad4_mem_init_disable(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u32 blk_offset;
-	struct drm_msm_ad4_init *init;
-
-	if (!cfg->hw_cfg->payload) {
-		info[dspp->idx].completed_ops_mask &= ~ad4_init;
-		return 0;
-	}
-
-	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
-		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
-			sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
-			cfg->hw_cfg->payload);
-		return -EINVAL;
-	}
-
-	init = cfg->hw_cfg->payload;
-	blk_offset = 0x8;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
-			(init->init_param_009 | 0x2000));
-	blk_offset = 0x450;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0);
-
-	return 0;
-}
-
-static int ad4_cfg_ipc_resume(struct sde_hw_dspp *dspp,
-		struct sde_ad_hw_cfg *cfg)
-{
-	u32 blk_offset, val;
-
-	/* disable temporal filters */
-	blk_offset = 0x34;
-	val = (0x55 & (BIT(8) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	/* set manual strength */
-	blk_offset = 0x15c;
-	val = (info[dspp->idx].last_str & (BIT(10) - 1));
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
-
-	/* enable manul mode */
-	blk_offset = 0x138;
-	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, 0);
-
-	return 0;
-}
-
 static int ad4_cfg_ipc_reset(struct sde_hw_dspp *dspp,
 		struct sde_ad_hw_cfg *cfg)
 {
 	u32 blk_offset;
 
-	/* enable temporal filters */
+	/* revert manual strength */
+	/* tf control */
 	blk_offset = 0x34;
 	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
 		info[dspp->idx].tf_ctrl);
-
-	/* disable manul mode */
+	/* vc_control_0 */
 	blk_offset = 0x138;
 	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
 		info[dspp->idx].vc_control_0);
 
+	/* reset cached ALS, backlight and assertiveness */
+	if (info[dspp->idx].cached_als != U64_MAX) {
+		SDE_REG_WRITE(&dspp->hw,
+				dspp->cap->sblk->ad.base + 0x28,
+				info[dspp->idx].cached_als);
+		info[dspp->idx].last_als = info[dspp->idx].cached_als;
+		info[dspp->idx].cached_als = U64_MAX;
+	}
+	if (info[dspp->idx].cached_bl != U64_MAX) {
+		SDE_REG_WRITE(&dspp->hw,
+				dspp->cap->sblk->ad.base + 0x2c,
+				info[dspp->idx].cached_bl);
+		info[dspp->idx].last_bl = info[dspp->idx].cached_bl;
+		info[dspp->idx].cached_bl = U64_MAX;
+	}
+	if (info[dspp->idx].cached_assertive != U8_MAX) {
+		SDE_REG_WRITE(&dspp->hw,
+				dspp->cap->sblk->ad.base + 0x30,
+				info[dspp->idx].cached_assertive);
+		info[dspp->idx].last_assertive =
+				info[dspp->idx].cached_assertive;
+		info[dspp->idx].cached_assertive = U8_MAX;
+	}
+
+	return 0;
+}
+
+static int ad4_ipc_reset_setup_startup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u32 blk_offset;
+
+	if (info[dspp->idx].frame_count == AD_IPC_FRAME_COUNT) {
+		info[dspp->idx].state = ad4_state_run;
+		pr_debug("%s(): AD state move to run\n", __func__);
+
+		/* revert enforce 0 initial strength */
+		/* irdx_control_0 */
+		blk_offset = 0x13c;
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+				info[dspp->idx].irdx_control_0);
+		/* assertiveness */
+		blk_offset = 0x30;
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+				info[dspp->idx].last_assertive);
+		/* tf control */
+		blk_offset = 0x34;
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+				info[dspp->idx].tf_ctrl);
+	} else {
+		info[dspp->idx].frame_count++;
+	}
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 3b3854d..606fdeb 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -36,6 +36,7 @@
 #define   CTL_ROT_START                 0x0CC
 
 #define CTL_MIXER_BORDER_OUT            BIT(24)
+#define CTL_FLUSH_MASK_ROT              BIT(27)
 #define CTL_FLUSH_MASK_CTL              BIT(17)
 
 #define SDE_REG_RESET_TIMEOUT_COUNT    20
@@ -123,6 +124,13 @@
 static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx)
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 rot_op_mode;
+
+	rot_op_mode = SDE_REG_READ(c, CTL_ROT_TOP) & 0x3;
+
+	/* rotate flush bit is undefined in offline mode, so ignore it */
+	if (rot_op_mode == SDE_CTL_ROT_OP_MODE_OFFLINE)
+		return SDE_REG_READ(c, CTL_FLUSH) & ~CTL_FLUSH_MASK_ROT;
 
 	return SDE_REG_READ(c, CTL_FLUSH);
 }
@@ -273,7 +281,7 @@
 {
 	switch (rot) {
 	case ROT_0:
-		*flushbits |= BIT(27);
+		*flushbits |= CTL_FLUSH_MASK_ROT;
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index e112fd1..41b64cdc 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -965,6 +965,10 @@
 #define A6XX_RSCC_TCS2_DRV0_STATUS                      0x23A16
 #define A6XX_RSCC_TCS3_DRV0_STATUS                      0x23B7E
 
+
+/* CPR controller */
+#define A6XX_GPU_CPR_FSM_CTL				0x26801
+
 /* GPU PDC sequencer registers in AOSS.RPMh domain */
 #define	PDC_GPU_ENABLE_PDC			0x21140
 #define PDC_GPU_SEQ_START_ADDR			0x21148
@@ -982,5 +986,7 @@
 #define PDC_GPU_TCS1_CMD0_DATA			0x21577
 #define PDC_GPU_SEQ_MEM_0			0xA0000
 
+/* GFX rail CPR registers in AOSS.CPR domain */
+#define CPR_CPRF_CPRF5_CTRL			0x1801
 #endif /* _A6XX_REG_H */
 
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index a4c189f..87a7f00 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -1161,6 +1161,12 @@
 		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0);
 }
 
+static inline int adreno_is_a630v2(struct adreno_device *adreno_dev)
+{
+	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A630) &&
+		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1);
+}
+
 /*
  * adreno_checkreg_off() - Checks the validity of a register enum
  * @adreno_dev:		Pointer to adreno device
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 386c3a9..c6f7402 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -871,6 +871,16 @@
 
 #define RSC_CMD_OFFSET 2
 #define PDC_CMD_OFFSET 4
+static void _regread(void __iomem *regbase,
+		unsigned int offsetwords, unsigned int *value)
+{
+	void __iomem *reg;
+
+	reg = regbase + (offsetwords << 2);
+	*value = __raw_readl(reg);
+	/* Ensure read completes */
+	rmb();
+}
 
 static void _regwrite(void __iomem *regbase,
 		unsigned int offsetwords, unsigned int value)
@@ -1387,6 +1397,8 @@
 {
 	struct gmu_device *gmu = &device->gmu;
 	struct device *dev = &gmu->pdev->dev;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int tmp;
 	int val;
 
 	kgsl_gmu_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC, &val);
@@ -1416,6 +1428,19 @@
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
 
+	/* Turn on GFX rail CPR */
+	if (adreno_is_a630v2(adreno_dev)) {
+		_regread(gmu->cpr_reg_virt, CPR_CPRF_CPRF5_CTRL, &tmp);
+		tmp |= BIT(2);
+		_regwrite(gmu->cpr_reg_virt, CPR_CPRF_CPRF5_CTRL, tmp);
+
+		kgsl_gmu_regread(device, A6XX_GPU_CPR_FSM_CTL, &tmp);
+		tmp |= BIT(0);
+		kgsl_gmu_regwrite(device, A6XX_GPU_CPR_FSM_CTL, tmp);
+		/* Ensure write happens before exiting the function */
+		wmb();
+	}
+
 	/* Enable the power counter because it was disabled before slumber */
 	kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
 
@@ -1428,9 +1453,23 @@
 static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = &device->gmu;
-	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int tmp;
 	int val;
 
+	/* Turn off GFX rail CPR */
+	if (adreno_is_a630v2(adreno_dev)) {
+		_regread(gmu->cpr_reg_virt, CPR_CPRF_CPRF5_CTRL, &tmp);
+		tmp &= ~BIT(2);
+		_regwrite(gmu->cpr_reg_virt, CPR_CPRF_CPRF5_CTRL, tmp);
+
+		kgsl_gmu_regread(device, A6XX_GPU_CPR_FSM_CTL, &tmp);
+		tmp &= ~BIT(0);
+		kgsl_gmu_regwrite(device, A6XX_GPU_CPR_FSM_CTL, tmp);
+		/* Ensure write completes before starting sleep seq */
+		wmb();
+	}
+
 	/* RSC sleep sequence */
 	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
@@ -1942,7 +1981,7 @@
 	a6xx_sptprac_disable(adreno_dev);
 
 	/* Disconnect GPU from BUS. Clear and reconnected after reset */
-	adreno_vbif_clear_pending_transactions(device);
+	/* adreno_vbif_clear_pending_transactions(device); */
 	/* Unnecessary: a6xx_soft_reset(adreno_dev); */
 
 	/* Check no outstanding RPMh voting */
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 8060672..48a70b0 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -862,7 +862,7 @@
 	return 0;
 }
 
-static int gmu_reg_probe(struct gmu_device *gmu, const char *name, bool is_gmu)
+static int gmu_reg_probe(struct gmu_device *gmu, const char *name)
 {
 	struct resource *res;
 
@@ -880,7 +880,7 @@
 		return -EINVAL;
 	}
 
-	if (is_gmu) {
+	if (!strcmp(name, "kgsl_gmu_reg")) {
 		gmu->reg_phys = res->start;
 		gmu->reg_len = resource_size(res);
 		gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
@@ -891,13 +891,20 @@
 			return -ENODEV;
 		}
 
-	} else {
+	} else if (!strcmp(name, "kgsl_gmu_pdc_reg")) {
 		gmu->pdc_reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
 				resource_size(res));
 		if (gmu->pdc_reg_virt == NULL) {
 			dev_err(&gmu->pdev->dev, "PDC regs ioremap failed\n");
 			return -ENODEV;
 		}
+	} else if (!strcmp(name, "kgsl_gmu_cpr_reg")) {
+		gmu->cpr_reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
+				resource_size(res));
+		if (gmu->cpr_reg_virt == NULL) {
+			dev_err(&gmu->pdev->dev, "CPR regs ioremap failed\n");
+			return -ENODEV;
+		}
 	}
 
 	return 0;
@@ -1118,11 +1125,15 @@
 	mem_addr = gmu->hfi_mem;
 
 	/* Map and reserve GMU CSRs registers */
-	ret = gmu_reg_probe(gmu, "kgsl_gmu_reg", true);
+	ret = gmu_reg_probe(gmu, "kgsl_gmu_reg");
 	if (ret)
 		goto error;
 
-	ret = gmu_reg_probe(gmu, "kgsl_gmu_pdc_reg", false);
+	ret = gmu_reg_probe(gmu, "kgsl_gmu_pdc_reg");
+	if (ret)
+		goto error;
+
+	ret = gmu_reg_probe(gmu, "kgsl_gmu_cpr_reg");
 	if (ret)
 		goto error;
 
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index 63ca028..c558067 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -166,6 +166,7 @@
  *	and GPU register set, the offset will be used when accessing
  *	gmu registers using offset defined in GPU register space.
  * @pdc_reg_virt: starting kernel virtual address for RPMh PDC registers
+ * @cpr_reg_virt: starting kernel virtual address for RPMh CPR controller
  * @gmu_interrupt_num: GMU interrupt number
  * @fw_image: descriptor of GMU memory that has GMU image in it
  * @hfi_mem: pointer to HFI shared memory
@@ -202,6 +203,7 @@
 	unsigned int reg_len;
 	unsigned int gmu2gpu_offset;
 	void __iomem *pdc_reg_virt;
+	void __iomem *cpr_reg_virt;
 	unsigned int gmu_interrupt_num;
 	struct gmu_memdesc fw_image;
 	struct gmu_memdesc *hfi_mem;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a06a6c7..23eab00 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -319,14 +319,6 @@
 	QCOM_SMMUV500,
 };
 
-struct arm_smmu_device;
-struct arm_smmu_arch_ops {
-	int (*init)(struct arm_smmu_device *smmu);
-	void (*device_reset)(struct arm_smmu_device *smmu);
-	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
-					 dma_addr_t iova);
-};
-
 struct arm_smmu_impl_def_reg {
 	u32 offset;
 	u32 value;
@@ -400,6 +392,7 @@
 	int				regulator_defer;
 };
 
+struct arm_smmu_arch_ops;
 struct arm_smmu_device {
 	struct device			*dev;
 
@@ -561,9 +554,6 @@
 static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
 static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
 
-static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
-static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
-
 static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
 				    dma_addr_t iova);
 
@@ -627,6 +617,76 @@
 		mutex_unlock(&smmu_domain->assign_lock);
 }
 
+/*
+ * init()
+ * Hook for additional device tree parsing at probe time.
+ *
+ * device_reset()
+ * Hook for one-time architecture-specific register settings.
+ *
+ * iova_to_phys_hard()
+ * Provides debug information. May be called from the context fault irq handler.
+ *
+ * init_context_bank()
+ * Hook for architecture-specific settings which require knowledge of the
+ * dynamically allocated context bank number.
+ *
+ * device_group()
+ * Hook for checking whether a device is compatible with a said group.
+ */
+struct arm_smmu_arch_ops {
+	int (*init)(struct arm_smmu_device *smmu);
+	void (*device_reset)(struct arm_smmu_device *smmu);
+	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
+					 dma_addr_t iova);
+	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
+					struct device *dev);
+	int (*device_group)(struct device *dev, struct iommu_group *group);
+};
+
+static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
+{
+	if (!smmu->arch_ops)
+		return 0;
+	if (!smmu->arch_ops->init)
+		return 0;
+	return smmu->arch_ops->init(smmu);
+}
+
+static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
+{
+	if (!smmu->arch_ops)
+		return;
+	if (!smmu->arch_ops->device_reset)
+		return;
+	return smmu->arch_ops->device_reset(smmu);
+}
+
+static void arm_smmu_arch_init_context_bank(
+		struct arm_smmu_domain *smmu_domain, struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+	if (!smmu->arch_ops)
+		return;
+	if (!smmu->arch_ops->init_context_bank)
+		return;
+	return smmu->arch_ops->init_context_bank(smmu_domain, dev);
+}
+
+static int arm_smmu_arch_device_group(struct device *dev,
+					struct iommu_group *group)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+
+	if (!smmu->arch_ops)
+		return 0;
+	if (!smmu->arch_ops->device_group)
+		return 0;
+	return smmu->arch_ops->device_group(dev, group);
+}
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
 	if (dev_is_pci(dev)) {
@@ -1754,6 +1814,8 @@
 		arm_smmu_init_context_bank(smmu_domain,
 						&smmu_domain->pgtbl_cfg);
 
+		arm_smmu_arch_init_context_bank(smmu_domain, dev);
+
 		/*
 		 * Request context fault interrupt. Do this last to avoid the
 		 * handler seeing a half-initialised domain state.
@@ -2659,13 +2721,20 @@
 		group = smmu->s2crs[idx].group;
 	}
 
-	if (group)
-		return group;
+	if (!group) {
+		if (dev_is_pci(dev))
+			group = pci_device_group(dev);
+		else
+			group = generic_device_group(dev);
 
-	if (dev_is_pci(dev))
-		group = pci_device_group(dev);
-	else
-		group = generic_device_group(dev);
+		if (IS_ERR(group))
+			return NULL;
+	}
+
+	if (arm_smmu_arch_device_group(dev, group)) {
+		iommu_group_put(group);
+		return ERR_PTR(-EINVAL);
+	}
 
 	return group;
 }
@@ -3941,24 +4010,6 @@
 	return 0;
 }
 
-static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
-{
-	if (!smmu->arch_ops)
-		return 0;
-	if (!smmu->arch_ops->init)
-		return 0;
-	return smmu->arch_ops->init(smmu);
-}
-
-static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
-{
-	if (!smmu->arch_ops)
-		return;
-	if (!smmu->arch_ops->device_reset)
-		return;
-	return smmu->arch_ops->device_reset(smmu);
-}
-
 struct arm_smmu_match_data {
 	enum arm_smmu_arch_version version;
 	enum arm_smmu_implementation model;
@@ -4283,10 +4334,19 @@
 
 #define TBU_DBG_TIMEOUT_US		30000
 
+
+struct actlr_setting {
+	struct arm_smmu_smr smr;
+	u32 actlr;
+};
+
 struct qsmmuv500_archdata {
 	struct list_head		tbus;
 	void __iomem			*tcu_base;
 	u32				version;
+
+	struct actlr_setting		*actlrs;
+	u32				actlr_tbl_size;
 };
 #define get_qsmmuv500_archdata(smmu)				\
 	((struct qsmmuv500_archdata *)(smmu->archdata))
@@ -4307,6 +4367,14 @@
 	u32				halt_count;
 };
 
+struct qsmmuv500_group_iommudata {
+	bool has_actlr;
+	u32 actlr;
+};
+#define to_qsmmuv500_group_iommudata(group)				\
+	((struct qsmmuv500_group_iommudata *)				\
+		(iommu_group_get_iommudata(group)))
+
 static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
 {
 	unsigned long flags;
@@ -4560,6 +4628,79 @@
 	return qsmmuv500_iova_to_phys(domain, iova, sid);
 }
 
+static void qsmmuv500_release_group_iommudata(void *data)
+{
+	kfree(data);
+}
+
+/* If a device has a valid actlr, it must match */
+static int qsmmuv500_device_group(struct device *dev,
+				struct iommu_group *group)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
+	struct qsmmuv500_group_iommudata *iommudata;
+	u32 actlr, i, j, idx;
+	struct arm_smmu_smr *smr, *smr2;
+
+	iommudata = to_qsmmuv500_group_iommudata(group);
+	if (!iommudata) {
+		iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
+		if (!iommudata)
+			return -ENOMEM;
+
+		iommu_group_set_iommudata(group, iommudata,
+				qsmmuv500_release_group_iommudata);
+	}
+
+	for_each_cfg_sme(fwspec, i, idx) {
+		smr = &smmu->smrs[idx];
+		for (j = 0; j < data->actlr_tbl_size; j++) {
+			smr2 = &data->actlrs[j].smr;
+			actlr = data->actlrs[j].actlr;
+
+			/* Continue if table entry does not match */
+			if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
+				continue;
+
+			if (!iommudata->has_actlr) {
+				iommudata->actlr = actlr;
+				iommudata->has_actlr = true;
+			} else if (iommudata->actlr != actlr) {
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void qsmmuv500_init_cb(struct arm_smmu_domain *smmu_domain,
+				struct device *dev)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct qsmmuv500_group_iommudata *iommudata =
+		to_qsmmuv500_group_iommudata(dev->iommu_group);
+	void __iomem *cb_base;
+	const struct iommu_gather_ops *tlb;
+
+	if (!iommudata->has_actlr)
+		return;
+
+	tlb = smmu_domain->pgtbl_cfg.tlb;
+	cb_base = ARM_SMMU_CB_BASE(smmu) +
+			ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+	writel_relaxed(iommudata->actlr, cb_base + ARM_SMMU_CB_ACTLR);
+
+	/*
+	 * Flush the context bank after modifying ACTLR to ensure there
+	 * are no cache entries with stale state
+	 */
+	tlb->tlb_flush_all(smmu_domain);
+}
+
 static int qsmmuv500_tbu_register(struct device *dev, void *cookie)
 {
 	struct arm_smmu_device *smmu = cookie;
@@ -4579,6 +4720,38 @@
 	return 0;
 }
 
+static int qsmmuv500_read_actlr_tbl(struct arm_smmu_device *smmu)
+{
+	int len, i;
+	struct device *dev = smmu->dev;
+	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
+	struct actlr_setting *actlrs;
+	const __be32 *cell;
+
+	cell = of_get_property(dev->of_node, "qcom,actlr", NULL);
+	if (!cell)
+		return 0;
+
+	len = of_property_count_elems_of_size(dev->of_node, "qcom,actlr",
+						sizeof(u32) * 3);
+	if (len < 0)
+		return 0;
+
+	actlrs = devm_kzalloc(dev, sizeof(*actlrs) * len, GFP_KERNEL);
+	if (!actlrs)
+		return -ENOMEM;
+
+	for (i = 0; i < len; i++) {
+		actlrs[i].smr.id = of_read_number(cell++, 1);
+		actlrs[i].smr.mask = of_read_number(cell++, 1);
+		actlrs[i].actlr = of_read_number(cell++, 1);
+	}
+
+	data->actlrs = actlrs;
+	data->actlr_tbl_size = len;
+	return 0;
+}
+
 static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
 {
 	struct resource *res;
@@ -4586,6 +4759,8 @@
 	struct qsmmuv500_archdata *data;
 	struct platform_device *pdev;
 	int ret;
+	u32 val;
+	void __iomem *reg;
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -4602,6 +4777,23 @@
 	data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
 	smmu->archdata = data;
 
+	ret = qsmmuv500_read_actlr_tbl(smmu);
+	if (ret)
+		return ret;
+
+	reg = ARM_SMMU_GR0(smmu);
+	val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
+	val &= ~ARM_MMU500_ACR_CACHE_LOCK;
+	writel_relaxed(val, reg + ARM_SMMU_GR0_sACR);
+	val = readl_relaxed(reg + ARM_SMMU_GR0_sACR);
+	/*
+	 * Modifying the nonsecure copy of the sACR register is only
+	 * allowed if permission is given in the secure sACR register.
+	 * Attempt to detect if we were able to update the value.
+	 */
+	WARN_ON(val & ARM_MMU500_ACR_CACHE_LOCK);
+
+
 	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 	if (ret)
 		return ret;
@@ -4617,6 +4809,8 @@
 struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
 	.init = qsmmuv500_arch_init,
 	.iova_to_phys_hard = qsmmuv500_iova_to_phys_hard,
+	.init_context_bank = qsmmuv500_init_cb,
+	.device_group = qsmmuv500_device_group,
 };
 
 static const struct of_device_id qsmmuv500_tbu_of_match[] = {
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 54766a2..4b16af6 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -493,14 +493,15 @@
 	struct device *dev;
 	int nr = BASE_DEVICE_NUMBER;
 
-	core = kzalloc(sizeof(*core), GFP_KERNEL);
-	if (!core || !vidc_driver) {
-		dprintk(VIDC_ERR,
-			"Failed to allocate memory for device core\n");
-		rc = -ENOMEM;
-		goto err_no_mem;
+	if (!vidc_driver) {
+		dprintk(VIDC_ERR, "Invalid vidc driver\n");
+		return -EINVAL;
 	}
 
+	core = kzalloc(sizeof(*core), GFP_KERNEL);
+	if (!core)
+		return -ENOMEM;
+
 	core->platform_data = vidc_get_drv_data(&pdev->dev);
 	dev_set_drvdata(&pdev->dev, core);
 	rc = msm_vidc_initialize_core(pdev, core);
@@ -666,7 +667,6 @@
 err_core_init:
 	dev_set_drvdata(&pdev->dev, NULL);
 	kfree(core);
-err_no_mem:
 	return rc;
 }
 
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index d8671ab..80573fc 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -1022,6 +1022,9 @@
 
 #define TM_IO_DTEST4_SEL			(MISC_BASE + 0xE9)
 
+#define ENG_SDCDC_CFG7_REG			(MISC_BASE + 0xC6)
+#define ENG_SDCDC_BST_SET_POINT_MASK		GENMASK(7, 6)
+
 /* CHGR FREQ Peripheral registers */
 #define FREQ_CLK_DIV_REG			(CHGR_FREQ_BASE + 0x50)
 
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index dd949e7..20f1838 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -948,6 +948,14 @@
 		return rc;
 	}
 
+	/* increase the concurrent mode threshold */
+	rc = smblib_masked_write(chg, ENG_SDCDC_CFG7_REG,
+				 ENG_SDCDC_BST_SET_POINT_MASK, 0);
+	if (rc < 0) {
+		pr_err("Couldn't set concurrent mode threshold\n");
+		return rc;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index c92bb32..cfc3762 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1342,7 +1342,7 @@
 	}
 
 	disable_irq(uport->irq);
-	free_irq(uport->irq, msm_port);
+	free_irq(uport->irq, uport);
 	spin_lock_irqsave(&uport->lock, flags);
 	msm_geni_serial_stop_tx(uport);
 	msm_geni_serial_stop_rx(uport);
@@ -1355,7 +1355,7 @@
 		if (msm_port->wakeup_irq > 0) {
 			irq_set_irq_wake(msm_port->wakeup_irq, 0);
 			disable_irq(msm_port->wakeup_irq);
-			free_irq(msm_port->wakeup_irq, msm_port);
+			free_irq(msm_port->wakeup_irq, uport);
 		}
 	}
 	IPC_LOG_MSG(msm_port->ipc_log_misc, "%s\n", __func__);
@@ -1490,8 +1490,6 @@
 		dev_err(uport->dev, "%s: Invalid FW %d loaded.\n",
 				 __func__, get_se_proto(uport->membase));
 		ret = -ENXIO;
-		disable_irq(uport->irq);
-		free_irq(uport->irq, msm_port);
 		goto exit_startup;
 	}
 
@@ -1508,7 +1506,7 @@
 	 */
 	mb();
 	ret = request_irq(uport->irq, msm_geni_serial_isr, IRQF_TRIGGER_HIGH,
-			msm_port->name, msm_port);
+			msm_port->name, uport);
 	if (unlikely(ret)) {
 		dev_err(uport->dev, "%s: Failed to get IRQ ret %d\n",
 							__func__, ret);
@@ -2336,6 +2334,7 @@
 		se_geni_resources_on(&port->serial_rsc);
 		uart_resume_port((struct uart_driver *)uport->private_data,
 									uport);
+		disable_irq(uport->irq);
 	}
 	return 0;
 }
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 01c67be..27f5d34 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1159,10 +1159,10 @@
 
 config SND_SOC_MSM_HDMI_CODEC_RX
 	bool "HDMI Audio Playback"
-	depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_660)
+	depends on DRM_MSM && (SND_SOC_MSM8998 || SND_SOC_660 || SND_SOC_SDM845)
 	help
 	HDMI audio drivers should be built only if the platform
-        supports hdmi panel.
+	supports hdmi panel.
 
 source "sound/soc/codecs/sdm660_cdc/Kconfig"
 source "sound/soc/codecs/msm_sdw/Kconfig"
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 66ecd6a..547330c 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -236,6 +236,7 @@
 	select MSM_QDSP6_PDR
 	select MSM_QDSP6_NOTIFIER
 	select MSM_QDSP6V2_CODECS
+	select SND_SOC_MSM_HDMI_CODEC_RX
 	select DTS_SRS_TM
 	select QTI_PP
 	select MSM_ULTRASOUND
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index 4c3a3a1..d806004 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -373,7 +373,7 @@
 
 	svc_size = q6core_get_avcs_service_size(service_id);
 	if (svc_size != size) {
-		pr_err("%s: Expected size: %ld, Provided size: %ld",
+		pr_err("%s: Expected size: %zu, Provided size: %zu",
 		       __func__, svc_size, size);
 		return -EINVAL;
 	}