Merge "soc: qcom: Kconfig: Make rpm stats dependent on sysfs" into msm-4.9
diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
index d9995f1..a25a99e 100644
--- a/Documentation/arm64/tagged-pointers.txt
+++ b/Documentation/arm64/tagged-pointers.txt
@@ -11,24 +11,56 @@
 The kernel configures the translation tables so that translations made
 via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
 the virtual address ignored by the translation hardware. This frees up
-this byte for application use, with the following caveats:
+this byte for application use.
 
-	(1) The kernel requires that all user addresses passed to EL1
-	    are tagged with tag 0x00. This means that any syscall
-	    parameters containing user virtual addresses *must* have
-	    their top byte cleared before trapping to the kernel.
 
-	(2) Non-zero tags are not preserved when delivering signals.
-	    This means that signal handlers in applications making use
-	    of tags cannot rely on the tag information for user virtual
-	    addresses being maintained for fields inside siginfo_t.
-	    One exception to this rule is for signals raised in response
-	    to watchpoint debug exceptions, where the tag information
-	    will be preserved.
+Passing tagged addresses to the kernel
+--------------------------------------
 
-	(3) Special care should be taken when using tagged pointers,
-	    since it is likely that C compilers will not hazard two
-	    virtual addresses differing only in the upper byte.
+All interpretation of userspace memory addresses by the kernel assumes
+an address tag of 0x00.
+
+This includes, but is not limited to, addresses found in:
+
+ - pointer arguments to system calls, including pointers in structures
+   passed to system calls,
+
+ - the stack pointer (sp), e.g. when interpreting it to deliver a
+   signal,
+
+ - the frame pointer (x29) and frame records, e.g. when interpreting
+   them to generate a backtrace or call graph.
+
+Using non-zero address tags in any of these locations may result in an
+error code being returned, a (fatal) signal being raised, or other modes
+of failure.
+
+For these reasons, passing non-zero address tags to the kernel via
+system calls is forbidden, and using a non-zero address tag for sp is
+strongly discouraged.
+
+Programs maintaining a frame pointer and frame records that use non-zero
+address tags may suffer impaired or inaccurate debug and profiling
+visibility.
+
+
+Preserving tags
+---------------
+
+Non-zero tags are not preserved when delivering signals. This means that
+signal handlers in applications making use of tags cannot rely on the
+tag information for user virtual addresses being maintained for fields
+inside siginfo_t. One exception to this rule is for signals raised in
+response to watchpoint debug exceptions, where the tag information will
+be preserved.
 
 The architecture prevents the use of a tagged PC, so the upper byte will
 be set to a sign-extension of bit 55 on exception return.
+
+
+Other considerations
+--------------------
+
+Special care should be taken when using tagged pointers, since it is
+likely that C compilers will not hazard two virtual addresses differing
+only in the upper byte.
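As a hedged illustration of the rule described above (not part of this patch), a userspace
program that stores metadata in the top byte of a pointer must strip the tag before the
address reaches the kernel. The helper names below are hypothetical and assume an AArch64
userspace:

	#include <stdint.h>
	#include <unistd.h>

	/* Pack a tag into bits 63:56 of a pointer (illustrative only). */
	static inline void *tag_ptr(void *p, uint8_t tag)
	{
		return (void *)(((uintptr_t)p & ~(0xffULL << 56)) |
				((uintptr_t)tag << 56));
	}

	/* Clear bits 63:56 so the kernel sees address tag 0x00. */
	static inline void *untag_ptr(void *p)
	{
		return (void *)((uintptr_t)p & ~(0xffULL << 56));
	}

	void demo(int fd, char *buf, uint8_t tag)
	{
		char *tagged = tag_ptr(buf, tag);

		tagged[0] = 'x';                 /* hardware ignores the tag */
		write(fd, untag_ptr(tagged), 1); /* syscalls expect tag 0x00 */
	}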
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index c766df8..b0eed20 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -297,6 +297,10 @@
 - qcom,sde-downscaling-prefill-lines:	A u32 value indicates the latency of downscaling in lines.
 - qcom,sde-max-per-pipe-bw-kbps:	Array of u32 value indicates the max per pipe bandwidth in Kbps.
 - qcom,sde-amortizable-threshold:	This value indicates the min for traffic shaping in lines.
+- qcom,sde-vbif-qos-rt-remap:	This array is used to program vbif qos remapper register
+				priority for realtime clients.
+- qcom,sde-vbif-qos-nrt-remap:	This array is used to program vbif qos remapper register
+				priority for non-realtime clients.
 
 Bus Scaling Subnodes:
 - qcom,sde-reg-bus:		Property to provide Bus scaling for register access for
@@ -500,6 +504,9 @@
         2400000 2400000 2400000 2400000>;
     qcom,sde-amortizable-threshold = <11>;
 
+    qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
+    qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+
     qcom,sde-sspp-vig-blocks {
         qcom,sde-vig-csc-off = <0x320>;
         qcom,sde-vig-qseed-off = <0x200>;
diff --git a/Documentation/devicetree/bindings/nfc/nq-nci.txt b/Documentation/devicetree/bindings/nfc/nq-nci.txt
new file mode 100644
index 0000000..b85e070
--- /dev/null
+++ b/Documentation/devicetree/bindings/nfc/nq-nci.txt
@@ -0,0 +1,49 @@
+Qualcomm Technologies, Inc NQxxxx NFC NCI device
+
+The Near Field Communication (NFC) device is based on the NFC Controller Interface (NCI).
+
+Required properties:
+
+- compatible: "qcom,nq-nci"
+- reg: NCI I2C slave address.
+- qcom,nq-ven: specific GPIO for hardware reset.
+- qcom,nq-irq: specific GPIO for read interrupt.
+- qcom,nq-firm: GPIO for firmware download.
+- qcom,nq-clkreq: GPIO for clock request.
+- interrupt-parent: Should be phandle for the interrupt controller
+                    that services interrupts for this device.
+- interrupts: NFC read interrupt, gpio-clk-req interrupt.
+
+
+Recommended properties:
+
+- interrupt-names: names of interrupts, should include "nfc_irq", used for reference
+
+
+Optional properties:
+
+- pinctrl-names, pinctrl-0, pinctrl-1: references to our pinctrl settings.
+- clocks, clock-names: must contain the NQxxxx's core clock.
+- qcom,nq-esepwr: GPIO to control power to the secure element.
+
+Example:
+
+	nq-nci@2b {
+		compatible = "qcom,nq-nci";
+		reg = <0x2b>;
+		qcom,nq-irq = <&tlmm 29 0x00>;
+		qcom,nq-ven = <&tlmm 30 0x00>;
+		qcom,nq-firm = <&tlmm 93 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		qcom,clk-src = "BBCLK2";
+		interrupt-parent = <&tlmm>;
+		interrupts = <29 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active","nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_disable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+		qcom,clk-gpio = <&pm8916_gpios 2 0>;
+		clocks = <&clock_rpm clk_bb_clk2_pin>;
+		clock-names = "ref_clk";
+	};
diff --git a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
index d7edafc..4a69e03 100644
--- a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
+++ b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
@@ -67,6 +67,7 @@
 - qcom,complete-ramdump: Boolean. If set, complete ramdump i.e. region between start address of
 			first segment to end address of last segment will be collected without
 			leaving any hole in between.
+- qcom,ignore-ssr-failure: Boolean. If set, SSR failures are not considered fatal.
 
 Example:
 	qcom,venus@fdce0000 {
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 53b872c..db86cda 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -308,6 +308,12 @@
 .. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
    :export:
 
+Explicit Fencing Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+   :doc: explicit fencing properties
+
 Existing KMS Properties
 -----------------------
 
diff --git a/Makefile b/Makefile
index b9aba93..df4d437 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 29
+SUBLEVEL = 30
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ffb93f49..4f95577 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1188,8 +1188,10 @@
 	if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
 		return -EFAULT;
 
-	err = 0;
-	err |= put_user(status, ustatus);
+	err = put_user(status, ustatus);
+	if (ret < 0)
+		return err ? err : ret;
+
 	err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
 	err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
 	err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index c51fc65..5a53fcf 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -162,9 +162,10 @@
 			};
 
 			adc0: adc@f8018000 {
+				atmel,adc-vref = <3300>;
+				atmel,adc-channels-used = <0xfe>;
 				pinctrl-0 = <
 					&pinctrl_adc0_adtrg
-					&pinctrl_adc0_ad0
 					&pinctrl_adc0_ad1
 					&pinctrl_adc0_ad2
 					&pinctrl_adc0_ad3
@@ -172,8 +173,6 @@
 					&pinctrl_adc0_ad5
 					&pinctrl_adc0_ad6
 					&pinctrl_adc0_ad7
-					&pinctrl_adc0_ad8
-					&pinctrl_adc0_ad9
 					>;
 				status = "okay";
 			};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 5bb8fd5..d71da30 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -12,23 +12,6 @@
 	model = "Freescale i.MX6 SoloX SDB RevB Board";
 };
 
-&cpu0 {
-	operating-points = <
-		/* kHz    uV */
-		996000  1250000
-		792000  1175000
-		396000  1175000
-		198000  1175000
-		>;
-	fsl,soc-operating-points = <
-		/* ARM kHz      SOC uV */
-		996000	1250000
-		792000	1175000
-		396000	1175000
-		198000  1175000
-	>;
-};
-
 &i2c1 {
 	clock-frequency = <100000>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index 4e361a8..b4bfa55 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -569,6 +569,7 @@
 			regulator-name = "+3VS,vdd_pnl";
 			regulator-min-microvolt = <3300000>;
 			regulator-max-microvolt = <3300000>;
+			regulator-boot-on;
 			gpio = <&gpio TEGRA_GPIO(A, 4) GPIO_ACTIVE_HIGH>;
 			enable-active-high;
 		};
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
index 4917c2f..e74ab0f 100644
--- a/arch/arm/include/asm/kvm_coproc.h
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -31,7 +31,8 @@
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 464748b..ed23196 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -18,13 +18,18 @@
 };
 #endif
 
+struct mod_plt_sec {
+	struct elf32_shdr	*plt;
+	int			plt_count;
+};
+
 struct mod_arch_specific {
 #ifdef CONFIG_ARM_UNWIND
 	struct unwind_table *unwind[ARM_SEC_MAX];
 #endif
 #ifdef CONFIG_ARM_MODULE_PLTS
-	struct elf32_shdr   *plt;
-	int		    plt_count;
+	struct mod_plt_sec	core;
+	struct mod_plt_sec	init;
 #endif
 };
 
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 3a5cba9..3d0c2e4 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -31,9 +31,17 @@
 	u32	lit[PLT_ENT_COUNT];
 };
 
+static bool in_init(const struct module *mod, unsigned long loc)
+{
+	return loc - (u32)mod->init_layout.base < mod->init_layout.size;
+}
+
 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 {
-	struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
+	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
+							  &mod->arch.init;
+
+	struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
 	int idx = 0;
 
 	/*
@@ -41,9 +49,9 @@
 	 * relocations are sorted, this will be the last entry we allocated.
 	 * (if one exists).
 	 */
-	if (mod->arch.plt_count > 0) {
-		plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
-		idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
+	if (pltsec->plt_count > 0) {
+		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
+		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;
 
 		if (plt->lit[idx] == val)
 			return (u32)&plt->ldr[idx];
@@ -53,8 +61,8 @@
 			plt++;
 	}
 
-	mod->arch.plt_count++;
-	BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
+	pltsec->plt_count++;
+	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);
 
 	if (!idx)
 		/* Populate a new set of entries */
@@ -129,7 +137,7 @@
 
 /* Count how many PLT entries we may need */
 static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
-			       const Elf32_Rel *rel, int num)
+			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
 {
 	unsigned int ret = 0;
 	const Elf32_Sym *s;
@@ -144,13 +152,17 @@
 		case R_ARM_THM_JUMP24:
 			/*
 			 * We only have to consider branch targets that resolve
-			 * to undefined symbols. This is not simply a heuristic,
-			 * it is a fundamental limitation, since the PLT itself
-			 * is part of the module, and needs to be within range
-			 * as well, so modules can never grow beyond that limit.
+			 * to symbols that are defined in a different section.
+			 * This is not simply a heuristic, it is a fundamental
+			 * limitation, since there is no guaranteed way to emit
+			 * PLT entries sufficiently close to the branch if the
+			 * section size exceeds the range of a branch
+			 * instruction. So ignore relocations against defined
+			 * symbols if they live in the same section as the
+			 * relocation target.
 			 */
 			s = syms + ELF32_R_SYM(rel[i].r_info);
-			if (s->st_shndx != SHN_UNDEF)
+			if (s->st_shndx == dstidx)
 				break;
 
 			/*
@@ -161,7 +173,12 @@
 			 * So we need to support them, but there is no need to
 			 * take them into consideration when trying to optimize
 			 * this code. So let's only check for duplicates when
-			 * the addend is zero.
+			 * the addend is zero. (Note that calls into the core
+			 * module via init PLT entries could involve section
+			 * relative symbol references with non-zero addends, for
+			 * which we may end up emitting duplicates, but the init
+			 * PLT is released along with the rest of the .init
+			 * region as soon as module loading completes.)
 			 */
 			if (!is_zero_addend_relocation(base, rel + i) ||
 			    !duplicate_rel(base, rel, i))
@@ -174,7 +191,8 @@
 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			      char *secstrings, struct module *mod)
 {
-	unsigned long plts = 0;
+	unsigned long core_plts = 0;
+	unsigned long init_plts = 0;
 	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
 	Elf32_Sym *syms = NULL;
 
@@ -184,13 +202,15 @@
 	 */
 	for (s = sechdrs; s < sechdrs_end; ++s) {
 		if (strcmp(".plt", secstrings + s->sh_name) == 0)
-			mod->arch.plt = s;
+			mod->arch.core.plt = s;
+		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
+			mod->arch.init.plt = s;
 		else if (s->sh_type == SHT_SYMTAB)
 			syms = (Elf32_Sym *)s->sh_addr;
 	}
 
-	if (!mod->arch.plt) {
-		pr_err("%s: module PLT section missing\n", mod->name);
+	if (!mod->arch.core.plt || !mod->arch.init.plt) {
+		pr_err("%s: module PLT section(s) missing\n", mod->name);
 		return -ENOEXEC;
 	}
 	if (!syms) {
@@ -213,16 +233,29 @@
 		/* sort by type and symbol index */
 		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
 
-		plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
+		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+			core_plts += count_plts(syms, dstsec->sh_addr, rels,
+						numrels, s->sh_info);
+		else
+			init_plts += count_plts(syms, dstsec->sh_addr, rels,
+						numrels, s->sh_info);
 	}
 
-	mod->arch.plt->sh_type = SHT_NOBITS;
-	mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
-					  sizeof(struct plt_entries));
-	mod->arch.plt_count = 0;
+	mod->arch.core.plt->sh_type = SHT_NOBITS;
+	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
+					       sizeof(struct plt_entries));
+	mod->arch.core.plt_count = 0;
 
-	pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
+	mod->arch.init.plt->sh_type = SHT_NOBITS;
+	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
+					       sizeof(struct plt_entries));
+	mod->arch.init.plt_count = 0;
+
+	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
+		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
 	return 0;
 }
diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
index 05881e2..eacb5c6 100644
--- a/arch/arm/kernel/module.lds
+++ b/arch/arm/kernel/module.lds
@@ -1,3 +1,4 @@
 SECTIONS {
 	.plt : { BYTE(0) }
+	.init.plt : { BYTE(0) }
 }
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 3e5e419..c3ed6bd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -93,12 +93,6 @@
 	return 1;
 }
 
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*
@@ -514,12 +508,7 @@
 	return 1;
 }
 
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run:  The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -533,9 +522,38 @@
 	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRm = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	pm_fake(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 			      const struct coproc_reg *table, size_t num)
 {
@@ -546,12 +564,7 @@
 			table[i].reset(vcpu, &table[i]);
 }
 
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run:  The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -565,9 +578,37 @@
 	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	pm_fake(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 /******************************************************************************
  * Userspace API
  *****************************************************************************/
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e40d19..066b6d4 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -83,9 +83,9 @@
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_32,
 	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_64,
 	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
 	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
 	[HSR_EC_HVC]		= handle_hvc,
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 3023bb5..8679405 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 92678b7..624a510 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -48,7 +48,9 @@
 	write_sysreg(HSTR_T(15), HSTR);
 	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
 	val = read_sysreg(HDCR);
-	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+	val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
+	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
+	write_sysreg(val, HDCR);
 }
 
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 8dea616..5049777 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -147,10 +147,10 @@
 
 	@ Configure caches (if implemented)
 	teq     r8, #0
-	stmneia	r12, {r0-r6, lr}	@ v7m_invalidate_l1 touches r0-r6
+	stmneia	sp, {r0-r6, lr}		@ v7m_invalidate_l1 touches r0-r6
 	blne	v7m_invalidate_l1
 	teq     r8, #0			@ re-evalutae condition
-	ldmneia	r12, {r0-r6, lr}
+	ldmneia	sp, {r0-r6, lr}
 
 	@ Configure the System Control Register to ensure 8-byte stack alignment
 	@ Note the STKALIGN bit is either RW or RAO.
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 17839db..509a2ed 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -772,6 +772,7 @@
 			clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
+			reset-names = "reset";
 			bus-width = <0x8>;
 			vmmc-supply = <&ldo19>;
 			pinctrl-names = "default";
@@ -795,6 +796,7 @@
 			clocks = <&sys_ctrl 4>, <&sys_ctrl 3>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
+			reset-names = "reset";
 			vqmmc-supply = <&ldo7>;
 			vmmc-supply = <&ldo10>;
 			bus-width = <0x4>;
@@ -813,6 +815,7 @@
 			clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
+			reset-names = "reset";
 			bus-width = <0x4>;
 			broken-cd;
 			pinctrl-names = "default", "idle";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index ea66a13..b1c91bf 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -1644,7 +1644,7 @@
 			qcom,buswidth = <4>;
 			qcom,agg-ports = <4>;
 			qcom,bus-dev = <&fab_mc_virt>;
-			qcom,bcms = <&bcm_mc0>;
+			qcom,bcms = <&bcm_mc0>, <&bcm_acv>;
 		};
 
 		slv_qhs_mdsp_ms_mpu_cfg:slv-qhs-mdsp-ms-mpu-cfg {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 4c642e3..cb20e0f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -464,7 +464,7 @@
 		cell-index = <0>;
 		label = "cam-cdm-intf";
 		num-hw-cdm = <1>;
-		cdm-client-names = "ife",
+		cdm-client-names = "vfe",
 			"jpeg-dma",
 			"jpeg",
 			"fd";
@@ -493,7 +493,7 @@
 			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
 		clock-rates = <0 0 0 0 0>;
-		cdm-client-names = "vfe";
+		cdm-client-names = "ife";
 		status = "ok";
 	};
 
@@ -761,24 +761,18 @@
 			"cpas_ahb_clk",
 			"camnoc_axi_clk",
 			"icp_apb_clk",
-			"icp_atb_clk",
 			"icp_clk",
-			"icp_clk_src",
-			"icp_cti_clk",
-			"icp_ts_clk";
+			"icp_clk_src";
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
 				<&clock_gcc GCC_CAMERA_AXI_CLK>,
 				<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 				<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 				<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 				<&clock_camcc CAM_CC_ICP_APB_CLK>,
-				<&clock_camcc CAM_CC_ICP_ATB_CLK>,
 				<&clock_camcc CAM_CC_ICP_CLK>,
-				<&clock_camcc CAM_CC_ICP_CLK_SRC>,
-				<&clock_camcc CAM_CC_ICP_CTI_CLK>,
-				<&clock_camcc CAM_CC_ICP_TS_CLK>;
+				<&clock_camcc CAM_CC_ICP_CLK_SRC>;
 
-		clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
+		clock-rates = <0 0 0 80000000 0 0 0 600000000>;
 		fw_name = "CAMERA_ICP.elf";
 		status = "ok";
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 1fdf740..7038d48 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -291,6 +291,24 @@
 
 &qupv3_se3_i2c {
 	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
 };
 
 &qupv3_se10_i2c {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 1b3f2a6..92f8586 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -65,7 +65,7 @@
 
 		qcom,gpu-quirk-hfi-use-reg;
 
-		qcom,idle-timeout = <100000000>; //msecs
+		qcom,idle-timeout = <80>; //msecs
 		qcom,no-nap;
 
 		qcom,highest-bank-bit = <15>;
@@ -237,7 +237,7 @@
 		label = "kgsl-gmu";
 		compatible = "qcom,gpu-gmu";
 
-		reg = <0x506a000 0x26000>, <0xb200000 0x300000>;
+		reg = <0x506a000 0x30000>, <0xb200000 0x300000>;
 		reg-names = "kgsl_gmu_reg", "kgsl_gmu_pdc_reg";
 
 		interrupts = <0 304 0>, <0 305 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 508b645..521fd6b3 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -262,6 +262,24 @@
 
 &qupv3_se3_i2c {
 	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
 };
 
 &qupv3_se10_i2c {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index cfbf3e5..f534891 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -1556,6 +1556,68 @@
 			};
 		};
 
+		nfc {
+			nfc_int_active: nfc_int_active {
+				/* active state */
+				mux {
+					/* GPIO 63 NFC Read Interrupt */
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_int_suspend: nfc_int_suspend {
+				/* sleep state */
+				mux {
+					/* GPIO 63 NFC Read Interrupt */
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_enable_active: nfc_enable_active {
+				/* active state */
+				mux {
+					/* 12: NFC ENABLE 116:ESE Enable */
+					pins = "gpio12", "gpio62", "gpio116";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio12", "gpio62", "gpio116";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_enable_suspend: nfc_enable_suspend {
+				/* sleep state */
+				mux {
+					/* 12: NFC ENABLE 116:ESE Enable */
+					pins = "gpio12", "gpio62", "gpio116";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio12", "gpio62", "gpio116";
+					drive-strength = <2>; /* 2 MA */
+					bias-disable;
+				};
+			};
+		};
+
 		qupv3_se3_spi_pins: qupv3_se3_spi_pins {
 			qupv3_se3_spi_active: qupv3_se3_spi_active {
 				mux {
@@ -2681,6 +2743,14 @@
 };
 
 &pm8998_gpios {
+	gpio@d400 {
+		qcom,mode = <0>;
+		qcom,vin-sel = <1>;
+		qcom,src-sel = <0>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+
 	key_home {
 		key_home_default: key_home_default {
 			pins = "gpio5";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 0aaac6f..3bf1ea4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -60,6 +60,24 @@
 
 &qupv3_se3_i2c {
 	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
 };
 
 &qupv3_se10_i2c {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 21819a9..76c07b7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -137,6 +137,9 @@
 		qcom,sde-vbif-size = <0x1040>;
 		qcom,sde-vbif-id = <0>;
 
+		qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
+		qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+
 		qcom,sde-inline-rotator = <&mdss_rotator 0>;
 
 		qcom,sde-reg-dma-off = <0>;
@@ -261,7 +264,7 @@
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
-		qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
+		qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
 
 		qcom,mdss-default-ot-rd-limit = <32>;
 		qcom,mdss-default-ot-wr-limit = <32>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 7fea651..fc90fef 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -808,7 +808,8 @@
 			< 1881600 >,
 			< 1958400 >,
 			< 2035200 >,
-			< 2092800 >;
+			< 2092800 >,
+			< 2208000 >;
 	};
 
 	cpubw: qcom,cpubw {
@@ -962,7 +963,9 @@
 			< 883200 >,
 			< 960000 >,
 			< 1036800 >,
-			< 1094400 >;
+			< 1094400 >,
+			< 1209600 >,
+			< 1305600 >;
 	};
 
 	l3_cpu4: qcom,l3-cpu4 {
@@ -982,7 +985,9 @@
 			< 883200 >,
 			< 960000 >,
 			< 1036800 >,
-			< 1094400 >;
+			< 1094400 >,
+			< 1209600 >,
+			< 1305600 >;
 	};
 
 	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
@@ -1042,8 +1047,8 @@
 			cpu-to-dev-map-0 =
 				< 1708800  762 >;
 			cpu-to-dev-map-4 =
-				< 2035200  762 >,
-				< 2092800 2597 >;
+				< 1881600  762 >,
+				< 2208000 2597 >;
 		};
 	};
 
@@ -1141,6 +1146,9 @@
 		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
 		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
 
+		l3-dev0 = <&l3_cpu0>;
+		l3-dev4 = <&l3_cpu4>;
+
 		qcom,l3-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
@@ -1165,6 +1173,21 @@
 			<  1036800000 0x40240936 0x00002b2b 0x3 10 >,
 			<  1094400000 0x402c0a39 0x00002e2e 0x3 11 >;
 
+		qcom,l3-speedbin2-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072e 0x00002525 0x2 8 >,
+			<   960000000 0x40240832 0x00002828 0x2 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x3 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x3 11 >,
+			<  1209600000 0x402c0b3f 0x00003232 0x3 12 >,
+			<  1305600000 0x40340c44 0x00003636 0x3 13 >;
+
 		qcom,pwrcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
@@ -1205,6 +1228,27 @@
 			<  1651200000 0x403c1156 0x00004545 0x3 18 >,
 			<  1708800000 0x40441259 0x00004747 0x3 19 >;
 
+		qcom,pwrcl-speedbin2-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   748800000 0x401c0527 0x00002020 0x1 6 >,
+			<   825600000 0x401c062b 0x00002222 0x1 7 >,
+			<   902400000 0x4024072f 0x00002626 0x1 8 >,
+			<   979200000 0x40240833 0x00002929 0x1 9 >,
+			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+			<  1209600000 0x402c0b3f 0x00003232 0x1 12 >,
+			<  1286400000 0x40340c43 0x00003636 0x2 13 >,
+			<  1363200000 0x40340d47 0x00003939 0x2 14 >,
+			<  1440000000 0x40340e4b 0x00003c3c 0x2 15 >,
+			<  1516800000 0x403c0f4f 0x00003f3f 0x2 16 >,
+			<  1593600000 0x403c1053 0x00004242 0x2 17 >,
+			<  1670400000 0x40441157 0x00004646 0x3 18 >,
+			<  1747200000 0x4044125b 0x00004949 0x3 19 >;
+
 		qcom,perfcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
@@ -1255,6 +1299,33 @@
 			<  2035200000 0x404c166a 0x00005555 0x3 23 >,
 			<  2092800000 0x4054176d 0x00005757 0x3 24 >;
 
+		qcom,perfcl-speedbin2-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072e 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x1 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >,
+			<  1267200000 0x40340c42 0x00003535 0x2 13 >,
+			<  1344000000 0x40340d46 0x00003838 0x2 14 >,
+			<  1420800000 0x40340e4a 0x00003b3b 0x2 15 >,
+			<  1497600000 0x403c0f4e 0x00003e3e 0x2 16 >,
+			<  1574400000 0x403c1052 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x2 18 >,
+			<  1728000000 0x4044125a 0x00004848 0x3 19 >,
+			<  1804800000 0x4044135e 0x00004b4b 0x3 20 >,
+			<  1881600000 0x404c1462 0x00004e4e 0x3 21 >,
+			<  1958400000 0x404c1566 0x00005252 0x3 22 >,
+			<  2035200000 0x404c166a 0x00005555 0x3 23 >,
+			<  2112000000 0x4054176e 0x00005858 0x3 24 >,
+			<  2208000000 0x40541873 0x00005c5c 0x3 25 >;
+
 		qcom,l3-min-cpr-vc-bin0 = <7>;
 		qcom,pwrcl-min-cpr-vc-bin0 = <6>;
 		qcom,perfcl-min-cpr-vc-bin0 = <7>;
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 4e0497f..0fe7e43 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -42,25 +42,35 @@
 #define __smp_rmb()	dmb(ishld)
 #define __smp_wmb()	dmb(ishst)
 
-#define __smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
+	union { typeof(*p) __val; char __c[1]; } __u =			\
+		{ .__val = (__force typeof(*p)) (v) }; 			\
 	compiletime_assert_atomic_type(*p);				\
 	switch (sizeof(*p)) {						\
 	case 1:								\
 		asm volatile ("stlrb %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u8 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 2:								\
 		asm volatile ("stlrh %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u16 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 4:								\
 		asm volatile ("stlr %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u32 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 8:								\
 		asm volatile ("stlr %1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u64 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	}								\
 } while (0)
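The rewritten cases above only change how the value is fed to the "r" operand; the
acquire/release pairing that callers rely on is unchanged. As a rough usage sketch
(kernel-context code, not part of this patch, with invented symbol names):

	/* Producer publishes data, then the flag, with release semantics. */
	struct msg { int payload; };
	static struct msg slot;
	static int slot_ready;

	static void producer(int v)
	{
		slot.payload = v;
		smp_store_release(&slot_ready, 1);	/* payload visible before flag */
	}

	/* Consumer observes the flag with acquire semantics before the data. */
	static int consumer(int *out)
	{
		if (!smp_load_acquire(&slot_ready))
			return 0;
		*out = slot.payload;			/* sees the producer's store */
		return 1;
	}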
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 91b26d2..ae852ad 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -46,7 +46,7 @@
 	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
 		__nops(3)						\
 	"	" #nop_lse)						\
-	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)			\
+	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)	\
 	: "r" (x)							\
 	: cl);								\
 									\
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 73fee2c..21934d1 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -98,11 +98,12 @@
  */
 #define __range_ok(addr, size)						\
 ({									\
+	unsigned long __addr = (unsigned long __force)(addr);		\
 	unsigned long flag, roksum;					\
 	__chk_user_ptr(addr);						\
 	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
 		: "=&r" (flag), "=&r" (roksum)				\
-		: "1" (addr), "Ir" (size),				\
+		: "1" (__addr), "Ir" (size),				\
 		  "r" (current_thread_info()->addr_limit)		\
 		: "cc");						\
 	flag;								\
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index bdb35b9..29d2ad8 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -306,7 +306,8 @@
 	_ASM_EXTABLE(0b, 4b)					\
 	_ASM_EXTABLE(1b, 4b)					\
 	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2)	\
-	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT),		\
+	: "r" ((unsigned long)addr), "i" (-EAGAIN),		\
+	  "i" (-EFAULT),					\
 	  "i" (__SWP_LL_SC_LOOPS)				\
 	: "memory");						\
 	uaccess_disable();					\
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index aaf42ae..14c4e3b 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 07238b3..3db3812 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -28,24 +28,32 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-/*
- * Explicitly allow NULL pointers here. Parts of the kernel such
- * as readv/writev use access_ok to validate pointers, but want
- * to allow NULL pointers for various reasons. NULL pointers are
- * safe to allow through because the first page is not mappable on
- * Meta.
- *
- * We also wish to avoid letting user code access the system area
- * and the kernel half of the address space.
- */
-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
-				((addr) > PAGE_OFFSET &&		\
-				 (addr) < LINCORE_BASE))
-
 static inline int __access_ok(unsigned long addr, unsigned long size)
 {
-	return __kernel_ok || !__user_bad(addr, size);
+	/*
+	 * Allow access to the user mapped memory area, but not the system area
+	 * before it. The check extends to the top of the address space when
+	 * kernel access is allowed (there's no real reason to user copy to the
+	 * system area in any case).
+	 */
+	if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
+		   size <= get_fs().seg - addr))
+		return true;
+	/*
+	 * Explicitly allow NULL pointers here. Parts of the kernel such
+	 * as readv/writev use access_ok to validate pointers, but want
+	 * to allow NULL pointers for various reasons. NULL pointers are
+	 * safe to allow through because the first page is not mappable on
+	 * Meta.
+	 */
+	if (!addr)
+		return true;
+	/* Allow access to core code memory area... */
+	if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
+	    size <= LINCORE_CODE_LIMIT + 1 - addr)
+		return true;
+	/* ... but no other areas. */
+	return false;
 }
 
 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),	\
@@ -186,8 +194,13 @@
 extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
 					     long count);
 
-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
-
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+	return __strncpy_from_user(dst, src, count);
+}
 /*
  * Return the size of a string (including the ending 0)
  *
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5a4f2eb..5e844f6 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1368,6 +1368,7 @@
 	select WEAK_ORDERING
 	select WEAK_REORDERING_BEYOND_LLSC
 	select MIPS_PGD_C0_CONTEXT
+	select MIPS_L1_CACHE_SHIFT_6
 	select GPIOLIB
 	help
 		The Loongson 3 processor implements the MIPS64R2 instruction
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b9e3f0a..0012f03 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -70,8 +70,9 @@
  * switch_mm is the entry point called from the architecture independent
  * code in kernel/sched/core.c
  */
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+				      struct mm_struct *next,
+				      struct task_struct *tsk)
 {
 	/* Mark this context has been used on the new CPU */
 	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
@@ -110,6 +111,18 @@
 	switch_mmu_context(prev, next, tsk);
 }
 
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
 /*
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index a5dd493..6ef8f0b 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -724,7 +724,7 @@
  */
 #define MAX_WAIT_FOR_RECOVERY 300
 
-static void eeh_handle_normal_event(struct eeh_pe *pe)
+static bool eeh_handle_normal_event(struct eeh_pe *pe)
 {
 	struct pci_bus *frozen_bus;
 	struct eeh_dev *edev, *tmp;
@@ -736,7 +736,7 @@
 	if (!frozen_bus) {
 		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
 			__func__, pe->phb->global_number, pe->addr);
-		return;
+		return false;
 	}
 
 	eeh_pe_update_time_stamp(pe);
@@ -870,7 +870,7 @@
 	pr_info("EEH: Notify device driver to resume\n");
 	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
 
-	return;
+	return false;
 
 excess_failures:
 	/*
@@ -915,8 +915,12 @@
 			pci_lock_rescan_remove();
 			pci_hp_remove_devices(frozen_bus);
 			pci_unlock_rescan_remove();
+
+			/* The passed PE should no longer be used */
+			return true;
 		}
 	}
+	return false;
 }
 
 static void eeh_handle_special_event(void)
@@ -982,7 +986,14 @@
 		 */
 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
-			eeh_handle_normal_event(pe);
+			/*
+			 * eeh_handle_normal_event() can make the PE stale if it
+			 * determines that the PE cannot possibly be recovered.
+			 * Don't modify the PE state if that's the case.
+			 */
+			if (eeh_handle_normal_event(pe))
+				continue;
+
 			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 		} else {
 			pci_lock_rescan_remove();
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 38a1f96..ca03eb2 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -735,8 +735,14 @@
 	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
+#ifdef CONFIG_RELOCATABLE
+	ld	r15,PACATOC(r13)
+	ld	r14,interrupt_base_book3e@got(r15)
+	ld	r15,__end_interrupts@got(r15)
+#else
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
 	blt+	cr0,1f
@@ -799,8 +805,14 @@
 	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
+#ifdef CONFIG_RELOCATABLE
+	ld	r15,PACATOC(r13)
+	ld	r14,interrupt_base_book3e@got(r15)
+	ld	r15,__end_interrupts@got(r15)
+#else
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
 	blt+	cr0,1f
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 5e7ece0..ea236bf 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -205,6 +205,8 @@
 {
 	int index;
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	/*
 	 * For now just print it to console.
 	 * TODO: log this error event to FSP or nvram.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 49a680d..c716473 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -839,6 +839,25 @@
 	if (!MSR_TM_SUSPENDED(mfmsr()))
 		return;
 
+	/*
+	 * If we are in a transaction and FP is off then we can't have
+	 * used FP inside that transaction. Hence the checkpointed
+	 * state is the same as the live state. We need to copy the
+	 * live state to the checkpointed state so that when the
+	 * transaction is restored, the checkpointed state is correct
+	 * and the aborted transaction sees the correct state. We use
+	 * ckpt_regs.msr here as that's what tm_reclaim will use to
+	 * determine if it's going to write the checkpointed state or
+	 * not. So either this will write the checkpointed registers,
+	 * or reclaim will. Similarly for VMX.
+	 */
+	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
+		memcpy(&thr->ckfp_state, &thr->fp_state,
+		       sizeof(struct thread_fp_state));
+	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
+		memcpy(&thr->ckvr_state, &thr->vr_state,
+		       sizeof(struct thread_vr_state));
+
 	giveup_all(container_of(thr, struct task_struct, thread));
 
 	tm_reclaim(thr, thr->ckpt_regs.msr, cause);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 023a462..43021f8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -302,8 +302,6 @@
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
-	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
 	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 		handled = cur_cpu_spec->machine_check_early(regs);
 	return handled;
@@ -737,6 +735,8 @@
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
 	 * one returns a positive number. However there is existing code
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index 7de7124..fd59680 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -81,7 +81,7 @@
 	gfp_t gfp_mask = GFP_USER;
 	struct page *new_page;
 
-	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+	if (PageCompound(page))
 		return NULL;
 
 	if (PageHighMem(page))
@@ -100,7 +100,7 @@
 	LIST_HEAD(cma_migrate_pages);
 
 	/* Ignore huge pages for now */
-	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+	if (PageCompound(page))
 		return -EBUSY;
 
 	lru_add_drain();
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 423e450..72ae2cd 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -288,7 +288,6 @@
 	if (rc)
 		return rc;
 
-	of_node_put(dn); /* Must decrement the refcount */
 	return 0;
 }
 
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 408b4f4..5982544 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -427,6 +427,20 @@
 }
 
 /*
+ * Initialize final note (needed for /proc/vmcore code)
+ */
+static void *nt_final(void *ptr)
+{
+	Elf64_Nhdr *note;
+
+	note = (Elf64_Nhdr *) ptr;
+	note->n_namesz = 0;
+	note->n_descsz = 0;
+	note->n_type = 0;
+	return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
+}
+
+/*
  * Initialize ELF header (new kernel)
  */
 static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
@@ -513,6 +527,7 @@
 		if (sa->prefix != 0)
 			ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
 	ptr = nt_vmcoreinfo(ptr);
+	ptr = nt_final(ptr);
 	memset(phdr, 0, sizeof(*phdr));
 	phdr->p_type = PT_NOTE;
 	phdr->p_offset = notes_offset;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 49a3073..c438168 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -321,6 +321,7 @@
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lsysc_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -606,6 +607,7 @@
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lio_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -1135,15 +1137,23 @@
 	br	%r14
 
 .Lcleanup_sysc_restore:
+	# check if stpt has been executed
 	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
+	jh	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	cghi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
+	je	1f
 	lg	%r9,24(%r11)		# get saved pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
-0:	lmg	%r8,%r9,__LC_RETURN_PSW
+1:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 .Lcleanup_sysc_restore_insn:
+	.quad	.Lsysc_exit_timer
 	.quad	.Lsysc_done - 4
 
 .Lcleanup_io_tif:
@@ -1151,15 +1161,20 @@
 	br	%r14
 
 .Lcleanup_io_restore:
+	# check if stpt has been executed
 	clg	%r9,BASED(.Lcleanup_io_restore_insn)
-	je	0f
+	jh	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
+	je	1f
 	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
-0:	lmg	%r8,%r9,__LC_RETURN_PSW
+1:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 .Lcleanup_io_restore_insn:
+	.quad	.Lio_exit_timer
 	.quad	.Lio_done - 4
 
 .Lcleanup_idle:
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 48bae81..6f6e789 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -14,7 +14,7 @@
 static char *initrd __initdata = NULL;
 static int load_initrd(char *filename, void *buf, int size);
 
-static int __init read_initrd(void)
+int __init read_initrd(void)
 {
 	void *area;
 	long long size;
@@ -46,8 +46,6 @@
 	return 0;
 }
 
-__uml_postsetup(read_initrd);
-
 static int __init uml_initrd_setup(char *line, int *add)
 {
 	initrd = line;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index e8175a8..26b47de 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -336,11 +336,17 @@
 	return start_uml();
 }
 
+int __init __weak read_initrd(void)
+{
+	return 0;
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	stack_protections((unsigned long) &init_thread_info);
 	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
 	mem_total_pages(physmem_size, iomem_size, highmem);
+	read_initrd();
 
 	paging_init();
 	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index faf3687..a300aa1 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -315,10 +315,10 @@
 #define __get_user_asm_u64(x, ptr, retval, errret)			\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
-	asm volatile(ASM_STAC "\n"					\
+	asm volatile("\n"					\
 		     "1:	movl %2,%%eax\n"			\
 		     "2:	movl %3,%%edx\n"			\
-		     "3: " ASM_CLAC "\n"				\
+		     "3:\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "4:	mov %4,%0\n"				\
 		     "	xorl %%eax,%%eax\n"				\
@@ -327,7 +327,7 @@
 		     ".previous\n"					\
 		     _ASM_EXTABLE(1b, 4b)				\
 		     _ASM_EXTABLE(2b, 4b)				\
-		     : "=r" (retval), "=A"(x)				\
+		     : "=r" (retval), "=&A"(x)				\
 		     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1),	\
 		       "i" (errret), "0" (retval));			\
 })
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 2f2b8c7..6f0ab305 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -101,6 +101,7 @@
  * Boot time FPU feature detection code:
  */
 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
 
 static void __init fpu__init_system_mxcsr(void)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 43c1528..81bba3c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1735,6 +1735,7 @@
 {
 	struct kvm_arch *ka = &kvm->arch;
 	struct pvclock_vcpu_time_info hv_clock;
+	u64 ret;
 
 	spin_lock(&ka->pvclock_gtod_sync_lock);
 	if (!ka->use_master_clock) {
@@ -1746,10 +1747,17 @@
 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
 	spin_unlock(&ka->pvclock_gtod_sync_lock);
 
+	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
+	get_cpu();
+
 	kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
 			   &hv_clock.tsc_shift,
 			   &hv_clock.tsc_to_system_mul);
-	return __pvclock_read_cycles(&hv_clock, rdtsc());
+	ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+
+	put_cpu();
+
+	return ret;
 }
 
 u64 get_kvmclock_ns(struct kvm *kvm)
@@ -3231,11 +3239,14 @@
 	}
 }
 
+#define XSAVE_MXCSR_OFFSET 24
+
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 					struct kvm_xsave *guest_xsave)
 {
 	u64 xstate_bv =
 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
+	u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
 
 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
 		/*
@@ -3243,11 +3254,13 @@
 		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
 		 * with old userspace.
 		 */
-		if (xstate_bv & ~kvm_supported_xcr0())
+		if (xstate_bv & ~kvm_supported_xcr0() ||
+			mxcsr & ~mxcsr_feature_mask)
 			return -EINVAL;
 		load_xsave(vcpu, (u8 *)guest_xsave->region);
 	} else {
-		if (xstate_bv & ~XFEATURE_MASK_FPSSE)
+		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
+			mxcsr & ~mxcsr_feature_mask)
 			return -EINVAL;
 		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
 			guest_xsave->region, sizeof(struct fxregs_state));
@@ -4750,16 +4763,20 @@
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 {
-	/* TODO: String I/O for in kernel device */
-	int r;
+	int r = 0, i;
 
-	if (vcpu->arch.pio.in)
-		r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
-				    vcpu->arch.pio.size, pd);
-	else
-		r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
-				     vcpu->arch.pio.port, vcpu->arch.pio.size,
-				     pd);
+	for (i = 0; i < vcpu->arch.pio.count; i++) {
+		if (vcpu->arch.pio.in)
+			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
+					    vcpu->arch.pio.size, pd);
+		else
+			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
+					     vcpu->arch.pio.port, vcpu->arch.pio.size,
+					     pd);
+		if (r)
+			break;
+		pd += vcpu->arch.pio.size;
+	}
 	return r;
 }
 
@@ -4797,6 +4814,8 @@
 	if (vcpu->arch.pio.count)
 		goto data_avail;
 
+	memset(vcpu->arch.pio_data, 0, size * count);
+
 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
 	if (ret) {
 data_avail:
diff --git a/drivers/Makefile b/drivers/Makefile
index 990f63c..d0abb5a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -73,6 +73,7 @@
 obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
+obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SCSI)		+= scsi/
 obj-y				+= nvme/
 obj-$(CONFIG_ATA)		+= ata/
@@ -131,7 +132,6 @@
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
 obj-$(CONFIG_SGI_SN)		+= sn/
 obj-y				+= firmware/
-obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SUPERH)		+= sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 obj-y				+= clocksource/
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4256d9b..b0beb52 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -206,6 +206,7 @@
 	buf->data = dbuf;
 	buf->allocated_size = size;
 	init_completion(&buf->completion);
+	INIT_LIST_HEAD(&buf->list);
 #ifdef CONFIG_FW_LOADER_USER_HELPER
 	INIT_LIST_HEAD(&buf->pending_list);
 #endif
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index c4094c4..34ef474 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -859,7 +859,11 @@
 	} else if (!strcmp(str, "auto")) {
 		parport_nr[0] = LP_PARPORT_AUTO;
 	} else if (!strcmp(str, "none")) {
-		parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+		if (parport_ptr < LP_NO)
+			parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+		else
+			printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+			       str);
 	} else if (!strcmp(str, "reset")) {
 		reset = 1;
 	}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 7e4a9d1..6e0cbe0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -340,6 +340,11 @@
 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
 {
 	size_t size = vma->vm_end - vma->vm_start;
+	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+
+	/* It's illegal to wrap around the end of the physical address space. */
+	if (offset + (phys_addr_t)size < offset)
+		return -EINVAL;
 
 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
 		return -EINVAL;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a7c870a..fa0f668 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -111,8 +111,7 @@
 
 	memcpy_fromio(buf, priv->rsp, 6);
 	expected = be32_to_cpup((__be32 *) &buf[2]);
-
-	if (expected > count)
+	if (expected > count || expected < 6)
 		return -EIO;
 
 	memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index e3a9155..c642877 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -49,9 +49,10 @@
  */
 #define TPM_I2C_MAX_BUF_SIZE           32
 #define TPM_I2C_RETRY_COUNT            32
-#define TPM_I2C_BUS_DELAY              1       /* msec */
-#define TPM_I2C_RETRY_DELAY_SHORT      2       /* msec */
-#define TPM_I2C_RETRY_DELAY_LONG       10      /* msec */
+#define TPM_I2C_BUS_DELAY              1000      	/* usec */
+#define TPM_I2C_RETRY_DELAY_SHORT      (2 * 1000)	/* usec */
+#define TPM_I2C_RETRY_DELAY_LONG       (10 * 1000) 	/* usec */
+#define TPM_I2C_DELAY_RANGE            300		/* usec */
 
 #define OF_IS_TPM2 ((void *)1)
 #define I2C_IS_TPM2 1
@@ -123,7 +124,9 @@
 	/* this causes the current command to be aborted */
 	for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
 		status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
-		msleep(TPM_I2C_BUS_DELAY);
+		if (status < 0)
+			usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+				     + TPM_I2C_DELAY_RANGE);
 	}
 	return status;
 }
@@ -160,7 +163,8 @@
 			burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
 			break;
 		}
-		msleep(TPM_I2C_BUS_DELAY);
+		usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+			     + TPM_I2C_DELAY_RANGE);
 	} while (time_before(jiffies, stop));
 
 	return burst_count;
@@ -203,13 +207,17 @@
 			return 0;
 
 		/* use polling to wait for the event */
-		ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+		ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
 		stop = jiffies + timeout;
 		do {
 			if (time_before(jiffies, ten_msec))
-				msleep(TPM_I2C_RETRY_DELAY_SHORT);
+				usleep_range(TPM_I2C_RETRY_DELAY_SHORT,
+					     TPM_I2C_RETRY_DELAY_SHORT
+					     + TPM_I2C_DELAY_RANGE);
 			else
-				msleep(TPM_I2C_RETRY_DELAY_LONG);
+				usleep_range(TPM_I2C_RETRY_DELAY_LONG,
+					     TPM_I2C_RETRY_DELAY_LONG
+					     + TPM_I2C_DELAY_RANGE);
 			status_valid = i2c_nuvoton_check_status(chip, mask,
 								value);
 			if (status_valid)
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index a1ce060..4d24ec3 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -160,8 +160,10 @@
 	u32 value;
 
 	/* wait for burstcount */
-	/* which timeout value, spec has 2 answers (c & d) */
-	stop = jiffies + chip->timeout_d;
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		stop = jiffies + chip->timeout_a;
+	else
+		stop = jiffies + chip->timeout_d;
 	do {
 		rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value);
 		if (rc < 0)
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index dbaad9c..3b97b14 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -48,8 +48,8 @@
 	struct tpm_tis_data priv;
 	struct spi_device *spi_device;
 
-	u8 tx_buf[MAX_SPI_FRAMESIZE + 4];
-	u8 rx_buf[MAX_SPI_FRAMESIZE + 4];
+	u8 tx_buf[4];
+	u8 rx_buf[4];
 };
 
 static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -57,120 +57,96 @@
 	return container_of(data, struct tpm_tis_spi_phy, priv);
 }
 
-static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
-				  u16 len, u8 *result)
+static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+				u8 *buffer, u8 direction)
 {
 	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
-	int ret, i;
+	int ret = 0;
+	int i;
 	struct spi_message m;
-	struct spi_transfer spi_xfer = {
-		.tx_buf = phy->tx_buf,
-		.rx_buf = phy->rx_buf,
-		.len = 4,
-	};
-
-	if (len > MAX_SPI_FRAMESIZE)
-		return -ENOMEM;
-
-	phy->tx_buf[0] = 0x80 | (len - 1);
-	phy->tx_buf[1] = 0xd4;
-	phy->tx_buf[2] = (addr >> 8)  & 0xFF;
-	phy->tx_buf[3] = addr	      & 0xFF;
-
-	spi_xfer.cs_change = 1;
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
+	struct spi_transfer spi_xfer;
+	u8 transfer_len;
 
 	spi_bus_lock(phy->spi_device->master);
-	ret = spi_sync_locked(phy->spi_device, &m);
-	if (ret < 0)
-		goto exit;
 
-	memset(phy->tx_buf, 0, len);
+	while (len) {
+		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
 
-	/* According to TCG PTP specification, if there is no TPM present at
-	 * all, then the design has a weak pull-up on MISO. If a TPM is not
-	 * present, a pull-up on MISO means that the SB controller sees a 1,
-	 * and will latch in 0xFF on the read.
-	 */
-	for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
-		spi_xfer.len = 1;
+		phy->tx_buf[0] = direction | (transfer_len - 1);
+		phy->tx_buf[1] = 0xd4;
+		phy->tx_buf[2] = addr >> 8;
+		phy->tx_buf[3] = addr;
+
+		memset(&spi_xfer, 0, sizeof(spi_xfer));
+		spi_xfer.tx_buf = phy->tx_buf;
+		spi_xfer.rx_buf = phy->rx_buf;
+		spi_xfer.len = 4;
+		spi_xfer.cs_change = 1;
+
 		spi_message_init(&m);
 		spi_message_add_tail(&spi_xfer, &m);
 		ret = spi_sync_locked(phy->spi_device, &m);
 		if (ret < 0)
 			goto exit;
+
+		if ((phy->rx_buf[3] & 0x01) == 0) {
+			/* handle SPI wait states */
+			phy->tx_buf[0] = 0;
+
+			for (i = 0; i < TPM_RETRY; i++) {
+				spi_xfer.len = 1;
+				spi_message_init(&m);
+				spi_message_add_tail(&spi_xfer, &m);
+				ret = spi_sync_locked(phy->spi_device, &m);
+				if (ret < 0)
+					goto exit;
+				if (phy->rx_buf[0] & 0x01)
+					break;
+			}
+
+			if (i == TPM_RETRY) {
+				ret = -ETIMEDOUT;
+				goto exit;
+			}
+		}
+
+		spi_xfer.cs_change = 0;
+		spi_xfer.len = transfer_len;
+		spi_xfer.delay_usecs = 5;
+
+		if (direction) {
+			spi_xfer.tx_buf = NULL;
+			spi_xfer.rx_buf = buffer;
+		} else {
+			spi_xfer.tx_buf = buffer;
+			spi_xfer.rx_buf = NULL;
+		}
+
+		spi_message_init(&m);
+		spi_message_add_tail(&spi_xfer, &m);
+		ret = spi_sync_locked(phy->spi_device, &m);
+		if (ret < 0)
+			goto exit;
+
+		len -= transfer_len;
+		buffer += transfer_len;
 	}
 
-	spi_xfer.cs_change = 0;
-	spi_xfer.len = len;
-	spi_xfer.rx_buf = result;
-
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
-	ret = spi_sync_locked(phy->spi_device, &m);
-
 exit:
 	spi_bus_unlock(phy->spi_device->master);
 	return ret;
 }
 
+static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
+				  u16 len, u8 *result)
+{
+	return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+}
+
 static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
 				   u16 len, u8 *value)
 {
-	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
-	int ret, i;
-	struct spi_message m;
-	struct spi_transfer spi_xfer = {
-		.tx_buf = phy->tx_buf,
-		.rx_buf = phy->rx_buf,
-		.len = 4,
-	};
-
-	if (len > MAX_SPI_FRAMESIZE)
-		return -ENOMEM;
-
-	phy->tx_buf[0] = len - 1;
-	phy->tx_buf[1] = 0xd4;
-	phy->tx_buf[2] = (addr >> 8)  & 0xFF;
-	phy->tx_buf[3] = addr         & 0xFF;
-
-	spi_xfer.cs_change = 1;
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
-
-	spi_bus_lock(phy->spi_device->master);
-	ret = spi_sync_locked(phy->spi_device, &m);
-	if (ret < 0)
-		goto exit;
-
-	memset(phy->tx_buf, 0, len);
-
-	/* According to TCG PTP specification, if there is no TPM present at
-	 * all, then the design has a weak pull-up on MISO. If a TPM is not
-	 * present, a pull-up on MISO means that the SB controller sees a 1,
-	 * and will latch in 0xFF on the read.
-	 */
-	for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
-		spi_xfer.len = 1;
-		spi_message_init(&m);
-		spi_message_add_tail(&spi_xfer, &m);
-		ret = spi_sync_locked(phy->spi_device, &m);
-		if (ret < 0)
-			goto exit;
-	}
-
-	spi_xfer.len = len;
-	spi_xfer.tx_buf = value;
-	spi_xfer.cs_change = 0;
-	spi_xfer.tx_buf = value;
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
-	ret = spi_sync_locked(phy->spi_device, &m);
-
-exit:
-	spi_bus_unlock(phy->spi_device->master);
-	return ret;
+	return tpm_tis_spi_transfer(data, addr, len, value, 0);
 }
 
 static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
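The rework above folds the duplicated read and write paths into a single tpm_tis_spi_transfer() helper that chunks transfers to MAX_SPI_FRAMESIZE, frames each chunk with a 4-byte TIS-over-SPI header, and then polls single bytes until the flow-control (wait state) bit clears or TPM_RETRY attempts are exhausted. A minimal illustration of just the header framing, as a hypothetical helper that is not part of the patch:

	#include <linux/types.h>

	/* Hypothetical helper, for illustration only. */
	static void tpm_tis_spi_build_header(u8 *hdr, u32 addr, u16 len, bool do_read)
	{
		/* Bit 7: 1 = read, 0 = write; low bits: transfer length - 1. */
		hdr[0] = (do_read ? 0x80 : 0x00) | (len - 1);
		/* 0xd4 is the fixed high address byte of the TIS register space. */
		hdr[1] = 0xd4;
		/* 16-bit register offset, most significant byte first. */
		hdr[2] = addr >> 8;
		hdr[3] = addr;
	}
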
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 4efecef..3a0677f 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -1737,6 +1737,8 @@
 	struct device *cpu_dev;
 	struct clk_osm *c, *parent;
 	struct clk_hw *hw_parent;
+	struct device_node *l3_node_0, *l3_node_4;
+	struct platform_device *l3_dev_0, *l3_dev_4;
 
 	for_each_possible_cpu(cpu) {
 		c = logical_cpu_to_clk(cpu);
@@ -1754,7 +1756,35 @@
 					dev_name(cpu_dev));
 	}
 
-	/*TODO: Figure out which device to tag the L3 table to */
+	l3_node_0 = of_parse_phandle(pdev->dev.of_node, "l3-dev0", 0);
+	if (!l3_node_0) {
+		pr_err("can't find the L3 cluster 0 dt node\n");
+		return;
+	}
+
+	l3_dev_0 = of_find_device_by_node(l3_node_0);
+	if (!l3_dev_0) {
+		pr_err("can't find the L3 cluster 0 dt device\n");
+		return;
+	}
+
+	if (add_opp(&l3_clk, &l3_dev_0->dev))
+		pr_err("Failed to add OPP levels for L3 cluster 0\n");
+
+	l3_node_4 = of_parse_phandle(pdev->dev.of_node, "l3-dev4", 0);
+	if (!l3_node_4) {
+		pr_err("can't find the L3 cluster 1 dt node\n");
+		return;
+	}
+
+	l3_dev_4 = of_find_device_by_node(l3_node_4);
+	if (!l3_dev_4) {
+		pr_err("can't find the L3 cluster 1 dt device\n");
+		return;
+	}
+
+	if (add_opp(&l3_clk, &l3_dev_4->dev))
+		pr_err("Failed to add OPP levels for L3 cluster 1\n");
 }
 
 static u64 clk_osm_get_cpu_cycle_counter(int cpu)
diff --git a/drivers/devfreq/governor_memlat.h b/drivers/devfreq/governor_memlat.h
index a0e52a0..8c533ee 100644
--- a/drivers/devfreq/governor_memlat.h
+++ b/drivers/devfreq/governor_memlat.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _GOVERNOR_BW_HWMON_H
-#define _GOVERNOR_BW_HWMON_H
+#ifndef _GOVERNOR_MEMLAT_H
+#define _GOVERNOR_MEMLAT_H
 
 #include <linux/kernel.h>
 #include <linux/devfreq.h>
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 2453e07..094548b 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -68,6 +68,8 @@
 	struct fence_cb *cur, *tmp;
 	int ret = 0;
 
+	lockdep_assert_held(fence->lock);
+
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
@@ -159,9 +161,6 @@
 	if (WARN_ON(timeout < 0))
 		return -EINVAL;
 
-	if (timeout == 0)
-		return fence_is_signaled(fence);
-
 	trace_fence_wait_start(fence);
 	ret = fence->ops->wait(fence, intr, timeout);
 	trace_fence_wait_end(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 723d8af..82f35a4 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -280,18 +280,24 @@
 				      unsigned *pshared_count,
 				      struct fence ***pshared)
 {
-	unsigned shared_count = 0;
-	unsigned retry = 1;
-	struct fence **shared = NULL, *fence_excl = NULL;
-	int ret = 0;
+	struct fence **shared = NULL;
+	struct fence *fence_excl;
+	unsigned int shared_count;
+	int ret = 1;
 
-	while (retry) {
+	do {
 		struct reservation_object_list *fobj;
 		unsigned seq;
+		unsigned int i;
 
-		seq = read_seqcount_begin(&obj->seq);
+		shared_count = i = 0;
 
 		rcu_read_lock();
+		seq = read_seqcount_begin(&obj->seq);
+
+		fence_excl = rcu_dereference(obj->fence_excl);
+		if (fence_excl && !fence_get_rcu(fence_excl))
+			goto unlock;
 
 		fobj = rcu_dereference(obj->fence);
 		if (fobj) {
@@ -309,52 +315,37 @@
 				}
 
 				ret = -ENOMEM;
-				shared_count = 0;
 				break;
 			}
 			shared = nshared;
-			memcpy(shared, fobj->shared, sz);
 			shared_count = fobj->shared_count;
-		} else
-			shared_count = 0;
-		fence_excl = rcu_dereference(obj->fence_excl);
-
-		retry = read_seqcount_retry(&obj->seq, seq);
-		if (retry)
-			goto unlock;
-
-		if (!fence_excl || fence_get_rcu(fence_excl)) {
-			unsigned i;
 
 			for (i = 0; i < shared_count; ++i) {
-				if (fence_get_rcu(shared[i]))
-					continue;
-
-				/* uh oh, refcount failed, abort and retry */
-				while (i--)
-					fence_put(shared[i]);
-
-				if (fence_excl) {
-					fence_put(fence_excl);
-					fence_excl = NULL;
-				}
-
-				retry = 1;
-				break;
+				shared[i] = rcu_dereference(fobj->shared[i]);
+				if (!fence_get_rcu(shared[i]))
+					break;
 			}
-		} else
-			retry = 1;
+		}
 
+		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+			while (i--)
+				fence_put(shared[i]);
+			fence_put(fence_excl);
+			goto unlock;
+		}
+
+		ret = 0;
 unlock:
 		rcu_read_unlock();
-	}
-	*pshared_count = shared_count;
-	if (shared_count)
-		*pshared = shared;
-	else {
-		*pshared = NULL;
+	} while (ret);
+
+	if (!shared_count) {
 		kfree(shared);
+		shared = NULL;
 	}
+
+	*pshared_count = shared_count;
+	*pshared = shared;
 	*pfence_excl = fence_excl;
 
 	return ret;
@@ -379,10 +370,7 @@
 {
 	struct fence *fence;
 	unsigned seq, shared_count, i = 0;
-	long ret = timeout;
-
-	if (!timeout)
-		return reservation_object_test_signaled_rcu(obj, wait_all);
+	long ret = timeout ? timeout : 1;
 
 retry:
 	fence = NULL;
@@ -397,9 +385,6 @@
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		for (i = 0; i < shared_count; ++i) {
 			struct fence *lfence = rcu_dereference(fobj->shared[i]);
 
@@ -422,9 +407,6 @@
 	if (!shared_count) {
 		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		if (fence_excl &&
 		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
 			if (!fence_get_rcu(fence_excl))
@@ -439,6 +421,11 @@
 
 	rcu_read_unlock();
 	if (fence) {
+		if (read_seqcount_retry(&obj->seq, seq)) {
+			fence_put(fence);
+			goto retry;
+		}
+
 		ret = fence_wait_timeout(fence, intr, ret);
 		fence_put(fence);
 		if (ret > 0 && wait_all && (i + 1 < shared_count))
@@ -484,12 +471,13 @@
 					  bool test_all)
 {
 	unsigned seq, shared_count;
-	int ret = true;
+	int ret;
 
+	rcu_read_lock();
 retry:
+	ret = true;
 	shared_count = 0;
 	seq = read_seqcount_begin(&obj->seq);
-	rcu_read_lock();
 
 	if (test_all) {
 		unsigned i;
@@ -500,46 +488,35 @@
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		for (i = 0; i < shared_count; ++i) {
 			struct fence *fence = rcu_dereference(fobj->shared[i]);
 
 			ret = reservation_object_test_signaled_single(fence);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
 			else if (!ret)
 				break;
 		}
 
-		/*
-		 * There could be a read_seqcount_retry here, but nothing cares
-		 * about whether it's the old or newer fence pointers that are
-		 * signaled. That race could still have happened after checking
-		 * read_seqcount_retry. If you care, use ww_mutex_lock.
-		 */
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto retry;
 	}
 
 	if (!shared_count) {
 		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		if (fence_excl) {
 			ret = reservation_object_test_signaled_single(
 								fence_excl);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
+
+			if (read_seqcount_retry(&obj->seq, seq))
+				goto retry;
 		}
 	}
 
 	rcu_read_unlock();
 	return ret;
-
-unlock_retry:
-	rcu_read_unlock();
-	goto retry;
 }
 EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
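The reservation.c changes above restructure the lockless readers so that references are taken inside the RCU/seqcount window and the result is only trusted once read_seqcount_retry() confirms no writer raced in; on a race the references are dropped and the whole read restarts. A minimal sketch of that read-side pattern, using a hypothetical demo_obj that carries only an exclusive fence (nothing below is defined by the patch):

	#include <linux/fence.h>
	#include <linux/rcupdate.h>
	#include <linux/seqlock.h>

	/* Hypothetical object, for illustration only. */
	struct demo_obj {
		seqcount_t seq;
		struct fence __rcu *fence_excl;
	};

	static struct fence *demo_get_excl_rcu(struct demo_obj *obj)
	{
		struct fence *fence;
		unsigned int seq;

		rcu_read_lock();
	retry:
		seq = read_seqcount_begin(&obj->seq);
		fence = rcu_dereference(obj->fence_excl);
		if (fence && !fence_get_rcu(fence))
			goto retry;		/* refcount already hit zero */
		if (read_seqcount_retry(&obj->seq, seq)) {
			fence_put(fence);	/* writer raced us: drop and restart */
			goto retry;
		}
		rcu_read_unlock();

		return fence;
	}
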
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 454d3b3..0cb8d9d 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -324,8 +324,8 @@
 	}
 
 	sync_file = sync_file_create(&pt->base);
+	fence_put(&pt->base);
 	if (!sync_file) {
-		fence_put(&pt->base);
 		err = -ENOMEM;
 		goto err;
 	}
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 8a9cf92..5a9166a 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -67,9 +67,10 @@
  * sync_file_create() - creates a sync file
  * @fence:	fence to add to the sync_fence
  *
- * Creates a sync_file containg @fence. Once this is called, the sync_file
- * takes ownership of @fence. The sync_file can be released with
- * fput(sync_file->file). Returns the sync_file or NULL in case of error.
+ * Creates a sync_file containing @fence. This function acquires an
+ * additional reference to @fence for the newly-created &sync_file, if it
+ * succeeds. The sync_file can be released with fput(sync_file->file).
+ * Returns the sync_file or NULL in case of error.
  */
 struct sync_file *sync_file_create(struct fence *fence)
 {
@@ -79,7 +80,7 @@
 	if (!sync_file)
 		return NULL;
 
-	sync_file->fence = fence;
+	sync_file->fence = fence_get(fence);
 
 	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
 		 fence->ops->get_driver_name(fence),
@@ -90,13 +91,6 @@
 }
 EXPORT_SYMBOL(sync_file_create);
 
-/**
- * sync_file_fdget() - get a sync_file from an fd
- * @fd:		fd referencing a fence
- *
- * Ensures @fd references a valid sync_file, increments the refcount of the
- * backing file. Returns the sync_file or NULL in case of error.
- */
 static struct sync_file *sync_file_fdget(int fd)
 {
 	struct file *file = fget(fd);
@@ -311,10 +305,9 @@
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!poll_does_not_wait(wait) &&
-	    !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
 		if (fence_add_callback(sync_file->fence, &sync_file->cb,
-				       fence_check_cb_func) < 0)
+					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
 	}
 
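With sync_file_create() now taking its own reference on @fence, a caller keeps ownership of the reference it passed in and must drop it itself, which is exactly what the sw_sync change above does. A sketch of the resulting export pattern, as a hypothetical helper for a caller that is done with its own reference (names and fd handling are illustrative, not part of the patch):

	#include <linux/errno.h>
	#include <linux/fcntl.h>
	#include <linux/fence.h>
	#include <linux/file.h>
	#include <linux/sync_file.h>

	/* Hypothetical helper, for illustration only. */
	static int demo_fence_to_fd(struct fence *fence)
	{
		struct sync_file *sync_file;
		int fd;

		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0)
			return fd;

		/* sync_file_create() grabs its own reference ... */
		sync_file = sync_file_create(fence);
		/* ... so this caller drops the one it was holding. */
		fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		return fd;
	}
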
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index b98ede7..6f9c9ac 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -208,9 +208,11 @@
  * OMAP's debounce time is in 31us steps
  *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
  * so we need to convert and round up to the closest unit.
+ *
+ * Return: 0 on success, negative error otherwise.
  */
-static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
-				    unsigned debounce)
+static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
+				   unsigned debounce)
 {
 	void __iomem		*reg;
 	u32			val;
@@ -218,11 +220,12 @@
 	bool			enable = !!debounce;
 
 	if (!bank->dbck_flag)
-		return;
+		return -ENOTSUPP;
 
 	if (enable) {
 		debounce = DIV_ROUND_UP(debounce, 31) - 1;
-		debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
+		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
+			return -EINVAL;
 	}
 
 	l = BIT(offset);
@@ -255,6 +258,8 @@
 		bank->context.debounce = debounce;
 		bank->context.debounce_en = val;
 	}
+
+	return 0;
 }
 
 /**
@@ -964,14 +969,20 @@
 {
 	struct gpio_bank *bank;
 	unsigned long flags;
+	int ret;
 
 	bank = gpiochip_get_data(chip);
 
 	raw_spin_lock_irqsave(&bank->lock, flags);
-	omap2_set_gpio_debounce(bank, offset, debounce);
+	ret = omap2_set_gpio_debounce(bank, offset, debounce);
 	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
-	return 0;
+	if (ret)
+		dev_info(chip->parent,
+			 "Could not set line %u debounce to %u microseconds (%d)",
+			 offset, debounce, ret);
+
+	return ret;
 }
 
 static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
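The debounce path above now reports failure instead of silently truncating: the requested time is converted with DIV_ROUND_UP(debounce, 31) - 1, and since GPIO_DEBOUNCINGTIME.DEBOUNCETIME is an 8-bit field the hardware tops out at (255 + 1) * 31 = 7936 microseconds. A small worked sketch of that bounds check, as a hypothetical helper where 0xff stands in for OMAP4_GPIO_DEBOUNCINGTIME_MASK:

	#include <linux/errno.h>
	#include <linux/kernel.h>

	/* Hypothetical helper, for illustration only. */
	static int demo_debounce_to_steps(unsigned int usecs)
	{
		unsigned int steps;

		if (!usecs)
			return 0;	/* 0 disables debounce, no conversion needed */

		steps = DIV_ROUND_UP(usecs, 31) - 1;

		/* e.g. 7936 us -> 255 (fits), 7937 us -> 256 (rejected) */
		if (steps > 0xff)
			return -EINVAL;

		return steps;
	}
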
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 6898aa0..2f936a7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -13,6 +13,7 @@
 	select I2C
 	select I2C_ALGOBIT
 	select DMA_SHARED_BUFFER
+	select SYNC_FILE
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 882404c..42448c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1173,23 +1173,10 @@
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1297,14 +1284,14 @@
 {
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce10_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1319,7 +1306,7 @@
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1358,7 +1345,7 @@
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 64a1df6..904dabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1140,23 +1140,10 @@
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1264,14 +1251,14 @@
 {
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce10_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1286,7 +1273,7 @@
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1325,7 +1312,7 @@
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index fde6ee1..6d02bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -932,23 +932,10 @@
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1057,18 +1044,18 @@
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce6_wm_params wm_low, wm_high;
 	u32 dram_channels;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 priority_a_mark = 0, priority_b_mark = 0;
 	u32 priority_a_cnt = PRIORITY_OFF;
 	u32 priority_b_cnt = PRIORITY_OFF;
-	u32 tmp, arb_control3;
+	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
 	fixed20_12 a, b, c;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 
@@ -1087,7 +1074,7 @@
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1114,7 +1101,7 @@
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1175,6 +1162,8 @@
 		c.full = dfixed_div(c, a);
 		priority_b_mark = dfixed_trunc(c);
 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1204,6 +1193,9 @@
 	/* save values for DPM */
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /* watermark setup */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 7d9ffde..b1fb601 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1094,23 +1094,10 @@
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1218,14 +1205,14 @@
 {
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce8_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1240,7 +1227,7 @@
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1279,7 +1266,7 @@
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 4e19bde..99011621 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>
+#include <linux/sync_file.h>
 
 #include "drm_crtc_internal.h"
 
@@ -292,6 +293,23 @@
 }
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
+static void set_out_fence_for_crtc(struct drm_atomic_state *state,
+				   struct drm_crtc *crtc, s32 __user *fence_ptr)
+{
+	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
+}
+
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+					  struct drm_crtc *crtc)
+{
+	s32 __user *fence_ptr;
+
+	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
+	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
+
+	return fence_ptr;
+}
+
 /**
  * drm_atomic_set_mode_for_crtc - set mode for CRTC
  * @state: the CRTC whose incoming state to update
@@ -496,6 +514,16 @@
 					&replaced);
 		state->color_mgmt_changed |= replaced;
 		return ret;
+	} else if (property == config->prop_out_fence_ptr) {
+		s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+		if (!fence_ptr)
+			return 0;
+
+		if (put_user(-1, fence_ptr))
+			return -EFAULT;
+
+		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
 	} else if (crtc->funcs->atomic_set_property)
 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
 	else
@@ -538,6 +566,8 @@
 		*val = (state->ctm) ? state->ctm->base.id : 0;
 	else if (property == config->gamma_lut_property)
 		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
+	else if (property == config->prop_out_fence_ptr)
+		*val = 0;
 	else if (crtc->funcs->atomic_get_property)
 		return crtc->funcs->atomic_get_property(crtc, state, property, val);
 	else
@@ -693,6 +723,17 @@
 		drm_atomic_set_fb_for_plane(state, fb);
 		if (fb)
 			drm_framebuffer_unreference(fb);
+	} else if (property == config->prop_in_fence_fd) {
+		if (state->fence)
+			return -EINVAL;
+
+		if (U642I64(val) == -1)
+			return 0;
+
+		state->fence = sync_file_get_fence(val);
+		if (!state->fence)
+			return -EINVAL;
+
 	} else if (property == config->prop_crtc_id) {
 		struct drm_crtc *crtc = drm_crtc_find(dev, val);
 		return drm_atomic_set_crtc_for_plane(state, crtc);
@@ -752,6 +793,8 @@
 
 	if (property == config->prop_fb_id) {
 		*val = (state->fb) ? state->fb->base.id : 0;
+	} else if (property == config->prop_in_fence_fd) {
+		*val = -1;
 	} else if (property == config->prop_crtc_id) {
 		*val = (state->crtc) ? state->crtc->base.id : 0;
 	} else if (property == config->prop_crtc_x) {
@@ -1152,6 +1195,36 @@
 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
 
 /**
+ * drm_atomic_set_fence_for_plane - set fence for plane
+ * @plane_state: atomic state object for the plane
+ * @fence: fence to use for the plane
+ *
+ * Helper to set up the plane_state fence in case it is not set yet.
+ * By using this, drivers don't need to worry whether the user chose
+ * implicit or explicit fencing.
+ *
+ * This function will not set the fence to the state if it was already
+ * set via the explicit fencing interfaces on the atomic ioctl. In that
+ * case it simply drops the reference to the fence, as we are not
+ * storing it anywhere.
+ *
+ * Otherwise, if plane_state->fence is not set, this function just sets
+ * it to the received implicit fence.
+ */
+void
+drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+			       struct fence *fence)
+{
+	if (plane_state->fence) {
+		fence_put(fence);
+		return;
+	}
+
+	plane_state->fence = fence;
+}
+EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
+
+/**
  * drm_atomic_set_crtc_for_connector - set crtc for connector
  * @conn_state: atomic state object for the connector
  * @crtc: crtc to use for the connector
@@ -1467,11 +1540,9 @@
  */
 
 static struct drm_pending_vblank_event *create_vblank_event(
-		struct drm_device *dev, struct drm_file *file_priv,
-		struct fence *fence, uint64_t user_data)
+		struct drm_device *dev, uint64_t user_data)
 {
 	struct drm_pending_vblank_event *e = NULL;
-	int ret;
 
 	e = kzalloc(sizeof *e, GFP_KERNEL);
 	if (!e)
@@ -1481,17 +1552,6 @@
 	e->event.base.length = sizeof(e->event);
 	e->event.user_data = user_data;
 
-	if (file_priv) {
-		ret = drm_event_reserve_init(dev, file_priv, &e->base,
-					     &e->event.base);
-		if (ret) {
-			kfree(e);
-			return NULL;
-		}
-	}
-
-	e->base.fence = fence;
-
 	return e;
 }
 
@@ -1596,6 +1656,206 @@
 }
 EXPORT_SYMBOL(drm_atomic_clean_old_fb);
 
+/**
+ * DOC: explicit fencing properties
+ *
+ * Explicit fencing allows userspace to control the buffer synchronization
+ * between devices. A fence or a group of fences is transferred to/from
+ * userspace using Sync File fds, and there are two DRM properties for that:
+ * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
+ * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
+ *
+ * By contrast, with implicit fencing the kernel keeps track of any
+ * ongoing rendering, and automatically ensures that the atomic update waits
+ * for any pending rendering to complete. For shared buffers represented with
+ * a struct &dma_buf this is tracked in &reservation_object structures.
+ * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
+ * whereas explicit fencing is what Android wants.
+ *
+ * "IN_FENCE_FD”:
+ *	Use this property to pass a fence that DRM should wait on before
+ *	proceeding with the Atomic Commit request and show the framebuffer for
+ *	the plane on the screen. The fence can be either a normal fence or a
+ *	merged one, the sync_file framework will handle both cases and use a
+ *	fence_array if a merged fence is received. Passing -1 here means no
+ *	fences to wait on.
+ *
+ *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
+ *	it will only check if the Sync File is a valid one.
+ *
+ *	On the driver side the fence is stored on the @fence parameter of
+ *	struct &drm_plane_state. Drivers which also support implicit fencing
+ *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
+ *	to make sure there's consistent behaviour between drivers in precedence
+ *	of implicit vs. explicit fencing.
+ *
+ * "OUT_FENCE_PTR”:
+ *	Use this property to pass a file descriptor pointer to DRM. Once the
+ *	Atomic Commit request call returns OUT_FENCE_PTR will be filled with
+ *	the file descriptor number of a Sync File. This Sync File contains the
+ *	CRTC fence that will be signaled when all framebuffers present on the
+ *	Atomic Commit request for that given CRTC are scanned out on the
+ *	screen.
+ *
+ *	The Atomic Commit request fails if an invalid pointer is passed. If
+ *	the Atomic Commit request fails for any other reason the out fence fd
+ *	returned will be -1. On an Atomic Commit with the
+ *	DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
+ *
+ *	Note that out-fences don't have a special interface to drivers and are
+ *	internally represented by a struct &drm_pending_vblank_event in struct
+ *	&drm_crtc_state, which is also used by the nonblocking atomic commit
+ *	helpers and for the DRM event handling for existing userspace.
+ */
+
+struct drm_out_fence_state {
+	s32 __user *out_fence_ptr;
+	struct sync_file *sync_file;
+	int fd;
+};
+
+static int setup_out_fence(struct drm_out_fence_state *fence_state,
+			   struct fence *fence)
+{
+	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fence_state->fd < 0)
+		return fence_state->fd;
+
+	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
+		return -EFAULT;
+
+	fence_state->sync_file = sync_file_create(fence);
+	if (!fence_state->sync_file)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int prepare_crtc_signaling(struct drm_device *dev,
+				  struct drm_atomic_state *state,
+				  struct drm_mode_atomic *arg,
+				  struct drm_file *file_priv,
+				  struct drm_out_fence_state **fence_state,
+				  unsigned int *num_fences)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i, ret;
+
+	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+		return 0;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		s32 __user *fence_ptr;
+
+		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
+
+		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
+			struct drm_pending_vblank_event *e;
+
+			e = create_vblank_event(dev, arg->user_data);
+			if (!e)
+				return -ENOMEM;
+
+			crtc_state->event = e;
+		}
+
+		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+			struct drm_pending_vblank_event *e = crtc_state->event;
+
+			if (!file_priv)
+				continue;
+
+			ret = drm_event_reserve_init(dev, file_priv, &e->base,
+						     &e->event.base);
+			if (ret) {
+				kfree(e);
+				crtc_state->event = NULL;
+				return ret;
+			}
+		}
+
+		if (fence_ptr) {
+			struct fence *fence;
+			struct drm_out_fence_state *f;
+
+			f = krealloc(*fence_state, sizeof(**fence_state) *
+				     (*num_fences + 1), GFP_KERNEL);
+			if (!f)
+				return -ENOMEM;
+
+			memset(&f[*num_fences], 0, sizeof(*f));
+
+			f[*num_fences].out_fence_ptr = fence_ptr;
+			*fence_state = f;
+
+			fence = drm_crtc_create_fence(crtc);
+			if (!fence)
+				return -ENOMEM;
+
+			ret = setup_out_fence(&f[(*num_fences)++], fence);
+			if (ret) {
+				fence_put(fence);
+				return ret;
+			}
+
+			crtc_state->event->base.fence = fence;
+		}
+	}
+
+	return 0;
+}
+
+static void complete_crtc_signaling(struct drm_device *dev,
+				    struct drm_atomic_state *state,
+				    struct drm_out_fence_state *fence_state,
+				    unsigned int num_fences,
+				    bool install_fds)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	if (install_fds) {
+		for (i = 0; i < num_fences; i++)
+			fd_install(fence_state[i].fd,
+				   fence_state[i].sync_file->file);
+
+		kfree(fence_state);
+		return;
+	}
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct drm_pending_vblank_event *event = crtc_state->event;
+		/*
+		 * Free the allocated event. drm_atomic_helper_setup_commit
+		 * can allocate an event too, so only free it if it's ours
+		 * to prevent a double free in drm_atomic_state_clear.
+		 */
+		if (event && (event->base.fence || event->base.file_priv)) {
+			drm_event_cancel_free(dev, &event->base);
+			crtc_state->event = NULL;
+		}
+	}
+
+	if (!fence_state)
+		return;
+
+	for (i = 0; i < num_fences; i++) {
+		if (fence_state[i].sync_file)
+			fput(fence_state[i].sync_file->file);
+		if (fence_state[i].fd >= 0)
+			put_unused_fd(fence_state[i].fd);
+
+		/* If this fails log error to the user */
+		if (fence_state[i].out_fence_ptr &&
+		    put_user(-1, fence_state[i].out_fence_ptr))
+			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
+	}
+
+	kfree(fence_state);
+}
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
@@ -1608,11 +1868,10 @@
 	struct drm_atomic_state *state;
 	struct drm_modeset_acquire_ctx ctx;
 	struct drm_plane *plane;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
+	struct drm_out_fence_state *fence_state = NULL;
 	unsigned plane_mask;
 	int ret = 0;
-	unsigned int i, j;
+	unsigned int i, j, num_fences = 0;
 
 	/* disallow for drivers not supporting atomic: */
 	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1727,20 +1986,10 @@
 		drm_mode_object_unreference(obj);
 	}
 
-	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			struct drm_pending_vblank_event *e;
-
-			e = create_vblank_event(dev, file_priv, NULL,
-						arg->user_data);
-			if (!e) {
-				ret = -ENOMEM;
-				goto out;
-			}
-
-			crtc_state->event = e;
-		}
-	}
+	ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
+				     &num_fences);
+	if (ret)
+		goto out;
 
 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
 		/*
@@ -1757,20 +2006,7 @@
 out:
 	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
-	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		/*
-		 * Free the allocated event. drm_atomic_helper_setup_commit
-		 * can allocate an event too, so only free it if it's ours
-		 * to prevent a double free in drm_atomic_state_clear.
-		 */
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			struct drm_pending_vblank_event *event = crtc_state->event;
-			if (event && (event->base.fence || event->base.file_priv)) {
-				drm_event_cancel_free(dev, &event->base);
-				crtc_state->event = NULL;
-			}
-		}
-	}
+	complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
 
 	if (ret == -EDEADLK) {
 		drm_atomic_state_clear(state);
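For orientation, here is a hedged userspace sketch of how the two new properties are intended to be used through libdrm's atomic API. The object and property IDs are placeholders an application would have to discover itself (e.g. with drmModeObjectGetProperties()); none of the names below are added by this patch.

	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	static int commit_with_fences(int drm_fd, drmModeAtomicReq *req,
				      uint32_t plane_id, uint32_t in_fence_fd_prop,
				      uint32_t crtc_id, uint32_t out_fence_ptr_prop,
				      int in_fence_fd, int *out_fence_fd)
	{
		int32_t out_fence = -1;
		int ret;

		/* Ask the kernel to wait on this sync_file before scanning out. */
		drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop, in_fence_fd);

		/* Ask the kernel to write a sync_file fd for this CRTC here. */
		drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
					 (uint64_t)(uintptr_t)&out_fence);

		ret = drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
		if (ret)
			return ret;	/* on failure the kernel writes -1 back */

		/* out_fence signals once the new framebuffers hit the screen. */
		*out_fence_fd = out_fence;
		return 0;
	}

Because the kernel writes -1 through the pointer as soon as the property is set (the put_user(-1, fence_ptr) above), a failed or TEST_ONLY commit leaves "no fence" behind rather than a stale fd.
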
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 50acd79..f34b4e8 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3166,6 +3166,9 @@
 {
 	if (state->fb)
 		drm_framebuffer_unreference(state->fb);
+
+	if (state->fence)
+		fence_put(state->fence);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
 
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2d7bedf..79b3d52 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/fence.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -141,6 +142,54 @@
 	}
 }
 
+static const struct fence_ops drm_crtc_fence_ops;
+
+static struct drm_crtc *fence_to_crtc(struct fence *fence)
+{
+	BUG_ON(fence->ops != &drm_crtc_fence_ops);
+	return container_of(fence->lock, struct drm_crtc, fence_lock);
+}
+
+static const char *drm_crtc_fence_get_driver_name(struct fence *fence)
+{
+	struct drm_crtc *crtc = fence_to_crtc(fence);
+
+	return crtc->dev->driver->name;
+}
+
+static const char *drm_crtc_fence_get_timeline_name(struct fence *fence)
+{
+	struct drm_crtc *crtc = fence_to_crtc(fence);
+
+	return crtc->timeline_name;
+}
+
+static bool drm_crtc_fence_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static const struct fence_ops drm_crtc_fence_ops = {
+	.get_driver_name = drm_crtc_fence_get_driver_name,
+	.get_timeline_name = drm_crtc_fence_get_timeline_name,
+	.enable_signaling = drm_crtc_fence_enable_signaling,
+	.wait = fence_default_wait,
+};
+
+struct fence *drm_crtc_create_fence(struct drm_crtc *crtc)
+{
+	struct fence *fence;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return NULL;
+
+	fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
+		       crtc->fence_context, ++crtc->fence_seqno);
+
+	return fence;
+}
+
 /**
  * drm_crtc_init_with_planes - Initialise a new CRTC object with
  *    specified primary and cursor planes.
@@ -198,6 +247,11 @@
 		return -ENOMEM;
 	}
 
+	crtc->fence_context = fence_context_alloc(1);
+	spin_lock_init(&crtc->fence_lock);
+	snprintf(crtc->timeline_name, sizeof(crtc->timeline_name),
+		 "CRTC:%d-%s", crtc->base.id, crtc->name);
+
 	crtc->base.properties = &crtc->properties;
 
 	list_add_tail(&crtc->head, &config->crtc_list);
@@ -213,6 +267,8 @@
 	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 		drm_object_attach_property(&crtc->base, config->prop_active, 0);
 		drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
+		drm_object_attach_property(&crtc->base,
+					   config->prop_out_fence_ptr, 0);
 	}
 
 	return 0;
@@ -365,6 +421,18 @@
 		return -ENOMEM;
 	dev->mode_config.prop_fb_id = prop;
 
+	prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+			"IN_FENCE_FD", -1, INT_MAX);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.prop_in_fence_fd = prop;
+
+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+			"OUT_FENCE_PTR", 0, U64_MAX);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.prop_out_fence_ptr = prop;
+
 	prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
 			"CRTC_ID", DRM_MODE_OBJECT_CRTC);
 	if (!prop)
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c48ba02..df2b51a 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -41,6 +41,8 @@
 			    const struct drm_display_mode *mode,
 			    const struct drm_framebuffer *fb);
 
+struct fence *drm_crtc_create_fence(struct drm_crtc *crtc);
+
 void drm_fb_release(struct drm_file *file_priv);
 
 /* dumb buffer support IOCTLs */
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0bc0afb..4e5ba7e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -76,6 +76,8 @@
 #define EDID_QUIRK_FORCE_12BPC			(1 << 9)
 /* Force 6bpc */
 #define EDID_QUIRK_FORCE_6BPC			(1 << 10)
+/* Force 10bpc */
+#define EDID_QUIRK_FORCE_10BPC			(1 << 11)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -90,6 +92,14 @@
 #define LEVEL_GTF2	2
 #define LEVEL_CVT	3
 
+/* Enum storing luminance types for HDR blocks in EDID */
+enum luminance_value {
+	NO_LUMINANCE_DATA = 3,
+	MAXIMUM_LUMINANCE = 4,
+	FRAME_AVERAGE_LUMINANCE = 5,
+	MINIMUM_LUMINANCE = 6
+};
+
 static const struct edid_quirk {
 	char vendor[4];
 	int product_id;
@@ -118,6 +128,9 @@
 	{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
 	  EDID_QUIRK_DETAILED_IN_CM },
 
+	/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
+	{ "LGD", 764, EDID_QUIRK_FORCE_10BPC },
+
 	/* LG Philips LCD LP154W01-A5 */
 	{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
 	{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
@@ -997,6 +1010,221 @@
 		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 65 - 1280x720@24Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 66 - 1280x720@25Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+		   3740, 3960, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 67 - 1280x720@30Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 68 - 1280x720@50Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 69 - 1280x720@60Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 70 - 1280x720@100Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 71 - 1280x720@120Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 72 - 1920x1080@24Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 73 - 1920x1080@25Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 74 - 1920x1080@30Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 75 - 1920x1080@50Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 76 - 1920x1080@60Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 77 - 1920x1080@100Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 78 - 1920x1080@120Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 79 - 1680x720@24Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
+		3080, 3300, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 80 - 1680x720@25Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
+		2948, 3168, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 81 - 1680x720@30Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
+		2420, 2640, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 82 - 1680x720@50Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
+		1980, 2200, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 83 - 1680x720@60Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
+		1980, 2200, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 84 - 1680x720@100Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
+		1780, 2000, 0, 720, 725, 730, 825, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 85 - 1680x720@120Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
+		1780, 2000, 0, 720, 725, 730, 825, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 86 - 2560x1080@24Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
+		3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 87 - 2560x1080@25Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
+		3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 88 - 2560x1080@30Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
+		3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 89 - 2560x1080@50Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
+		3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 90 - 2560x1080@60Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
+		2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 91 - 2560x1080@100Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
+		2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 92 - 2560x1080@120Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
+		3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 93 - 3840x2160p@24Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+		5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9,},
+	/* 94 - 3840x2160p@25Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 95 - 3840x2160p@30Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 96 - 3840x2160p@50Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 97 - 3840x2160p@60Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 98 - 4096x2160p@24Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
+		5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 99 - 4096x2160p@25Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+		5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 100 - 4096x2160p@30Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
+		4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 101 - 4096x2160p@50Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+		5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 102 - 4096x2160p@60Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
+		4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 103 - 3840x2160p@24Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+		5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 104 - 3840x2160p@25Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 105 - 3840x2160p@30Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 106 - 3840x2160p@50Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 107 - 3840x2160p@60Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
 };
 
 /*
@@ -2514,12 +2742,15 @@
 
 	return closure.modes;
 }
-
+#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0
 #define AUDIO_BLOCK	0x01
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
+#define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06
+#define EXTENDED_TAG  0x07
 #define VIDEO_CAPABILITY_BLOCK	0x07
+#define Y420_VIDEO_DATA_BLOCK	0x0E
 #define EDID_BASIC_AUDIO	(1 << 6)
 #define EDID_CEA_YCRCB444	(1 << 5)
 #define EDID_CEA_YCRCB422	(1 << 4)
@@ -3168,6 +3399,21 @@
 	return hdmi_id == HDMI_IEEE_OUI;
 }
 
+static bool cea_db_is_hdmi_hf_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (cea_db_tag(db) != VENDOR_BLOCK)
+		return false;
+
+	if (cea_db_payload_len(db) < 7)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IEEE_OUI_HF;
+}
+
 #define for_each_cea_db(cea, i, start, end) \
 	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
 
@@ -3287,6 +3533,227 @@
 }
 
 static void
+parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = cea_db_payload_len(db);
+
+	if (len < 7)
+		return;
+
+	if (db[4] != 1)
+		return; /* invalid version */
+
+	connector->max_tmds_char = db[5] * 5;
+	connector->scdc_present = db[6] & (1 << 7);
+	connector->rr_capable = db[6] & (1 << 6);
+	connector->flags_3d = db[6] & 0x7;
+	connector->supports_scramble = connector->scdc_present &&
+			(db[6] & (1 << 3));
+
+	DRM_DEBUG_KMS("HDMI v2: max TMDS char %d, "
+			"scdc %s, "
+			"rr %s, "
+			"3D flags 0x%x, "
+			"scramble %s\n",
+			connector->max_tmds_char,
+			connector->scdc_present ? "available" : "not available",
+			connector->rr_capable ? "capable" : "not capable",
+			connector->flags_3d,
+			connector->supports_scramble ?
+				"supported" : "not supported");
+}
+
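For illustration only (not part of the patch), the field decoding performed by
parse_hdmi_hf_vsdb() above can be exercised on a hypothetical HDMI Forum VSDB;
the byte offsets and the "* 5" scaling mirror the function, the sample bytes
are made up:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical HF-VSDB: db[0] tag/length, db[1..3] IEEE OUI, then payload */
		const unsigned int db[] = { 0x67, 0xd8, 0x5d, 0xc4, 0x01, 0x78, 0x88, 0x00 };

		printf("version           : %u\n", db[4]);
		printf("max_tmds_char     : %u\n", db[5] * 5);	/* 120 * 5 = 600 */
		printf("scdc_present      : %u\n", !!(db[6] & (1 << 7)));
		printf("rr_capable        : %u\n", !!(db[6] & (1 << 6)));
		printf("flags_3d          : 0x%x\n", db[6] & 0x7);
		printf("supports_scramble : %u\n",
		       !!((db[6] & (1 << 7)) && (db[6] & (1 << 3))));
		return 0;
	}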
+/*
+ * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA Video Capability Data Block
+ *
+ * Parses the HDMI VCDB to extract sink info for @connector.
+ */
+static void
+drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db)
+{
+	/*
+	 * Extract the sink's scan information from byte 2 of the
+	 * Video Capability Data Block:
+	 * bits 5:4 - preferred (PT) video format
+	 * bits 3:2 - IT video format
+	 * bits 1:0 - CE video format
+	 */
+
+	connector->pt_scan_info =
+		(db[2] & (BIT(4) | BIT(5))) >> 4;
+	connector->it_scan_info =
+		(db[2] & (BIT(3) | BIT(2))) >> 2;
+	connector->ce_scan_info =
+		db[2] & (BIT(1) | BIT(0));
+
+	DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)",
+			  (int) connector->pt_scan_info,
+			  (int) connector->it_scan_info,
+			  (int) connector->ce_scan_info);
+}
+
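A standalone sketch (illustrative, not patch code) of the scan-information
extraction drm_extract_vcdb_info() above performs on byte 2 of the VCDB; the
shift-and-mask form is equivalent to the BIT() arithmetic in the function and
the sample byte is hypothetical:

	#include <stdio.h>

	int main(void)
	{
		unsigned int vcdb_byte2 = 0x2a;			/* hypothetical */
		unsigned int pt = (vcdb_byte2 >> 4) & 0x3;	/* bits 5:4 */
		unsigned int it = (vcdb_byte2 >> 2) & 0x3;	/* bits 3:2 */
		unsigned int ce = vcdb_byte2 & 0x3;		/* bits 1:0 */

		printf("pt=%u it=%u ce=%u\n", pt, it, ce);	/* prints pt=2 it=2 ce=2 */
		return 0;
	}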
+static bool drm_edid_is_luminance_value_present(
+		u32 block_length, enum luminance_value value)
+{
+	return block_length > NO_LUMINANCE_DATA && value <= block_length;
+}
+
+/*
+ * drm_extract_hdr_db - Parse the HDMI HDR extended block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the HDMI HDR extended block
+ *
+ * Parses the HDMI HDR extended block to extract sink info for @connector.
+ */
+static void
+drm_extract_hdr_db(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = 0;
+
+	if (!db)
+		return;
+
+	len = db[0] & 0x1f;
+	/* Byte 3: Electro-Optical Transfer Functions */
+	connector->hdr_eotf = db[2] & 0x3F;
+
+	/* Byte 4: Static Metadata Descriptor Type 1 */
+	connector->hdr_metadata_type_one = (db[3] & BIT(0));
+
+	/* Byte 5: Desired Content Maximum Luminance */
+	if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE))
+		connector->hdr_max_luminance =
+			db[MAXIMUM_LUMINANCE];
+
+	/* Byte 6: Desired Content Max Frame-average Luminance */
+	if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE))
+		connector->hdr_avg_luminance =
+			db[FRAME_AVERAGE_LUMINANCE];
+
+	/* Byte 7: Desired Content Min Luminance */
+	if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE))
+		connector->hdr_min_luminance =
+			db[MINIMUM_LUMINANCE];
+
+	connector->hdr_supported = true;
+
+	DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf);
+	DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one);
+	DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance);
+	DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance);
+	DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance);
+}
+
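The byte offsets used by drm_extract_hdr_db() above follow its inline comments:
db[2] carries the supported EOTFs, db[3] the static metadata descriptor type,
and db[4]..db[6] the max / frame-average / min desired content luminance code
values when the block is long enough. A standalone sketch of that indexing on a
hypothetical block (lengths are checked directly here rather than through the
driver's luminance_value enum):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical HDR static metadata data block, extended tag 0x06 */
		const unsigned int db[] = { 0xe6, 0x06, 0x0d, 0x01, 0x9a, 0x7b, 0x30 };
		unsigned int len = db[0] & 0x1f;	/* payload length, here 6 */

		printf("eotf bitmap        : 0x%02x\n", db[2] & 0x3f);
		printf("metadata type 1    : %u\n", db[3] & 0x1);
		if (len >= 4)
			printf("max luminance code : %u\n", db[4]);
		if (len >= 5)
			printf("avg luminance code : %u\n", db[5]);
		if (len >= 6)
			printf("min luminance code : %u\n", db[6]);
		return 0;
	}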
+/*
+ * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ *
+ * Parses all extended tag blocks to extract sink info for @connector.
+ */
+static void
+drm_hdmi_extract_extended_blk_info(struct drm_connector *connector,
+struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == EXTENDED_TAG) {
+				DRM_DEBUG_KMS("found extended tag block = %d\n",
+				db[1]);
+				switch (db[1]) {
+				case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK:
+					drm_extract_vcdb_info(connector, db);
+					break;
+				case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
+					drm_extract_hdr_db(connector, db);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+}
+
+static u8 *
+drm_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid)
+		return NULL;
+
+	cea = drm_find_cea_extension(edid);
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if ((cea_db_tag(db) == EXTENDED_TAG) &&
+				(db[1] == blk_id))
+				return db;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * add_YCbCr420VDB_modes - add the modes found in the YCbCr420 VDB block
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ *
+ * Parses the YCbCr420 VDB block and adds the modes to @connector.
+ */
+static int
+add_YCbCr420VDB_modes(struct drm_connector *connector, struct edid *edid)
+{
+	const u8 *db = NULL;
+	u32 i = 0;
+	u32 modes = 0;
+	u32 video_format = 0;
+	u8 len = 0;
+
+	/* Find the YCbCr420 VDB */
+	db = drm_edid_find_extended_tag_block(edid, Y420_VIDEO_DATA_BLOCK);
+	/* Offset to byte 3 */
+	if (db) {
+		len = db[0] & 0x1F;
+		db += 2;
+		for (i = 0; i < len - 1; i++) {
+			struct drm_display_mode *mode;
+
+			video_format = *(db + i) & 0x7F;
+			mode = drm_display_mode_from_vic_index(connector,
+					db, len-1, i);
+			if (mode) {
+				DRM_DEBUG_KMS("Adding mode for vic = %d\n",
+				video_format);
+				drm_mode_probed_add(connector, mode);
+				modes++;
+			}
+		}
+	}
+	return modes;
+}
+
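For reference, the YCbCr 4:2:0 Video Data Block consists of the tag/length
byte, the extended tag (0x0E above), and then one SVD byte per mode whose low
7 bits are the VIC, which is what the loop above masks with 0x7F. A standalone
sketch with a hypothetical block (the VICs happen to match the 3840x2160
50/60 Hz entries added to the mode table earlier in this patch):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical Y420VDB: tag 7 / length 5, extended tag 0x0E, four SVDs */
		const unsigned int db[] = { 0xe5, 0x0e, 96, 97, 106, 107 };
		unsigned int len = db[0] & 0x1f;
		unsigned int i;

		for (i = 0; i < len - 1; i++)
			printf("VIC %u\n", db[2 + i] & 0x7f);
		return 0;
	}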
+static void
 monitor_name(struct detailed_timing *t, void *data)
 {
 	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
@@ -3410,6 +3877,9 @@
 				/* HDMI Vendor-Specific Data Block */
 				if (cea_db_is_hdmi_vsdb(db))
 					drm_parse_hdmi_vsdb_audio(connector, db);
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_hf_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
 				break;
 			default:
 				break;
@@ -3840,6 +4310,37 @@
 	}
 }
 
+static void
+drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == VENDOR_BLOCK) {
+				/* HDMI Vendor-Specific Data Block */
+				if (cea_db_is_hdmi_vsdb(db)) {
+					drm_parse_hdmi_vsdb_video(
+						connector, db);
+					drm_parse_hdmi_vsdb_audio(
+						connector, db);
+				}
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_hf_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
+			}
+		}
+	}
+}
+
 static void drm_add_display_info(struct drm_connector *connector,
 				 struct edid *edid)
 {
@@ -3877,6 +4378,11 @@
 			  connector->name, info->bpc);
 	}
 
+	/* Extract audio and video latency fields for the sink */
+	drm_hdmi_extract_vsdbs_info(connector, edid);
+	/* Extract info from extended tag blocks */
+	drm_hdmi_extract_extended_blk_info(connector, edid);
+
 	/* Only defined for 1.4 with digital displays */
 	if (edid->revision < 4)
 		return;
@@ -4091,6 +4597,7 @@
 	num_modes += add_cea_modes(connector, edid);
 	num_modes += add_alternate_cea_modes(connector, edid);
 	num_modes += add_displayid_detailed_modes(connector, edid);
+	num_modes += add_YCbCr420VDB_modes(connector, edid);
 	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
 		num_modes += add_inferred_modes(connector, edid);
 
@@ -4105,6 +4612,9 @@
 	if (quirks & EDID_QUIRK_FORCE_8BPC)
 		connector->display_info.bpc = 8;
 
+	if (quirks & EDID_QUIRK_FORCE_10BPC)
+		connector->display_info.bpc = 10;
+
 	if (quirks & EDID_QUIRK_FORCE_12BPC)
 		connector->display_info.bpc = 12;
 
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 1fd6eac..52629b6 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,13 +18,16 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/reservation.h>
 
 #define DEFAULT_FBDEFIO_DELAY_MS 50
 
@@ -265,6 +268,38 @@
 }
 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
 
+/**
+ * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
+ * @plane: Which plane
+ * @state: Plane state to attach the fence to
+ *
+ * This should be put into the prepare_fb hook of struct &drm_plane_helper_funcs.
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to the plane state for the atomic
+ * helper to wait on.
+ *
+ * There is no need for cleanup_fb for CMA based framebuffer drivers.
+ */
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+			  struct drm_plane_state *state)
+{
+	struct dma_buf *dma_buf;
+	struct fence *fence;
+
+	if ((plane->state->fb == state->fb) || !state->fb)
+		return 0;
+
+	dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
+	if (dma_buf) {
+		fence = reservation_object_get_excl_rcu(dma_buf->resv);
+		drm_atomic_set_fence_for_plane(state, fence);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
+
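As the kernel-doc above suggests, a driver wires this helper into its plane
helper funcs; a minimal sketch, with the other hooks as hypothetical
driver-specific placeholders:

	static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
		/* wait on the framebuffer's exclusive dma-buf fence before scanout */
		.prepare_fb    = drm_fb_cma_prepare_fb,
		.atomic_check  = foo_plane_atomic_check,	/* placeholder */
		.atomic_update = foo_plane_atomic_update,	/* placeholder */
	};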
 #ifdef CONFIG_DEBUG_FS
 static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index f5815e1..fe00bea 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -663,6 +663,10 @@
 		list_del(&p->pending_link);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (p->fence)
+		fence_put(p->fence);
+
 	kfree(p);
 }
 EXPORT_SYMBOL(drm_event_cancel_free);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 249c0ae..3957ef8 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -137,6 +137,7 @@
 
 	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 		drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+		drm_object_attach_property(&plane->base, config->prop_in_fence_fd, -1);
 		drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
 		drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
 		drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f46aac1..c75f4bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -420,6 +420,11 @@
 		return 0;
 	}
 
+	if (intel_vgpu_active(dev_priv)) {
+		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
+		return 0;
+	}
+
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
 		DRM_INFO("DMAR active, disabling use of stolen memory\n");
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4112bef..9ded825 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,10 +1,20 @@
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging -Idrivers/gpu/drm/msm/dp
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
 ccflags-y += -Idrivers/gpu/drm/msm/sde
 ccflags-y += -Idrivers/media/platform/msm/sde/rotator
 
 msm_drm-y := \
+	dp/dp_usbpd.o \
+	dp/dp_parser.o \
+	dp/dp_power.o \
+	dp/dp_catalog.o \
+	dp/dp_aux.o \
+	dp/dp_panel.o \
+	dp/dp_link.o \
+	dp/dp_ctrl.o \
+	dp/dp_display.o \
+	dp/dp_drm.o \
 	hdmi/hdmi.o \
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
@@ -51,6 +61,7 @@
 	sde/sde_hw_reg_dma_v1_color_proc.o \
 	sde/sde_hw_color_proc_v4.o \
 	sde/sde_hw_ad4.o \
+	sde_edid_parser.o
 
 msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
 	sde_rsc_hw.o \
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 30f477e..a79a9c9 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -43,6 +43,7 @@
 	struct aux_buf rxp;
 
 	u32 aux_error_num;
+	bool cmd_busy;
 
 	u8 txbuf[256];
 	u8 rxbuf[256];
@@ -82,6 +83,7 @@
 static void dp_aux_buf_set(struct dp_aux_private *aux)
 {
 	init_completion(&aux->comp);
+	aux->cmd_busy = false;
 	mutex_init(&aux->mutex);
 
 	dp_aux_buf_init(&aux->txp, aux->txbuf, sizeof(aux->txbuf));
@@ -235,7 +237,7 @@
 
 static void dp_aux_native_handler(struct dp_aux_private *aux)
 {
-	u32 isr = aux->catalog->isr1;
+	u32 isr = aux->catalog->isr;
 
 	if (isr & DP_INTR_AUX_I2C_DONE)
 		aux->aux_error_num = DP_AUX_ERR_NONE;
@@ -251,7 +253,7 @@
 
 static void dp_aux_i2c_handler(struct dp_aux_private *aux)
 {
-	u32 isr = aux->catalog->isr1;
+	u32 isr = aux->catalog->isr;
 
 	if (isr & DP_INTR_AUX_I2C_DONE) {
 		if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
@@ -285,7 +287,10 @@
 
 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
-	aux->catalog->get_irq(aux->catalog);
+	aux->catalog->get_irq(aux->catalog, aux->cmd_busy);
+
+	if (!aux->cmd_busy)
+		return;
 
 	if (aux->cmds->tx_mode == AUX_NATIVE)
 		dp_aux_native_handler(aux);
@@ -318,6 +323,7 @@
 	}
 
 	reinit_completion(&aux->comp);
+	aux->cmd_busy = true;
 
 	len = dp_aux_cmd_fifo_tx(aux);
 
@@ -333,6 +339,7 @@
 	else
 		ret = aux->aux_error_num;
 
+	aux->cmd_busy = false;
 	mutex_unlock(&aux->mutex);
 	return  ret;
 }
@@ -367,6 +374,7 @@
 	}
 
 	reinit_completion(&aux->comp);
+	aux->cmd_busy = true;
 
 	dp_aux_cmd_fifo_tx(aux);
 
@@ -383,6 +391,7 @@
 		ret = aux->aux_error_num;
 
 	aux->cmds->buf = rp->data;
+	aux->cmd_busy = false;
 
 	mutex_unlock(&aux->mutex);
 
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
new file mode 100644
index 0000000..ca55d16
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -0,0 +1,964 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/delay.h>
+
+#include "dp_catalog.h"
+
+/* DP_TX Registers */
+#define DP_HW_VERSION				(0x00000000)
+#define DP_SW_RESET				(0x00000010)
+#define DP_PHY_CTRL				(0x00000014)
+#define DP_CLK_CTRL				(0x00000018)
+#define DP_CLK_ACTIVE				(0x0000001C)
+#define DP_INTR_STATUS				(0x00000020)
+#define DP_INTR_STATUS2				(0x00000024)
+#define DP_INTR_STATUS3				(0x00000028)
+
+#define DP_DP_HPD_CTRL				(0x00000200)
+#define DP_DP_HPD_INT_STATUS			(0x00000204)
+#define DP_DP_HPD_INT_ACK			(0x00000208)
+#define DP_DP_HPD_INT_MASK			(0x0000020C)
+#define DP_DP_HPD_REFTIMER			(0x00000218)
+#define DP_DP_HPD_EVENT_TIME_0			(0x0000021C)
+#define DP_DP_HPD_EVENT_TIME_1			(0x00000220)
+#define DP_AUX_CTRL				(0x00000230)
+#define DP_AUX_DATA				(0x00000234)
+#define DP_AUX_TRANS_CTRL			(0x00000238)
+#define DP_TIMEOUT_COUNT			(0x0000023C)
+#define DP_AUX_LIMITS				(0x00000240)
+#define DP_AUX_STATUS				(0x00000244)
+
+#define DP_DPCD_CP_IRQ				(0x201)
+#define DP_DPCD_RXSTATUS			(0x69493)
+
+#define DP_INTERRUPT_TRANS_NUM			(0x000002A0)
+
+#define DP_MAINLINK_CTRL			(0x00000400)
+#define DP_STATE_CTRL				(0x00000404)
+#define DP_CONFIGURATION_CTRL			(0x00000408)
+#define DP_SOFTWARE_MVID			(0x00000410)
+#define DP_SOFTWARE_NVID			(0x00000418)
+#define DP_TOTAL_HOR_VER			(0x0000041C)
+#define DP_START_HOR_VER_FROM_SYNC		(0x00000420)
+#define DP_HSYNC_VSYNC_WIDTH_POLARITY		(0x00000424)
+#define DP_ACTIVE_HOR_VER			(0x00000428)
+#define DP_MISC1_MISC0				(0x0000042C)
+#define DP_VALID_BOUNDARY			(0x00000430)
+#define DP_VALID_BOUNDARY_2			(0x00000434)
+#define DP_LOGICAL2PHYSCIAL_LANE_MAPPING	(0x00000438)
+
+#define DP_MAINLINK_READY			(0x00000440)
+#define DP_MAINLINK_LEVELS			(0x00000444)
+#define DP_TU					(0x0000044C)
+
+#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET	(0x00000454)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0	(0x000004C0)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1	(0x000004C4)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2	(0x000004C8)
+
+#define MMSS_DP_MISC1_MISC0			(0x0000042C)
+#define MMSS_DP_AUDIO_TIMING_GEN		(0x00000480)
+#define MMSS_DP_AUDIO_TIMING_RBR_32		(0x00000484)
+#define MMSS_DP_AUDIO_TIMING_HBR_32		(0x00000488)
+#define MMSS_DP_AUDIO_TIMING_RBR_44		(0x0000048C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44		(0x00000490)
+#define MMSS_DP_AUDIO_TIMING_RBR_48		(0x00000494)
+#define MMSS_DP_AUDIO_TIMING_HBR_48		(0x00000498)
+
+#define MMSS_DP_PSR_CRC_RG			(0x00000554)
+#define MMSS_DP_PSR_CRC_B			(0x00000558)
+
+#define MMSS_DP_AUDIO_CFG			(0x00000600)
+#define MMSS_DP_AUDIO_STATUS			(0x00000604)
+#define MMSS_DP_AUDIO_PKT_CTRL			(0x00000608)
+#define MMSS_DP_AUDIO_PKT_CTRL2			(0x0000060C)
+#define MMSS_DP_AUDIO_ACR_CTRL			(0x00000610)
+#define MMSS_DP_AUDIO_CTRL_RESET		(0x00000614)
+
+#define MMSS_DP_SDP_CFG				(0x00000628)
+#define MMSS_DP_SDP_CFG2			(0x0000062C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0		(0x00000630)
+#define MMSS_DP_AUDIO_TIMESTAMP_1		(0x00000634)
+
+#define MMSS_DP_AUDIO_STREAM_0			(0x00000640)
+#define MMSS_DP_AUDIO_STREAM_1			(0x00000644)
+
+#define MMSS_DP_EXTENSION_0			(0x00000650)
+#define MMSS_DP_EXTENSION_1			(0x00000654)
+#define MMSS_DP_EXTENSION_2			(0x00000658)
+#define MMSS_DP_EXTENSION_3			(0x0000065C)
+#define MMSS_DP_EXTENSION_4			(0x00000660)
+#define MMSS_DP_EXTENSION_5			(0x00000664)
+#define MMSS_DP_EXTENSION_6			(0x00000668)
+#define MMSS_DP_EXTENSION_7			(0x0000066C)
+#define MMSS_DP_EXTENSION_8			(0x00000670)
+#define MMSS_DP_EXTENSION_9			(0x00000674)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0		(0x00000678)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1		(0x0000067C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2		(0x00000680)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3		(0x00000684)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4		(0x00000688)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5		(0x0000068C)
+#define MMSS_DP_AUDIO_ISRC_0			(0x00000690)
+#define MMSS_DP_AUDIO_ISRC_1			(0x00000694)
+#define MMSS_DP_AUDIO_ISRC_2			(0x00000698)
+#define MMSS_DP_AUDIO_ISRC_3			(0x0000069C)
+#define MMSS_DP_AUDIO_ISRC_4			(0x000006A0)
+#define MMSS_DP_AUDIO_ISRC_5			(0x000006A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0		(0x000006A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1		(0x000006AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2		(0x000006B0)
+
+#define MMSS_DP_GENERIC0_0			(0x00000700)
+#define MMSS_DP_GENERIC0_1			(0x00000704)
+#define MMSS_DP_GENERIC0_2			(0x00000708)
+#define MMSS_DP_GENERIC0_3			(0x0000070C)
+#define MMSS_DP_GENERIC0_4			(0x00000710)
+#define MMSS_DP_GENERIC0_5			(0x00000714)
+#define MMSS_DP_GENERIC0_6			(0x00000718)
+#define MMSS_DP_GENERIC0_7			(0x0000071C)
+#define MMSS_DP_GENERIC0_8			(0x00000720)
+#define MMSS_DP_GENERIC0_9			(0x00000724)
+#define MMSS_DP_GENERIC1_0			(0x00000728)
+#define MMSS_DP_GENERIC1_1			(0x0000072C)
+#define MMSS_DP_GENERIC1_2			(0x00000730)
+#define MMSS_DP_GENERIC1_3			(0x00000734)
+#define MMSS_DP_GENERIC1_4			(0x00000738)
+#define MMSS_DP_GENERIC1_5			(0x0000073C)
+#define MMSS_DP_GENERIC1_6			(0x00000740)
+#define MMSS_DP_GENERIC1_7			(0x00000744)
+#define MMSS_DP_GENERIC1_8			(0x00000748)
+#define MMSS_DP_GENERIC1_9			(0x0000074C)
+
+#define MMSS_DP_TIMING_ENGINE_EN		(0x00000A10)
+#define MMSS_DP_ASYNC_FIFO_CONFIG		(0x00000A88)
+
+/* DP PHY Register offsets */
+#define DP_PHY_REVISION_ID0                     (0x00000000)
+#define DP_PHY_REVISION_ID1                     (0x00000004)
+#define DP_PHY_REVISION_ID2                     (0x00000008)
+#define DP_PHY_REVISION_ID3                     (0x0000000C)
+
+#define DP_PHY_CFG                              (0x00000010)
+#define DP_PHY_PD_CTL                           (0x00000018)
+#define DP_PHY_MODE                             (0x0000001C)
+
+#define DP_PHY_AUX_CFG0                         (0x00000020)
+#define DP_PHY_AUX_CFG1                         (0x00000024)
+#define DP_PHY_AUX_CFG2                         (0x00000028)
+#define DP_PHY_AUX_CFG3                         (0x0000002C)
+#define DP_PHY_AUX_CFG4                         (0x00000030)
+#define DP_PHY_AUX_CFG5                         (0x00000034)
+#define DP_PHY_AUX_CFG6                         (0x00000038)
+#define DP_PHY_AUX_CFG7                         (0x0000003C)
+#define DP_PHY_AUX_CFG8                         (0x00000040)
+#define DP_PHY_AUX_CFG9                         (0x00000044)
+#define DP_PHY_AUX_INTERRUPT_MASK               (0x00000048)
+#define DP_PHY_AUX_INTERRUPT_CLEAR              (0x0000004C)
+
+#define DP_PHY_SPARE0				(0x00AC)
+
+#define TXn_TX_EMP_POST1_LVL			(0x000C)
+#define TXn_TX_DRV_LVL				(0x001C)
+
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		(0x004)
+
+#define EDID_START_ADDRESS			0x50
+
+/* DP MMSS_CC registers */
+#define MMSS_DP_LINK_CMD_RCGR			(0x0138)
+#define MMSS_DP_LINK_CFG_RCGR			(0x013C)
+#define MMSS_DP_PIXEL_M				(0x0174)
+#define MMSS_DP_PIXEL_N				(0x0178)
+
+/* DP HDCP 1.3 registers */
+#define DP_HDCP_CTRL                                   (0x0A0)
+#define DP_HDCP_STATUS                                 (0x0A4)
+#define DP_HDCP_SW_UPPER_AKSV                          (0x298)
+#define DP_HDCP_SW_LOWER_AKSV                          (0x29C)
+#define DP_HDCP_ENTROPY_CTRL0                          (0x750)
+#define DP_HDCP_ENTROPY_CTRL1                          (0x75C)
+#define DP_HDCP_SHA_STATUS                             (0x0C8)
+#define DP_HDCP_RCVPORT_DATA2_0                        (0x0B0)
+#define DP_HDCP_RCVPORT_DATA3                          (0x2A4)
+#define DP_HDCP_RCVPORT_DATA4                          (0x2A8)
+#define DP_HDCP_RCVPORT_DATA5                          (0x0C0)
+#define DP_HDCP_RCVPORT_DATA6                          (0x0C4)
+
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL           (0x024)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA           (0x028)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0      (0x004)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1      (0x008)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7      (0x00C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8      (0x010)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9      (0x014)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10     (0x018)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11     (0x01C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12     (0x020)
+
+#define dp_read(offset) readl_relaxed((offset))
+#define dp_write(offset, data) writel_relaxed((data), (offset))
+
+#define dp_catalog_get_priv(x) { \
+	struct dp_catalog *dp_catalog; \
+	dp_catalog = container_of(x, struct dp_catalog, x); \
+	catalog = container_of(dp_catalog, struct dp_catalog_private, \
+				dp_catalog); \
+}
+
+#define DP_INTERRUPT_STATUS1 \
+	(DP_INTR_AUX_I2C_DONE | \
+	DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+	DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+	DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+	DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
+#define DP_INTR_MASK1		(DP_INTERRUPT_STATUS1 << 2)
+
+#define DP_INTERRUPT_STATUS2 \
+	(DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+	DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTR_MASK2		(DP_INTERRUPT_STATUS2 << 2)
+
+static u8 const vm_pre_emphasis[4][4] = {
+	{0x00, 0x0B, 0x12, 0xFF},       /* pe0, 0 db */
+	{0x00, 0x0A, 0x12, 0xFF},       /* pe1, 3.5 db */
+	{0x00, 0x0C, 0xFF, 0xFF},       /* pe2, 6.0 db */
+	{0xFF, 0xFF, 0xFF, 0xFF}        /* pe3, 9.5 db */
+};
+
+/* voltage swing, 0.2v and 1.0v are not supported */
+static u8 const vm_voltage_swing[4][4] = {
+	{0x07, 0x0F, 0x14, 0xFF}, /* sw0, 0.4 v */
+	{0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */
+	{0x18, 0x1F, 0xFF, 0xFF}, /* sw2, 0.8 v */
+	{0xFF, 0xFF, 0xFF, 0xFF}  /* sw3, 1.2 v, optional */
+};
+
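Both tables above are indexed as [voltage swing level][pre-emphasis level], and
0xFF marks a combination the hardware does not support; dp_catalog_ctrl_update_vx_px()
further below only programs the TX registers when neither looked-up value is
0xFF. A standalone sketch of the lookup with hypothetical requested levels:

	#include <stdio.h>

	/* copies of the tables above, so the example is self-contained */
	static const unsigned char pre_emphasis[4][4] = {
		{0x00, 0x0B, 0x12, 0xFF},
		{0x00, 0x0A, 0x12, 0xFF},
		{0x00, 0x0C, 0xFF, 0xFF},
		{0xFF, 0xFF, 0xFF, 0xFF}
	};

	static const unsigned char voltage_swing[4][4] = {
		{0x07, 0x0F, 0x14, 0xFF},
		{0x11, 0x1D, 0x1F, 0xFF},
		{0x18, 0x1F, 0xFF, 0xFF},
		{0xFF, 0xFF, 0xFF, 0xFF}
	};

	int main(void)
	{
		unsigned int v_level = 1, p_level = 2;	/* hypothetical request */
		unsigned char sw = voltage_swing[v_level][p_level];
		unsigned char pe = pre_emphasis[v_level][p_level];

		if (sw != 0xFF && pe != 0xFF)
			printf("program sw=0x%02x pe=0x%02x\n", sw, pe);
		else
			printf("unsupported swing/pre-emphasis combination\n");
		return 0;
	}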
+struct dp_catalog_private {
+	struct device *dev;
+	struct dp_io *io;
+	struct dp_catalog dp_catalog;
+};
+
+/* aux related catalog functions */
+static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	return dp_read(base + DP_AUX_DATA);
+end:
+	return 0;
+}
+
+static int dp_catalog_aux_write_data(struct dp_catalog_aux *aux)
+{
+	int rc = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_AUX_DATA, aux->data);
+end:
+	return rc;
+}
+
+static int dp_catalog_aux_write_trans(struct dp_catalog_aux *aux)
+{
+	int rc = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_AUX_TRANS_CTRL, aux->data);
+end:
+	return rc;
+}
+
+static void dp_catalog_aux_reset(struct dp_catalog_aux *aux)
+{
+	u32 aux_ctrl;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	aux_ctrl = dp_read(base + DP_AUX_CTRL);
+
+	aux_ctrl |= BIT(1);
+	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+	usleep_range(1000, 1010); /* h/w recommended delay */
+
+	aux_ctrl &= ~BIT(1);
+	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+}
+
+static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable)
+{
+	u32 aux_ctrl;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	aux_ctrl = dp_read(base + DP_AUX_CTRL);
+
+	if (enable) {
+		dp_write(base + DP_TIMEOUT_COUNT, 0xffff);
+		dp_write(base + DP_AUX_LIMITS, 0xffff);
+		aux_ctrl |= BIT(0);
+	} else {
+		aux_ctrl &= ~BIT(0);
+	}
+
+	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+}
+
+static void dp_catalog_aux_setup(struct dp_catalog_aux *aux, u32 *aux_cfg)
+{
+	struct dp_catalog_private *catalog;
+
+	if (!aux || !aux_cfg) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+
+	dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x02);
+	wmb(); /* make sure PD programming happened */
+	dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x7d);
+
+	/* Turn on BIAS current for PHY/PLL */
+	dp_write(catalog->io->dp_pll_io.base +
+		QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+
+	/* DP AUX CFG register programming */
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG0, aux_cfg[0]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG1, aux_cfg[1]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG2, aux_cfg[2]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG3, aux_cfg[3]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG4, aux_cfg[4]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG5, aux_cfg[5]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG6, aux_cfg[6]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG7, aux_cfg[7]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG8, aux_cfg[8]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG9, aux_cfg[9]);
+
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_INTERRUPT_MASK, 0x1F);
+}
+
+static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy)
+{
+	u32 ack;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	if (cmd_busy)
+		dp_write(base + DP_AUX_TRANS_CTRL, 0x0);
+
+	aux->isr = dp_read(base + DP_INTR_STATUS);
+	aux->isr &= ~DP_INTR_MASK1;
+	ack = aux->isr & DP_INTERRUPT_STATUS1;
+	ack <<= 1;
+	ack |= DP_INTR_MASK1;
+	dp_write(base + DP_INTR_STATUS, ack);
+}
+
+/* controller related catalog functions */
+static void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_VALID_BOUNDARY, ctrl->valid_boundary);
+	dp_write(base + DP_TU, ctrl->dp_tu);
+	dp_write(base + DP_VALID_BOUNDARY_2, ctrl->valid_boundary2);
+}
+
+static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_STATE_CTRL, state);
+}
+
+static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u32 cfg)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_CONFIGURATION_CTRL, cfg);
+	dp_write(base + DP_MAINLINK_LEVELS, 0xa08);
+	dp_write(base + MMSS_DP_ASYNC_FIFO_CONFIG, 0x1);
+}
+
+static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_LOGICAL2PHYSCIAL_LANE_MAPPING, 0xe4);
+}
+
+static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
+						bool enable)
+{
+	u32 mainlink_ctrl;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	mainlink_ctrl = dp_read(base + DP_MAINLINK_CTRL);
+
+	if (enable) {
+		mainlink_ctrl |= BIT(0);
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
+		wmb(); /* make sure mainlink is turned off before reset */
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000002);
+		wmb(); /* make sure mainlink entered reset */
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
+		wmb(); /* make sure mainlink reset done */
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000001);
+		wmb(); /* make sure mainlink turned on */
+	} else {
+		mainlink_ctrl &= ~BIT(0);
+		dp_write(base + DP_MAINLINK_CTRL, 0x0);
+	}
+}
+
+static void dp_catalog_ctrl_config_misc(struct dp_catalog_ctrl *ctrl,
+					u32 cc, u32 tb)
+{
+	u32 misc_val = cc;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	misc_val |= (tb << 5);
+	misc_val |= BIT(0); /* Configure clock to synchronous mode */
+
+	pr_debug("misc settings = 0x%x\n", misc_val);
+	dp_write(base + DP_MISC1_MISC0, misc_val);
+}
+
+static void dp_catalog_ctrl_config_msa(struct dp_catalog_ctrl *ctrl)
+{
+	u32 pixel_m, pixel_n;
+	u32 mvid, nvid;
+	struct dp_catalog_private *catalog;
+	void __iomem *base_cc, *base_ctrl;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base_cc = catalog->io->dp_cc_io.base;
+	base_ctrl = catalog->io->ctrl_io.base;
+
+	pixel_m = dp_read(base_cc + MMSS_DP_PIXEL_M);
+	pixel_n = dp_read(base_cc + MMSS_DP_PIXEL_N);
+	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
+
+	mvid = (pixel_m & 0xFFFF) * 5;
+	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
+	pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+	dp_write(base_ctrl + DP_SOFTWARE_MVID, mvid);
+	dp_write(base_ctrl + DP_SOFTWARE_NVID, nvid);
+}
+
+static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl,
+					u32 pattern)
+{
+	int bit, cnt = 10;
+	u32 data;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	bit = 1;
+	bit <<= (pattern - 1);
+	pr_debug("bit=%d train=%d\n", bit, pattern);
+	dp_write(base + DP_STATE_CTRL, bit);
+
+	bit = 8;
+	bit <<= (pattern - 1);
+
+	while (cnt--) {
+		data = dp_read(base + DP_MAINLINK_READY);
+		if (data & bit)
+			break;
+	}
+
+	if (cnt < 0)
+		pr_err("set link_train=%d failed\n", pattern);
+}
+
+static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl)
+{
+	u32 sw_reset;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	sw_reset = dp_read(base + DP_SW_RESET);
+
+	sw_reset |= BIT(0);
+	dp_write(base + DP_SW_RESET, sw_reset);
+	usleep_range(1000, 1010); /* h/w recommended delay */
+
+	sw_reset &= ~BIT(0);
+	dp_write(base + DP_SW_RESET, sw_reset);
+}
+
+static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl)
+{
+	u32 data;
+	int cnt = 10;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	while (--cnt) {
+		/* DP_MAINLINK_READY */
+		data = dp_read(base + DP_MAINLINK_READY);
+		if (data & BIT(0))
+			return true;
+
+		usleep_range(1000, 1010); /* 1ms wait before next reg read */
+	}
+	pr_err("mainlink not ready\n");
+end:
+	return false;
+}
+
+static void dp_catalog_ctrl_enable_irq(struct dp_catalog_ctrl *ctrl,
+						bool enable)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	if (enable) {
+		dp_write(base + DP_INTR_STATUS, DP_INTR_MASK1);
+		dp_write(base + DP_INTR_STATUS2, DP_INTR_MASK2);
+	} else {
+		dp_write(base + DP_INTR_STATUS, 0x00);
+		dp_write(base + DP_INTR_STATUS2, 0x00);
+	}
+}
+
+static void dp_catalog_ctrl_hpd_config(struct dp_catalog_ctrl *ctrl, bool en)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	if (en) {
+		u32 reftimer = dp_read(base + DP_DP_HPD_REFTIMER);
+
+		dp_write(base + DP_DP_HPD_INT_ACK, 0xF);
+		dp_write(base + DP_DP_HPD_INT_MASK, 0xF);
+
+		/* Enable REFTIMER */
+		reftimer |= BIT(16);
+		dp_write(base + DP_DP_HPD_REFTIMER, reftimer);
+		/* Enable HPD */
+		dp_write(base + DP_DP_HPD_CTRL, 0x1);
+	} else {
+		/* Disable HPD */
+		dp_write(base + DP_DP_HPD_CTRL, 0x0);
+	}
+}
+
+static void dp_catalog_ctrl_get_interrupt(struct dp_catalog_ctrl *ctrl)
+{
+	u32 ack = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	ctrl->isr = dp_read(base + DP_INTR_STATUS2);
+	ctrl->isr &= ~DP_INTR_MASK2;
+	ack = ctrl->isr & DP_INTERRUPT_STATUS2;
+	ack <<= 1;
+	ack |= DP_INTR_MASK2;
+	dp_write(base + DP_INTR_STATUS2, ack);
+}
+
+static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_PHY_CTRL, 0x5); /* bit 0 & 2 */
+	usleep_range(1000, 1010); /* h/w recommended delay */
+	dp_write(base + DP_PHY_CTRL, 0x0);
+	wmb(); /* make sure PHY reset done */
+}
+
+static void dp_catalog_ctrl_phy_lane_cfg(struct dp_catalog_ctrl *ctrl,
+		bool flipped, u8 ln_cnt)
+{
+	u32 info = 0x0;
+	struct dp_catalog_private *catalog;
+	u8 orientation = BIT(!!flipped);
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+
+	info |= (ln_cnt & 0x0F);
+	info |= ((orientation & 0x0F) << 4);
+	pr_debug("Shared Info = 0x%x\n", info);
+
+	dp_write(catalog->io->phy_io.base + DP_PHY_SPARE0, info);
+}
+
+static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl,
+		u8 v_level, u8 p_level)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base0, *base1;
+	u8 value0, value1;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base0 = catalog->io->ln_tx0_io.base;
+	base1 = catalog->io->ln_tx1_io.base;
+
+	pr_debug("v=%d p=%d\n", v_level, p_level);
+
+	value0 = vm_voltage_swing[v_level][p_level];
+	value1 = vm_pre_emphasis[v_level][p_level];
+
+	/* program default setting first */
+	dp_write(base0 + TXn_TX_DRV_LVL, 0x2A);
+	dp_write(base1 + TXn_TX_DRV_LVL, 0x2A);
+	dp_write(base0 + TXn_TX_EMP_POST1_LVL, 0x20);
+	dp_write(base1 + TXn_TX_EMP_POST1_LVL, 0x20);
+
+	/* Enable MUX to use Cursor values from these registers */
+	value0 |= BIT(5);
+	value1 |= BIT(5);
+
+	/* Configure host and panel only if both values are allowed */
+	if (value0 != 0xFF && value1 != 0xFF) {
+		dp_write(base0 + TXn_TX_DRV_LVL, value0);
+		dp_write(base1 + TXn_TX_DRV_LVL, value0);
+		dp_write(base0 + TXn_TX_EMP_POST1_LVL, value1);
+		dp_write(base1 + TXn_TX_EMP_POST1_LVL, value1);
+
+		pr_debug("host PHY settings: value0=0x%x value1=0x%x",
+						value0, value1);
+	}
+}
+
+/* panel related catalog functions */
+static int dp_catalog_panel_timing_cfg(struct dp_catalog_panel *panel)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!panel) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	dp_catalog_get_priv(panel);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_TOTAL_HOR_VER, panel->total);
+	dp_write(base + DP_START_HOR_VER_FROM_SYNC, panel->sync_start);
+	dp_write(base + DP_HSYNC_VSYNC_WIDTH_POLARITY, panel->width_blanking);
+	dp_write(base + DP_ACTIVE_HOR_VER, panel->dp_active);
+end:
+	return 0;
+}
+/* audio related catalog functions */
+static int dp_catalog_audio_acr_ctrl(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_stream_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_timestamp_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_infoframe_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_copy_mgmt_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_isrc_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_setup_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
+{
+	int rc = 0;
+	struct dp_catalog *dp_catalog;
+	struct dp_catalog_private *catalog;
+	struct dp_catalog_aux aux = {
+		.read_data     = dp_catalog_aux_read_data,
+		.write_data    = dp_catalog_aux_write_data,
+		.write_trans   = dp_catalog_aux_write_trans,
+		.reset         = dp_catalog_aux_reset,
+		.enable        = dp_catalog_aux_enable,
+		.setup         = dp_catalog_aux_setup,
+		.get_irq       = dp_catalog_aux_get_irq,
+	};
+	struct dp_catalog_ctrl ctrl = {
+		.state_ctrl     = dp_catalog_ctrl_state_ctrl,
+		.config_ctrl    = dp_catalog_ctrl_config_ctrl,
+		.lane_mapping   = dp_catalog_ctrl_lane_mapping,
+		.mainlink_ctrl  = dp_catalog_ctrl_mainlink_ctrl,
+		.config_misc    = dp_catalog_ctrl_config_misc,
+		.config_msa     = dp_catalog_ctrl_config_msa,
+		.set_pattern    = dp_catalog_ctrl_set_pattern,
+		.reset          = dp_catalog_ctrl_reset,
+		.mainlink_ready = dp_catalog_ctrl_mainlink_ready,
+		.enable_irq     = dp_catalog_ctrl_enable_irq,
+		.hpd_config     = dp_catalog_ctrl_hpd_config,
+		.phy_reset      = dp_catalog_ctrl_phy_reset,
+		.phy_lane_cfg   = dp_catalog_ctrl_phy_lane_cfg,
+		.update_vx_px   = dp_catalog_ctrl_update_vx_px,
+		.get_interrupt  = dp_catalog_ctrl_get_interrupt,
+		.update_transfer_unit = dp_catalog_ctrl_update_transfer_unit,
+	};
+	struct dp_catalog_audio audio = {
+		.acr_ctrl      = dp_catalog_audio_acr_ctrl,
+		.stream_sdp    = dp_catalog_audio_stream_sdp,
+		.timestamp_sdp = dp_catalog_audio_timestamp_sdp,
+		.infoframe_sdp = dp_catalog_audio_infoframe_sdp,
+		.copy_mgmt_sdp = dp_catalog_audio_copy_mgmt_sdp,
+		.isrc_sdp      = dp_catalog_audio_isrc_sdp,
+		.setup_sdp     = dp_catalog_audio_setup_sdp,
+	};
+	struct dp_catalog_panel panel = {
+		.timing_cfg = dp_catalog_panel_timing_cfg,
+	};
+
+	if (!io) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	catalog  = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
+	if (!catalog) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	catalog->dev = dev;
+	catalog->io = io;
+
+	dp_catalog = &catalog->dp_catalog;
+
+	dp_catalog->aux   = aux;
+	dp_catalog->ctrl  = ctrl;
+	dp_catalog->audio = audio;
+	dp_catalog->panel = panel;
+
+	return dp_catalog;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_catalog_put(struct dp_catalog *dp_catalog)
+{
+	struct dp_catalog_private *catalog;
+
+	if (!dp_catalog)
+		return;
+
+	catalog = container_of(dp_catalog, struct dp_catalog_private,
+				dp_catalog);
+
+	devm_kfree(catalog->dev, catalog);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
new file mode 100644
index 0000000..ce88569
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_CATALOG_H_
+#define _DP_CATALOG_H_
+
+#include "dp_parser.h"
+
+/* interrupts */
+#define DP_INTR_HPD		BIT(0)
+#define DP_INTR_AUX_I2C_DONE	BIT(3)
+#define DP_INTR_WRONG_ADDR	BIT(6)
+#define DP_INTR_TIMEOUT		BIT(9)
+#define DP_INTR_NACK_DEFER	BIT(12)
+#define DP_INTR_WRONG_DATA_CNT	BIT(15)
+#define DP_INTR_I2C_NACK	BIT(18)
+#define DP_INTR_I2C_DEFER	BIT(21)
+#define DP_INTR_PLL_UNLOCKED	BIT(24)
+#define DP_INTR_AUX_ERROR	BIT(27)
+
+#define DP_INTR_READY_FOR_VIDEO		BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT	BIT(3)
+#define DP_INTR_FRAME_END		BIT(6)
+#define DP_INTR_CRC_UPDATED		BIT(9)
+
+struct dp_catalog_aux {
+	u32 data;
+	u32 isr;
+
+	u32 (*read_data)(struct dp_catalog_aux *aux);
+	int (*write_data)(struct dp_catalog_aux *aux);
+	int (*write_trans)(struct dp_catalog_aux *aux);
+	void (*reset)(struct dp_catalog_aux *aux);
+	void (*enable)(struct dp_catalog_aux *aux, bool enable);
+	void (*setup)(struct dp_catalog_aux *aux, u32 *aux_cfg);
+	void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy);
+};
+
+struct dp_catalog_ctrl {
+	u32 dp_tu;
+	u32 valid_boundary;
+	u32 valid_boundary2;
+	u32 isr;
+
+	void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state);
+	void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u32 config);
+	void (*lane_mapping)(struct dp_catalog_ctrl *ctrl);
+	void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
+	void (*config_misc)(struct dp_catalog_ctrl *ctrl, u32 cc, u32 tb);
+	void (*config_msa)(struct dp_catalog_ctrl *ctrl);
+	void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
+	void (*reset)(struct dp_catalog_ctrl *ctrl);
+	bool (*mainlink_ready)(struct dp_catalog_ctrl *ctrl);
+	void (*enable_irq)(struct dp_catalog_ctrl *ctrl, bool enable);
+	void (*hpd_config)(struct dp_catalog_ctrl *ctrl, bool enable);
+	void (*phy_reset)(struct dp_catalog_ctrl *ctrl);
+	void (*phy_lane_cfg)(struct dp_catalog_ctrl *ctrl, bool flipped,
+				u8 lane_cnt);
+	void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level,
+				u8 p_level);
+	void (*get_interrupt)(struct dp_catalog_ctrl *ctrl);
+	void (*update_transfer_unit)(struct dp_catalog_ctrl *ctrl);
+};
+
+struct dp_catalog_audio {
+	u32 data;
+
+	int (*acr_ctrl)(struct dp_catalog_audio *audio);
+	int (*stream_sdp)(struct dp_catalog_audio *audio);
+	int (*timestamp_sdp)(struct dp_catalog_audio *audio);
+	int (*infoframe_sdp)(struct dp_catalog_audio *audio);
+	int (*copy_mgmt_sdp)(struct dp_catalog_audio *audio);
+	int (*isrc_sdp)(struct dp_catalog_audio *audio);
+	int (*setup_sdp)(struct dp_catalog_audio *audio);
+};
+
+struct dp_catalog_panel {
+	u32 total;
+	u32 sync_start;
+	u32 width_blanking;
+	u32 dp_active;
+
+	int (*timing_cfg)(struct dp_catalog_panel *panel);
+};
+
+struct dp_catalog {
+	struct dp_catalog_aux aux;
+	struct dp_catalog_ctrl ctrl;
+	struct dp_catalog_audio audio;
+	struct dp_catalog_panel panel;
+};
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
+void dp_catalog_put(struct dp_catalog *catalog);
+
+#endif /* _DP_CATALOG_H_ */
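Based on the declarations above, a sketch of how a DP submodule is expected to
consume this interface (the dev/io/aux_cfg variables and the chosen ops are
illustrative only):

	/* illustrative only: obtain the catalog and drive a few of its ops */
	struct dp_catalog *catalog = dp_catalog_get(dev, io);

	if (IS_ERR(catalog))
		return PTR_ERR(catalog);

	catalog->aux.setup(&catalog->aux, aux_cfg);	/* aux_cfg: 10 PHY AUX config values */
	catalog->aux.enable(&catalog->aux, true);
	catalog->ctrl.enable_irq(&catalog->ctrl, true);
	catalog->ctrl.hpd_config(&catalog->ctrl, true);

	/* ... remaining aux/ctrl/panel/audio ops as needed ... */

	dp_catalog_put(catalog);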
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
new file mode 100644
index 0000000..56f6052
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -0,0 +1,1395 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+
+#include "dp_ctrl.h"
+
+#define DP_LINK_RATE_MULTIPLIER	27000000
+#define DP_KHZ_TO_HZ 1000
+#define DP_CRYPTO_CLK_RATE_KHZ 180000
+
+/* sink power state  */
+#define SINK_POWER_ON		1
+#define SINK_POWER_OFF		2
+
+#define DP_CTRL_INTR_READY_FOR_VIDEO     BIT(0)
+#define DP_CTRL_INTR_IDLE_PATTERN_SENT  BIT(3)
+
+/* dp state ctrl */
+#define ST_TRAIN_PATTERN_1		BIT(0)
+#define ST_TRAIN_PATTERN_2		BIT(1)
+#define ST_TRAIN_PATTERN_3		BIT(2)
+#define ST_TRAIN_PATTERN_4		BIT(3)
+#define ST_SYMBOL_ERR_RATE_MEASUREMENT	BIT(4)
+#define ST_PRBS7			BIT(5)
+#define ST_CUSTOM_80_BIT_PATTERN	BIT(6)
+#define ST_SEND_VIDEO			BIT(7)
+#define ST_PUSH_IDLE			BIT(8)
+
+struct dp_vc_tu_mapping_table {
+	u32 vic;
+	u8 lanes;
+	u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
+	u8 bpp;
+	u8 valid_boundary_link;
+	u16 delay_start_link;
+	bool boundary_moderation_en;
+	u8 valid_lower_boundary_link;
+	u8 upper_boundary_count;
+	u8 lower_boundary_count;
+	u8 tu_size_minus1;
+};
+
+struct dp_ctrl_private {
+	struct dp_ctrl dp_ctrl;
+
+	struct device *dev;
+	struct dp_aux *aux;
+	struct dp_panel *panel;
+	struct dp_link *link;
+	struct dp_power *power;
+	struct dp_parser *parser;
+	struct dp_catalog_ctrl *catalog;
+
+	struct completion idle_comp;
+	struct completion video_comp;
+	struct completion irq_comp;
+
+	bool hpd_irq_on;
+	bool power_on;
+	bool sink_info_read;
+	bool cont_splash;
+	bool psm_enabled;
+	bool initialized;
+	bool orientation;
+
+	u32 pixel_rate;
+	u32 vic;
+};
+
+enum notification_status {
+	NOTIFY_UNKNOWN,
+	NOTIFY_CONNECT,
+	NOTIFY_DISCONNECT,
+	NOTIFY_CONNECT_IRQ_HPD,
+	NOTIFY_DISCONNECT_IRQ_HPD,
+};
+
+static void dp_ctrl_idle_patterns_sent(struct dp_ctrl_private *ctrl)
+{
+	pr_debug("idle_patterns_sent\n");
+	complete(&ctrl->idle_comp);
+}
+
+static void dp_ctrl_video_ready(struct dp_ctrl_private *ctrl)
+{
+	pr_debug("dp_video_ready\n");
+	complete(&ctrl->video_comp);
+}
+
+static void dp_ctrl_set_sink_power_state(struct dp_ctrl_private *ctrl,
+		u8 power_state)
+{
+	const int len = 1;
+
+	ctrl->aux->write(ctrl->aux, 0x600, len, AUX_NATIVE, &power_state);
+}
+
+static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state)
+{
+	ctrl->catalog->state_ctrl(ctrl->catalog, state);
+}
+
+static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
+{
+	int const idle_pattern_completion_timeout_ms = 3 * HZ / 100;
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		pr_err("Invalid input data\n");
+		return;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_OFF);
+
+	reinit_completion(&ctrl->idle_comp);
+	dp_ctrl_state_ctrl(ctrl, ST_PUSH_IDLE);
+
+	if (!wait_for_completion_timeout(&ctrl->idle_comp,
+			idle_pattern_completion_timeout_ms))
+		pr_warn("PUSH_IDLE pattern timed out\n");
+
+	pr_debug("mainlink off done\n");
+}
+
+static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+{
+	u32 config = 0, tbd;
+
+	config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK  */
+	config |= (0 << 11); /* RGB */
+
+	/* Scrambler reset enable */
+	if (ctrl->panel->dpcd.scrambler_reset)
+		config |= (1 << 10);
+
+	tbd = ctrl->link->get_test_bits_depth(ctrl->link,
+			ctrl->panel->pinfo.bpp);
+	config |= tbd << 8;
+
+	/* Num of Lanes */
+	config |= ((ctrl->link->lane_count - 1) << 4);
+
+	if (ctrl->panel->dpcd.enhanced_frame)
+		config |= 0x40;
+
+	config |= 0x04; /* progressive video */
+
+	config |= 0x03;	/* sync clock & static Mvid */
+
+	ctrl->catalog->config_ctrl(ctrl->catalog, config);
+}
+
+/**
+ * dp_ctrl_configure_source_params() - configures DP transmitter source params
+ * @ctrl: Display Port Driver data
+ *
+ * Configures the DP transmitter source params including details such as lane
+ * configuration, output format and sink/panel timing information.
+ */
+static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
+{
+	u32 cc, tb;
+
+	ctrl->catalog->lane_mapping(ctrl->catalog);
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
+
+	dp_ctrl_config_ctrl(ctrl);
+
+	tb = ctrl->link->get_test_bits_depth(ctrl->link,
+		ctrl->panel->pinfo.bpp);
+	cc = ctrl->link->get_colorimetry_config(ctrl->link);
+	ctrl->catalog->config_misc(ctrl->catalog, cc, tb);
+
+	ctrl->catalog->config_msa(ctrl->catalog);
+
+	ctrl->panel->timing_cfg(ctrl->panel);
+}
+
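+/*
+ * Helper for the TU calculation: combines the two extra-byte contributions
+ * as either a sum or an absolute difference, and flags in *negative whether
+ * the net result should be treated as a deficit, based on how result_valid
+ * and valid_bdary_link compare against the per-TU target ("compare").
+ */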
+static void dp_ctrl_get_extra_req_bytes(u64 result_valid,
+					int valid_bdary_link,
+					u64 value1, u64 value2,
+					bool *negative, u64 *result,
+					u64 compare)
+{
+	*negative = false;
+	if (result_valid >= compare) {
+		if (valid_bdary_link >= compare)
+			*result = value1 + value2;
+		else {
+			if (value1 < value2)
+				*negative = true;
+			*result = (value1 >= value2) ?
+				(value1 - value2) : (value2 - value1);
+		}
+	} else {
+		if (valid_bdary_link >= compare) {
+			if (value1 >= value2)
+				*negative = true;
+			*result = (value1 >= value2) ?
+				(value1 - value2) : (value2 - value1);
+		} else {
+			*result = value1 + value2;
+			*negative = true;
+		}
+	}
+}
+
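+/* Round x up to the nearest multiple of y */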
+static u64 roundup_u64(u64 x, u64 y)
+{
+	x += (y - 1);
+	return (div64_ul(x, y) * y);
+}
+
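+/* Round x down to the nearest multiple of y */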
+static u64 rounddown_u64(u64 x, u64 y)
+{
+	u64 rem;
+
+	div64_u64_rem(x, y, &rem);
+	return (x - rem);
+}
+
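+/*
+ * Derive the transfer unit (TU) parameters for the current mode. The first
+ * pass searches TU sizes 32..64 for the one whose valid symbol count best
+ * matches the pclk/lclk bandwidth ratio; if a residual error remains, a
+ * brute-force pass additionally evaluates boundary moderation settings.
+ * All intermediate math is fixed point, scaled by "multiplier" (10^6).
+ */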
+static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
+		struct dp_vc_tu_mapping_table *tu_table)
+{
+	u32 const multiplier = 1000000;
+	u64 pclk, lclk;
+	u8 bpp, ln_cnt, link_rate;
+	int run_idx = 0;
+	u32 lwidth, h_blank;
+	u32 fifo_empty = 0;
+	u32 ratio_scale = 1001;
+	u64 temp, ratio, original_ratio;
+	u64 temp2, remainder;
+	u64 temp3, temp4, result = 0;
+
+	u64 err = multiplier;
+	u64 n_err = 0, n_n_err = 0;
+	bool n_err_neg, nn_err_neg;
+	u8 hblank_margin = 16;
+
+	u8 tu_size, tu_size_desired = 0, tu_size_minus1;
+	int valid_boundary_link;
+	u64 resulting_valid;
+	u64 total_valid;
+	u64 effective_valid;
+	u64 effective_valid_recorded;
+	int n_tus;
+	int n_tus_per_lane;
+	int paired_tus;
+	int remainder_tus;
+	int remainder_tus_upper, remainder_tus_lower;
+	int extra_bytes;
+	int filler_size;
+	int delay_start_link;
+	int boundary_moderation_en = 0;
+	int upper_bdry_cnt = 0;
+	int lower_bdry_cnt = 0;
+	int i_upper_bdry_cnt = 0;
+	int i_lower_bdry_cnt = 0;
+	int valid_lower_boundary_link = 0;
+	int even_distribution_bf = 0;
+	int even_distribution_legacy = 0;
+	int even_distribution = 0;
+	int min_hblank = 0;
+	int extra_pclk_cycles;
+	u8 extra_pclk_cycle_delay = 4;
+	int extra_pclk_cycles_in_link_clk;
+	u64 ratio_by_tu;
+	u64 average_valid2;
+	u64 extra_buffer_margin;
+	int new_valid_boundary_link;
+
+	u64 resulting_valid_tmp;
+	u64 ratio_by_tu_tmp;
+	int n_tus_tmp;
+	int extra_pclk_cycles_tmp;
+	int extra_pclk_cycles_in_lclk_tmp;
+	int extra_req_bytes_new_tmp;
+	int filler_size_tmp;
+	int lower_filler_size_tmp;
+	int delay_start_link_tmp;
+	int min_hblank_tmp = 0;
+	bool extra_req_bytes_is_neg = false;
+	struct dp_panel_info *pinfo = &ctrl->panel->pinfo;
+
+	u8 dp_brute_force = 1;
+	u64 brute_force_threshold = 10;
+	u64 diff_abs;
+
+	link_rate = ctrl->link->link_rate;
+	ln_cnt =  ctrl->link->lane_count;
+
+	bpp = pinfo->bpp;
+	lwidth = pinfo->h_active;
+	h_blank = pinfo->h_back_porch + pinfo->h_front_porch +
+				pinfo->h_sync_width;
+	pclk = pinfo->pixel_clk_khz * 1000;
+
+	boundary_moderation_en = 0;
+	upper_bdry_cnt = 0;
+	lower_bdry_cnt = 0;
+	i_upper_bdry_cnt = 0;
+	i_lower_bdry_cnt = 0;
+	valid_lower_boundary_link = 0;
+	even_distribution_bf = 0;
+	even_distribution_legacy = 0;
+	even_distribution = 0;
+	min_hblank = 0;
+
+	lclk = link_rate * DP_LINK_RATE_MULTIPLIER;
+
+	pr_debug("pclk=%lld, active_width=%d, h_blank=%d\n",
+						pclk, lwidth, h_blank);
+	pr_debug("lclk = %lld, ln_cnt = %d\n", lclk, ln_cnt);
+	ratio = div64_u64_rem(pclk * bpp * multiplier,
+				8 * ln_cnt * lclk, &remainder);
+	ratio = div64_u64((pclk * bpp * multiplier), (8 * ln_cnt * lclk));
+	original_ratio = ratio;
+
+	extra_buffer_margin = roundup_u64(div64_u64(extra_pclk_cycle_delay
+				* lclk * multiplier, pclk), multiplier);
+	extra_buffer_margin = div64_u64(extra_buffer_margin, multiplier);
+
+	/* To deal with cases where lines are not distributable */
+	if (((lwidth % ln_cnt) != 0) && ratio < multiplier) {
+		ratio = ratio * ratio_scale;
+		ratio = ratio < (1000 * multiplier)
+				? ratio : (1000 * multiplier);
+	}
+	pr_debug("ratio = %lld\n", ratio);
+
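+	/* Pick the TU size for which ratio * tu_size is closest to an integer */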
+	for (tu_size = 32; tu_size <= 64; tu_size++) {
+		temp = ratio * tu_size;
+		temp2 = ((temp / multiplier) + 1) * multiplier;
+		n_err = roundup_u64(temp, multiplier) - temp;
+
+		if (n_err < err) {
+			err = n_err;
+			tu_size_desired = tu_size;
+		}
+	}
+	pr_debug("Info: tu_size_desired = %d\n", tu_size_desired);
+
+	tu_size_minus1 = tu_size_desired - 1;
+
+	valid_boundary_link = roundup_u64(ratio * tu_size_desired, multiplier);
+	valid_boundary_link /= multiplier;
+	n_tus = rounddown((lwidth * bpp * multiplier)
+			/ (8 * valid_boundary_link), multiplier) / multiplier;
+	even_distribution_legacy = n_tus % ln_cnt == 0 ? 1 : 0;
+	pr_debug("Info: n_symbol_per_tu=%d, number_of_tus=%d\n",
+					valid_boundary_link, n_tus);
+
+	extra_bytes = roundup_u64((n_tus + 1)
+			* ((valid_boundary_link * multiplier)
+			- (original_ratio * tu_size_desired)), multiplier);
+	extra_bytes /= multiplier;
+	extra_pclk_cycles = roundup(extra_bytes * 8 * multiplier / bpp,
+			multiplier);
+	extra_pclk_cycles /= multiplier;
+	extra_pclk_cycles_in_link_clk = roundup_u64(div64_u64(extra_pclk_cycles
+				* lclk * multiplier, pclk), multiplier);
+	extra_pclk_cycles_in_link_clk /= multiplier;
+	filler_size = roundup_u64((tu_size_desired - valid_boundary_link)
+						* multiplier, multiplier);
+	filler_size /= multiplier;
+	ratio_by_tu = div64_u64(ratio * tu_size_desired, multiplier);
+
+	pr_debug("extra_pclk_cycles_in_link_clk=%d, extra_bytes=%d\n",
+				extra_pclk_cycles_in_link_clk, extra_bytes);
+	pr_debug("extra_pclk_cycles_in_link_clk=%d\n",
+				extra_pclk_cycles_in_link_clk);
+	pr_debug("filler_size=%d, extra_buffer_margin=%lld\n",
+				filler_size, extra_buffer_margin);
+
+	delay_start_link = ((extra_bytes > extra_pclk_cycles_in_link_clk)
+			? extra_bytes
+			: extra_pclk_cycles_in_link_clk)
+				+ filler_size + extra_buffer_margin;
+	resulting_valid = valid_boundary_link;
+	pr_debug("Info: delay_start_link=%d, filler_size=%d\n",
+				delay_start_link, filler_size);
+	pr_debug("valid_boundary_link=%d ratio_by_tu=%lld\n",
+				valid_boundary_link, ratio_by_tu);
+
+	diff_abs = (resulting_valid >= ratio_by_tu)
+				? (resulting_valid - ratio_by_tu)
+				: (ratio_by_tu - resulting_valid);
+
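+	/*
+	 * Brute-force search over TU size and upper/lower boundary counts for
+	 * a boundary moderation setting with a smaller error, subject to the
+	 * h_blank >= min_hblank constraint checked inside the loop.
+	 */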
+	if (err != 0 && ((diff_abs > brute_force_threshold)
+			|| (even_distribution_legacy == 0)
+			|| (dp_brute_force == 1))) {
+		err = multiplier;
+		for (tu_size = 32; tu_size <= 64; tu_size++) {
+			for (i_upper_bdry_cnt = 1; i_upper_bdry_cnt <= 15;
+						i_upper_bdry_cnt++) {
+				for (i_lower_bdry_cnt = 1;
+					i_lower_bdry_cnt <= 15;
+					i_lower_bdry_cnt++) {
+					new_valid_boundary_link =
+						roundup_u64(ratio
+						* tu_size, multiplier);
+					average_valid2 = (i_upper_bdry_cnt
+						* new_valid_boundary_link
+						+ i_lower_bdry_cnt
+						* (new_valid_boundary_link
+							- multiplier))
+						/ (i_upper_bdry_cnt
+							+ i_lower_bdry_cnt);
+					n_tus = rounddown_u64(div64_u64(lwidth
+						* multiplier * multiplier
+						* (bpp / 8), average_valid2),
+							multiplier);
+					n_tus /= multiplier;
+					n_tus_per_lane
+						= rounddown(n_tus
+							* multiplier
+							/ ln_cnt, multiplier);
+					n_tus_per_lane /= multiplier;
+					paired_tus =
+						rounddown((n_tus_per_lane)
+							* multiplier
+							/ (i_upper_bdry_cnt
+							+ i_lower_bdry_cnt),
+							multiplier);
+					paired_tus /= multiplier;
+					remainder_tus = n_tus_per_lane
+							- paired_tus
+						* (i_upper_bdry_cnt
+							+ i_lower_bdry_cnt);
+					if ((remainder_tus
+						- i_upper_bdry_cnt) > 0) {
+						remainder_tus_upper
+							= i_upper_bdry_cnt;
+						remainder_tus_lower =
+							remainder_tus
+							- i_upper_bdry_cnt;
+					} else {
+						remainder_tus_upper
+							= remainder_tus;
+						remainder_tus_lower = 0;
+					}
+					total_valid = paired_tus
+						* (i_upper_bdry_cnt
+						* new_valid_boundary_link
+							+ i_lower_bdry_cnt
+						* (new_valid_boundary_link
+							- multiplier))
+						+ (remainder_tus_upper
+						* new_valid_boundary_link)
+						+ (remainder_tus_lower
+						* (new_valid_boundary_link
+							- multiplier));
+					n_err_neg = nn_err_neg = false;
+					effective_valid
+						= div_u64(total_valid,
+							n_tus_per_lane);
+					n_n_err = (effective_valid
+							>= (ratio * tu_size))
+						? (effective_valid
+							- (ratio * tu_size))
+						: ((ratio * tu_size)
+							- effective_valid);
+					if (effective_valid < (ratio * tu_size))
+						nn_err_neg = true;
+					n_err = (average_valid2
+						>= (ratio * tu_size))
+						? (average_valid2
+							- (ratio * tu_size))
+						: ((ratio * tu_size)
+							- average_valid2);
+					if (average_valid2 < (ratio * tu_size))
+						n_err_neg = true;
+					even_distribution =
+						n_tus % ln_cnt == 0 ? 1 : 0;
+					diff_abs =
+						resulting_valid >= ratio_by_tu
+						? (resulting_valid
+							- ratio_by_tu)
+						: (ratio_by_tu
+							- resulting_valid);
+
+					resulting_valid_tmp = div64_u64(
+						(i_upper_bdry_cnt
+						* new_valid_boundary_link
+						+ i_lower_bdry_cnt
+						* (new_valid_boundary_link
+							- multiplier)),
+						(i_upper_bdry_cnt
+							+ i_lower_bdry_cnt));
+					ratio_by_tu_tmp =
+						original_ratio * tu_size;
+					ratio_by_tu_tmp /= multiplier;
+					n_tus_tmp = rounddown_u64(
+						div64_u64(lwidth
+						* multiplier * multiplier
+						* bpp / 8,
+						resulting_valid_tmp),
+						multiplier);
+					n_tus_tmp /= multiplier;
+
+					temp3 = (resulting_valid_tmp
+						>= (original_ratio * tu_size))
+						? (resulting_valid_tmp
+						- original_ratio * tu_size)
+						: (original_ratio * tu_size)
+						- resulting_valid_tmp;
+					temp3 = (n_tus_tmp + 1) * temp3;
+					temp4 = (new_valid_boundary_link
+						>= (original_ratio * tu_size))
+						? (new_valid_boundary_link
+							- original_ratio
+							* tu_size)
+						: (original_ratio * tu_size)
+						- new_valid_boundary_link;
+					temp4 = (i_upper_bdry_cnt
+							* ln_cnt * temp4);
+
+					temp3 = roundup_u64(temp3, multiplier);
+					temp4 = roundup_u64(temp4, multiplier);
+					dp_ctrl_get_extra_req_bytes
+						(resulting_valid_tmp,
+						new_valid_boundary_link,
+						temp3, temp4,
+						&extra_req_bytes_is_neg,
+						&result,
+						(original_ratio * tu_size));
+					extra_req_bytes_new_tmp
+						= div64_ul(result, multiplier);
+					if ((extra_req_bytes_is_neg)
+						&& (extra_req_bytes_new_tmp
+							> 1))
+						extra_req_bytes_new_tmp
+						= extra_req_bytes_new_tmp - 1;
+					if (extra_req_bytes_new_tmp == 0)
+						extra_req_bytes_new_tmp = 1;
+					extra_pclk_cycles_tmp =
+						(u64)(extra_req_bytes_new_tmp
+						      * 8 * multiplier) / bpp;
+					extra_pclk_cycles_tmp /= multiplier;
+
+					if (extra_pclk_cycles_tmp <= 0)
+						extra_pclk_cycles_tmp = 1;
+					extra_pclk_cycles_in_lclk_tmp =
+						roundup_u64(div64_u64(
+							extra_pclk_cycles_tmp
+							* lclk * multiplier,
+							pclk), multiplier);
+					extra_pclk_cycles_in_lclk_tmp
+						/= multiplier;
+					filler_size_tmp = roundup_u64(
+						(tu_size * multiplier *
+						new_valid_boundary_link),
+						multiplier);
+					filler_size_tmp /= multiplier;
+					lower_filler_size_tmp =
+						filler_size_tmp + 1;
+					if (extra_req_bytes_is_neg)
+						temp3 = (extra_req_bytes_new_tmp
+						> extra_pclk_cycles_in_lclk_tmp
+						? extra_pclk_cycles_in_lclk_tmp
+						: extra_req_bytes_new_tmp);
+					else
+						temp3 = (extra_req_bytes_new_tmp
+						> extra_pclk_cycles_in_lclk_tmp
+						? extra_req_bytes_new_tmp :
+						extra_pclk_cycles_in_lclk_tmp);
+
+					temp4 = lower_filler_size_tmp
+						+ extra_buffer_margin;
+					if (extra_req_bytes_is_neg)
+						delay_start_link_tmp
+							= (temp3 >= temp4)
+							? (temp3 - temp4)
+							: (temp4 - temp3);
+					else
+						delay_start_link_tmp
+							= temp3 + temp4;
+
+					min_hblank_tmp = (int)div64_u64(
+						roundup_u64(
+						div64_u64(delay_start_link_tmp
+						* pclk * multiplier, lclk),
+						multiplier), multiplier)
+						+ hblank_margin;
+
+					if (((even_distribution == 1)
+						|| ((even_distribution_bf == 0)
+						&& (even_distribution_legacy
+								== 0)))
+						&& !n_err_neg && !nn_err_neg
+						&& n_n_err < err
+						&& (n_n_err < diff_abs
+						|| (dp_brute_force == 1))
+						&& (new_valid_boundary_link
+									- 1) > 0
+						&& (h_blank >=
+							(u32)min_hblank_tmp)) {
+						upper_bdry_cnt =
+							i_upper_bdry_cnt;
+						lower_bdry_cnt =
+							i_lower_bdry_cnt;
+						err = n_n_err;
+						boundary_moderation_en = 1;
+						tu_size_desired = tu_size;
+						valid_boundary_link =
+							new_valid_boundary_link;
+						effective_valid_recorded
+							= effective_valid;
+						delay_start_link
+							= delay_start_link_tmp;
+						filler_size = filler_size_tmp;
+						min_hblank = min_hblank_tmp;
+						n_tus = n_tus_tmp;
+						even_distribution_bf = 1;
+
+						pr_debug("upper_bdry_cnt=%d, lower_boundary_cnt=%d, err=%lld, tu_size_desired=%d, valid_boundary_link=%d, effective_valid=%lld\n",
+							upper_bdry_cnt,
+							lower_bdry_cnt, err,
+							tu_size_desired,
+							valid_boundary_link,
+							effective_valid);
+					}
+				}
+			}
+		}
+
+		if (boundary_moderation_en == 1) {
+			resulting_valid = (u64)(upper_bdry_cnt
+					* valid_boundary_link + lower_bdry_cnt
+					* (valid_boundary_link - 1))
+					/ (upper_bdry_cnt + lower_bdry_cnt);
+			ratio_by_tu = original_ratio * tu_size_desired;
+			valid_lower_boundary_link =
+				(valid_boundary_link / multiplier) - 1;
+
+			tu_size_minus1 = tu_size_desired - 1;
+			even_distribution_bf = 1;
+			valid_boundary_link /= multiplier;
+			pr_debug("Info: Boundary_moderation enabled\n");
+		}
+	}
+
+	min_hblank = ((int) roundup_u64(div64_u64(delay_start_link * pclk
+			* multiplier, lclk), multiplier))
+			/ multiplier + hblank_margin;
+	if (h_blank < (u32)min_hblank) {
+		pr_debug(" WARNING: run_idx=%d Programmed h_blank %d is smaller than the min_hblank %d supported.\n",
+					run_idx, h_blank, min_hblank);
+	}
+
+	if (fifo_empty)	{
+		tu_size_minus1 = 31;
+		valid_boundary_link = 32;
+		delay_start_link = 0;
+		boundary_moderation_en = 0;
+	}
+
+	pr_debug("tu_size_minus1=%d valid_boundary_link=%d delay_start_link=%d boundary_moderation_en=%d\n upper_boundary_cnt=%d lower_boundary_cnt=%d valid_lower_boundary_link=%d min_hblank=%d\n",
+		tu_size_minus1, valid_boundary_link, delay_start_link,
+		boundary_moderation_en, upper_bdry_cnt, lower_bdry_cnt,
+		valid_lower_boundary_link, min_hblank);
+
+	tu_table->valid_boundary_link = valid_boundary_link;
+	tu_table->delay_start_link = delay_start_link;
+	tu_table->boundary_moderation_en = boundary_moderation_en;
+	tu_table->valid_lower_boundary_link = valid_lower_boundary_link;
+	tu_table->upper_boundary_count = upper_bdry_cnt;
+	tu_table->lower_boundary_count = lower_bdry_cnt;
+	tu_table->tu_size_minus1 = tu_size_minus1;
+}
+
+static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+{
+	u32 dp_tu = 0x0;
+	u32 valid_boundary = 0x0;
+	u32 valid_boundary2 = 0x0;
+	struct dp_vc_tu_mapping_table tu_calc_table;
+
+	dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+
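+	/* Pack the computed TU parameters into the controller register fields */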
+	dp_tu |= tu_calc_table.tu_size_minus1;
+	valid_boundary |= tu_calc_table.valid_boundary_link;
+	valid_boundary |= (tu_calc_table.delay_start_link << 16);
+
+	valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
+	valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
+	valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
+
+	if (tu_calc_table.boundary_moderation_en)
+		valid_boundary2 |= BIT(0);
+
+	pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
+			dp_tu, valid_boundary, valid_boundary2);
+
+	ctrl->catalog->dp_tu = dp_tu;
+	ctrl->catalog->valid_boundary = valid_boundary;
+	ctrl->catalog->valid_boundary2 = valid_boundary2;
+
+	ctrl->catalog->update_transfer_unit(ctrl->catalog);
+}
+
+static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (ctrl->cont_splash)
+		return ret;
+
+	ret = wait_for_completion_timeout(&ctrl->video_comp, HZ / 2);
+	if (ret <= 0) {
+		pr_err("wait for video ready timed out\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl,
+		u32 voltage_level, u32 pre_emphasis_level)
+{
+	int i;
+	u8 buf[4];
+	u32 max_level_reached = 0;
+
+	if (voltage_level == DP_LINK_VOLTAGE_MAX) {
+		pr_debug("max. voltage swing level reached %d\n",
+				voltage_level);
+		max_level_reached |= BIT(2);
+	}
+
+	if (pre_emphasis_level == DP_LINK_PRE_EMPHASIS_MAX) {
+		pr_debug("max. pre-emphasis level reached %d\n",
+				pre_emphasis_level);
+		max_level_reached  |= BIT(5);
+	}
+
+	pr_debug("max_level_reached = 0x%x\n", max_level_reached);
+
+	pre_emphasis_level <<= 3;
+
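+	/* Same swing/pre-emphasis for all four lanes: DPCD 0x103-0x106 */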
+	for (i = 0; i < 4; i++)
+		buf[i] = voltage_level | pre_emphasis_level | max_level_reached;
+
+	pr_debug("p|v=0x%x\n", voltage_level | pre_emphasis_level);
+	return ctrl->aux->write(ctrl->aux, 0x103, 4, AUX_NATIVE, buf);
+}
+
+static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+{
+	struct dp_link *link = ctrl->link;
+
+	pr_debug("v=%d p=%d\n", link->v_level, link->p_level);
+
+	ctrl->catalog->update_vx_px(ctrl->catalog,
+			link->v_level, link->p_level);
+
+	dp_ctrl_update_sink_vx_px(ctrl, link->v_level, link->p_level);
+}
+
+static void dp_ctrl_cap_lane_rate_set(struct dp_ctrl_private *ctrl)
+{
+	u8 buf[4];
+	struct dp_panel_dpcd *cap;
+
+	cap = &ctrl->panel->dpcd;
+
+	pr_debug("bw=%x lane=%d\n", ctrl->link->link_rate,
+		ctrl->link->lane_count);
+
+	buf[0] = ctrl->link->link_rate;
+	buf[1] = ctrl->link->lane_count;
+
+	if (cap->enhanced_frame)
+		buf[1] |= 0x80;
+
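+	/*
+	 * DPCD 0x100 = LINK_BW_SET, 0x101 = LANE_COUNT_SET; bit 7 of the
+	 * latter enables enhanced framing.
+	 */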
+	ctrl->aux->write(ctrl->aux, 0x100, 2, AUX_NATIVE, buf);
+}
+
+static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+		u8 pattern)
+{
+	u8 buf[4];
+
+	pr_debug("pattern=%x\n", pattern);
+
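+	/* DPCD 0x102 = TRAINING_PATTERN_SET */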
+	buf[0] = pattern;
+	ctrl->aux->write(ctrl->aux, 0x102, 1, AUX_NATIVE, buf);
+}
+
+static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
+{
+	int tries, old_v_level;
+	int ret = 0;
+	int usleep_time;
+	int const maximum_retries = 5;
+
+	dp_ctrl_state_ctrl(ctrl, 0);
+
+	/* Make sure to clear the current pattern before starting a new one */
+	wmb();
+
+	ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
+	dp_ctrl_cap_lane_rate_set(ctrl);
+	dp_ctrl_train_pattern_set(ctrl, 0x21); /* TPS1 with scrambling disabled */
+	dp_ctrl_update_vx_px(ctrl);
+
+	tries = 0;
+	old_v_level = ctrl->link->v_level;
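+	/*
+	 * Clock recovery loop: give up once the maximum voltage swing has
+	 * been requested or after five tries at the same voltage level.
+	 */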
+	while (1) {
+		usleep_time = ctrl->panel->dpcd.training_read_interval;
+		usleep_range(usleep_time, usleep_time * 2);
+
+		if (ctrl->link->clock_recovery(ctrl->link)) {
+			ret = 0;
+			break;
+		}
+
+		if (ctrl->link->v_level == DP_LINK_VOLTAGE_MAX) {
+			ret = -1;
+			break;	/* quit */
+		}
+
+		if (old_v_level == ctrl->link->v_level) {
+			tries++;
+			if (tries >= maximum_retries) {
+				ret = -1;
+				break;	/* quit */
+			}
+		} else {
+			tries = 0;
+			old_v_level = ctrl->link->v_level;
+		}
+
+		ctrl->link->adjust_levels(ctrl->link);
+
+		dp_ctrl_update_vx_px(ctrl);
+	}
+
+	return ret;
+}
+
+static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (!ctrl)
+		return -EINVAL;
+
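+	/* Step down through the standard rates: HBR3 -> HBR2 -> HBR -> RBR */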
+	switch (ctrl->link->link_rate) {
+	case DP_LINK_RATE_810:
+		ctrl->link->link_rate = DP_LINK_RATE_540;
+		break;
+	case DP_LINK_RATE_540:
+		ctrl->link->link_rate = DP_LINK_RATE_270;
+		break;
+	case DP_LINK_RATE_270:
+		ctrl->link->link_rate = DP_LINK_RATE_162;
+		break;
+	case DP_LINK_RATE_162:
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	pr_debug("new rate=%d\n", ctrl->link->link_rate);
+
+	return ret;
+}
+
+static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
+{
+	int usleep_time;
+
+	dp_ctrl_train_pattern_set(ctrl, 0);
+
+	usleep_time = ctrl->panel->dpcd.training_read_interval;
+	usleep_range(usleep_time, usleep_time * 2);
+}
+
+static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
+{
+	int tries = 0;
+	int ret = 0;
+	int usleep_time;
+	char pattern;
+	int const maximum_retries = 5;
+
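+	/* Channel equalization: use TPS3 when the sink advertises support */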
+	if (ctrl->panel->dpcd.flags & DPCD_TPS3)
+		pattern = 0x03;
+	else
+		pattern = 0x02;
+
+	dp_ctrl_update_vx_px(ctrl);
+	ctrl->catalog->set_pattern(ctrl->catalog, pattern);
+	dp_ctrl_train_pattern_set(ctrl, pattern | 0x20);
+
+	do {
+		usleep_time = ctrl->panel->dpcd.training_read_interval;
+		usleep_range(usleep_time, usleep_time * 2);
+
+		if (ctrl->link->channel_equalization(ctrl->link)) {
+			ret = 0;
+			break;
+		}
+
+		if (tries > maximum_retries) {
+			ret = -1;
+			break;
+		}
+		tries++;
+
+		ctrl->link->adjust_levels(ctrl->link);
+
+		dp_ctrl_update_vx_px(ctrl);
+	} while (1);
+
+	return ret;
+}
+
+static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	ret = ctrl->aux->ready(ctrl->aux);
+	if (!ret) {
+		pr_err("aux chan NOT ready\n");
+		return ret;
+	}
+
+	ctrl->link->p_level = 0;
+	ctrl->link->v_level = 0;
+
+	dp_ctrl_config_ctrl(ctrl);
+	dp_ctrl_state_ctrl(ctrl, 0);
+
+	ret = dp_ctrl_link_train_1(ctrl);
+	if (ret < 0) {
+		if (!dp_ctrl_link_rate_down_shift(ctrl)) {
+			pr_debug("retry with lower rate\n");
+
+			dp_ctrl_clear_training_pattern(ctrl);
+			return -EAGAIN;
+		}
+
+		pr_err("Training 1 failed\n");
+		ret = -EINVAL;
+		goto clear;
+	}
+
+	pr_debug("Training 1 completed successfully\n");
+
+	dp_ctrl_state_ctrl(ctrl, 0);
+
+	/* Make sure to clear the current pattern before starting a new one */
+	wmb();
+
+	ret = dp_ctrl_link_training_2(ctrl);
+	if (ret < 0) {
+		if (!dp_ctrl_link_rate_down_shift(ctrl)) {
+			pr_debug("retry with lower rate\n");
+
+			dp_ctrl_clear_training_pattern(ctrl);
+			return -EAGAIN;
+		}
+
+		pr_err("Training 2 failed\n");
+		ret = -EINVAL;
+		goto clear;
+	}
+
+	pr_debug("Training 2 completed successfully\n");
+
+	dp_ctrl_state_ctrl(ctrl, 0);
+	/* Make sure to clear the current pattern before starting a new one */
+	wmb();
+
+clear:
+	dp_ctrl_clear_training_pattern(ctrl);
+	return ret;
+}
+
+static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, bool train)
+{
+	bool mainlink_ready = false;
+	int ret = 0;
+
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
+
+	dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_ON);
+
+	if (ctrl->link->phy_pattern_requested(ctrl->link))
+		goto end;
+
+	if (!train)
+		goto send_video;
+
+	/*
+	 * As part of previous calls, DP controller state might have
+	 * transitioned to PUSH_IDLE. In order to start transmitting a link
+	 * training pattern, we have to first do a DP software reset.
+	 */
+	ctrl->catalog->reset(ctrl->catalog);
+
+	ret = dp_ctrl_link_train(ctrl);
+	if (ret)
+		goto end;
+
+send_video:
+	/*
+	 * Set up transfer unit values and set controller state to send
+	 * video.
+	 */
+	dp_ctrl_setup_tr_unit(ctrl);
+	ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO);
+
+	dp_ctrl_wait4video_ready(ctrl);
+	mainlink_ready = ctrl->catalog->mainlink_ready(ctrl->catalog);
+	pr_debug("mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
+end:
+	return ret;
+}
+
+static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
+		char *name, u32 rate)
+{
+	u32 num = ctrl->parser->mp[DP_CTRL_PM].num_clk;
+	struct dss_clk *cfg = ctrl->parser->mp[DP_CTRL_PM].clk_config;
+
+	while (num && strcmp(cfg->clk_name, name)) {
+		num--;
+		cfg++;
+	}
+
+	if (num)
+		cfg->rate = rate;
+	else
+		pr_err("%s clock could not be set with rate %d\n", name, rate);
+}
+
+static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	ctrl->power->set_pixel_clk_parent(ctrl->power);
+
+	dp_ctrl_set_clock_rate(ctrl, "ctrl_link_clk",
+		(ctrl->link->link_rate * DP_LINK_RATE_MULTIPLIER) /
+			DP_KHZ_TO_HZ);
+
+	dp_ctrl_set_clock_rate(ctrl, "ctrl_crypto_clk", DP_CRYPTO_CLK_RATE_KHZ);
+
+	dp_ctrl_set_clock_rate(ctrl, "ctrl_pixel_clk", ctrl->pixel_rate);
+
+	ret = ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, true);
+	if (ret) {
+		pr_err("Unable to start link clocks\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dp_ctrl_disable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+	return ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, false);
+}
+
+static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+{
+	struct dp_ctrl_private *ctrl;
+	struct dp_catalog_ctrl *catalog;
+
+	if (!dp_ctrl) {
+		pr_err("Invalid input data\n");
+		return -EINVAL;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (ctrl->initialized) {
+		pr_debug("host init done already\n");
+		return 0;
+	}
+
+	ctrl->orientation = flip;
+	catalog = ctrl->catalog;
+
+	catalog->reset(ctrl->catalog);
+	catalog->phy_reset(ctrl->catalog);
+	catalog->enable_irq(ctrl->catalog, true);
+
+	ctrl->initialized = true;
+
+	return 0;
+}
+
+/**
+ * dp_ctrl_host_deinit() - Uninitialize DP controller
+ * @ctrl: Display Port Driver data
+ *
+ * Perform required steps to uninitialize DP controller
+ * and its resources.
+ */
+static void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		pr_err("Invalid input data\n");
+		return;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (!ctrl->initialized) {
+		pr_debug("host deinit done already\n");
+		return;
+	}
+
+	ctrl->catalog->enable_irq(ctrl->catalog, false);
+	ctrl->catalog->reset(ctrl->catalog);
+
+	/* Make sure DP is disabled before clk disable */
+	wmb();
+
+	dp_ctrl_disable_mainlink_clocks(ctrl);
+
+	ctrl->initialized = false;
+	pr_debug("Host deinitialized successfully\n");
+}
+
+static int dp_ctrl_on_irq(struct dp_ctrl_private *ctrl, bool lt_needed)
+{
+	int ret = 0;
+
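+	/* Retry the full sequence while link training asks for a lower rate */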
+	do {
+		if (ret == -EAGAIN)
+			ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+		ctrl->catalog->phy_lane_cfg(ctrl->catalog,
+			ctrl->orientation, ctrl->link->lane_count);
+
+		if (lt_needed) {
+			/*
+			 * Disable and re-enable the mainlink clock since the
+			 * link clock might have been adjusted as part of the
+			 * link maintenance.
+			 */
+			if (!ctrl->link->phy_pattern_requested(
+					ctrl->link))
+				dp_ctrl_disable_mainlink_clocks(ctrl);
+
+			ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+			if (ret)
+				continue;
+		}
+
+		dp_ctrl_configure_source_params(ctrl);
+
+		reinit_completion(&ctrl->idle_comp);
+
+		ctrl->power_on = true;
+
+		if (ctrl->psm_enabled) {
+			ret = ctrl->link->send_psm_request(ctrl->link, false);
+			if (ret) {
+				pr_err("failed to exit low power mode, rc=%d\n",
+					ret);
+				continue;
+			}
+		}
+
+		ret = dp_ctrl_setup_main_link(ctrl, lt_needed);
+	} while (ret == -EAGAIN);
+
+	return ret;
+}
+
+static int dp_ctrl_on_hpd(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (ctrl->cont_splash)
+		goto link_training;
+
+	ctrl->power->clk_enable(ctrl->power, DP_CORE_PM, true);
+	ctrl->catalog->hpd_config(ctrl->catalog, true);
+
+	ctrl->link->link_rate  = ctrl->panel->get_link_rate(ctrl->panel);
+	ctrl->link->lane_count = ctrl->panel->dpcd.max_lane_count;
+	ctrl->pixel_rate = ctrl->panel->pinfo.pixel_clk_khz;
+
+	pr_debug("link_rate=%d, lane_count=%d, pixel_rate=%d\n",
+		ctrl->link->link_rate, ctrl->link->lane_count,
+		ctrl->pixel_rate);
+
+	ctrl->catalog->phy_lane_cfg(ctrl->catalog,
+			ctrl->orientation, ctrl->link->lane_count);
+
+	ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+	if (ret)
+		goto exit;
+
+	reinit_completion(&ctrl->idle_comp);
+
+	dp_ctrl_configure_source_params(ctrl);
+
+	if (ctrl->psm_enabled)
+		ret = ctrl->link->send_psm_request(ctrl->link, false);
+link_training:
+	ctrl->power_on = true;
+
+	while (-EAGAIN == dp_ctrl_setup_main_link(ctrl, true))
+		pr_debug("MAIN LINK TRAINING RETRY\n");
+
+	ctrl->cont_splash = false;
+
+	ctrl->power_on = true;
+	pr_debug("End-\n");
+
+exit:
+	return ret;
+}
+
+static int dp_ctrl_off_irq(struct dp_ctrl_private *ctrl)
+{
+	if (!ctrl->power_on) {
+		pr_debug("ctrl already powered off\n");
+		return 0;
+	}
+
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+	/* Make sure DP mainlink and audio engines are disabled */
+	wmb();
+
+	complete_all(&ctrl->irq_comp);
+	pr_debug("end\n");
+
+	return 0;
+}
+
+static int dp_ctrl_off_hpd(struct dp_ctrl_private *ctrl)
+{
+	if (!ctrl->power_on) {
+		pr_debug("panel already powered off\n");
+		return 0;
+	}
+
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+	ctrl->power_on = false;
+	ctrl->sink_info_read = false;
+
+	pr_debug("DP off done\n");
+
+	return 0;
+}
+
+static int dp_ctrl_on(struct dp_ctrl *dp_ctrl)
+{
+	int rc = 0;
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (ctrl->hpd_irq_on)
+		rc = dp_ctrl_on_irq(ctrl, false);
+	else
+		rc = dp_ctrl_on_hpd(ctrl);
+end:
+	return rc;
+}
+
+static int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+{
+	int rc = 0;
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (ctrl->hpd_irq_on)
+		rc = dp_ctrl_off_irq(ctrl);
+	else
+		rc = dp_ctrl_off_hpd(ctrl);
+end:
+	return rc;
+}
+
+static void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl)
+		return;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	ctrl->catalog->get_interrupt(ctrl->catalog);
+
+	if (ctrl->catalog->isr & DP_CTRL_INTR_READY_FOR_VIDEO)
+		dp_ctrl_video_ready(ctrl);
+
+	if (ctrl->catalog->isr & DP_CTRL_INTR_IDLE_PATTERN_SENT)
+		dp_ctrl_idle_patterns_sent(ctrl);
+}
+
+struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in)
+{
+	int rc = 0;
+	struct dp_ctrl_private *ctrl;
+	struct dp_ctrl *dp_ctrl;
+
+	if (!in->dev || !in->panel || !in->aux ||
+	    !in->link || !in->catalog) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	ctrl = devm_kzalloc(in->dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	init_completion(&ctrl->idle_comp);
+	init_completion(&ctrl->video_comp);
+	init_completion(&ctrl->irq_comp);
+
+	/* in parameters */
+	ctrl->parser   = in->parser;
+	ctrl->panel    = in->panel;
+	ctrl->power    = in->power;
+	ctrl->aux      = in->aux;
+	ctrl->link     = in->link;
+	ctrl->catalog  = in->catalog;
+
+	dp_ctrl = &ctrl->dp_ctrl;
+
+	/* out parameters */
+	dp_ctrl->init      = dp_ctrl_host_init;
+	dp_ctrl->deinit    = dp_ctrl_host_deinit;
+	dp_ctrl->on        = dp_ctrl_on;
+	dp_ctrl->off       = dp_ctrl_off;
+	dp_ctrl->push_idle = dp_ctrl_push_idle;
+	dp_ctrl->isr       = dp_ctrl_isr;
+
+	return dp_ctrl;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl)
+		return;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	devm_kfree(ctrl->dev, ctrl);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
new file mode 100644
index 0000000..5efe505
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_CTRL_H_
+#define _DP_CTRL_H_
+
+#include "dp_aux.h"
+#include "dp_panel.h"
+#include "dp_link.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+
+struct dp_ctrl {
+	int (*init)(struct dp_ctrl *dp_ctrl, bool flip);
+	void (*deinit)(struct dp_ctrl *dp_ctrl);
+	int (*on)(struct dp_ctrl *dp_ctrl);
+	int (*off)(struct dp_ctrl *dp_ctrl);
+	void (*push_idle)(struct dp_ctrl *dp_ctrl);
+	void (*isr)(struct dp_ctrl *dp_ctrl);
+};
+
+struct dp_ctrl_in {
+	struct device *dev;
+	struct dp_panel *panel;
+	struct dp_aux *aux;
+	struct dp_link *link;
+	struct dp_parser *parser;
+	struct dp_power *power;
+	struct dp_catalog_ctrl *catalog;
+};
+
+struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in);
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
+
+#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
new file mode 100644
index 0000000..850acbf
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp]: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/of_irq.h>
+
+#include "msm_drv.h"
+#include "dp_usbpd.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_panel.h"
+#include "dp_ctrl.h"
+#include "dp_display.h"
+
+static struct dp_display *g_dp_display;
+
+struct dp_display_private {
+	char *name;
+	int irq;
+
+	struct platform_device *pdev;
+	struct dentry *root;
+	struct mutex lock;
+
+	struct dp_usbpd   *usbpd;
+	struct dp_parser  *parser;
+	struct dp_power   *power;
+	struct dp_catalog *catalog;
+	struct dp_aux     *aux;
+	struct dp_link    *link;
+	struct dp_panel   *panel;
+	struct dp_ctrl    *ctrl;
+
+	struct dp_usbpd_cb usbpd_cb;
+	struct dp_display_mode mode;
+	struct dp_display dp_display;
+};
+
+static const struct of_device_id dp_dt_match[] = {
+	{.compatible = "qcom,dp-display"},
+	{}
+};
+
+static irqreturn_t dp_display_irq(int irq, void *dev_id)
+{
+	struct dp_display_private *dp = dev_id;
+
+	if (!dp) {
+		pr_err("invalid data\n");
+		return IRQ_NONE;
+	}
+
+	/* DP controller isr */
+	dp->ctrl->isr(dp->ctrl);
+
+	/* DP aux isr */
+	dp->aux->isr(dp->aux);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t debugfs_dp_info_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	struct dp_display_private *dp = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!dp)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += snprintf(buf + len, (SZ_4K - len), "name = %s\n", dp->name);
+	len += snprintf(buf + len, (SZ_4K - len),
+			"\tResolution = %dx%d\n",
+			dp->panel->pinfo.h_active,
+			dp->panel->pinfo.v_active);
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+	return len;
+}
+
+static const struct file_operations dp_debug_fops = {
+	.open = simple_open,
+	.read = debugfs_dp_info_read,
+};
+
+static int dp_display_debugfs_init(struct dp_display_private *dp)
+{
+	int rc = 0;
+	struct dentry *dir, *file;
+
+	dir = debugfs_create_dir(dp->name, NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		rc = dir ? PTR_ERR(dir) : -EINVAL;
+		pr_err("[%s] debugfs create dir failed, rc = %d\n",
+		       dp->name, rc);
+		goto error;
+	}
+
+	file = debugfs_create_file("dp_debug", 0444, dir, dp, &dp_debug_fops);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = file ? PTR_ERR(file) : -EINVAL;
+		pr_err("[%s] debugfs create file failed, rc=%d\n",
+		       dp->name, rc);
+		goto error_remove_dir;
+	}
+
+	dp->root = dir;
+	return rc;
+error_remove_dir:
+	debugfs_remove(dir);
+error:
+	return rc;
+}
+
+static int dp_display_debugfs_deinit(struct dp_display_private *dp)
+{
+	debugfs_remove(dp->root);
+	return 0;
+}
+
+static int dp_display_bind(struct device *dev, struct device *master,
+		void *data)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+	struct drm_device *drm;
+	struct msm_drm_private *priv;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (!dev || !pdev || !master) {
+		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+				dev, pdev, master);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	drm = dev_get_drvdata(master);
+	dp = platform_get_drvdata(pdev);
+	if (!drm || !dp) {
+		pr_err("invalid param(s), drm %pK, dp %pK\n",
+				drm, dp);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp->dp_display.drm_dev = drm;
+	priv = drm->dev_private;
+
+	mutex_lock(&dp->lock);
+
+	rc = dp_display_debugfs_init(dp);
+	if (rc) {
+		pr_err("[%s]Debugfs init failed, rc=%d\n", dp->name, rc);
+		goto end;
+	}
+
+	rc = dp->parser->parse(dp->parser);
+	if (rc) {
+		pr_err("device tree parsing failed\n");
+		goto end;
+	}
+
+	rc = dp->power->power_client_init(dp->power, &priv->phandle);
+	if (rc) {
+		pr_err("Power client create failed\n");
+		goto end;
+	}
+end:
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static void dp_display_unbind(struct device *dev, struct device *master,
+		void *data)
+{
+	struct dp_display_private *dp;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (!dev || !pdev) {
+		pr_err("invalid param(s)\n");
+		return;
+	}
+
+	dp = platform_get_drvdata(pdev);
+	if (!dp) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	mutex_lock(&dp->lock);
+
+	(void)dp->power->power_client_deinit(dp->power);
+
+	(void)dp_display_debugfs_deinit(dp);
+
+	mutex_unlock(&dp->lock);
+}
+
+static const struct component_ops dp_display_comp_ops = {
+	.bind = dp_display_bind,
+	.unbind = dp_display_unbind,
+};
+
+static int dp_display_process_hpd_high(struct dp_display_private *dp)
+{
+	int rc;
+
+	rc = dp->panel->read_dpcd(dp->panel);
+	if (rc)
+		goto end;
+
+	rc = dp->panel->read_edid(dp->panel);
+	if (rc)
+		goto end;
+
+	return 0;
+end:
+	return rc;
+}
+
+static int dp_display_process_hpd_low(struct dp_display_private *dp)
+{
+	return 0;
+}
+
+static int dp_display_usbpd_configure_cb(struct device *dev)
+{
+	int rc = 0;
+	bool flip = false;
+	struct dp_display_private *dp;
+
+	if (!dev) {
+		pr_err("invalid dev\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = dev_get_drvdata(dev);
+	if (!dp) {
+		pr_err("no driver data found\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	mutex_lock(&dp->lock);
+
+	if (dp->usbpd->orientation == ORIENTATION_CC2)
+		flip = true;
+
+	dp->power->init(dp->power, flip);
+	dp->ctrl->init(dp->ctrl, flip);
+	dp->aux->init(dp->aux, dp->parser->aux_cfg);
+	enable_irq(dp->irq);
+
+	if (dp->usbpd->hpd_high)
+		dp_display_process_hpd_high(dp);
+
+	mutex_unlock(&dp->lock);
+end:
+	return rc;
+}
+
+static int dp_display_usbpd_disconnect_cb(struct device *dev)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dev) {
+		pr_err("invalid dev\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = dev_get_drvdata(dev);
+	if (!dp) {
+		pr_err("no driver data found\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	mutex_lock(&dp->lock);
+	disable_irq(dp->irq);
+	mutex_unlock(&dp->lock);
+
+end:
+	return rc;
+}
+
+static int dp_display_usbpd_attention_cb(struct device *dev)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dev) {
+		pr_err("invalid dev\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = dev_get_drvdata(dev);
+	if (!dp) {
+		pr_err("no driver data found\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	mutex_lock(&dp->lock);
+
+	if (dp->usbpd->hpd_irq) {
+		if (!dp->link->process_request(dp->link))
+			goto end;
+	}
+
+	if (dp->usbpd->hpd_high)
+		dp_display_process_hpd_high(dp);
+	else
+		dp_display_process_hpd_low(dp);
+
+	mutex_unlock(&dp->lock);
+end:
+	return rc;
+}
+
+static int dp_init_sub_modules(struct dp_display_private *dp)
+{
+	int rc = 0;
+	struct device *dev = &dp->pdev->dev;
+	struct dp_usbpd_cb *cb = &dp->usbpd_cb;
+	struct dp_ctrl_in ctrl_in = {
+		.dev = dev,
+	};
+
+	cb->configure  = dp_display_usbpd_configure_cb;
+	cb->disconnect = dp_display_usbpd_disconnect_cb;
+	cb->attention  = dp_display_usbpd_attention_cb;
+
+	dp->usbpd = dp_usbpd_get(dev, cb);
+	if (IS_ERR(dp->usbpd)) {
+		rc = PTR_ERR(dp->usbpd);
+		pr_err("failed to initialize usbpd, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->parser = dp_parser_get(dp->pdev);
+	if (IS_ERR(dp->parser)) {
+		rc = PTR_ERR(dp->parser);
+		pr_err("failed to initialize parser, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->catalog = dp_catalog_get(dev, &dp->parser->io);
+	if (IS_ERR(dp->catalog)) {
+		rc = PTR_ERR(dp->catalog);
+		pr_err("failed to initialize catalog, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->power = dp_power_get(dp->parser);
+	if (IS_ERR(dp->power)) {
+		rc = PTR_ERR(dp->power);
+		pr_err("failed to initialize power, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->aux = dp_aux_get(dev, &dp->catalog->aux);
+	if (IS_ERR(dp->aux)) {
+		rc = PTR_ERR(dp->aux);
+		pr_err("failed to initialize aux, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->panel = dp_panel_get(dev, dp->aux, &dp->catalog->panel);
+	if (IS_ERR(dp->panel)) {
+		rc = PTR_ERR(dp->panel);
+		pr_err("failed to initialize panel, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->link = dp_link_get(dev, dp->aux);
+	if (IS_ERR(dp->link)) {
+		rc = PTR_ERR(dp->link);
+		pr_err("failed to initialize link, rc = %d\n", rc);
+		goto err;
+	}
+
+	ctrl_in.link = dp->link;
+	ctrl_in.panel = dp->panel;
+	ctrl_in.aux = dp->aux;
+	ctrl_in.power = dp->power;
+	ctrl_in.catalog = &dp->catalog->ctrl;
+	ctrl_in.parser = dp->parser;
+
+	dp->ctrl = dp_ctrl_get(&ctrl_in);
+	if (IS_ERR(dp->ctrl)) {
+		rc = PTR_ERR(dp->ctrl);
+		pr_err("failed to initialize ctrl, rc = %d\n", rc);
+		goto err;
+	}
+err:
+	return rc;
+}
+
+static int dp_display_set_mode(struct dp_display *dp_display,
+		struct dp_display_mode *mode)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	dp->panel->pinfo = mode->timing;
+	dp->panel->init_info(dp->panel);
+error:
+	return rc;
+}
+
+static int dp_display_prepare(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_enable(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp->lock);
+	dp->ctrl->on(dp->ctrl);
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static int dp_display_post_enable(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_pre_disable(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp->lock);
+
+	dp->ctrl->off(dp->ctrl);
+
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static int dp_display_disable(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp->lock);
+
+	dp->aux->deinit(dp->aux);
+	dp->ctrl->deinit(dp->ctrl);
+	dp->power->deinit(dp->power);
+
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static int dp_request_irq(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
+	if (!dp->irq) {
+		pr_err("failed to get irq\n");
+		return -EINVAL;
+	}
+
+	rc = devm_request_irq(&dp->pdev->dev, dp->irq, dp_display_irq,
+		IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+	if (rc < 0) {
+		pr_err("failed to request IRQ%u: %d\n",
+				dp->irq, rc);
+		return rc;
+	}
+	disable_irq(dp->irq);
+
+	return 0;
+}
+
+static int dp_display_unprepare(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_validate_mode(struct dp_display *dp,
+	struct dp_display_mode *mode)
+{
+	return 0;
+}
+
+static int dp_display_get_modes(struct dp_display *dp,
+	struct dp_display_mode *modes, u32 *count)
+{
+	*count = 1;
+
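+	/* Report a single fixed 1920x1080@60 (148.5 MHz) mode */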
+	if (modes) {
+		modes->timing.h_active = 1920;
+		modes->timing.v_active = 1080;
+		modes->timing.h_back_porch = 148;
+		modes->timing.h_front_porch = 88;
+		modes->timing.h_sync_width = 44;
+		modes->timing.h_active_low = 0;
+		modes->timing.v_back_porch = 36;
+		modes->timing.v_front_porch = 4;
+		modes->timing.v_sync_width = 5;
+		modes->timing.v_active_low = 0;
+		modes->timing.h_skew = 0;
+		modes->timing.refresh_rate = 60;
+		modes->timing.pixel_clk_khz = 148500;
+	}
+
+	return 0;
+}
+
+static int dp_display_detect(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("pdev not found\n");
+		return -ENODEV;
+	}
+
+	dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+	if (!dp)
+		return -ENOMEM;
+
+	mutex_init(&dp->lock);
+	dp->pdev = pdev;
+	dp->name = "drm_dp";
+
+	rc = dp_init_sub_modules(dp);
+	if (rc) {
+		devm_kfree(&pdev->dev, dp);
+		return -EPROBE_DEFER;
+	}
+
+	platform_set_drvdata(pdev, dp);
+
+	g_dp_display = &dp->dp_display;
+
+	g_dp_display->enable        = dp_display_enable;
+	g_dp_display->post_enable   = dp_display_post_enable;
+	g_dp_display->pre_disable   = dp_display_pre_disable;
+	g_dp_display->disable       = dp_display_disable;
+	g_dp_display->set_mode      = dp_display_set_mode;
+	g_dp_display->validate_mode = dp_display_validate_mode;
+	g_dp_display->get_modes     = dp_display_get_modes;
+	g_dp_display->detect        = dp_display_detect;
+	g_dp_display->prepare       = dp_display_prepare;
+	g_dp_display->unprepare     = dp_display_unprepare;
+	g_dp_display->request_irq   = dp_request_irq;
+
+	rc = component_add(&pdev->dev, &dp_display_comp_ops);
+	if (rc)
+		pr_err("component add failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+int dp_display_get_displays(void **displays, int count)
+{
+	if (!displays) {
+		pr_err("invalid data\n");
+		return -EINVAL;
+	}
+
+	if (count != 1) {
+		pr_err("invalid number of displays\n");
+		return -EINVAL;
+	}
+
+	displays[0] = g_dp_display;
+	return count;
+}
+
+int dp_display_get_num_of_displays(void)
+{
+	return 1;
+}
+
+static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+{
+	dp_ctrl_put(dp->ctrl);
+	dp_link_put(dp->link);
+	dp_panel_put(dp->panel);
+	dp_aux_put(dp->aux);
+	dp_power_put(dp->power);
+	dp_catalog_put(dp->catalog);
+	dp_parser_put(dp->parser);
+	dp_usbpd_put(dp->usbpd);
+}
+
+static int dp_display_remove(struct platform_device *pdev)
+{
+	struct dp_display_private *dp;
+
+	if (!pdev)
+		return -EINVAL;
+
+	dp = platform_get_drvdata(pdev);
+
+	dp_display_deinit_sub_modules(dp);
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, dp);
+
+	return 0;
+}
+
+static struct platform_driver dp_display_driver = {
+	.probe  = dp_display_probe,
+	.remove = dp_display_remove,
+	.driver = {
+		.name = "msm-dp-display",
+		.of_match_table = dp_dt_match,
+	},
+};
+
+static int __init dp_display_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&dp_display_driver);
+	if (ret) {
+		pr_err("driver register failed\n");
+		return ret;
+	}
+
+	return ret;
+}
+module_init(dp_display_init);
+
+static void __exit dp_display_cleanup(void)
+{
+	platform_driver_unregister(&dp_display_driver);
+}
+module_exit(dp_display_cleanup);
+
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
new file mode 100644
index 0000000..e684854
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_DISPLAY_H_
+#define _DP_DISPLAY_H_
+
+#include <drm/drmP.h>
+
+#include "dp_panel.h"
+
+struct dp_display_mode {
+	struct dp_panel_info timing;
+	u32 capabilities;
+};
+
+struct dp_display {
+	struct drm_device *drm_dev;
+	struct dp_bridge *bridge;
+
+	int (*enable)(struct dp_display *dp_display);
+	int (*post_enable)(struct dp_display *dp_display);
+
+	int (*pre_disable)(struct dp_display *dp_display);
+	int (*disable)(struct dp_display *dp_display);
+
+	int (*set_mode)(struct dp_display *dp_display,
+			struct dp_display_mode *mode);
+	int (*validate_mode)(struct dp_display *dp_display,
+			struct dp_display_mode *mode);
+	int (*get_modes)(struct dp_display *dp_display,
+		struct dp_display_mode *modes, u32 *count);
+
+	int (*detect)(struct dp_display *dp_display);
+
+	int (*prepare)(struct dp_display *dp_display);
+	int (*unprepare)(struct dp_display *dp_display);
+	int (*request_irq)(struct dp_display *dp_display);
+};
+
+int dp_display_get_num_of_displays(void);
+int dp_display_get_displays(void **displays, int count);
+#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
new file mode 100644
index 0000000..75bce8a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp]: %s: " fmt, __func__
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "sde_connector.h"
+#include "dp_drm.h"
+
+#define to_dp_bridge(x)     container_of((x), struct dp_bridge, base)
+
+static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
+				struct dp_display_mode *dp_mode)
+{
+	memset(dp_mode, 0, sizeof(*dp_mode));
+
+	dp_mode->timing.h_active = drm_mode->hdisplay;
+	dp_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end;
+	dp_mode->timing.h_sync_width = drm_mode->htotal -
+			(drm_mode->hsync_start + dp_mode->timing.h_back_porch);
+	dp_mode->timing.h_front_porch = drm_mode->hsync_start -
+					 drm_mode->hdisplay;
+	dp_mode->timing.h_skew = drm_mode->hskew;
+
+	dp_mode->timing.v_active = drm_mode->vdisplay;
+	dp_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end;
+	dp_mode->timing.v_sync_width = drm_mode->vtotal -
+		(drm_mode->vsync_start + dp_mode->timing.v_back_porch);
+
+	dp_mode->timing.v_front_porch = drm_mode->vsync_start -
+					 drm_mode->vdisplay;
+
+	dp_mode->timing.refresh_rate = drm_mode->vrefresh;
+
+	dp_mode->timing.pixel_clk_khz = drm_mode->clock;
+
+	dp_mode->timing.v_active_low =
+		!!(drm_mode->flags & DRM_MODE_FLAG_NVSYNC);
+
+	dp_mode->timing.h_active_low =
+		!!(drm_mode->flags & DRM_MODE_FLAG_NHSYNC);
+}
+
+static void convert_to_drm_mode(const struct dp_display_mode *dp_mode,
+				struct drm_display_mode *drm_mode)
+{
+	u32 flags = 0;
+
+	memset(drm_mode, 0, sizeof(*drm_mode));
+
+	drm_mode->hdisplay = dp_mode->timing.h_active;
+	drm_mode->hsync_start = drm_mode->hdisplay +
+				dp_mode->timing.h_front_porch;
+	drm_mode->hsync_end = drm_mode->hsync_start +
+			      dp_mode->timing.h_sync_width;
+	drm_mode->htotal = drm_mode->hsync_end + dp_mode->timing.h_back_porch;
+	drm_mode->hskew = dp_mode->timing.h_skew;
+
+	drm_mode->vdisplay = dp_mode->timing.v_active;
+	drm_mode->vsync_start = drm_mode->vdisplay +
+				dp_mode->timing.v_front_porch;
+	drm_mode->vsync_end = drm_mode->vsync_start +
+			      dp_mode->timing.v_sync_width;
+	drm_mode->vtotal = drm_mode->vsync_end + dp_mode->timing.v_back_porch;
+
+	drm_mode->vrefresh = dp_mode->timing.refresh_rate;
+	drm_mode->clock = dp_mode->timing.pixel_clk_khz;
+
+	if (dp_mode->timing.h_active_low)
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PHSYNC;
+
+	if (dp_mode->timing.v_active_low)
+		flags |= DRM_MODE_FLAG_NVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PVSYNC;
+
+	drm_mode->flags = flags;
+
+	drm_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+	drm_mode_set_name(drm_mode);
+}
+
+static int dp_bridge_attach(struct drm_bridge *dp_bridge)
+{
+	struct dp_bridge *bridge = to_dp_bridge(dp_bridge);
+
+	if (!dp_bridge) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	pr_debug("[%d] attached\n", bridge->id);
+
+	return 0;
+}
+
+static void dp_bridge_pre_enable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	/* By this point mode should have been validated through mode_fixup */
+	rc = dp->set_mode(dp, &bridge->dp_mode);
+	if (rc) {
+		pr_err("[%d] failed to perform a mode set, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+
+	rc = dp->prepare(dp);
+	if (rc) {
+		pr_err("[%d] DP display prepare failed, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+
+	rc = dp->enable(dp);
+	if (rc) {
+		pr_err("[%d] DP display enable failed, rc=%d\n",
+		       bridge->id, rc);
+		dp->unprepare(dp);
+	}
+}
+
+static void dp_bridge_enable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	rc = dp->post_enable(dp);
+	if (rc)
+		pr_err("[%d] DP display post enable failed, rc=%d\n",
+		       bridge->id, rc);
+}
+
+static void dp_bridge_disable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	rc = dp->pre_disable(dp);
+	if (rc) {
+		pr_err("[%d] DP display pre disable failed, rc=%d\n",
+		       bridge->id, rc);
+	}
+}
+
+static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	rc = dp->disable(dp);
+	if (rc) {
+		pr_err("[%d] DP display disable failed, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+
+	rc = dp->unprepare(dp);
+	if (rc) {
+		pr_err("[%d] DP display unprepare failed, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+}
+
+static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge || !mode || !adjusted_mode) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	memset(&bridge->dp_mode, 0x0, sizeof(struct dp_display_mode));
+	convert_to_dp_mode(adjusted_mode, &bridge->dp_mode);
+}
+
+static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	int rc = 0;
+	bool ret = true;
+	struct dp_display_mode dp_mode;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge || !mode || !adjusted_mode) {
+		pr_err("Invalid params\n");
+		ret = false;
+		goto end;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	convert_to_dp_mode(mode, &dp_mode);
+
+	rc = dp->validate_mode(dp, &dp_mode);
+	if (rc) {
+		pr_err("[%d] mode is not valid, rc=%d\n", bridge->id, rc);
+		ret = false;
+	} else {
+		convert_to_drm_mode(&dp_mode, adjusted_mode);
+	}
+end:
+	return ret;
+}
+
+static const struct drm_bridge_funcs dp_bridge_ops = {
+	.attach       = dp_bridge_attach,
+	.mode_fixup   = dp_bridge_mode_fixup,
+	.pre_enable   = dp_bridge_pre_enable,
+	.enable       = dp_bridge_enable,
+	.disable      = dp_bridge_disable,
+	.post_disable = dp_bridge_post_disable,
+	.mode_set     = dp_bridge_mode_set,
+};
+
+int dp_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	struct dp_display *dp_display = display;
+
+	if (!info || !dp_display)
+		return -EINVAL;
+
+	return 0;
+}
+
+int dp_connector_get_info(struct msm_display_info *info, void *data)
+{
+	struct dp_display *display = data;
+
+	if (!info || !display) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
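+	/*
+	 * Fixed display capabilities (single tile, video mode, 1080p60) are
+	 * reported here; presumably these are bring-up defaults until the
+	 * real sink capabilities are plumbed through.
+	 */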
+	info->intf_type = DRM_MODE_CONNECTOR_DisplayPort;
+
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = 0;
+
+	info->is_connected = true;
+	info->frame_rate = 60;
+	info->width_mm = 160;
+	info->height_mm = 90;
+	info->max_width = 1920;
+	info->max_height = 1080;
+	info->vtotal = 1125;
+	info->is_primary = true;
+	info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
+	info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
+
+	return 0;
+}
+
+enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
+		bool force,
+		void *display)
+{
+	enum drm_connector_status status = connector_status_unknown;
+	struct msm_display_info info;
+	int rc;
+
+	if (!conn || !display)
+		return status;
+
+	/* get display dp_info */
+	memset(&info, 0x0, sizeof(info));
+	rc = dp_connector_get_info(&info, display);
+	if (rc) {
+		pr_err("failed to get display info, rc=%d\n", rc);
+		return connector_status_disconnected;
+	}
+
+	if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+		status = (info.is_connected ? connector_status_connected :
+					      connector_status_disconnected);
+	else
+		status = connector_status_connected;
+
+	conn->display_info.width_mm = info.width_mm;
+	conn->display_info.height_mm = info.height_mm;
+
+	return status;
+}
+
+int dp_connector_get_modes(struct drm_connector *connector,
+		void *display)
+{
+	u32 count = 0;
+	struct dp_display_mode *modes;
+	struct drm_display_mode drm_mode;
+	struct dp_display *dp;
+	int rc, i;
+
+	if (!connector || !display || sde_connector_get_panel(connector))
+		goto end;
+
+	dp = display;
+
+	rc = dp->get_modes(dp, NULL, &count);
+	if (rc) {
+		pr_err("failed to get num of modes, rc=%d\n", rc);
+		goto end;
+	}
+
+	modes = kcalloc(count, sizeof(*modes), GFP_KERNEL);
+	if (!modes) {
+		count = 0;
+		goto end;
+	}
+
+	rc = dp->get_modes(dp, modes, &count);
+	if (rc) {
+		pr_err("failed to get modes, rc=%d\n", rc);
+		count = 0;
+		goto error;
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_display_mode *m;
+
+		memset(&drm_mode, 0x0, sizeof(drm_mode));
+		convert_to_drm_mode(&modes[i], &drm_mode);
+		m = drm_mode_duplicate(connector->dev, &drm_mode);
+		if (!m) {
+			pr_err("failed to add mode %ux%u\n",
+			       drm_mode.hdisplay,
+			       drm_mode.vdisplay);
+			count = -ENOMEM;
+			goto error;
+		}
+		m->width_mm = connector->display_info.width_mm;
+		m->height_mm = connector->display_info.height_mm;
+		drm_mode_probed_add(connector, m);
+	}
+error:
+	kfree(modes);
+end:
+	pr_debug("MODE COUNT =%d\n\n", count);
+	return count;
+}
+
+int dp_drm_bridge_init(void *data, struct drm_encoder *encoder)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct drm_device *dev;
+	struct dp_display *display = data;
+	struct msm_drm_private *priv = NULL;
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	dev = display->drm_dev;
+	bridge->display = display;
+	bridge->base.funcs = &dp_bridge_ops;
+	bridge->base.encoder = encoder;
+
+	priv = dev->dev_private;
+
+	rc = drm_bridge_attach(dev, &bridge->base);
+	if (rc) {
+		pr_err("failed to attach bridge, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
+	rc = display->request_irq(display);
+	if (rc) {
+		pr_err("request_irq failed, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
+	encoder->bridge = &bridge->base;
+	priv->bridges[priv->num_bridges++] = &bridge->base;
+	display->bridge = bridge;
+
+	return 0;
+error_free_bridge:
+	kfree(bridge);
+error:
+	return rc;
+}
+
+void dp_drm_bridge_deinit(void *data)
+{
+	struct dp_display *display = data;
+	struct dp_bridge *bridge = display->bridge;
+
+	if (bridge && bridge->base.encoder)
+		bridge->base.encoder->bridge = NULL;
+
+	kfree(bridge);
+}
+
+enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	return MODE_OK;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
new file mode 100644
index 0000000..4ff1e07
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_DRM_H_
+#define _DP_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+#include "dp_display.h"
+
+struct dp_bridge {
+	struct drm_bridge base;
+	u32 id;
+
+	struct dp_display *display;
+	struct dp_display_mode dp_mode;
+};
+
+/**
+ * dp_connector_post_init - callback to perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+int dp_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display);
+
+/**
+ * dp_connector_detect - callback to determine if connector is connected
+ * @conn: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ * Returns: Connector 'is connected' status
+ */
+enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
+		bool force,
+		void *display);
+
+/**
+ * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Number of modes added
+ */
+int dp_connector_get_modes(struct drm_connector *connector,
+		void *display);
+
+/**
+ * dp_connector_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display);
+
+int dp_connector_get_info(struct msm_display_info *info, void *display);
+
+int dp_drm_bridge_init(void *display,
+	struct drm_encoder *encoder);
+
+void dp_drm_bridge_deinit(void *display);
+#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
new file mode 100644
index 0000000..e9955a9
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -0,0 +1,1809 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include "dp_link.h"
+#include "dp_panel.h"
+
+#define DP_LINK_ENUM_STR(x)		#x
+
+enum dp_lane_count {
+	DP_LANE_COUNT_1	= 1,
+	DP_LANE_COUNT_2	= 2,
+	DP_LANE_COUNT_4	= 4,
+};
+
+enum phy_test_pattern {
+	PHY_TEST_PATTERN_NONE,
+	PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING,
+	PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT,
+	PHY_TEST_PATTERN_PRBS7,
+	PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN,
+	PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN,
+};
+
+enum dynamic_range {
+	DP_DYNAMIC_RANGE_RGB_VESA = 0x00,
+	DP_DYNAMIC_RANGE_RGB_CEA = 0x01,
+	DP_DYNAMIC_RANGE_UNKNOWN = 0xFFFFFFFF,
+};
+
+enum test_video_pattern {
+	DP_TEST_VIDEO_PATTERN_NONE = 0x00,
+	DP_TEST_VIDEO_PATTERN_COLOR_RAMPS = 0x01,
+	DP_TEST_VIDEO_PATTERN_BW_VERT_LINES = 0x02,
+	DP_TEST_VIDEO_PATTERN_COLOR_SQUARE = 0x03,
+};
+
+enum test_bit_depth {
+	DP_TEST_BIT_DEPTH_6 = 0x00,
+	DP_TEST_BIT_DEPTH_8 = 0x01,
+	DP_TEST_BIT_DEPTH_10 = 0x02,
+	DP_TEST_BIT_DEPTH_UNKNOWN = 0xFFFFFFFF,
+};
+
+enum dp_link_response {
+	TEST_ACK			= 0x1,
+	TEST_NACK			= 0x2,
+	TEST_EDID_CHECKSUM_WRITE	= 0x4,
+};
+
+enum audio_sample_rate {
+	AUDIO_SAMPLE_RATE_32_KHZ	= 0x00,
+	AUDIO_SAMPLE_RATE_44_1_KHZ	= 0x01,
+	AUDIO_SAMPLE_RATE_48_KHZ	= 0x02,
+	AUDIO_SAMPLE_RATE_88_2_KHZ	= 0x03,
+	AUDIO_SAMPLE_RATE_96_KHZ	= 0x04,
+	AUDIO_SAMPLE_RATE_176_4_KHZ	= 0x05,
+	AUDIO_SAMPLE_RATE_192_KHZ	= 0x06,
+};
+
+enum audio_pattern_type {
+	AUDIO_TEST_PATTERN_OPERATOR_DEFINED	= 0x00,
+	AUDIO_TEST_PATTERN_SAWTOOTH		= 0x01,
+};
+
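+/*
+ * Snapshot of an automated test request read from the sink's DPCD test
+ * registers (TEST_REQUEST at 0x218 plus the link training, video timing
+ * and audio test fields parsed below).
+ */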
+struct dp_link_request {
+	u32 test_requested;
+	u32 test_link_rate;
+	u32 test_lane_count;
+	u32 phy_test_pattern_sel;
+	u32 test_video_pattern;
+	u32 test_bit_depth;
+	u32 test_dyn_range;
+	u32 test_h_total;
+	u32 test_v_total;
+	u32 test_h_start;
+	u32 test_v_start;
+	u32 test_hsync_pol;
+	u32 test_hsync_width;
+	u32 test_vsync_pol;
+	u32 test_vsync_width;
+	u32 test_h_width;
+	u32 test_v_height;
+	u32 test_rr_d;
+	u32 test_rr_n;
+	u32 test_audio_sampling_rate;
+	u32 test_audio_channel_count;
+	u32 test_audio_pattern_type;
+	u32 test_audio_period_ch_1;
+	u32 test_audio_period_ch_2;
+	u32 test_audio_period_ch_3;
+	u32 test_audio_period_ch_4;
+	u32 test_audio_period_ch_5;
+	u32 test_audio_period_ch_6;
+	u32 test_audio_period_ch_7;
+	u32 test_audio_period_ch_8;
+	u32 response;
+};
+
+struct dp_link_sink_count {
+	u32 count;
+	bool cp_ready;
+};
+
+struct dp_link_status {
+	u8 lane_01_status;
+	u8 lane_23_status;
+	u8 interlane_align_done;
+	u8 downstream_port_status_changed;
+	u8 link_status_updated;
+	u8 port_0_in_sync;
+	u8 port_1_in_sync;
+	u8 req_voltage_swing[4];
+	u8 req_pre_emphasis[4];
+};
+
+struct dp_link_private {
+	struct device *dev;
+	struct dp_aux *aux;
+	struct dp_link dp_link;
+
+	struct dp_link_request request;
+	struct dp_link_sink_count sink_count;
+	struct dp_link_status link_status;
+};
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(enum test_bit_depth tbd)
+{
+	u32 bpp;
+
+	/*
+	 * Few simplistic rules and assumptions made here:
+	 *    1. Bit depth is per color component
+	 *    2. If bit depth is unknown return 0
+	 *    3. Assume 3 color components
+	 */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		bpp = 18;
+		break;
+	case DP_TEST_BIT_DEPTH_8:
+		bpp = 24;
+		break;
+	case DP_TEST_BIT_DEPTH_10:
+		bpp = 30;
+		break;
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		bpp = 0;
+	}
+
+	return bpp;
+}
+
+static char *dp_link_get_phy_test_pattern(u32 phy_test_pattern_sel)
+{
+	switch (phy_test_pattern_sel) {
+	case PHY_TEST_PATTERN_NONE:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_NONE);
+	case PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING);
+	case PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
+		return DP_LINK_ENUM_STR(
+			PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT);
+	case PHY_TEST_PATTERN_PRBS7:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_PRBS7);
+	case PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN);
+	case PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN);
+	default:
+		return "unknown";
+	}
+}
+
+static char *dp_link_get_audio_test_pattern(u32 pattern)
+{
+	switch (pattern) {
+	case AUDIO_TEST_PATTERN_OPERATOR_DEFINED:
+		return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_OPERATOR_DEFINED);
+	case AUDIO_TEST_PATTERN_SAWTOOTH:
+		return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_SAWTOOTH);
+	default:
+		return "unknown";
+	}
+}
+
+static char *dp_link_get_audio_sample_rate(u32 rate)
+{
+	switch (rate) {
+	case AUDIO_SAMPLE_RATE_32_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_32_KHZ);
+	case AUDIO_SAMPLE_RATE_44_1_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_44_1_KHZ);
+	case AUDIO_SAMPLE_RATE_48_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_48_KHZ);
+	case AUDIO_SAMPLE_RATE_88_2_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_88_2_KHZ);
+	case AUDIO_SAMPLE_RATE_96_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_96_KHZ);
+	case AUDIO_SAMPLE_RATE_176_4_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_176_4_KHZ);
+	case AUDIO_SAMPLE_RATE_192_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_192_KHZ);
+	default:
+		return "unknown";
+	}
+}
+
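+/*
+ * Helper for the TEST_AUDIO_PERIOD_CH_1..8 bytes (DPCD 0x273-0x27A): each
+ * register carries a 4-bit period value in bits 3:0.
+ */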
+static int dp_link_get_period(struct dp_link_private *link, int const addr)
+{
+	int ret = 0;
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	u32 const max_audio_period = 0xA;
+
+	/* TEST_AUDIO_PERIOD_CH_XX */
+	rlen = link->aux->read(link->aux, addr, param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read test_audio_period (0x%x)\n", addr);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	data = *bp;
+
+	/* Period - Bits 3:0 */
+	data = data & 0xF;
+	if ((int)data > max_audio_period) {
+		pr_err("invalid test_audio_period_ch_1 = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = data;
+exit:
+	return ret;
+}
+
+static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
+{
+	int ret = 0;
+	int const test_audio_period_ch_1_addr = 0x273;
+	int const test_audio_period_ch_2_addr = 0x274;
+	int const test_audio_period_ch_3_addr = 0x275;
+	int const test_audio_period_ch_4_addr = 0x276;
+	int const test_audio_period_ch_5_addr = 0x277;
+	int const test_audio_period_ch_6_addr = 0x278;
+	int const test_audio_period_ch_7_addr = 0x279;
+	int const test_audio_period_ch_8_addr = 0x27A;
+	struct dp_link_request *req = &link->request;
+
+	/* TEST_AUDIO_PERIOD_CH_1 (Byte 0x273) */
+	ret = dp_link_get_period(link, test_audio_period_ch_1_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_1 = ret;
+	pr_debug("test_audio_period_ch_1 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_2 (Byte 0x274) */
+	ret = dp_link_get_period(link, test_audio_period_ch_2_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_2 = ret;
+	pr_debug("test_audio_period_ch_2 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
+	ret = dp_link_get_period(link, test_audio_period_ch_3_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_3 = ret;
+	pr_debug("test_audio_period_ch_3 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_4 (Byte 0x276) */
+	ret = dp_link_get_period(link, test_audio_period_ch_4_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_4 = ret;
+	pr_debug("test_audio_period_ch_4 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_5 (Byte 0x277) */
+	ret = dp_link_get_period(link, test_audio_period_ch_5_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_5 = ret;
+	pr_debug("test_audio_period_ch_5 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_6 (Byte 0x278) */
+	ret = dp_link_get_period(link, test_audio_period_ch_6_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_6 = ret;
+	pr_debug("test_audio_period_ch_6 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_7 (Byte 0x279) */
+	ret = dp_link_get_period(link, test_audio_period_ch_7_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_7 = ret;
+	pr_debug("test_audio_period_ch_7 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_8 (Byte 0x27A) */
+	ret = dp_link_get_period(link, test_audio_period_ch_8_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_8 = ret;
+	pr_debug("test_audio_period_ch_8 = 0x%x\n", ret);
+exit:
+	return ret;
+}
+
+static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const test_audio_pattern_type_addr = 0x272;
+	int const max_audio_pattern_type = 0x1;
+
+	/* Read the requested audio pattern type (Byte 0x272). */
+	rlen = link->aux->read(link->aux, test_audio_pattern_type_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link audio mode data\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	/* Audio Pattern Type - Bits 7:0 */
+	if ((int)data > max_audio_pattern_type) {
+		pr_err("invalid audio pattern type = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_audio_pattern_type = data;
+	pr_debug("audio pattern type = %s\n",
+			dp_link_get_audio_test_pattern(data));
+exit:
+	return ret;
+}
+
+static int dp_link_parse_audio_mode(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const test_audio_mode_addr = 0x271;
+	int const max_audio_sampling_rate = 0x6;
+	int const max_audio_channel_count = 0x8;
+	int sampling_rate = 0x0;
+	int channel_count = 0x0;
+
+	/* Read the requested audio mode (Byte 0x271). */
+	rlen = link->aux->read(link->aux, test_audio_mode_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link audio mode data\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	/* Sampling Rate - Bits 3:0 */
+	sampling_rate = data & 0xF;
+	if (sampling_rate > max_audio_sampling_rate) {
+		pr_err("sampling rate (0x%x) greater than max (0x%x)\n",
+				sampling_rate, max_audio_sampling_rate);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Channel Count - Bits 7:4 */
+	channel_count = ((data & 0xF0) >> 4) + 1;
+	if (channel_count > max_audio_channel_count) {
+		pr_err("channel_count (0x%x) greater than max (0x%x)\n",
+				channel_count, max_audio_channel_count);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_audio_sampling_rate = sampling_rate;
+	link->request.test_audio_channel_count = channel_count;
+	pr_debug("sampling_rate = %s, channel_count = 0x%x\n",
+		dp_link_get_audio_sample_rate(sampling_rate), channel_count);
+exit:
+	return ret;
+}
+
+/**
+ * dp_link_parse_audio_pattern_params() - parses audio pattern params from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the audio test pattern parameters.
+ */
+static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
+{
+	int ret = 0;
+
+	ret = dp_link_parse_audio_mode(link);
+	if (ret)
+		goto exit;
+
+	ret = dp_link_parse_audio_pattern_type(link);
+	if (ret)
+		goto exit;
+
+	ret = dp_link_parse_audio_channel_period(link);
+
+exit:
+	return ret;
+}
+
+/**
+ * dp_link_is_video_pattern_valid() - validates the video pattern
+ * @pattern: video pattern requested by the sink
+ *
+ * Returns true if the requested video pattern is supported.
+ */
+static bool dp_link_is_video_pattern_valid(u32 pattern)
+{
+	switch (pattern) {
+	case DP_TEST_VIDEO_PATTERN_NONE:
+	case DP_TEST_VIDEO_PATTERN_COLOR_RAMPS:
+	case DP_TEST_VIDEO_PATTERN_BW_VERT_LINES:
+	case DP_TEST_VIDEO_PATTERN_COLOR_SQUARE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static char *dp_link_video_pattern_to_string(u32 test_video_pattern)
+{
+	switch (test_video_pattern) {
+	case DP_TEST_VIDEO_PATTERN_NONE:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_NONE);
+	case DP_TEST_VIDEO_PATTERN_COLOR_RAMPS:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_COLOR_RAMPS);
+	case DP_TEST_VIDEO_PATTERN_BW_VERT_LINES:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_BW_VERT_LINES);
+	case DP_TEST_VIDEO_PATTERN_COLOR_SQUARE:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_COLOR_SQUARE);
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * dp_link_is_dynamic_range_valid() - validates the dynamic range
+ * @dr: the dynamic range value to be checked
+ *
+ * Returns true if the dynamic range value is supported.
+ */
+static bool dp_link_is_dynamic_range_valid(u32 dr)
+{
+	switch (dr) {
+	case DP_DYNAMIC_RANGE_RGB_VESA:
+	case DP_DYNAMIC_RANGE_RGB_CEA:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static char *dp_link_dynamic_range_to_string(u32 dr)
+{
+	switch (dr) {
+	case DP_DYNAMIC_RANGE_RGB_VESA:
+		return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_VESA);
+	case DP_DYNAMIC_RANGE_RGB_CEA:
+		return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_CEA);
+	case DP_DYNAMIC_RANGE_UNKNOWN:
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * dp_link_is_bit_depth_valid() - validates the bit depth requested
+ * @tbd: bit depth requested by the sink
+ *
+ * Returns true if the requested bit depth is supported.
+ */
+static bool dp_link_is_bit_depth_valid(u32 tbd)
+{
+	/* DP_TEST_BIT_DEPTH_UNKNOWN is treated as invalid */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+	case DP_TEST_BIT_DEPTH_8:
+	case DP_TEST_BIT_DEPTH_10:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static char *dp_link_bit_depth_to_string(u32 tbd)
+{
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_6);
+	case DP_TEST_BIT_DEPTH_8:
+		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_8);
+	case DP_TEST_BIT_DEPTH_10:
+		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_10);
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		return "unknown";
+	}
+}
+
+static int dp_link_parse_timing_params1(struct dp_link_private *link,
+	int const addr, int const len, u32 *val)
+{
+	u8 *bp;
+	int rlen;
+
+	if (len < 2)
+		return -EINVAL;
+
+	/*
+	 * Read a two-byte timing parameter; the MSB is stored at the lower
+	 * DPCD address.
+	 */
+	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen < len) {
+		pr_err("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+
+	*val = bp[1] | (bp[0] << 8);
+
+	return 0;
+}
+
+static int dp_link_parse_timing_params2(struct dp_link_private *link,
+	int const addr, int const len, u32 *val1, u32 *val2)
+{
+	u8 *bp;
+	int rlen;
+
+	if (len < 2)
+		return -EINVAL;
+
+	/*
+	 * Read a two-byte field where bit 7 of the first byte is the sync
+	 * polarity and the remaining 15 bits are the sync width.
+	 */
+	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen < len) {
+		pr_err("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+
+	*val1 = (bp[0] & BIT(7)) >> 7;
+	*val2 = bp[1] | ((bp[0] & 0x7F) << 8);
+
+	return 0;
+}
+
+static int dp_link_parse_timing_params3(struct dp_link_private *link,
+	int const addr, u32 *val)
+{
+	u8 *bp;
+	u32 len = 1;
+	int rlen;
+
+	/* Read a single-byte timing parameter from the given DPCD address. */
+	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen < 1) {
+		pr_err("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+	*val = bp[0];
+
+	return 0;
+}
+
+/**
+ * dp_link_parse_video_pattern_params() - parses video pattern params from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the video test pattern and the test
+ * bit depth requested by the sink, and if the values parsed are valid.
+ */
+static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+{
+	int ret = 0;
+	int rlen;
+	u8 *bp;
+	u8 data;
+	u32 dyn_range;
+	int const param_len = 0x1;
+	int const test_video_pattern_addr = 0x221;
+	int const test_misc_addr = 0x232;
+
+	/* Read the requested test video pattern (Byte 0x221). */
+	rlen = link->aux->read(link->aux, test_video_pattern_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link video pattern\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	if (!dp_link_is_video_pattern_valid(data)) {
+		pr_err("invalid link video pattern = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_video_pattern = data;
+	pr_debug("link video pattern = 0x%x (%s)\n",
+		link->request.test_video_pattern,
+		dp_link_video_pattern_to_string(
+			link->request.test_video_pattern));
+
+	/* Read the requested color bit depth and dynamic range (Byte 0x232) */
+	rlen = link->aux->read(link->aux, test_misc_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link bit depth\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	/* Dynamic Range */
+	dyn_range = (data & BIT(3)) >> 3;
+	if (!dp_link_is_dynamic_range_valid(dyn_range)) {
+		pr_err("invalid link dynamic range = 0x%x", dyn_range);
+		ret = -EINVAL;
+		goto exit;
+	}
+	link->request.test_dyn_range = dyn_range;
+	pr_debug("link dynamic range = 0x%x (%s)\n",
+		link->request.test_dyn_range,
+		dp_link_dynamic_range_to_string(
+			link->request.test_dyn_range));
+
+	/* Color bit depth */
+	data &= (BIT(5) | BIT(6) | BIT(7));
+	data >>= 5;
+	if (!dp_link_is_bit_depth_valid(data)) {
+		pr_err("invalid link bit depth = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_bit_depth = data;
+	pr_debug("link bit depth = 0x%x (%s)\n",
+		link->request.test_bit_depth,
+		dp_link_bit_depth_to_string(link->request.test_bit_depth));
+
+	/* resolution timing params */
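+	/*
+	 * These live at DPCD 0x222-0x234: H/V total, H/V start, sync
+	 * polarity and width, active width/height, and the refresh rate
+	 * numerator/denominator, parsed in that order below.
+	 */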
+	ret = dp_link_parse_timing_params1(link, 0x222, 2,
+			&link->request.test_h_total);
+	if (ret) {
+		pr_err("failed to parse test_h_total (0x222)\n");
+		goto exit;
+	}
+	pr_debug("TEST_H_TOTAL = %d\n", link->request.test_h_total);
+
+	ret = dp_link_parse_timing_params1(link, 0x224, 2,
+			&link->request.test_v_total);
+	if (ret) {
+		pr_err("failed to parse test_v_total (0x224)\n");
+		goto exit;
+	}
+	pr_debug("TEST_V_TOTAL = %d\n", link->request.test_v_total);
+
+	ret = dp_link_parse_timing_params1(link, 0x226, 2,
+			&link->request.test_h_start);
+	if (ret) {
+		pr_err("failed to parse test_h_start (0x226)\n");
+		goto exit;
+	}
+	pr_debug("TEST_H_START = %d\n", link->request.test_h_start);
+
+	ret = dp_link_parse_timing_params1(link, 0x228, 2,
+			&link->request.test_v_start);
+	if (ret) {
+		pr_err("failed to parse test_v_start (0x228)\n");
+		goto exit;
+	}
+	pr_debug("TEST_V_START = %d\n", link->request.test_v_start);
+
+	ret = dp_link_parse_timing_params2(link, 0x22A, 2,
+			&link->request.test_hsync_pol,
+			&link->request.test_hsync_width);
+	if (ret) {
+		pr_err("failed to parse (0x22A)\n");
+		goto exit;
+	}
+	pr_debug("TEST_HSYNC_POL = %d\n", link->request.test_hsync_pol);
+	pr_debug("TEST_HSYNC_WIDTH = %d\n", link->request.test_hsync_width);
+
+	ret = dp_link_parse_timing_params2(link, 0x22C, 2,
+			&link->request.test_vsync_pol,
+			&link->request.test_vsync_width);
+	if (ret) {
+		pr_err("failed to parse (0x22C)\n");
+		goto exit;
+	}
+	pr_debug("TEST_VSYNC_POL = %d\n", link->request.test_vsync_pol);
+	pr_debug("TEST_VSYNC_WIDTH = %d\n", link->request.test_vsync_width);
+
+	ret = dp_link_parse_timing_params1(link, 0x22E, 2,
+			&link->request.test_h_width);
+	if (ret) {
+		pr_err("failed to parse test_h_width (0x22E)\n");
+		goto exit;
+	}
+	pr_debug("TEST_H_WIDTH = %d\n", link->request.test_h_width);
+
+	ret = dp_link_parse_timing_params1(link, 0x230, 2,
+			&link->request.test_v_height);
+	if (ret) {
+		pr_err("failed to parse test_v_height (0x230)\n");
+		goto exit;
+	}
+	pr_debug("TEST_V_HEIGHT = %d\n", link->request.test_v_height);
+
+	ret = dp_link_parse_timing_params3(link, 0x233,
+		&link->request.test_rr_d);
+	link->request.test_rr_d &= BIT(0);
+	if (ret) {
+		pr_err("failed to parse test_rr_d (0x233)\n");
+		goto exit;
+	}
+	pr_debug("TEST_REFRESH_DENOMINATOR = %d\n", link->request.test_rr_d);
+
+	ret = dp_link_parse_timing_params3(link, 0x234,
+		&link->request.test_rr_n);
+	if (ret) {
+		pr_err("failed to parse test_rr_n (0x234)\n");
+		goto exit;
+	}
+	pr_debug("TEST_REFRESH_NUMERATOR = %d\n", link->request.test_rr_n);
+exit:
+	return ret;
+}
+
+/**
+ * dp_link_is_link_rate_valid() - validates the link rate
+ * @link_rate: link rate requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static bool dp_link_is_link_rate_valid(u32 link_rate)
+{
+	return ((link_rate == DP_LINK_RATE_162) ||
+		(link_rate == DP_LINK_RATE_270) ||
+		(link_rate == DP_LINK_RATE_540) ||
+		(link_rate == DP_LINK_RATE_810));
+}
+
+/**
+ * dp_link_is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static bool dp_link_is_lane_count_valid(u32 lane_count)
+{
+	return (lane_count == DP_LANE_COUNT_1) ||
+		(lane_count == DP_LANE_COUNT_2) ||
+		(lane_count == DP_LANE_COUNT_4);
+}
+
+/**
+ * dp_link_parse_link_training_params() - parses link training parameters from
+ * DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
+ * count (Byte 0x220), and if these parsed values are valid.
+ */
+static int dp_link_parse_link_training_params(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int ret = 0;
+	int rlen;
+	int const param_len = 0x1;
+	int const test_link_rate_addr = 0x219;
+	int const test_lane_count_addr = 0x220;
+
+	/* Read the requested link rate (Byte 0x219). */
+	rlen = link->aux->read(link->aux, test_link_rate_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read link rate\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	if (!dp_link_is_link_rate_valid(data)) {
+		pr_err("invalid link rate = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_link_rate = data;
+	pr_debug("link rate = 0x%x\n", link->request.test_link_rate);
+
+	/* Read the requested lane count (Byte 0x220). */
+	rlen = link->aux->read(link->aux, test_lane_count_addr,
+			param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read lane count\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+	data &= 0x1F;
+
+	if (!dp_link_is_lane_count_valid(data)) {
+		pr_err("invalid lane count = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_lane_count = data;
+	pr_debug("lane count = 0x%x\n", link->request.test_lane_count);
+exit:
+	return ret;
+}
+
+static bool dp_link_is_phy_test_pattern_supported(u32 phy_test_pattern_sel)
+{
+	switch (phy_test_pattern_sel) {
+	case PHY_TEST_PATTERN_NONE:
+	case PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING:
+	case PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
+	case PHY_TEST_PATTERN_PRBS7:
+	case PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN:
+	case PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * dp_link_parse_phy_test_params() - parses the phy test parameters
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD (Byte 0x248) for the DP PHY test pattern that is being
+ * requested.
+ */
+static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const phy_test_pattern_addr = 0x248;
+	int ret = 0;
+
+	rlen = link->aux->read(link->aux, phy_test_pattern_addr,
+				param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read phy link pattern\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	link->request.phy_test_pattern_sel = data;
+
+	pr_debug("phy_test_pattern_sel = %s\n",
+			dp_link_get_phy_test_pattern(data));
+
+	if (!dp_link_is_phy_test_pattern_supported(data))
+		ret = -EINVAL;
+end:
+	return ret;
+}
+
+static char *dp_link_get_test_name(u32 test_requested)
+{
+	switch (test_requested) {
+	case TEST_LINK_TRAINING: return DP_LINK_ENUM_STR(TEST_LINK_TRAINING);
+	case TEST_VIDEO_PATTERN: return DP_LINK_ENUM_STR(TEST_VIDEO_PATTERN);
+	case PHY_TEST_PATTERN:	 return DP_LINK_ENUM_STR(PHY_TEST_PATTERN);
+	case TEST_EDID_READ:	 return DP_LINK_ENUM_STR(TEST_EDID_READ);
+	case TEST_AUDIO_PATTERN: return DP_LINK_ENUM_STR(TEST_AUDIO_PATTERN);
+	default:		 return "unknown";
+	}
+}
+
+/**
+ * dp_link_is_video_audio_test_requested() - checks for audio/video test request
+ * @link: test requested by the sink
+ *
+ * Returns true if the requested test is a permitted audio/video test.
+ */
+static bool dp_link_is_video_audio_test_requested(u32 link)
+{
+	return (link == TEST_VIDEO_PATTERN) ||
+		(link == (TEST_AUDIO_PATTERN | TEST_VIDEO_PATTERN)) ||
+		(link == TEST_AUDIO_PATTERN) ||
+		(link == (TEST_AUDIO_PATTERN | TEST_AUDIO_DISABLED_VIDEO));
+}
+
+/**
+ * dp_link_is_test_supported() - checks if test requested by sink is supported
+ * @test_requested: test requested by the sink
+ *
+ * Returns true if the requested test is supported.
+ */
+static bool dp_link_is_test_supported(u32 test_requested)
+{
+	return (test_requested == TEST_LINK_TRAINING) ||
+		(test_requested == TEST_EDID_READ) ||
+		(test_requested == PHY_TEST_PATTERN) ||
+		dp_link_is_video_audio_test_requested(test_requested);
+}
+
+/**
+ * dp_link_parse_request() - parses test request parameters from sink
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if an automated test is requested (Byte 0x201),
+ * and what type of test automation is being requested (Byte 0x218).
+ */
+static int dp_link_parse_request(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	u32 const device_service_irq_addr = 0x201;
+	u32 const test_request_addr = 0x218;
+	u8 buf[4];
+
+	/*
+	 * Read the device service IRQ vector (Byte 0x201) to determine
+	 * whether an automated test has been requested by the sink.
+	 */
+	rlen = link->aux->read(link->aux, device_service_irq_addr,
+				param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("aux read failed\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	pr_debug("device service irq vector = 0x%x\n", data);
+
+	if (!(data & BIT(1))) {
+		pr_debug("no link requested\n");
+		goto end;
+	}
+
+	/*
+	 * Read the test request byte (Byte 0x218) to determine what type
+	 * of automated test has been requested by the sink.
+	 */
+	rlen = link->aux->read(link->aux, test_request_addr,
+				param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("aux read failed\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	if (!dp_link_is_test_supported(data)) {
+		pr_debug("link 0x%x not supported\n", data);
+		goto end;
+	}
+
+	pr_debug("%s (0x%x) requested\n", dp_link_get_test_name(data), data);
+	link->request.test_requested = data;
+
+	if (link->request.test_requested == PHY_TEST_PATTERN) {
+		ret = dp_link_parse_phy_test_params(link);
+		if (ret)
+			goto end;
+		ret = dp_link_parse_link_training_params(link);
+	}
+
+	if (link->request.test_requested == TEST_LINK_TRAINING)
+		ret = dp_link_parse_link_training_params(link);
+
+	if (dp_link_is_video_audio_test_requested(
+			link->request.test_requested)) {
+		ret = dp_link_parse_video_pattern_params(link);
+		if (ret)
+			goto end;
+
+		ret = dp_link_parse_audio_pattern_params(link);
+	}
+end:
+	/* clear the test request IRQ */
+	buf[0] = 1;
+	link->aux->write(link->aux, test_request_addr, 1, AUX_NATIVE, buf);
+
+	/*
+	 * Send a TEST_ACK if all test parameters are valid, otherwise send
+	 * a TEST_NACK.
+	 */
+	if (ret)
+		link->request.response = TEST_NACK;
+	else
+		link->request.response = TEST_ACK;
+
+	return ret;
+}
+
+/**
+ * dp_link_parse_sink_count() - parses the sink count
+ * @link: Display Port link module data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static void dp_link_parse_sink_count(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const sink_count_addr = 0x200;
+
+	rlen = link->aux->read(link->aux, sink_count_addr,
+				param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed to read sink count\n");
+		return;
+	}
+
+	data = *bp;
+
+	/* sink count: bits 5:0, with bit 7 carrying bit 6 of the count */
+	link->sink_count.count = ((data & BIT(7)) >> 1) | (data & 0x3F);
+	/* CP_READY: bit 6 */
+	link->sink_count.cp_ready = data & BIT(6);
+
+	pr_debug("sink_count = 0x%x, cp_ready = 0x%x\n",
+		link->sink_count.count, link->sink_count.cp_ready);
+}
+
+static int dp_link_link_status_read(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int rlen, ret = 0;
+	int const addr = 0x202;
+	int const len = 6;
+	struct dp_link_status *sp;
+
+	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen < len) {
+		pr_err("edp aux read failed\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	sp = &link->link_status;
+
+	data = *bp++; /* byte 0x202 */
+	sp->lane_01_status = data; /* lane 0, 1 */
+
+	data = *bp++; /* byte 0x203 */
+	sp->lane_23_status = data; /* lane 2, 3 */
+
+	data = *bp++; /* byte 0x204 */
+	sp->interlane_align_done = (data & BIT(0));
+	sp->downstream_port_status_changed = (data & BIT(6));
+	sp->link_status_updated = (data & BIT(7));
+
+	data = *bp++; /* byte 0x205 */
+	sp->port_0_in_sync = (data & BIT(0));
+	sp->port_1_in_sync = (data & BIT(1));
+
+	data = *bp++; /* byte 0x206 */
+	sp->req_voltage_swing[0] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[0] = data & 0x03;
+	data >>= 2;
+	sp->req_voltage_swing[1] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[1] = data & 0x03;
+
+	data = *bp++; /* byte 0x207 */
+	sp->req_voltage_swing[2] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[2] = data & 0x03;
+	data >>= 2;
+	sp->req_voltage_swing[3] = data & 0x03;
+	data >>= 2;
+	sp->req_pre_emphasis[3] = data & 0x03;
+
+	return 0;
+error:
+	return ret;
+}
+
+static void dp_link_parse_sink_status_field(struct dp_link_private *link)
+{
+	dp_link_parse_sink_count(link);
+	dp_link_parse_request(link);
+	dp_link_link_status_read(link);
+}
+
+static bool dp_link_is_link_training_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested == TEST_LINK_TRAINING);
+}
+
+/**
+ * dp_link_process_link_training_request() - processes new training requests
+ * @link: Display Port link data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and
+ * link rate, and then trigger the link retraining procedure.
+ *
+ * The function will return 0 if a link training request has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_training_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_link_training_requested(link))
+		return -EINVAL;
+
+	pr_debug("%s link rate = 0x%x, lane count = 0x%x\n",
+			dp_link_get_test_name(TEST_LINK_TRAINING),
+			link->request.test_link_rate,
+			link->request.test_lane_count);
+
+	link->dp_link.lane_count = link->request.test_lane_count;
+	link->dp_link.link_rate = link->request.test_link_rate;
+
+	return 0;
+}
+
+static bool dp_link_phy_pattern_requested(struct dp_link *dp_link)
+{
+	struct dp_link_private *link = container_of(dp_link,
+			struct dp_link_private, dp_link);
+
+	return (link->request.test_requested == PHY_TEST_PATTERN);
+}
+
+static int dp_link_parse_vx_px(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int const param_len = 0x1;
+	int const addr1 = 0x206;
+	int const addr2 = 0x207;
+	int ret = 0;
+	u32 v0, p0, v1, p1, v2, p2, v3, p3;
+	int rlen;
+
+	pr_debug("\n");
+
+	rlen = link->aux->read(link->aux, addr1, param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed reading lanes 0/1\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	pr_debug("lanes 0/1 (Byte 0x206): 0x%x\n", data);
+
+	v0 = data & 0x3;
+	data = data >> 2;
+	p0 = data & 0x3;
+	data = data >> 2;
+
+	v1 = data & 0x3;
+	data = data >> 2;
+	p1 = data & 0x3;
+	data = data >> 2;
+
+	rlen = link->aux->read(link->aux, addr2, param_len, AUX_NATIVE, &bp);
+	if (rlen < param_len) {
+		pr_err("failed reading lanes 2/3\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	pr_debug("lanes 2/3 (Byte 0x207): 0x%x\n", data);
+
+	v2 = data & 0x3;
+	data = data >> 2;
+	p2 = data & 0x3;
+	data = data >> 2;
+
+	v3 = data & 0x3;
+	data = data >> 2;
+	p3 = data & 0x3;
+	data = data >> 2;
+
+	pr_debug("vx: 0=%d, 1=%d, 2=%d, 3=%d\n", v0, v1, v2, v3);
+	pr_debug("px: 0=%d, 1=%d, 2=%d, 3=%d\n", p0, p1, p2, p3);
+
+	/**
+	 * Update the voltage and pre-emphasis levels as per DPCD request
+	 * vector.
+	 */
+	pr_debug("Current: v_level = 0x%x, p_level = 0x%x\n",
+			link->dp_link.v_level, link->dp_link.p_level);
+	pr_debug("Requested: v_level = 0x%x, p_level = 0x%x\n", v0, p0);
+	link->dp_link.v_level = v0;
+	link->dp_link.p_level = p0;
+
+	pr_debug("Success\n");
+end:
+	return ret;
+}
+
+/**
+ * dp_link_process_phy_test_pattern_request() - process new phy test requests
+ * @link: Display Port Driver data
+ *
+ * This function will handle new phy test pattern requests that are initiated
+ * by the sink. The function will return 0 if a phy test pattern request has
+ * been processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_phy_test_pattern_request(
+		struct dp_link_private *link)
+{
+	u32 test_link_rate = 0, test_lane_count = 0;
+
+	if (!dp_link_phy_pattern_requested(&link->dp_link))
+		return -EINVAL;
+
+	test_link_rate = link->request.test_link_rate;
+	test_lane_count = link->request.test_lane_count;
+
+	if (!dp_link_is_link_rate_valid(test_link_rate) ||
+		!dp_link_is_lane_count_valid(test_lane_count)) {
+		pr_err("Invalid params: link rate = 0x%x, lane count = 0x%x\n",
+				test_link_rate, test_lane_count);
+		return -EINVAL;
+	}
+
+	pr_debug("start\n");
+
+	link->dp_link.lane_count = link->request.test_lane_count;
+	link->dp_link.link_rate = link->request.test_link_rate;
+
+	dp_link_parse_vx_px(link);
+
+	pr_debug("end\n");
+
+	return 0;
+}
+
+static bool dp_link_is_link_status_updated(struct dp_link_private *link)
+{
+	return link->link_status.link_status_updated;
+}
+
+static bool dp_link_channel_eq_done(struct dp_link_private *link)
+{
+	u32 mask, data;
+	struct dp_link *dp_link = &link->dp_link;
+
+	pr_debug("\n");
+
+	dp_link_link_status_read(link);
+
+	if (!link->link_status.interlane_align_done) { /* not aligned */
+		pr_err("interlane align failed\n");
+		return false;
+	}
+
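+	/*
+	 * Each lane reports a 4-bit status field in LANE0_1/LANE2_3_STATUS;
+	 * the 0x7 per-lane mask checks CR done, channel EQ done and symbol
+	 * lock together for every active lane.
+	 */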
+	if (dp_link->lane_count == 1) {
+		mask = 0x7;
+		data = link->link_status.lane_01_status;
+	} else if (dp_link->lane_count == 2) {
+		mask = 0x77;
+		data = link->link_status.lane_01_status;
+	} else {
+		mask = 0x7777;
+		data = link->link_status.lane_23_status;
+		data <<= 8;
+		data |= link->link_status.lane_01_status;
+	}
+
+	data &= mask;
+	pr_debug("data=%x mask=%x\n", data, mask);
+
+	if (data == mask)/* all done */
+		return true;
+
+	return false;
+}
+
+static bool dp_link_clock_recovery_done(struct dp_link_private *link)
+{
+	u32 mask, data;
+	struct dp_link *dp_link = &link->dp_link;
+
+	dp_link_link_status_read(link);
+
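+	/* Only the CR_DONE bit (bit 0 of each lane's status field) matters here. */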
+	if (dp_link->lane_count == 1) {
+		mask = 0x01;	/* lane 0 */
+		data = link->link_status.lane_01_status;
+	} else if (dp_link->lane_count == 2) {
+		mask = 0x011; /* lanes 0, 1 */
+		data = link->link_status.lane_01_status;
+	} else {
+		mask = 0x01111; /* lanes 0, 1, 2, 3 */
+		data = link->link_status.lane_23_status;
+		data <<= 8;
+		data |= link->link_status.lane_01_status;
+	}
+
+	data &= mask;
+	pr_debug("data=%x mask=%x\n", data, mask);
+
+	if (data == mask) /* all done */
+		return true;
+
+	return false;
+}
+
+/**
+ * dp_link_process_link_status_update() - processes link status updates
+ * @link: Display Port link module data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ *
+ * The function will return 0 if a link status update has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_status_update(struct dp_link_private *link)
+{
+	if (!dp_link_is_link_status_updated(link) ||
+	    (dp_link_channel_eq_done(link) &&
+	     dp_link_clock_recovery_done(link)))
+		return -EINVAL;
+
+	pr_debug("channel_eq_done = %d, clock_recovery_done = %d\n",
+			dp_link_channel_eq_done(link),
+			dp_link_clock_recovery_done(link));
+
+	return 0;
+}
+
+static bool dp_link_is_ds_port_status_changed(struct dp_link_private *link)
+{
+	return link->link_status.downstream_port_status_changed;
+}
+
+/**
+ * dp_link_process_ds_port_status_change() - process port status changes
+ * @link: Display Port Driver data
+ *
+ * This function will handle downstream port updates that are initiated by
+ * the sink. If the downstream port status has changed, the EDID is read via
+ * AUX.
+ *
+ * The function will return 0 if a downstream port update has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+{
+	if (!dp_link_is_ds_port_status_changed(link))
+		return -EINVAL;
+
+	return 0;
+}
+
+static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & TEST_VIDEO_PATTERN)
+		&& !(link->request.test_requested & TEST_AUDIO_DISABLED_VIDEO);
+}
+
+static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & TEST_AUDIO_PATTERN);
+}
+
+/**
+ * dp_link_process_video_pattern_request() - process new video pattern request
+ * @link: Display Port link module's data
+ *
+ * This function will handle a new video pattern request that is initiated by
+ * the sink. This is achieved by first sending a disconnect notification to
+ * the sink followed by a subsequent connect notification to the user modules,
+ * where it is expected that the user modules would draw the required test
+ * pattern.
+ */
+static int dp_link_process_video_pattern_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_video_pattern_requested(link))
+		goto end;
+
+	pr_debug("%s: bit depth=%d(%d bpp) pattern=%s\n",
+		dp_link_get_test_name(TEST_VIDEO_PATTERN),
+		link->request.test_bit_depth,
+		dp_link_bit_depth_to_bpp(link->request.test_bit_depth),
+		dp_link_video_pattern_to_string(
+			link->request.test_video_pattern));
+
+	return 0;
+end:
+	return -EINVAL;
+}
+
+/**
+ * dp_link_process_audio_pattern_request() - process new audio pattern request
+ * @link: Display Port link module data
+ *
+ * This function will handle a new audio pattern request that is initiated by
+ * the sink. This is achieved by sending the necessary secondary data packets
+ * to the sink. It is expected that any simultaneous requests for video
+ * patterns will be handled before the audio pattern is sent to the sink.
+ */
+static int dp_link_process_audio_pattern_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_audio_pattern_requested(link))
+		return -EINVAL;
+
+	pr_debug("sampling_rate=%s, channel_count=%d, pattern_type=%s\n",
+		dp_link_get_audio_sample_rate(
+			link->request.test_audio_sampling_rate),
+		link->request.test_audio_channel_count,
+		dp_link_get_audio_test_pattern(
+			link->request.test_audio_pattern_type));
+
+	pr_debug("audio_period: ch1=0x%x, ch2=0x%x, ch3=0x%x, ch4=0x%x\n",
+		link->request.test_audio_period_ch_1,
+		link->request.test_audio_period_ch_2,
+		link->request.test_audio_period_ch_3,
+		link->request.test_audio_period_ch_4);
+
+	pr_debug("audio_period: ch5=0x%x, ch6=0x%x, ch7=0x%x, ch8=0x%x\n",
+		link->request.test_audio_period_ch_5,
+		link->request.test_audio_period_ch_6,
+		link->request.test_audio_period_ch_7,
+		link->request.test_audio_period_ch_8);
+
+	return 0;
+}
+
+static void dp_link_reset_data(struct dp_link_private *link)
+{
+	link->request = (const struct dp_link_request){ 0 };
+	link->request.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+
+	link->dp_link.test_requested = 0;
+}
+
+/**
+ * dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @link: pointer to link module data
+ *
+ * This function will handle the HPD IRQ state transitions from LOW to HIGH
+ * (including cases when there are back to back HPD IRQ HIGH) indicating
+ * the start of a new link training request or sink status update.
+ */
+static int dp_link_process_request(struct dp_link *dp_link)
+{
+	int ret = 0;
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	pr_debug("start\n");
+
+	dp_link_reset_data(link);
+
+	dp_link_parse_sink_status_field(link);
+
+	ret = dp_link_process_link_training_request(link);
+	if (!ret) {
+		dp_link->test_requested |= TEST_LINK_TRAINING;
+		goto exit;
+	}
+
+	ret = dp_link_process_phy_test_pattern_request(link);
+	if (!ret) {
+		dp_link->test_requested |= PHY_TEST_PATTERN;
+		goto exit;
+	}
+
+	ret = dp_link_process_link_status_update(link);
+	if (!ret) {
+		dp_link->test_requested |= LINK_STATUS_UPDATED;
+		goto exit;
+	}
+
+	ret = dp_link_process_ds_port_status_change(link);
+	if (!ret) {
+		dp_link->test_requested |= DS_PORT_STATUS_CHANGED;
+		goto exit;
+	}
+
+	ret = dp_link_process_video_pattern_request(link);
+	if (!ret) {
+		dp_link->test_requested |= TEST_VIDEO_PATTERN;
+		goto exit;
+	}
+
+	ret = dp_link_process_audio_pattern_request(link);
+	if (!ret) {
+		dp_link->test_requested |= TEST_AUDIO_PATTERN;
+		goto exit;
+	}
+
+	pr_debug("done\n");
+exit:
+	return ret;
+}
+
+static u8 *dp_link_get_voltage_swing(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return link->link_status.req_voltage_swing;
+}
+
+static u8 *dp_link_get_pre_emphasis(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return link->link_status.req_pre_emphasis;
+}
+
+static int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+{
+	u32 cc;
+	enum dynamic_range dr;
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	/* unless a video pattern CTS test is ongoing, default to RGB_VESA */
+	if (dp_link_is_video_pattern_requested(link))
+		dr = link->request.test_dyn_range;
+	else
+		dr = DP_DYNAMIC_RANGE_RGB_VESA;
+
+	/* Only RGB_VESA and RGB_CEA are supported for now */
+	switch (dr) {
+	case DP_DYNAMIC_RANGE_RGB_CEA:
+		cc = BIT(3);
+		break;
+	case DP_DYNAMIC_RANGE_RGB_VESA:
+	default:
+		cc = 0;
+	}
+
+	return cc;
+}
+
+static bool dp_link_clock_recovery(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return false;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return dp_link_clock_recovery_done(link);
+}
+
+static bool dp_link_channel_equalization(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return false;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return dp_link_channel_eq_done(link);
+}
+
+static int dp_link_adjust_levels(struct dp_link *dp_link)
+{
+	int i;
+	int max = 0;
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	/* use the max level across lanes */
+	for (i = 0; i < dp_link->lane_count; i++) {
+		pr_debug("lane=%d req_voltage_swing=%d\n",
+			i, link->link_status.req_voltage_swing[i]);
+		if (max < link->link_status.req_voltage_swing[i])
+			max = link->link_status.req_voltage_swing[i];
+	}
+
+	dp_link->v_level = max;
+
+	/* use the max level across lanes */
+	max = 0;
+	for (i = 0; i < dp_link->lane_count; i++) {
+		pr_debug("lane=%d req_pre_emphasis=%d\n",
+			i, link->link_status.req_pre_emphasis[i]);
+		if (max < link->link_status.req_pre_emphasis[i])
+			max = link->link_status.req_pre_emphasis[i];
+	}
+
+	dp_link->p_level = max;
+
+	/**
+	 * Adjust the voltage swing and pre-emphasis level combination to within
+	 * the allowable range.
+	 */
+	if (dp_link->v_level > DP_LINK_VOLTAGE_MAX) {
+		pr_debug("Requested vSwingLevel=%d, change to %d\n",
+				dp_link->v_level, DP_LINK_VOLTAGE_MAX);
+		dp_link->v_level = DP_LINK_VOLTAGE_MAX;
+	}
+
+	if (dp_link->p_level > DP_LINK_PRE_EMPHASIS_MAX) {
+		pr_debug("Requested preEmphasisLevel=%d, change to %d\n",
+				dp_link->p_level, DP_LINK_PRE_EMPHASIS_MAX);
+		dp_link->p_level = DP_LINK_PRE_EMPHASIS_MAX;
+	}
+
+	if ((dp_link->p_level > DP_LINK_PRE_EMPHASIS_LEVEL_1)
+			&& (dp_link->v_level == DP_LINK_VOLTAGE_LEVEL_2)) {
+		pr_debug("Requested preEmphasisLevel=%d, change to %d\n",
+				dp_link->p_level, DP_LINK_PRE_EMPHASIS_LEVEL_1);
+		dp_link->p_level = DP_LINK_PRE_EMPHASIS_LEVEL_1;
+	}
+
+	pr_debug("v_level=%d, p_level=%d\n",
+		dp_link->v_level, dp_link->p_level);
+
+	return 0;
+}
+
+static int dp_link_send_psm_request(struct dp_link *dp_link, bool req)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return 0;
+}
+
+static u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+{
+	enum test_bit_depth tbd;
+
+	/*
+	 * Few simplistic rules and assumptions made here:
+	 *    1. Test bit depth is bit depth per color component
+	 *    2. Assume 3 color components
+	 */
+	switch (bpp) {
+	case 18:
+		tbd = DP_TEST_BIT_DEPTH_6;
+		break;
+	case 24:
+		tbd = DP_TEST_BIT_DEPTH_8;
+		break;
+	case 30:
+		tbd = DP_TEST_BIT_DEPTH_10;
+		break;
+	default:
+		tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+		break;
+	}
+
+	return tbd;
+}
+
+struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux)
+{
+	int rc = 0;
+	struct dp_link_private *link;
+	struct dp_link *dp_link;
+
+	if (!dev || !aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL);
+	if (!link) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	link->dev   = dev;
+	link->aux   = aux;
+
+	dp_link = &link->dp_link;
+
+	dp_link->process_request        = dp_link_process_request;
+	dp_link->get_voltage_swing      = dp_link_get_voltage_swing;
+	dp_link->get_test_bits_depth    = dp_link_get_test_bits_depth;
+	dp_link->get_pre_emphasis       = dp_link_get_pre_emphasis;
+	dp_link->get_colorimetry_config = dp_link_get_colorimetry_config;
+	dp_link->clock_recovery         = dp_link_clock_recovery;
+	dp_link->channel_equalization   = dp_link_channel_equalization;
+	dp_link->adjust_levels          = dp_link_adjust_levels;
+	dp_link->send_psm_request       = dp_link_send_psm_request;
+	dp_link->phy_pattern_requested  = dp_link_phy_pattern_requested;
+
+	return dp_link;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_link_put(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link)
+		return;
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	devm_kfree(link->dev, link);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
new file mode 100644
index 0000000..de10e9a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_LINK_H_
+#define _DP_LINK_H_
+
+#include "dp_aux.h"
+
+enum dp_link_voltage_level {
+	DP_LINK_VOLTAGE_LEVEL_0	= 0,
+	DP_LINK_VOLTAGE_LEVEL_1	= 1,
+	DP_LINK_VOLTAGE_LEVEL_2	= 2,
+	DP_LINK_VOLTAGE_MAX	= DP_LINK_VOLTAGE_LEVEL_2,
+};
+
+enum dp_link_preemaphasis_level {
+	DP_LINK_PRE_EMPHASIS_LEVEL_0	= 0,
+	DP_LINK_PRE_EMPHASIS_LEVEL_1	= 1,
+	DP_LINK_PRE_EMPHASIS_LEVEL_2	= 2,
+	DP_LINK_PRE_EMPHASIS_MAX	= DP_LINK_PRE_EMPHASIS_LEVEL_2,
+};
+
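+/*
+ * The test_type values mirror the sink's TEST_REQUEST bits (DPCD 0x218),
+ * which is why TEST_EDID_READ (0x04) is listed out of numerical order.
+ */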
+enum test_type {
+	UNKNOWN_TEST		  = 0,
+	TEST_LINK_TRAINING	  = 0x01,
+	TEST_VIDEO_PATTERN	  = 0x02,
+	PHY_TEST_PATTERN	  = 0x08,
+	TEST_EDID_READ		  = 0x04,
+	TEST_AUDIO_PATTERN	  = 0x20,
+	TEST_AUDIO_DISABLED_VIDEO = 0x40,
+};
+
+enum status_update {
+	LINK_STATUS_UPDATED    = 0x100,
+	DS_PORT_STATUS_CHANGED = 0x200,
+};
+
+struct dp_link {
+	u32 test_requested;
+
+	u32 lane_count;
+	u32 link_rate;
+	u32 v_level;
+	u32 p_level;
+
+	u8 *(*get_voltage_swing)(struct dp_link *dp_link);
+	u8 *(*get_pre_emphasis)(struct dp_link *dp_link);
+	u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp);
+	int (*process_request)(struct dp_link *dp_link);
+	int (*get_colorimetry_config)(struct dp_link *dp_link);
+	int (*adjust_levels)(struct dp_link *dp_link);
+	int (*send_psm_request)(struct dp_link *dp_link, bool req);
+	bool (*clock_recovery)(struct dp_link *dp_link);
+	bool (*channel_equalization)(struct dp_link *dp_link);
+	bool (*phy_pattern_requested)(struct dp_link *dp_link);
+};
+
+/**
+ * dp_link_get() - get the functionalities of dp test module
+ * @dev: device instance of the caller
+ * @aux: handle to the dp aux module
+ * return: a pointer to dp_link struct
+ */
+struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux);
+
+/**
+ * dp_link_put() - releases the dp test module's resources
+ *
+ * @dp_link: an instance of dp_link module
+ *
+ */
+void dp_link_put(struct dp_link *dp_link);
+
+#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
new file mode 100644
index 0000000..f9616c4
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include "dp_panel.h"
+
+#define DP_LINK_RATE_MULTIPLIER	27000000
+
+struct dp_panel_private {
+	struct device *dev;
+	struct dp_panel dp_panel;
+	struct dp_aux *aux;
+	struct dp_catalog_panel *catalog;
+};
+
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+{
+	u8 *bp;
+	u8 data;
+	u32 const addr = 0x0;
+	u32 const len = 16;
+	int rlen, rc = 0;
+	struct dp_panel_private *panel;
+	struct dp_panel_dpcd *cap;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	cap = &dp_panel->dpcd;
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	rlen = panel->aux->read(panel->aux, addr, len, AUX_NATIVE, &bp);
+	if (rlen != len) {
+		pr_err("dpcd read failed, rlen=%d\n", rlen);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	memset(cap, 0, sizeof(*cap));
+
+	data = *bp++; /* byte 0 */
+	cap->major = (data >> 4) & 0x0f;
+	cap->minor = data & 0x0f;
+	pr_debug("version: %d.%d\n", cap->major, cap->minor);
+
+	data = *bp++; /* byte 1 */
+	/* 162, 270, 540, 810 MHz symbol rate, NOT bit rate */
+	cap->max_link_rate = data;
+	pr_debug("link_rate=%d\n", cap->max_link_rate);
+
+	data = *bp++; /* byte 2 */
+	if (data & BIT(7))
+		cap->enhanced_frame++;
+
+	if (data & 0x40) {
+		cap->flags |=  DPCD_TPS3;
+		pr_debug("pattern 3 supported\n");
+	} else {
+		pr_debug("pattern 3 not supported\n");
+	}
+
+	data &= 0x0f;
+	cap->max_lane_count = data;
+	pr_debug("lane_count=%d\n", cap->max_lane_count);
+
+	data = *bp++; /* byte 3 */
+	if (data & BIT(0)) {
+		cap->flags |= DPCD_MAX_DOWNSPREAD_0_5;
+		pr_debug("max_downspread\n");
+	}
+
+	if (data & BIT(6)) {
+		cap->flags |= DPCD_NO_AUX_HANDSHAKE;
+		pr_debug("NO Link Training\n");
+	}
+
+	data = *bp++; /* byte 4 */
+	cap->num_rx_port = (data & BIT(0)) + 1;
+	pr_debug("rx_ports=%d\n", cap->num_rx_port);
+
+	data = *bp++; /* Byte 5: DOWN_STREAM_PORT_PRESENT */
+	cap->downstream_port.dfp_present = data & BIT(0);
+	cap->downstream_port.dfp_type = data & 0x6;
+	cap->downstream_port.format_conversion = data & BIT(3);
+	cap->downstream_port.detailed_cap_info_available = data & BIT(4);
+	pr_debug("dfp_present = %d, dfp_type = %d\n",
+			cap->downstream_port.dfp_present,
+			cap->downstream_port.dfp_type);
+	pr_debug("format_conversion = %d, detailed_cap_info_available = %d\n",
+			cap->downstream_port.format_conversion,
+			cap->downstream_port.detailed_cap_info_available);
+
+	bp += 1;	/* Skip Byte 6 */
+	rlen -= 1;
+
+	data = *bp++; /* Byte 7: DOWN_STREAM_PORT_COUNT */
+	cap->downstream_port.dfp_count = data & 0x7;
+	cap->downstream_port.msa_timing_par_ignored = data & BIT(6);
+	cap->downstream_port.oui_support = data & BIT(7);
+	pr_debug("dfp_count = %d, msa_timing_par_ignored = %d\n",
+			cap->downstream_port.dfp_count,
+			cap->downstream_port.msa_timing_par_ignored);
+	pr_debug("oui_support = %d\n", cap->downstream_port.oui_support);
+
+	data = *bp++; /* byte 8 */
+	if (data & BIT(1)) {
+		cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
+		pr_debug("edid presented\n");
+	}
+
+	data = *bp++; /* byte 9 */
+	cap->rx_port0_buf_size = (data + 1) * 32;
+	pr_debug("lane_buf_size=%d\n", cap->rx_port0_buf_size);
+
+	bp += 2; /* skip 10, 11 port1 capability */
+	rlen -= 2;
+
+	data = *bp++;	/* byte 12 */
+	cap->i2c_speed_ctrl = data;
+	if (cap->i2c_speed_ctrl > 0)
+		pr_debug("i2c_rate=%d\n", cap->i2c_speed_ctrl);
+
+	data = *bp++;	/* byte 13 */
+	cap->scrambler_reset = data & BIT(0);
+	pr_debug("scrambler_reset=%d\n", cap->scrambler_reset);
+
+	if (data & BIT(1))
+		cap->enhanced_frame++;
+
+	pr_debug("enhanced_framing=%d\n", cap->enhanced_frame);
+
+	data = *bp++; /* byte 14 */
+	if (data == 0)
+		cap->training_read_interval = 4000; /* us */
+	else
+		cap->training_read_interval = 4000 * data; /* us */
+	pr_debug("training_interval=%d\n", cap->training_read_interval);
+end:
+	return rc;
+}
+
+/*
+ * edid standard header bytes
+ */
+static u8 edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+
+static bool dp_panel_is_edid_header_valid(u8 *buf)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(edid_hdr); i++) {
+		if (buf[i] != edid_hdr[i])
+			return false;
+	}
+
+	return true;
+}
+
+static int dp_panel_validate_edid(u8 *bp, int len)
+{
+	int i;
+	u8 csum = 0;
+	u32 const size = 128;
+
+	if (len < size) {
+		pr_err("Error: len=%x\n", len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++)
+		csum += *bp++;
+
+	if (csum != 0) {
+		pr_err("error: csum=0x%x\n", csum);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dp_panel_read_edid(struct dp_panel *dp_panel)
+{
+	u8 *edid_buf;
+	u32 checksum = 0;
+	int rlen, ret = 0;
+	int edid_blk = 0, blk_num = 0, retries = 10;
+	u32 const segment_addr = 0x30;
+	bool edid_parsing_done = false;
+	struct dp_panel_private *panel;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	ret = panel->aux->ready(panel->aux);
+	if (!ret) {
+		pr_err("aux chan NOT ready\n");
+		goto end;
+	}
+
+	do {
+		u8 segment;
+
+
+		/*
+		 * Write the segment first.
+		 * Segment = 0, for blocks 0 and 1
+		 * Segment = 1, for blocks 2 and 3
+		 * Segment = 2, for blocks 4 and 5
+		 * and so on ...
+		 */
+		segment = blk_num >> 1;
+
+		panel->aux->write(panel->aux, segment_addr, 1, AUX_I2C,
+					&segment);
+
+		rlen = panel->aux->read(panel->aux, EDID_START_ADDRESS +
+				(blk_num * EDID_BLOCK_SIZE),
+				EDID_BLOCK_SIZE, AUX_I2C, &edid_buf);
+		if (rlen != EDID_BLOCK_SIZE) {
+			pr_err("invalid edid len: %d\n", rlen);
+			continue;
+		}
+
+		pr_debug("=== EDID data ===\n");
+		print_hex_dump(KERN_DEBUG, "EDID: ", DUMP_PREFIX_NONE, 16, 1,
+			edid_buf, EDID_BLOCK_SIZE, false);
+
+		pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen);
+
+		if (dp_panel_is_edid_header_valid(edid_buf)) {
+			ret = dp_panel_validate_edid(edid_buf, rlen);
+			if (ret) {
+				pr_err("corrupt edid block detected\n");
+				goto end;
+			}
+
+			if (edid_parsing_done) {
+				blk_num++;
+				continue;
+			}
+
+			dp_panel->edid.ext_block_cnt = edid_buf[0x7E];
+			edid_parsing_done = true;
+			checksum = edid_buf[rlen - 1];
+		} else {
+			edid_blk++;
+			blk_num++;
+		}
+
+		memcpy(dp_panel->edid.buf + (edid_blk * EDID_BLOCK_SIZE),
+			edid_buf, EDID_BLOCK_SIZE);
+
+		if (edid_blk == dp_panel->edid.ext_block_cnt)
+			goto end;
+	} while (retries--);
+end:
+	return ret;
+}
+
+static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+	u32 data, total_ver, total_hor;
+	struct dp_catalog_panel *catalog;
+	struct dp_panel_private *panel;
+	struct dp_panel_info *pinfo;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	catalog = panel->catalog;
+	pinfo = &panel->dp_panel.pinfo;
+
+	pr_debug("width=%d hporch= %d %d %d\n",
+		pinfo->h_active, pinfo->h_back_porch,
+		pinfo->h_front_porch, pinfo->h_sync_width);
+
+	pr_debug("height=%d vporch= %d %d %d\n",
+		pinfo->v_active, pinfo->v_back_porch,
+		pinfo->v_front_porch, pinfo->v_sync_width);
+
+	total_hor = pinfo->h_active + pinfo->h_back_porch +
+		pinfo->h_front_porch + pinfo->h_sync_width;
+
+	total_ver = pinfo->v_active + pinfo->v_back_porch +
+			pinfo->v_front_porch + pinfo->v_sync_width;
+
+	data = total_ver;
+	data <<= 16;
+	data |= total_hor;
+
+	catalog->total = data;
+
+	data = (pinfo->v_back_porch + pinfo->v_sync_width);
+	data <<= 16;
+	data |= (pinfo->h_back_porch + pinfo->h_sync_width);
+
+	catalog->sync_start = data;
+
+	data = pinfo->v_sync_width;
+	data <<= 16;
+	data |= (pinfo->v_active_low << 31);
+	data |= pinfo->h_sync_width;
+	data |= (pinfo->h_active_low << 15);
+
+	catalog->width_blanking = data;
+
+	data = pinfo->v_active;
+	data <<= 16;
+	data |= pinfo->h_active;
+
+	catalog->dp_active = data;
+
+	panel->catalog->timing_cfg(catalog);
+end:
+	return rc;
+}
+
+static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+end:
+	return rc;
+}
+
+static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
+{
+	const u32 encoding_factx10 = 8;
+	const u32 ln_to_link_ratio = 10;
+	u32 min_link_rate, remainder = 0;
+	u8 calc_link_rate = 0, lane_cnt;
+	struct dp_panel_private *panel;
+	struct dp_panel_info *pinfo;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	lane_cnt = dp_panel->dpcd.max_lane_count;
+	pinfo = &dp_panel->pinfo;
+
+	pinfo->bpp = 24;
+
+	/*
+	 * The max pixel clock supported is 675 MHz.
+	 * The calculations below ensure that
+	 * min_link_rate stays within 32-bit limits.
+	 * Any changes to this section of code must
+	 * take this limitation into account.
+	 */
+	min_link_rate = (u32)div_u64(pinfo->pixel_clk_khz * 1000,
+				(lane_cnt * encoding_factx10));
+	min_link_rate /= ln_to_link_ratio;
+	min_link_rate = (min_link_rate * pinfo->bpp);
+	min_link_rate = (u32)div_u64_rem(min_link_rate * 10,
+				DP_LINK_RATE_MULTIPLIER, &remainder);
+
+	/*
+	 * To avoid any fractional values,
+	 * increment the min_link_rate
+	 */
+	if (remainder)
+		min_link_rate += 1;
+	pr_debug("min_link_rate = %d\n", min_link_rate);
+
+	if (min_link_rate <= DP_LINK_RATE_162)
+		calc_link_rate = DP_LINK_RATE_162;
+	else if (min_link_rate <= DP_LINK_RATE_270)
+		calc_link_rate = DP_LINK_RATE_270;
+	else if (min_link_rate <= DP_LINK_RATE_540)
+		calc_link_rate = DP_LINK_RATE_540;
+	else if (min_link_rate <= DP_LINK_RATE_810)
+		calc_link_rate = DP_LINK_RATE_810;
+	else {
+		/* Cap the link rate to the max supported rate */
+		pr_debug("link_rate = %d is unsupported\n", min_link_rate);
+		calc_link_rate = DP_LINK_RATE_810;
+	}
+
+	if (calc_link_rate > dp_panel->dpcd.max_link_rate)
+		calc_link_rate = dp_panel->dpcd.max_link_rate;
+
+	pr_debug("calc_link_rate = 0x%x\n", calc_link_rate);
+end:
+	return calc_link_rate;
+}
+
+struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
+				struct dp_catalog_panel *catalog)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+	struct dp_panel *dp_panel;
+
+	if (!dev || !aux || !catalog) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+	if (!panel) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	panel->dev = dev;
+	panel->aux = aux;
+	panel->catalog = catalog;
+
+	dp_panel = &panel->dp_panel;
+
+	dp_panel->edid.buf = devm_kzalloc(dev,
+				EDID_BLOCK_SIZE * 4, GFP_KERNEL);
+
+	dp_panel->init_info = dp_panel_init_panel_info;
+	dp_panel->timing_cfg = dp_panel_timing_cfg;
+	dp_panel->read_edid = dp_panel_read_edid;
+	dp_panel->read_dpcd = dp_panel_read_dpcd;
+	dp_panel->get_link_rate = dp_panel_get_link_rate;
+
+	return dp_panel;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_panel_put(struct dp_panel *dp_panel)
+{
+	struct dp_panel_private *panel;
+
+	if (!dp_panel)
+		return;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	devm_kfree(panel->dev, dp_panel->edid.buf);
+	devm_kfree(panel->dev, panel);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
new file mode 100644
index 0000000..5c145eb
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_PANEL_H_
+#define _DP_PANEL_H_
+
+#include "dp_aux.h"
+
+#define DPCD_ENHANCED_FRAME     BIT(0)
+#define DPCD_TPS3               BIT(1)
+#define DPCD_MAX_DOWNSPREAD_0_5 BIT(2)
+#define DPCD_NO_AUX_HANDSHAKE   BIT(3)
+#define DPCD_PORT_0_EDID_PRESENTED BIT(4)
+
+#define EDID_START_ADDRESS	0x50
+#define EDID_BLOCK_SIZE		0x80
+
+
+#define DP_LINK_RATE_162	6	/* 1.62G = 270M * 6 */
+#define DP_LINK_RATE_270	10	/* 2.70G = 270M * 10 */
+#define DP_LINK_RATE_540	20	/* 5.40G = 270M * 20 */
+#define DP_LINK_RATE_810	30	/* 8.10G = 270M * 30 */
+#define DP_LINK_RATE_MAX	DP_LINK_RATE_810
+
+struct downstream_port_config {
+	/* Byte 02205h */
+	bool dfp_present;
+	u32 dfp_type;
+	bool format_conversion;
+	bool detailed_cap_info_available;
+	/* Byte 02207h */
+	u32 dfp_count;
+	bool msa_timing_par_ignored;
+	bool oui_support;
+};
+
+struct dp_panel_dpcd {
+	u8 major;
+	u8 minor;
+	u8 max_lane_count;
+	u8 num_rx_port;
+	u8 i2c_speed_ctrl;
+	u8 scrambler_reset;
+	u8 enhanced_frame;
+	u32 max_link_rate;  /* raw DPCD code: 6, 10, 20 or 30 (270 Mb/s units) */
+	u32 flags;
+	u32 rx_port0_buf_size;
+	u32 training_read_interval;/* us */
+	struct downstream_port_config downstream_port;
+};
+
+struct dp_panel_edid {
+	u8 *buf;
+	u8 id_name[4];
+	u8 id_product;
+	u8 version;
+	u8 revision;
+	u8 video_intf;	/* dp == 0x5 */
+	u8 color_depth;	/* 6, 8, 10, 12 and 14 bits */
+	u8 color_format;	/* RGB 4:4:4, YCrCb 4:4:4, Ycrcb 4:2:2 */
+	u8 dpm;		/* display power management */
+	u8 sync_digital;	/* 1 = digital */
+	u8 sync_separate;	/* 1 = separate */
+	u8 vsync_pol;		/* 0 = negative, 1 = positive */
+	u8 hsync_pol;		/* 0 = negative, 1 = positive */
+	u8 ext_block_cnt;
+};
+
+struct dp_panel_info {
+	u32 h_active;
+	u32 v_active;
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 h_sync_width;
+	u32 h_active_low;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 v_sync_width;
+	u32 v_active_low;
+	u32 h_skew;
+	u32 refresh_rate;
+	u32 pixel_clk_khz;
+	u32 bpp;
+};
+
+struct dp_panel {
+	struct dp_panel_dpcd dpcd;
+	struct dp_panel_edid edid;
+	struct dp_panel_info pinfo;
+
+	u32 vic;
+
+	int (*init_info)(struct dp_panel *dp_panel);
+	int (*timing_cfg)(struct dp_panel *dp_panel);
+	int (*read_edid)(struct dp_panel *dp_panel);
+	int (*read_dpcd)(struct dp_panel *dp_panel);
+	u8 (*get_link_rate)(struct dp_panel *dp_panel);
+};
+
+struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
+				struct dp_catalog_panel *catalog);
+void dp_panel_put(struct dp_panel *dp_panel);
+#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index 54d4a10..60c8966 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -17,6 +17,8 @@
 #include <linux/clk.h>
 #include "dp_power.h"
 
+#define DP_CLIENT_NAME_SIZE	20
+
 struct dp_power_private {
 	struct dp_parser *parser;
 	struct platform_device *pdev;
@@ -24,6 +26,8 @@
 	struct clk *pixel_parent;
 
 	struct dp_power dp_power;
+	struct sde_power_client *dp_core_client;
+	struct sde_power_handle *phandle;
 
 	bool core_clks_on;
 	bool link_clks_on;
@@ -58,6 +62,25 @@
 	return rc;
 }
 
+static void dp_power_regulator_deinit(struct dp_power_private *power)
+{
+	int rc = 0, i = 0;
+	struct platform_device *pdev;
+	struct dp_parser *parser;
+
+	parser = power->parser;
+	pdev = power->pdev;
+
+	for (i = DP_CORE_PM; (i < DP_MAX_PM); i++) {
+		rc = msm_dss_config_vreg(&pdev->dev,
+			parser->mp[i].vreg_config,
+			parser->mp[i].num_vreg, 0);
+		if (rc)
+			pr_err("failed to deinit vregs for %s\n",
+				dp_parser_pm_name(i));
+	}
+}
+
 static int dp_power_regulator_ctrl(struct dp_power_private *power, bool enable)
 {
 	int rc = 0, i = 0, j = 0;
@@ -387,6 +410,67 @@
 	return 0;
 }
 
+static int dp_power_client_init(struct dp_power *dp_power,
+		struct sde_power_handle *phandle)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+	char dp_client_name[DP_CLIENT_NAME_SIZE];
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		return -EINVAL;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	rc = dp_power_regulator_init(power);
+	if (rc) {
+		pr_err("failed to init regulators\n");
+		goto error_power;
+	}
+
+	rc = dp_power_clk_init(power, true);
+	if (rc) {
+		pr_err("failed to init clocks\n");
+		goto error_clk;
+	}
+
+	power->phandle = phandle;
+	snprintf(dp_client_name, DP_CLIENT_NAME_SIZE, "dp_core_client");
+	power->dp_core_client = sde_power_client_create(phandle,
+			dp_client_name);
+	if (IS_ERR_OR_NULL(power->dp_core_client)) {
+		pr_err("[%s] client creation failed for DP\n", dp_client_name);
+		rc = -EINVAL;
+		goto error_client;
+	}
+	return 0;
+
+error_client:
+	dp_power_clk_init(power, false);
+error_clk:
+	dp_power_regulator_deinit(power);
+error_power:
+	return rc;
+}
+
+static void dp_power_client_deinit(struct dp_power *dp_power)
+{
+	struct dp_power_private *power;
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		return;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	sde_power_client_destroy(power->phandle, power->dp_core_client);
+	dp_power_clk_init(power, false);
+	dp_power_regulator_deinit(power);
+}
+
 static int dp_power_set_pixel_clk_parent(struct dp_power *dp_power)
 {
 	int rc = 0;
@@ -437,6 +521,13 @@
 		goto err_gpio;
 	}
 
+	rc = sde_power_resource_enable(power->phandle,
+		power->dp_core_client, true);
+	if (rc) {
+		pr_err("Power resource enable failed\n");
+		goto err_sde_power;
+	}
+
 	rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
 	if (rc) {
 		pr_err("failed to enable DP core clocks\n");
@@ -446,6 +537,8 @@
 	return 0;
 
 err_clk:
+	sde_power_resource_enable(power->phandle, power->dp_core_client, false);
+err_sde_power:
 	dp_power_config_gpios(power, flip, false);
 err_gpio:
 	dp_power_pinctrl_set(power, false);
@@ -469,6 +562,12 @@
 	power = container_of(dp_power, struct dp_power_private, dp_power);
 
 	dp_power_clk_enable(dp_power, DP_CORE_PM, false);
+	rc = sde_power_resource_enable(power->phandle,
+			power->dp_core_client, false);
+	if (rc) {
+		pr_err("Power resource disable failed, rc=%d\n", rc);
+		goto exit;
+	}
 	dp_power_config_gpios(power, false, false);
 	dp_power_pinctrl_set(power, false);
 	dp_power_regulator_ctrl(power, false);
@@ -503,18 +602,8 @@
 	dp_power->deinit = dp_power_deinit;
 	dp_power->clk_enable = dp_power_clk_enable;
 	dp_power->set_pixel_clk_parent = dp_power_set_pixel_clk_parent;
-
-	rc = dp_power_regulator_init(power);
-	if (rc) {
-		pr_err("failed to init regulators\n");
-		goto error;
-	}
-
-	rc = dp_power_clk_init(power, true);
-	if (rc) {
-		pr_err("failed to init clocks\n");
-		goto error;
-	}
+	dp_power->power_client_init = dp_power_client_init;
+	dp_power->power_client_deinit = dp_power_client_deinit;
 
 	return dp_power;
 error:
@@ -526,6 +615,5 @@
 	struct dp_power_private *power = container_of(dp_power,
 			struct dp_power_private, dp_power);
 
-	(void)dp_power_clk_init(power, false);
 	devm_kfree(&power->pdev->dev, power);
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
index 3d71695..e6e9900 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.h
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -16,6 +16,7 @@
 #define _DP_POWER_H_
 
 #include "dp_parser.h"
+#include "sde_power_handle.h"
 
 /**
  * sruct dp_power - DisplayPort's power related data
@@ -31,6 +32,9 @@
 	int (*clk_enable)(struct dp_power *power, enum dp_pm_type pm_type,
 				bool enable);
 	int (*set_pixel_clk_parent)(struct dp_power *power);
+	int (*power_client_init)(struct dp_power *power,
+				struct sde_power_handle *phandle);
+	void (*power_client_deinit)(struct dp_power *power);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index f596989..2cd9aa1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -110,6 +110,8 @@
 	PLANE_PROP_ROT_DST_Y,
 	PLANE_PROP_ROT_DST_W,
 	PLANE_PROP_ROT_DST_H,
+	PLANE_PROP_PREFILL_SIZE,
+	PLANE_PROP_PREFILL_TIME,
 
 	/* enum/bitmask properties */
 	PLANE_PROP_ROTATION,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 71e64e4..497d0db 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -282,7 +282,7 @@
  * Returns: Pointer to associated private display structure
  */
 #define sde_connector_get_panel(C) \
-	((C) ? to_sde_connector((C))->panel : 0)
+	((C) ? to_sde_connector((C))->panel : NULL)
 
 /**
  * sde_connector_get_encoder - get sde connector's private encoder pointer
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 5adef2d..1b40161 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -428,6 +428,103 @@
 	sde_kms->irq_obj.total_irqs = 0;
 }
 
+static void sde_core_irq_mask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
+		return;
+	 * A few simplistic rules and assumptions are made here:
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier */
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	/* memory barrier */
+	smp_mb__after_atomic();
+}
+
+static void sde_core_irq_unmask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier */
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	/* memory barrier */
+	smp_mb__after_atomic();
+}
+
+static struct irq_chip sde_core_irq_chip = {
+	.name = "sde",
+	.irq_mask = sde_core_irq_mask,
+	.irq_unmask = sde_core_irq_unmask,
+};
+
+static int sde_core_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct sde_kms *sde_kms;
+	int rc;
+
+	if (!domain || !domain->host_data) {
+		SDE_ERROR("invalid parameters domain %d\n", domain != NULL);
+		return -EINVAL;
+	}
+	sde_kms = domain->host_data;
+
+	irq_set_chip_and_handler(irq, &sde_core_irq_chip, handle_level_irq);
+	rc = irq_set_chip_data(irq, sde_kms);
+
+	return rc;
+}
+
+static const struct irq_domain_ops sde_core_irqdomain_ops = {
+	.map = sde_core_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+int sde_core_irq_domain_add(struct sde_kms *sde_kms)
+{
+	struct device *dev;
+	struct irq_domain *domain;
+
+	if (!sde_kms->dev || !sde_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev->dev;
+
+	domain = irq_domain_add_linear(dev->of_node, 32,
+			&sde_core_irqdomain_ops, sde_kms);
+	if (!domain) {
+		pr_err("failed to add irq_domain\n");
+		return -EINVAL;
+	}
+
+	sde_kms->irq_controller.enabled_mask = 0;
+	sde_kms->irq_controller.domain = domain;
+
+	return 0;
+}
+
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
+{
+	if (sde_kms->irq_controller.domain) {
+		irq_domain_remove(sde_kms->irq_controller.domain);
+		sde_kms->irq_controller.domain = NULL;
+	}
+	return 0;
+}
+
 irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
 {
 	/*
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index 64f4160..c775f8c 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -38,6 +38,20 @@
 void sde_core_irq_uninstall(struct sde_kms *sde_kms);
 
 /**
+ * sde_core_irq_domain_add - Add core IRQ domain for SDE
+ * @sde_kms:		SDE handle
+ * @return:		0 if success; error code otherwise
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_fini - uninstall core IRQ domain
+ * @sde_kms:		SDE handle
+ * @return:		0 if success; error code otherwise
+ */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
+
+/**
  * sde_core_irq - core IRQ handler
  * @sde_kms:		SDE handle
  * @return:		interrupt handling status
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 7671649..448a1e7 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -42,6 +42,23 @@
 	SDE_PERF_MODE_MAX
 };
 
+/**
+ * enum sde_perf_vote_mode: perf vote mode.
+ * @APPS_RSC_MODE:	Combines the votes for all displays and votes
+ *                      through the apps rsc. This is the default mode
+ *                      when the display rsc is not available.
+ * @DISP_RSC_MODE:	Combines the votes for all displays and votes
+ *                      through the display rsc. This is the default
+ *                      mode when the display rsc is available.
+ * @DISP_RSC_PRIMARY_MODE:	The primary display votes through the display
+ *                      rsc while all other displays vote through the apps rsc.
+ */
+enum sde_perf_vote_mode {
+	APPS_RSC_MODE,
+	DISP_RSC_MODE,
+	DISP_RSC_PRIMARY_MODE,
+};
+
 static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv;
@@ -169,7 +186,9 @@
 
 	SDE_DEBUG("final threshold bw limit = %d\n", threshold);
 
-	if (!threshold) {
+	if (!sde_cstate->bw_control) {
+		SDE_DEBUG("bypass bandwidth check\n");
+	} else if (!threshold) {
 		sde_cstate->new_perf = sde_cstate->cur_perf;
 		SDE_ERROR("no bandwidth limits specified\n");
 		return -E2BIG;
@@ -182,12 +201,38 @@
 	return 0;
 }
 
+static inline bool _is_crtc_client_type_matches(struct drm_crtc *tmp_crtc,
+	enum sde_crtc_client_type curr_client_type,
+	struct sde_core_perf *perf)
+{
+	if (!tmp_crtc)
+		return false;
+	else if (perf->bw_vote_mode == DISP_RSC_PRIMARY_MODE &&
+							perf->sde_rsc_available)
+		return curr_client_type == sde_crtc_get_client_type(tmp_crtc);
+	else
+		return true;
+}
+
+static inline enum sde_crtc_client_type _get_sde_client_type(
+	enum sde_crtc_client_type curr_client_type,
+	struct sde_core_perf *perf)
+{
+	if (perf->bw_vote_mode == DISP_RSC_PRIMARY_MODE &&
+						perf->sde_rsc_available)
+		return curr_client_type;
+	else if (perf->bw_vote_mode != APPS_RSC_MODE && perf->sde_rsc_available)
+		return RT_RSC_CLIENT;
+	else
+		return RT_CLIENT;
+}
+
 static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
 		struct drm_crtc *crtc)
 {
 	u64 bw_sum_of_intfs = 0, bus_ab_quota, bus_ib_quota;
 	struct sde_core_perf_params perf = {0};
-	enum sde_crtc_client_type curr_client_type
+	enum sde_crtc_client_type client_vote, curr_client_type
 					= sde_crtc_get_client_type(crtc);
 	struct drm_crtc *tmp_crtc;
 	struct sde_crtc_state *sde_cstate;
@@ -195,7 +240,8 @@
 
 	drm_for_each_crtc(tmp_crtc, crtc->dev) {
 		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
-		    (curr_client_type == sde_crtc_get_client_type(tmp_crtc))) {
+		    _is_crtc_client_type_matches(tmp_crtc, curr_client_type,
+								&kms->perf)) {
 			sde_cstate = to_sde_crtc_state(tmp_crtc->state);
 
 			perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
@@ -217,7 +263,8 @@
 		bus_ib_quota = kms->perf.fix_core_ib_vote;
 	}
 
-	switch (curr_client_type) {
+	client_vote = _get_sde_client_type(curr_client_type, &kms->perf);
+	switch (client_vote) {
 	case NRT_CLIENT:
 		sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
 				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
@@ -246,6 +293,32 @@
 		SDE_ERROR("invalid client type:%d\n", curr_client_type);
 		break;
 	}
+
+	if (kms->perf.bw_vote_mode_updated) {
+		switch (kms->perf.bw_vote_mode) {
+		case DISP_RSC_MODE:
+			sde_power_data_bus_set_quota(&priv->phandle,
+				kms->core_client,
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT, 0, 0);
+			sde_power_data_bus_set_quota(&priv->phandle,
+				kms->core_client,
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, 0, 0);
+			kms->perf.bw_vote_mode_updated = false;
+			break;
+
+		case APPS_RSC_MODE:
+			sde_cstate = to_sde_crtc_state(crtc->state);
+			if (sde_cstate->rsc_client) {
+				sde_rsc_client_vote(sde_cstate->rsc_client,
+									0, 0);
+				kms->perf.bw_vote_mode_updated = false;
+			}
+			break;
+
+		default:
+			break;
+		}
+	}
 }
 
 /**
@@ -349,6 +422,10 @@
 	}
 	priv = kms->dev->dev_private;
 
+	/* wake vote update is not required with display rsc */
+	if (kms->perf.bw_vote_mode == DISP_RSC_MODE && stop_req)
+		return;
+
 	sde_crtc = to_sde_crtc(crtc);
 	sde_cstate = to_sde_crtc_state(crtc->state);
 
@@ -376,6 +453,21 @@
 			update_bus = 1;
 		}
 
+		/* display rsc override during solver mode */
+		if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
+				get_sde_rsc_current_state(SDE_RSC_INDEX) ==
+							    SDE_RSC_CMD_STATE) {
+			/* update new bandwdith in all cases */
+			/* update new bandwidth in all cases */
+				old->bw_ctl = new->bw_ctl;
+				old->max_per_pipe_ib = new->max_per_pipe_ib;
+				update_bus = 1;
+			/* reducing the bw vote is not required in solver mode */
+			} else if (!params_changed) {
+				update_bus = 0;
+			}
+		}
+
 		if ((params_changed &&
 				(new->core_clk_rate > old->core_clk_rate)) ||
 				(!params_changed &&
@@ -535,6 +627,10 @@
 			(u32 *)&catalog->perf.max_bw_high);
 	debugfs_create_file("perf_mode", 0644, perf->debugfs_root,
 			(u32 *)perf, &sde_core_perf_mode_fops);
+	debugfs_create_u32("bw_vote_mode", 0600, perf->debugfs_root,
+			&perf->bw_vote_mode);
+	debugfs_create_bool("bw_vote_mode_updated", 0600, perf->debugfs_root,
+			&perf->bw_vote_mode_updated);
 	debugfs_create_u64("fix_core_clk_rate", 0644, perf->debugfs_root,
 			&perf->fix_core_clk_rate);
 	debugfs_create_u64("fix_core_ib_vote", 0644, perf->debugfs_root,
@@ -566,7 +662,6 @@
 	sde_core_perf_debugfs_destroy(perf);
 	perf->max_core_clk_rate = 0;
 	perf->core_clk = NULL;
-	mutex_destroy(&perf->perf_lock);
 	perf->clk_name = NULL;
 	perf->phandle = NULL;
 	perf->catalog = NULL;
@@ -590,7 +685,12 @@
 	perf->phandle = phandle;
 	perf->pclient = pclient;
 	perf->clk_name = clk_name;
-	mutex_init(&perf->perf_lock);
+	perf->sde_rsc_available = is_sde_rsc_available(SDE_RSC_INDEX);
+	/* set default mode */
+	if (perf->sde_rsc_available)
+		perf->bw_vote_mode = DISP_RSC_MODE;
+	else
+		perf->bw_vote_mode = APPS_RSC_MODE;
 
 	perf->core_clk = sde_power_clk_get_clk(phandle, clk_name);
 	if (!perf->core_clk) {
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h
index 31851be..4a1bdad 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.h
@@ -22,8 +22,6 @@
 #include "sde_power_handle.h"
 
 #define	SDE_PERF_DEFAULT_MAX_CORE_CLK_RATE	320000000
-#define	SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA	2000000000
-#define	SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA	2000000000
 
 /**
  * struct sde_core_perf_params - definition of performance parameters
@@ -53,7 +51,6 @@
  * struct sde_core_perf - definition of core performance context
  * @dev: Pointer to drm device
  * @debugfs_root: top level debug folder
- * @perf_lock: serialization lock for this context
  * @catalog: Pointer to catalog configuration
  * @phandle: Pointer to power handler
  * @pclient: Pointer to power client
@@ -66,11 +63,13 @@
  * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
  * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
  * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ * @bw_vote_mode: apps rsc vs display rsc bandwidth vote mode
+ * @sde_rsc_available: is display rsc available
+ * @bw_vote_mode_updated: true when the vote mode changed and stale votes must be cleared
  */
 struct sde_core_perf {
 	struct drm_device *dev;
 	struct dentry *debugfs_root;
-	struct mutex perf_lock;
 	struct sde_mdss_cfg *catalog;
 	struct sde_power_handle *phandle;
 	struct sde_power_client *pclient;
@@ -83,6 +82,9 @@
 	u64 fix_core_clk_rate;
 	u64 fix_core_ib_vote;
 	u64 fix_core_ab_vote;
+	u32 bw_vote_mode;
+	bool sde_rsc_available;
+	bool bw_vote_mode_updated;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index d0ade33..7d0fad0 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -2912,19 +2912,19 @@
 			CRTC_PROP_CORE_CLK);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ab", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_CORE_AB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ib", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_CORE_IB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"mem_ab", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_MEM_AB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"mem_ib", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_MEM_IB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"rot_prefill_bw", 0, 0, U64_MAX,
@@ -3050,6 +3050,12 @@
 			case CRTC_PROP_ROI_V1:
 				ret = _sde_crtc_set_roi_v1(state, (void *)val);
 				break;
+			case CRTC_PROP_CORE_AB:
+			case CRTC_PROP_CORE_IB:
+			case CRTC_PROP_MEM_AB:
+			case CRTC_PROP_MEM_IB:
+				cstate->bw_control = true;
+				break;
 			default:
 				/* nothing to do */
 				break;
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 9ef6f25..4b3c814 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -255,6 +255,7 @@
  * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
  * @is_ppsplit    : Whether current topology requires PPSplit special handling
+ * @bw_control    : true if bw controlled by bw properties
  * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
@@ -280,6 +281,7 @@
 	enum sde_intf_mode intf_mode;
 	struct sde_rsc_client *rsc_client;
 	bool rsc_update;
+	bool bw_control;
 
 	bool is_ppsplit;
 	struct sde_rect crtc_roi;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index e3ad960..3d48a17 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -2579,6 +2579,9 @@
 	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
 		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
 		intf_type = INTF_HDMI;
+	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+		intf_type = INTF_DP;
 	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_VIRTUAL) {
 		*drm_enc_mode = DRM_MODE_ENCODER_VIRTUAL;
 		intf_type = INTF_WB;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 53f5b89..7adab09 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -700,6 +700,8 @@
 		}
 	}
 
+	if (phys_enc->hw_pp->ops.enable_tearcheck)
+		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 28a2b16..385c610 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -21,6 +21,7 @@
 #include "sde_core_irq.h"
 #include "sde_wb.h"
 #include "sde_vbif.h"
+#include "sde_crtc.h"
 
 #define to_sde_encoder_phys_wb(x) \
 	container_of(x, struct sde_encoder_phys_wb, base)
@@ -104,6 +105,48 @@
 }
 
 /**
+ * sde_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_set_qos_remap(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc;
+	struct sde_hw_wb *hw_wb;
+	struct drm_crtc *crtc;
+	struct sde_vbif_set_qos_params qos_params;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	crtc = phys_enc->parent->crtc;
+
+	if (!wb_enc->hw_wb || !wb_enc->hw_wb->caps) {
+		SDE_ERROR("invalid writeback hardware\n");
+		return;
+	}
+
+	hw_wb = wb_enc->hw_wb;
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = hw_wb->caps->vbif_idx;
+	qos_params.xin_id = hw_wb->caps->xin_id;
+	qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+	qos_params.num = hw_wb->idx - WB_0;
+	qos_params.is_rt = sde_crtc_get_client_type(crtc) != NRT_CLIENT;
+
+	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d\n",
+			qos_params.num,
+			qos_params.vbif_idx,
+			qos_params.xin_id, qos_params.is_rt);
+
+	sde_vbif_set_qos_remap(phys_enc->sde_kms, &qos_params);
+}
+
+/**
  * sde_encoder_phys_setup_cdm - setup chroma down block
  * @phys_enc:	Pointer to physical encoder
  * @fb:		Pointer to output framebuffer
@@ -528,6 +571,8 @@
 
 	sde_encoder_phys_wb_set_traffic_shaper(phys_enc);
 
+	sde_encoder_phys_wb_set_qos_remap(phys_enc);
+
 	sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi);
 
 	sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 1faa46e2..30e63da 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -285,6 +285,8 @@
 	VBIF_DEFAULT_OT_WR_LIMIT,
 	VBIF_DYNAMIC_OT_RD_LIMIT,
 	VBIF_DYNAMIC_OT_WR_LIMIT,
+	VBIF_QOS_RT_REMAP,
+	VBIF_QOS_NRT_REMAP,
 	VBIF_PROP_MAX,
 };
 
@@ -512,6 +514,10 @@
 		PROP_TYPE_U32_ARRAY},
 	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
 		PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_RT_REMAP, "qcom,sde-vbif-qos-rt-remap", false,
+		PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
+		PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type reg_dma_prop[REG_DMA_PROP_MAX] = {
@@ -1049,6 +1055,13 @@
 
 		set_bit(SDE_SSPP_SRC, &sspp->features);
 
+		if (sde_cfg->ts_prefill_rev == 1) {
+			set_bit(SDE_SSPP_TS_PREFILL, &sspp->features);
+		} else if (sde_cfg->ts_prefill_rev == 2) {
+			set_bit(SDE_SSPP_TS_PREFILL, &sspp->features);
+			set_bit(SDE_SSPP_TS_PREFILL_REC1, &sspp->features);
+		}
+
 		sblk->smart_dma_priority =
 			PROP_VALUE_ACCESS(prop_value, SSPP_SMART_DMA, i);
 
@@ -1926,7 +1939,7 @@
 	int rc, prop_count[VBIF_PROP_MAX], i, j, k;
 	struct sde_prop_value *prop_value = NULL;
 	bool prop_exists[VBIF_PROP_MAX];
-	u32 off_count, vbif_len, rd_len = 0, wr_len = 0;
+	u32 off_count, vbif_len;
 	struct sde_vbif_cfg *vbif;
 
 	if (!sde_cfg) {
@@ -1948,12 +1961,22 @@
 		goto end;
 
 	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_RD_LIMIT], 1,
-			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], &rd_len);
+			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], NULL);
 	if (rc)
 		goto end;
 
 	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_WR_LIMIT], 1,
-			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], &wr_len);
+			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_RT_REMAP], 1,
+			&prop_count[VBIF_QOS_RT_REMAP], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_NRT_REMAP], 1,
+			&prop_count[VBIF_QOS_NRT_REMAP], NULL);
 	if (rc)
 		goto end;
 
@@ -2048,6 +2071,63 @@
 				vbif->dynamic_ot_rd_tbl.count ||
 				vbif->dynamic_ot_wr_tbl.count)
 			set_bit(SDE_VBIF_QOS_OTLIM, &vbif->features);
+
+		vbif->qos_rt_tbl.npriority_lvl =
+				prop_count[VBIF_QOS_RT_REMAP];
+		SDE_DEBUG("qos_rt_tbl.npriority_lvl=%u\n",
+				vbif->qos_rt_tbl.npriority_lvl);
+		if (vbif->qos_rt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
+			vbif->qos_rt_tbl.priority_lvl = kcalloc(
+				vbif->qos_rt_tbl.npriority_lvl, sizeof(u32),
+				GFP_KERNEL);
+			if (!vbif->qos_rt_tbl.priority_lvl) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		} else if (vbif->qos_rt_tbl.npriority_lvl) {
+			vbif->qos_rt_tbl.npriority_lvl = 0;
+			vbif->qos_rt_tbl.priority_lvl = NULL;
+			SDE_ERROR("invalid qos rt table\n");
+		}
+
+		for (j = 0; j < vbif->qos_rt_tbl.npriority_lvl; j++) {
+			vbif->qos_rt_tbl.priority_lvl[j] =
+				PROP_VALUE_ACCESS(prop_value,
+						VBIF_QOS_RT_REMAP, j);
+			SDE_DEBUG("lvl[%d]=%u\n", j,
+					vbif->qos_rt_tbl.priority_lvl[j]);
+		}
+
+		vbif->qos_nrt_tbl.npriority_lvl =
+				prop_count[VBIF_QOS_NRT_REMAP];
+		SDE_DEBUG("qos_nrt_tbl.npriority_lvl=%u\n",
+				vbif->qos_nrt_tbl.npriority_lvl);
+
+		if (vbif->qos_nrt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
+			vbif->qos_nrt_tbl.priority_lvl = kcalloc(
+				vbif->qos_nrt_tbl.npriority_lvl, sizeof(u32),
+				GFP_KERNEL);
+			if (!vbif->qos_nrt_tbl.priority_lvl) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		} else if (vbif->qos_nrt_tbl.npriority_lvl) {
+			vbif->qos_nrt_tbl.npriority_lvl = 0;
+			vbif->qos_nrt_tbl.priority_lvl = NULL;
+			SDE_ERROR("invalid qos nrt table\n");
+		}
+
+		for (j = 0; j < vbif->qos_nrt_tbl.npriority_lvl; j++) {
+			vbif->qos_nrt_tbl.priority_lvl[j] =
+				PROP_VALUE_ACCESS(prop_value,
+						VBIF_QOS_NRT_REMAP, j);
+			SDE_DEBUG("lvl[%d]=%u\n", j,
+					vbif->qos_nrt_tbl.priority_lvl[j]);
+		}
+
+		if (vbif->qos_rt_tbl.npriority_lvl ||
+				vbif->qos_nrt_tbl.npriority_lvl)
+			set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
 	}
 
 end:
@@ -2510,11 +2590,17 @@
 		/* update msm8998 target here */
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->perf.min_prefill_lines = 25;
+		sde_cfg->vbif_qos_nlvl = 4;
+		sde_cfg->ts_prefill_rev = 1;
+		sde_cfg->perf.min_prefill_lines = 25;
 		break;
 	case SDE_HW_VER_400:
-		/* update msm8998 and sdm845 target here */
+		/* update sdm845 target here */
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->perf.min_prefill_lines = 24;
+		sde_cfg->vbif_qos_nlvl = 8;
+		sde_cfg->ts_prefill_rev = 2;
+		sde_cfg->perf.min_prefill_lines = 24;
 		break;
 	default:
 		sde_cfg->perf.min_prefill_lines = 0xffff;
@@ -2549,6 +2635,8 @@
 	for (i = 0; i < sde_cfg->vbif_count; i++) {
 		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
 		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
+		kfree(sde_cfg->vbif[i].qos_rt_tbl.priority_lvl);
+		kfree(sde_cfg->vbif[i].qos_nrt_tbl.priority_lvl);
 	}
 
 	kfree(sde_cfg->dma_formats);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index cfb1b67..e24192b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -111,6 +111,8 @@
  * @SDE_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
  * @SDE_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
  * @SDE_SSPP_SBUF,           SSPP support inline stream buffer
+ * @SDE_SSPP_TS_PREFILL,     Supports prefill with traffic shaper
+ * @SDE_SSPP_TS_PREFILL_REC1, Supports prefill with traffic shaper multirec
  * @SDE_SSPP_MAX             maximum value
  */
 enum {
@@ -130,6 +132,8 @@
 	SDE_SSPP_SMART_DMA_V1,
 	SDE_SSPP_SMART_DMA_V2,
 	SDE_SSPP_SBUF,
+	SDE_SSPP_TS_PREFILL,
+	SDE_SSPP_TS_PREFILL_REC1,
 	SDE_SSPP_MAX
 };
 
@@ -258,10 +262,12 @@
 /**
  * VBIF sub-blocks and features
  * @SDE_VBIF_QOS_OTLIM        VBIF supports OT Limit
+ * @SDE_VBIF_QOS_REMAP        VBIF supports QoS priority remap
  * @SDE_VBIF_MAX              maximum value
  */
 enum {
 	SDE_VBIF_QOS_OTLIM = 0x1,
+	SDE_VBIF_QOS_REMAP,
 	SDE_VBIF_MAX
 };
 
@@ -653,6 +659,16 @@
 };
 
 /**
+ * struct sde_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl      num of priority level
+ * @priority_lvl       pointer to array of priority level in ascending order
+ */
+struct sde_vbif_qos_tbl {
+	u32 npriority_lvl;
+	u32 *priority_lvl;
+};
+
+/**
  * struct sde_vbif_cfg - information of VBIF blocks
  * @id                 enum identifying this block
  * @base               register offset of this block
@@ -662,6 +678,8 @@
  * @xin_halt_timeout   maximum time (in usec) for xin to halt
  * @dynamic_ot_rd_tbl  dynamic OT read configuration table
  * @dynamic_ot_wr_tbl  dynamic OT write configuration table
+ * @qos_rt_tbl         real-time QoS priority table
+ * @qos_nrt_tbl        non-real-time QoS priority table
  */
 struct sde_vbif_cfg {
 	SDE_HW_BLK_INFO;
@@ -670,6 +688,8 @@
 	u32 xin_halt_timeout;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+	struct sde_vbif_qos_tbl qos_rt_tbl;
+	struct sde_vbif_qos_tbl qos_nrt_tbl;
 };
 /**
  * struct sde_reg_dma_cfg - information of lut dma blocks
@@ -746,6 +766,8 @@
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
  * @wb_formats         Supported formats for wb
+ * @vbif_qos_nlvl      number of vbif QoS priority level
+ * @ts_prefill_rev     prefill traffic shaper feature revision
  */
 struct sde_mdss_cfg {
 	u32 hwversion;
@@ -765,6 +787,8 @@
 	bool has_sbuf;
 	u32 sbuf_headroom;
 	bool has_idle_pc;
+	u32 vbif_qos_nlvl;
+	u32 ts_prefill_rev;
 
 	u32 mdss_count;
 	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index 1f17378..be83afe 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -116,7 +116,7 @@
 	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
 	p->hsync_skew - 1;
 
-	if (ctx->cap->type == INTF_EDP) {
+	if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
 		display_v_start += p->hsync_pulse_width + p->h_back_porch;
 		display_v_end -= p->h_front_porch;
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 7780c5b..fedc72c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -71,7 +71,7 @@
 	if (stage == SDE_STAGE_BASE)
 		rc = -EINVAL;
 	else if (stage <= sblk->maxblendstages)
-		rc = sblk->blendstage_base[stage - 1];
+		rc = sblk->blendstage_base[stage - SDE_STAGE_0];
 	else
 		rc = -EINVAL;
 
@@ -198,7 +198,7 @@
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
 	int stage_off;
-	u32 val = 0;
+	u32 val = 0, alpha = 0;
 
 	stage_off = _stage_offset(ctx, dim_layer->stage);
 	if (stage_off < 0) {
@@ -206,13 +206,13 @@
 		return;
 	}
 
-	val = (dim_layer->color_fill.color_1 & 0xFFF) << 16 |
-			(dim_layer->color_fill.color_0 & 0xFFF);
+	alpha = dim_layer->color_fill.color_3 & 0xFF;
+	val = ((dim_layer->color_fill.color_1 << 2) & 0xFFF) << 16 |
+			((dim_layer->color_fill.color_0 << 2) & 0xFFF);
 	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_0 + stage_off, val);
 
-	val = 0;
-	val = (dim_layer->color_fill.color_3 & 0xFFF) << 16 |
-			(dim_layer->color_fill.color_2 & 0xFFF);
+	val = (alpha << 4) << 16 |
+			((dim_layer->color_fill.color_2 << 2) & 0xFFF);
 	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_1 + stage_off, val);
 
 	val = dim_layer->rect.h << 16 | dim_layer->rect.w;
@@ -222,9 +222,14 @@
 	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, val);
 
 	val = BIT(16); /* enable dim layer */
+	val |= SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
 	if (dim_layer->flags & SDE_DRM_DIM_LAYER_EXCLUSIVE)
 		val |= BIT(17);
+	else
+		val &= ~BIT(17);
 	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
+	val = (alpha << 16) | (0xff - alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, val);
 }
 
 static void sde_hw_lm_setup_misr(struct sde_hw_mixer *ctx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index a1f5cee..694d267 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -79,11 +79,16 @@
 #define SSPP_SW_PIX_EXT_C3_LR              0x120
 #define SSPP_SW_PIX_EXT_C3_TB              0x124
 #define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
+#define SSPP_TRAFFIC_SHAPER                0x130
 #define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
+#define SSPP_TRAFFIC_SHAPER_REC1           0x158
 #define SSPP_EXCL_REC_SIZE                 0x1B4
 #define SSPP_EXCL_REC_XY                   0x1B8
 #define SSPP_VIG_OP_MODE                   0x0
 #define SSPP_VIG_CSC_10_OP_MODE            0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX        0xFF
 
 /* SSPP_QOS_CTRL */
 #define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
@@ -186,6 +191,9 @@
 #define VIG_CSC_10_EN          BIT(0)
 #define CSC_10BIT_OFFSET       4
 
+/* traffic shaper clock in Hz */
+#define TS_CLK			19200000
+
 static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx,
 		int s_id,
 		u32 *idx)
@@ -1041,6 +1049,51 @@
 	status->rd_ptr[1] = val & 0xffff;
 }
 
+static void sde_hw_sspp_setup_ts_prefill(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_ts_cfg *cfg,
+		enum sde_sspp_multirect_index index)
+{
+	u32 idx;
+	u32 ts_offset, ts_prefill_offset;
+	u32 ts_count = 0, ts_bytes = 0;
+	const struct sde_sspp_cfg *cap;
+
+	if (!ctx || !cfg || !ctx->cap)
+		return;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	cap = ctx->cap;
+
+	if (index == SDE_SSPP_RECT_0 &&
+			test_bit(SDE_SSPP_TS_PREFILL, &cap->features)) {
+		ts_offset = SSPP_TRAFFIC_SHAPER;
+		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_PREFILL;
+	} else if (index == SDE_SSPP_RECT_1 &&
+			test_bit(SDE_SSPP_TS_PREFILL_REC1, &cap->features)) {
+		ts_offset = SSPP_TRAFFIC_SHAPER_REC1;
+		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_REC1_PREFILL;
+	} else {
+		return;
+	}
+
+	if (cfg->time) {
+		ts_bytes = mult_frac(TS_CLK * 1000000ULL, cfg->size,
+				cfg->time);
+		if (ts_bytes > SSPP_TRAFFIC_SHAPER_BPC_MAX)
+			ts_bytes = SSPP_TRAFFIC_SHAPER_BPC_MAX;
+	}
+
+	if (ts_bytes) {
+		ts_count = DIV_ROUND_UP_ULL(cfg->size, ts_bytes);
+		ts_bytes |= BIT(31) | BIT(27);
+	}
+
+	SDE_REG_WRITE(&ctx->hw, ts_offset, ts_bytes);
+	SDE_REG_WRITE(&ctx->hw, ts_prefill_offset, ts_count);
+}
+
 static void _setup_layer_ops(struct sde_hw_pipe *c,
 		unsigned long features)
 {
@@ -1062,6 +1115,9 @@
 		c->ops.setup_qos_ctrl = sde_hw_sspp_setup_qos_ctrl;
 	}
 
+	if (test_bit(SDE_SSPP_TS_PREFILL, &features))
+		c->ops.setup_ts_prefill = sde_hw_sspp_setup_ts_prefill;
+
 	if (test_bit(SDE_SSPP_CSC, &features) ||
 		test_bit(SDE_SSPP_CSC_10BIT, &features))
 		c->ops.setup_csc = sde_hw_sspp_setup_csc;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 1b81e54..010b363 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -343,6 +343,16 @@
 };
 
 /**
+ * struct sde_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct sde_hw_pipe_ts_cfg {
+	u64 size;
+	u64 time;
+};
+
+/**
  * Maximum number of stream buffer plane
  */
 #define SDE_PIPE_SBUF_PLANE_NUM	2
@@ -554,6 +564,16 @@
 	 */
 	void (*get_sbuf_status)(struct sde_hw_pipe *ctx,
 			struct sde_hw_pipe_sbuf_status *status);
+
+	/**
+	 * setup_ts_prefill - setup prefill traffic shaper
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to traffic shaper configuration
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_ts_prefill)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_ts_cfg *cfg,
+			enum sde_sspp_multirect_index index);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
index 048ec47..9b9763a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -33,6 +33,8 @@
 #define VBIF_OUT_WR_LIM_CONF0		0x00D4
 #define VBIF_XIN_HALT_CTRL0		0x0200
 #define VBIF_XIN_HALT_CTRL1		0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
 
 static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
 		u32 xin_id, bool rd, u32 limit)
@@ -104,6 +106,35 @@
 	return (reg_val & BIT(xin_id)) ? true : false;
 }
 
+static void sde_hw_set_qos_remap(struct sde_hw_vbif *vbif,
+		u32 xin_id, u32 level, u32 remap_level)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+	if (!vbif)
+		return;
+
+	c = &vbif->hw;
+
+	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+	reg_shift = (xin_id & 0x7) * 4;
+
+	reg_val = SDE_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+	reg_val_lvl = SDE_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+	mask = 0x7 << reg_shift;
+
+	reg_val &= ~mask;
+	reg_val |= (remap_level << reg_shift) & mask;
+
+	reg_val_lvl &= ~mask;
+	reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+	SDE_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+	SDE_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
+
 static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
 		unsigned long cap)
 {
@@ -111,6 +142,8 @@
 	ops->get_limit_conf = sde_hw_get_limit_conf;
 	ops->set_halt_ctrl = sde_hw_set_halt_ctrl;
 	ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
+	if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
+		ops->set_qos_remap = sde_hw_set_qos_remap;
 }
 
 static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
index de7fac0..c67738b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -61,6 +61,16 @@
 	 */
 	bool (*get_halt_ctrl)(struct sde_hw_vbif *vbif,
 			u32 xin_id);
+
+	/**
+	 * set_qos_remap - set QoS priority remap
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @level: priority level
+	 * @remap_level: remapped level
+	 */
+	void (*set_qos_remap)(struct sde_hw_vbif *vbif,
+			u32 xin_id, u32 level, u32 remap_level);
 };
 
 struct sde_hw_vbif {
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index e3b658a..eeb7a00 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,90 +49,14 @@
 	return IRQ_HANDLED;
 }
 
-static void sde_hw_irq_mask(struct irq_data *irqd)
-{
-	struct sde_kms *sde_kms;
-
-	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
-		return;
-	}
-	sde_kms = irq_data_get_irq_chip_data(irqd);
-
-	/* memory barrier */
-	smp_mb__before_atomic();
-	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-	/* memory barrier */
-	smp_mb__after_atomic();
-}
-
-static void sde_hw_irq_unmask(struct irq_data *irqd)
-{
-	struct sde_kms *sde_kms;
-
-	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
-		return;
-	}
-	sde_kms = irq_data_get_irq_chip_data(irqd);
-
-	/* memory barrier */
-	smp_mb__before_atomic();
-	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-	/* memory barrier */
-	smp_mb__after_atomic();
-}
-
-static struct irq_chip sde_hw_irq_chip = {
-	.name = "sde",
-	.irq_mask = sde_hw_irq_mask,
-	.irq_unmask = sde_hw_irq_unmask,
-};
-
-static int sde_hw_irqdomain_map(struct irq_domain *domain,
-		unsigned int irq, irq_hw_number_t hwirq)
-{
-	struct sde_kms *sde_kms;
-	int rc;
-
-	if (!domain || !domain->host_data) {
-		SDE_ERROR("invalid parameters domain %d\n", domain != 0);
-		return -EINVAL;
-	}
-	sde_kms = domain->host_data;
-
-	irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
-	rc = irq_set_chip_data(irq, sde_kms);
-
-	return rc;
-}
-
-static const struct irq_domain_ops sde_hw_irqdomain_ops = {
-	.map = sde_hw_irqdomain_map,
-	.xlate = irq_domain_xlate_onecell,
-};
-
 void sde_irq_preinstall(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct device *dev;
-	struct irq_domain *domain;
 
 	if (!sde_kms->dev || !sde_kms->dev->dev) {
 		pr_err("invalid device handles\n");
 		return;
 	}
-	dev = sde_kms->dev->dev;
-
-	domain = irq_domain_add_linear(dev->of_node, 32,
-			&sde_hw_irqdomain_ops, sde_kms);
-	if (!domain) {
-		pr_err("failed to add irq_domain\n");
-		return;
-	}
-
-	sde_kms->irq_controller.enabled_mask = 0;
-	sde_kms->irq_controller.domain = domain;
 
 	sde_core_irq_preinstall(sde_kms);
 }
@@ -162,9 +86,5 @@
 	}
 
 	sde_core_irq_uninstall(sde_kms);
-
-	if (sde_kms->irq_controller.domain) {
-		irq_domain_remove(sde_kms->irq_controller.domain);
-		sde_kms->irq_controller.domain = NULL;
-	}
+	sde_core_irq_domain_fini(sde_kms);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 8cc196a..e73d647 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -25,10 +25,13 @@
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
+#include "msm_gem.h"
 
 #include "dsi_display.h"
 #include "dsi_drm.h"
 #include "sde_wb.h"
+#include "dp_display.h"
+#include "dp_drm.h"
 
 #include "sde_kms.h"
 #include "sde_core_irq.h"
@@ -512,8 +515,28 @@
 			wb_display_get_displays(sde_kms->wb_displays,
 					sde_kms->wb_display_count);
 	}
+
+	/* dp */
+	sde_kms->dp_displays = NULL;
+	sde_kms->dp_display_count = dp_display_get_num_of_displays();
+	if (sde_kms->dp_display_count) {
+		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
+				sizeof(void *), GFP_KERNEL);
+		if (!sde_kms->dp_displays) {
+			SDE_ERROR("failed to allocate dp displays\n");
+			goto exit_deinit_dp;
+		}
+		sde_kms->dp_display_count =
+			dp_display_get_displays(sde_kms->dp_displays,
+					sde_kms->dp_display_count);
+	}
 	return 0;
 
+exit_deinit_dp:
+	kfree(sde_kms->dp_displays);
+	sde_kms->dp_display_count = 0;
+	sde_kms->dp_displays = NULL;
+
 exit_deinit_wb:
 	kfree(sde_kms->wb_displays);
 	sde_kms->wb_display_count = 0;
@@ -579,6 +602,13 @@
 		.soft_reset =   NULL,
 		.get_topology = sde_wb_get_topology
 	};
+	static const struct sde_connector_ops dp_ops = {
+		.post_init  = dp_connector_post_init,
+		.detect     = dp_connector_detect,
+		.get_modes  = dp_connector_get_modes,
+		.mode_valid = dp_connector_mode_valid,
+		.get_info   = dp_connector_get_info,
+	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
 	void *display, *connector;
@@ -590,7 +620,8 @@
 		return -EINVAL;
 	}
 
-	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
+	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
+				sde_kms->dp_display_count;
 	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
 		max_encoders = ARRAY_SIZE(priv->encoders);
 		SDE_ERROR("capping number of displays to %d", max_encoders);
@@ -679,6 +710,47 @@
 			sde_encoder_destroy(encoder);
 		}
 	}
+	/* dp */
+	for (i = 0; i < sde_kms->dp_display_count &&
+			priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->dp_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = dp_connector_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("dp get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("dp encoder init failed %d\n", i);
+			continue;
+		}
+
+		rc = dp_drm_bridge_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+					encoder,
+					NULL,
+					display,
+					&dp_ops,
+					DRM_CONNECTOR_POLL_HPD,
+					DRM_MODE_CONNECTOR_DisplayPort);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("dp %d connector init failed\n", i);
+			dp_drm_bridge_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
 
 	return 0;
 }
@@ -744,6 +816,9 @@
 	priv = dev->dev_private;
 	catalog = sde_kms->catalog;
 
+	ret = sde_core_irq_domain_add(sde_kms);
+	if (ret)
+		goto fail_irq;
 	/*
 	 * Query for underlying display drivers, and create connectors,
 	 * bridges and encoders for them.
@@ -821,6 +896,8 @@
 	return 0;
 fail:
 	_sde_kms_drm_obj_destroy(sde_kms);
+fail_irq:
+	sde_core_irq_domain_fini(sde_kms);
 	return ret;
 }
 
@@ -950,6 +1027,13 @@
 	}
 }
 
+static void sde_kms_set_gem_flags(struct msm_gem_object *msm_obj,
+		uint32_t flags)
+{
+	if (msm_obj)
+		msm_obj->flags |= flags;
+}
+
 struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
 		u32 height, u32 pixel_format, u64 modifier[4], u32 flags)
 {
@@ -1036,10 +1120,13 @@
 			fbo->bo[0] = NULL;
 			goto done;
 		}
+
+		/* insert extra bo flags */
+		sde_kms_set_gem_flags(to_msm_bo(fbo->bo[0]), MSM_BO_KEEPATTRS);
 	} else {
 		mutex_lock(&dev->struct_mutex);
 		fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
-				MSM_BO_SCANOUT | MSM_BO_WC);
+				MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_KEEPATTRS);
 		if (IS_ERR(fbo->bo[0])) {
 			mutex_unlock(&dev->struct_mutex);
 			SDE_ERROR("failed to new gem buffer\n");
@@ -1539,6 +1626,14 @@
 		goto perf_err;
 	}
 
+	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+		rc = PTR_ERR(sde_kms->hw_intr);
+		SDE_ERROR("hw_intr init failed: %d\n", rc);
+		sde_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
 	/*
 	 * _sde_kms_drm_obj_init should create the DRM related objects
 	 * i.e. CRTCs, planes, encoders, connectors and so forth
@@ -1564,23 +1659,12 @@
 	 */
 	dev->mode_config.allow_fb_modifiers = true;
 
-	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
-	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
-		rc = PTR_ERR(sde_kms->hw_intr);
-		if (!sde_kms->hw_intr)
-			rc = -EINVAL;
-		SDE_ERROR("hw_intr init failed: %d\n", rc);
-		sde_kms->hw_intr = NULL;
-		goto hw_intr_init_err;
-	}
-
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 	return 0;
 
-hw_intr_init_err:
-	_sde_kms_drm_obj_destroy(sde_kms);
 drm_obj_init_err:
 	sde_core_perf_destroy(&sde_kms->perf);
+hw_intr_init_err:
 perf_err:
 power_error:
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 1f56d73..d20af9f 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -188,6 +188,8 @@
 	void **dsi_displays;
 	int wb_display_count;
 	void **wb_displays;
+	int dp_display_count;
+	void **dp_displays;
 
 	bool has_danger_ctrl;
 };
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 1721c67..463c84e 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -585,6 +585,99 @@
 	sde_vbif_set_ot_limit(sde_kms, &ot_params);
 }
 
+/**
+ * _sde_plane_set_qos_remap - set vbif QoS priority remap for the given plane
+ * @plane:		Pointer to drm plane
+ */
+static void _sde_plane_set_qos_remap(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_vbif_set_qos_params qos_params;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	psde = to_sde_plane(plane);
+	if (!psde->pipe_hw) {
+		SDE_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = VBIF_RT;
+	qos_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
+	qos_params.xin_id = psde->pipe_hw->cap->xin_id;
+	qos_params.num = psde->pipe_hw->idx - SSPP_VIG0;
+	qos_params.is_rt = psde->is_rt_pipe;
+
+	SDE_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d\n",
+			plane->base.id, qos_params.num,
+			qos_params.vbif_idx,
+			qos_params.xin_id, qos_params.is_rt);
+
+	sde_vbif_set_qos_remap(sde_kms, &qos_params);
+}
+
+/**
+ * _sde_plane_set_ts_prefill - set prefill with traffic shaper
+ * @plane:	Pointer to drm plane
+ * @pstate:	Pointer to sde plane state
+ */
+static void _sde_plane_set_ts_prefill(struct drm_plane *plane,
+		struct sde_plane_state *pstate)
+{
+	struct sde_plane *psde;
+	struct sde_hw_pipe_ts_cfg cfg;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	psde = to_sde_plane(plane);
+	if (!psde->pipe_hw) {
+		SDE_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	if (!psde->pipe_hw->ops.setup_ts_prefill)
+		return;
+
+	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_VBLANK_AMORTIZE);
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.size = sde_plane_get_property(pstate,
+			PLANE_PROP_PREFILL_SIZE);
+	cfg.time = sde_plane_get_property(pstate,
+			PLANE_PROP_PREFILL_TIME);
+
+	SDE_DEBUG("plane%d size:%llu time:%llu\n",
+			plane->base.id, cfg.size, cfg.time);
+	SDE_EVT32(DRMID(plane), cfg.size, cfg.time);
+	psde->pipe_hw->ops.setup_ts_prefill(psde->pipe_hw, &cfg,
+			pstate->multirect_index);
+}
+
 /* helper to update a state's input fence pointer from the property */
 static void _sde_plane_set_input_fence(struct sde_plane *psde,
 		struct sde_plane_state *pstate, uint64_t fd)
@@ -2855,6 +2948,10 @@
 		case PLANE_PROP_BLEND_OP:
 			/* no special action required */
 			break;
+		case PLANE_PROP_PREFILL_SIZE:
+		case PLANE_PROP_PREFILL_TIME:
+			pstate->dirty |= SDE_PLANE_DIRTY_PERF;
+			break;
 		case PLANE_PROP_ROT_DST_X:
 		case PLANE_PROP_ROT_DST_Y:
 		case PLANE_PROP_ROT_DST_W:
@@ -3010,7 +3107,7 @@
 						SDE_PIPE_SC_OP_MODE_OFFLINE;
 				pstate->sc_cfg.rd_en = false;
 				pstate->sc_cfg.rd_scid = 0;
-				pstate->sc_cfg.rd_noallocate = false;
+				pstate->sc_cfg.rd_noallocate = true;
 				pstate->sc_cfg.rd_op_type =
 					SDE_PIPE_SC_RD_OP_TYPE_CACHEABLE;
 			}
@@ -3046,8 +3143,12 @@
 	if (plane->type != DRM_PLANE_TYPE_CURSOR) {
 		_sde_plane_set_qos_ctrl(plane, true, SDE_PLANE_QOS_PANIC_CTRL);
 		_sde_plane_set_ot_limit(plane, crtc);
+		if (pstate->dirty & SDE_PLANE_DIRTY_PERF)
+			_sde_plane_set_ts_prefill(plane, pstate);
 	}
 
+	_sde_plane_set_qos_remap(plane);
+
 	/* clear dirty */
 	pstate->dirty = 0x0;
 
@@ -3232,6 +3333,13 @@
 		msm_property_install_range(&psde->property_info, "color_fill",
 				0, 0, 0xFFFFFFFF, 0, PLANE_PROP_COLOR_FILL);
 
+	msm_property_install_range(&psde->property_info,
+			"prefill_size", 0x0, 0, ~0, 0,
+			PLANE_PROP_PREFILL_SIZE);
+	msm_property_install_range(&psde->property_info,
+			"prefill_time", 0x0, 0, ~0, 0,
+			PLANE_PROP_PREFILL_TIME);
+
 	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
 	if (!info) {
 		SDE_ERROR("failed to allocate info memory\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 2056a70..47611d1 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -95,6 +95,7 @@
 #define SDE_PLANE_DIRTY_RECTS	0x1
 #define SDE_PLANE_DIRTY_FORMAT	0x2
 #define SDE_PLANE_DIRTY_SHARPEN	0x4
+#define SDE_PLANE_DIRTY_PERF	0x8
 #define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index c0c8248..c675216 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -210,6 +210,61 @@
 	return;
 }
 
+void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
+		struct sde_vbif_set_qos_params *params)
+{
+	struct sde_hw_vbif *vbif = NULL;
+	struct sde_hw_mdp *mdp;
+	bool forced_on = false;
+	const struct sde_vbif_qos_tbl *qos_tbl;
+	int i;
+
+	if (!sde_kms || !params || !sde_kms->hw_mdp) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = sde_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+		if (sde_kms->hw_vbif[i] &&
+				sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
+			vbif = sde_kms->hw_vbif[i];
+			break;
+		}
+	}
+
+	if (!vbif || !vbif->cap) {
+		SDE_ERROR("invalid vbif %d\n", params->vbif_idx);
+		return;
+	}
+
+	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+		SDE_DEBUG("qos remap not supported\n");
+		return;
+	}
+
+	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+			&vbif->cap->qos_nrt_tbl;
+
+	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+		SDE_DEBUG("qos tbl not defined\n");
+		return;
+	}
+
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+		SDE_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+				params->vbif_idx, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+	}
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
 #ifdef CONFIG_DEBUG_FS
 void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
 {
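
A note on the clock handling in sde_vbif_set_qos_remap() above: setup_clk_force_ctrl() is expected to report whether this call newly forced the clock on, so the override is dropped afterwards only if it was installed here. A minimal userspace sketch of that save-and-restore idiom (illustrative; clk_force_ctrl() is a made-up stand-in for the mdp op):

#include <stdbool.h>
#include <stdio.h>

static bool forced;	/* stand-in for the hardware clock-override bit */

/* Returns true only when this call newly turned the override on. */
static bool clk_force_ctrl(bool enable)
{
	bool was_forced = forced;

	forced = enable;
	return enable && !was_forced;
}

int main(void)
{
	bool forced_on = clk_force_ctrl(true);

	printf("programming QoS remap registers with the clock held on\n");

	if (forced_on)	/* drop the override only if we installed it */
		clk_force_ctrl(false);
	return 0;
}
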
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index 4b1cb1c..d05c2e0 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -28,6 +28,22 @@
 };
 
 /**
+ * struct sde_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct sde_vbif_set_qos_params {
+	u32 vbif_idx;
+	u32 xin_id;
+	u32 clk_ctrl;
+	u32 num;
+	bool is_rt;
+};
+
+/**
  * sde_vbif_set_ot_limit - set OT limit for vbif client
  * @sde_kms:	SDE handler
  * @params:	Pointer to OT configuration parameters
@@ -35,6 +51,14 @@
 void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
 		struct sde_vbif_set_ot_params *params);
 
+/**
+ * sde_vbif_set_qos_remap - set QoS priority level remap
+ * @sde_kms:	SDE handler
+ * @params:	Pointer to QoS configuration parameters
+ */
+void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
+		struct sde_vbif_set_qos_params *params);
+
 #ifdef CONFIG_DEBUG_FS
 int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
 void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
new file mode 100644
index 0000000..12165e8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "sde_kms.h"
+#include "sde_edid_parser.h"
+
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+
+enum data_block_types {
+	RESERVED,
+	AUDIO_DATA_BLOCK,
+	VIDEO_DATA_BLOCK,
+	VENDOR_SPECIFIC_DATA_BLOCK,
+	SPEAKER_ALLOCATION_DATA_BLOCK,
+	VESA_DTC_DATA_BLOCK,
+	RESERVED2,
+	USE_EXTENDED_TAG
+};
+
+static u8 *sde_find_edid_extension(struct edid *edid, int ext_id)
+{
+	u8 *edid_ext = NULL;
+	int i;
+
+	/* No EDID or EDID extensions */
+	if (edid == NULL || edid->extensions == 0)
+		return NULL;
+
+	/* Find CEA extension */
+	for (i = 0; i < edid->extensions; i++) {
+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+		if (edid_ext[0] == ext_id)
+			break;
+	}
+
+	if (i == edid->extensions)
+		return NULL;
+
+	return edid_ext;
+}
+
+static u8 *sde_find_cea_extension(struct edid *edid)
+{
+	return sde_find_edid_extension(edid, SDE_CEA_EXT);
+}
+
+static int
+sde_cea_db_payload_len(const u8 *db)
+{
+	return db[0] & 0x1f;
+}
+
+static int
+sde_cea_db_tag(const u8 *db)
+{
+	return db[0] >> 5;
+}
+
+static int
+sde_cea_revision(const u8 *cea)
+{
+	return cea[1];
+}
+
+static int
+sde_cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+	/* Data block offset in CEA extension block */
+	*start = 4;
+	*end = cea[2];
+	if (*end == 0)
+		*end = 127;
+	if (*end < 4 || *end > 127)
+		return -ERANGE;
+	return 0;
+}
+
+#define sde_for_each_cea_db(cea, i, start, end) \
+	for ((i) = (start); \
+	     (i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \
+	     (i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1)
+
+static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if ((sde_cea_db_tag(db) == SDE_EXTENDED_TAG) &&
+				(db[1] == blk_id))
+				return db;
+		}
+	}
+	return NULL;
+}
+
+static u8 *
+sde_edid_find_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if (sde_cea_db_tag(db) == blk_id)
+				return db;
+		}
+	}
+	return NULL;
+}
+
+
+static const u8 *_sde_edid_find_block(const u8 *in_buf, u32 start_offset,
+	u8 type, u8 *len)
+{
+	/* the start of data block collection, start of Video Data Block */
+	u32 offset = start_offset;
+	u32 dbc_offset = in_buf[2];
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	/*
+	 * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
+	 *   collection present.
+	 * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
+	 *   collection present and no DTD data present.
+	 */
+	if ((dbc_offset == 0) || (dbc_offset == 4)) {
+		SDE_ERROR("EDID: no DTD or non-DTD data present\n");
+		return NULL;
+	}
+
+	while (offset < dbc_offset) {
+		u8 block_len = in_buf[offset] & 0x1F;
+
+		if ((offset + block_len <= dbc_offset) &&
+		    (in_buf[offset] >> 5) == type) {
+			*len = block_len;
+			SDE_EDID_DEBUG("block=%d found @ 0x%x w/ len=%d\n",
+				type, offset, block_len);
+
+			return in_buf + offset;
+		}
+		offset += 1 + block_len;
+	}
+
+	return NULL;
+}
+
+static void sde_edid_extract_vendor_id(struct sde_edid_ctrl *edid_ctrl)
+{
+	char *vendor_id;
+	u32 id_codes;
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vendor_id = edid_ctrl->vendor_id;
+	id_codes = ((u32)edid_ctrl->edid->mfg_id[0] << 8) +
+		edid_ctrl->edid->mfg_id[1];
+
+	vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
+	vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
+	vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
+	vendor_id[3] = 0;
+	SDE_EDID_DEBUG("vendor id is %s ", vendor_id);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void sde_edid_set_y420_support(struct drm_connector *connector,
+		u32 video_format)
+{
+	u8 cea_mode = 0;
+	struct drm_display_mode *mode;
+
+	/* Need to add Y420 support flag to the modes */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		cea_mode = drm_match_cea_mode(mode);
+		if ((cea_mode != 0) && (cea_mode == video_format)) {
+			SDE_EDID_DEBUG("%s found match for %d ", __func__,
+			video_format);
+			mode->flags |= DRM_MODE_FLAG_SUPPORTS_YUV;
+		}
+	}
+}
+
+static void sde_edid_parse_Y420CMDB(
+		struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+		const u8 *db)
+{
+	u32 offset = 0;
+	u8 len = 0;
+	u8 svd_len = 0;
+	const u8 *svd = NULL;
+	u32 i = 0, j = 0;
+	u32 video_format = 0;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: edid_ctrl is NULL\n", __func__);
+		return;
+	}
+
+	if (!db) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	len = db[0] & 0x1f;
+
+	if (len < 7)
+		return;
+	/* Byte 3 to L+1 contain SVDs */
+	offset += 2;
+
+	svd = sde_edid_find_block(edid_ctrl->edid, VIDEO_DATA_BLOCK);
+
+	if (svd) {
+		/*moving to the next byte as vic info begins there*/
+		++svd;
+		svd_len = svd[0] & 0x1f;
+	}
+
+	for (i = 0; i < svd_len; i++, j++) {
+		video_format = *(svd + i) & 0x7F;
+		if (db[offset] & (1 << j))
+			sde_edid_set_y420_support(connector, video_format);
+
+		if (j & 0x80) {
+			j = j/8;
+			offset++;
+			if (offset >= len)
+				break;
+		}
+	}
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+
+}
+
+static void sde_edid_parse_Y420VDB(
+		struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+		const u8 *db)
+{
+	u8 len = db[0] & 0x1f;
+	u32 i = 0;
+	u32 video_format = 0;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+
+	/* Offset to byte 3 */
+	db += 2;
+	for (i = 0; i < len - 1; i++) {
+		video_format = *(db + i) & 0x7F;
+		/*
+		 * mode was already added in get_modes()
+		 * only need to set the Y420 support flag
+		 */
+		sde_edid_set_y420_support(connector, video_format);
+	}
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void sde_edid_set_mode_format(
+		struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
+{
+	const u8 *db = NULL;
+	struct drm_display_mode *mode;
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	/* Set YUV mode support flags for YCbcr420VDB */
+	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+			Y420_VIDEO_DATA_BLOCK);
+	if (db)
+		sde_edid_parse_Y420VDB(connector, edid_ctrl, db);
+	else
+		SDE_EDID_DEBUG("YCbCr420 VDB is not present\n");
+
+	/* Set RGB supported on all modes where YUV is not set */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		if (!(mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV))
+			mode->flags |= DRM_MODE_FLAG_SUPPORTS_RGB;
+	}
+
+
+	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+			Y420_CAPABILITY_MAP_DATA_BLOCK);
+	if (db)
+		sde_edid_parse_Y420CMDB(connector, edid_ctrl, db);
+	else
+		SDE_EDID_DEBUG("YCbCr420 CMDB is not present\n");
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+}
+
+static void _sde_edid_extract_audio_data_blocks(
+	struct sde_edid_ctrl *edid_ctrl)
+{
+	u8 len = 0;
+	u8 adb_max = 0;
+	const u8 *adb = NULL;
+	u32 offset = DBC_START_OFFSET;
+	u8 *cea = NULL;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("invalid edid_ctrl\n");
+		return;
+	}
+	SDE_EDID_DEBUG("%s +", __func__);
+	cea = sde_find_cea_extension(edid_ctrl->edid);
+	if (!cea) {
+		SDE_DEBUG("CEA extension not found\n");
+		return;
+	}
+
+	edid_ctrl->adb_size = 0;
+
+	memset(edid_ctrl->audio_data_block, 0,
+		sizeof(edid_ctrl->audio_data_block));
+
+	do {
+		len = 0;
+		adb = _sde_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
+			&len);
+
+		if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
+			adb_max >= MAX_NUMBER_ADB)) {
+			if (!edid_ctrl->adb_size) {
+				SDE_DEBUG("No/Invalid Audio Data Block\n");
+				return;
+			}
+
+			break;
+		}
+
+		memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+			adb + 1, len);
+		offset = (adb - cea) + 1 + len;
+
+		edid_ctrl->adb_size += len;
+		adb_max++;
+	} while (adb);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void _sde_edid_extract_speaker_allocation_data(
+	struct sde_edid_ctrl *edid_ctrl)
+{
+	u8 len;
+	const u8 *sadb = NULL;
+	u8 *cea = NULL;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("invalid edid_ctrl\n");
+		return;
+	}
+	SDE_EDID_DEBUG("%s +", __func__);
+	cea = sde_find_cea_extension(edid_ctrl->edid);
+	if (!cea) {
+		SDE_DEBUG("CEA extension not found\n");
+		return;
+	}
+
+	sadb = _sde_edid_find_block(cea, DBC_START_OFFSET,
+		SPEAKER_ALLOCATION_DATA_BLOCK, &len);
+	if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
+		SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
+		return;
+	}
+
+	memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
+	edid_ctrl->sadb_size = len;
+
+	SDE_EDID_DEBUG("speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+		sadb[1],
+		(sadb[1] & BIT(0)) ? "FL/FR," : "",
+		(sadb[1] & BIT(1)) ? "LFE," : "",
+		(sadb[1] & BIT(2)) ? "FC," : "",
+		(sadb[1] & BIT(3)) ? "RL/RR," : "",
+		(sadb[1] & BIT(4)) ? "RC," : "",
+		(sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+		(sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+struct sde_edid_ctrl *sde_edid_init(void)
+{
+	struct sde_edid_ctrl *edid_ctrl = NULL;
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
+	if (!edid_ctrl) {
+		SDE_ERROR("edid_ctrl alloc failed\n");
+		return NULL;
+	}
+	memset((edid_ctrl), 0, sizeof(*edid_ctrl));
+	SDE_EDID_DEBUG("%s -\n", __func__);
+	return edid_ctrl;
+}
+
+void sde_free_edid(void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	kfree(edid_ctrl->edid);
+	edid_ctrl->edid = NULL;
+}
+
+void sde_edid_deinit(void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	sde_free_edid((void *)&edid_ctrl);
+	kfree(edid_ctrl);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+int _sde_edid_update_modes(struct drm_connector *connector,
+	void *input)
+{
+	int rc = 0;
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	if (edid_ctrl->edid) {
+		drm_mode_connector_update_edid_property(connector,
+			edid_ctrl->edid);
+
+		rc = drm_add_edid_modes(connector, edid_ctrl->edid);
+		sde_edid_set_mode_format(connector, edid_ctrl);
+		SDE_EDID_DEBUG("%s -", __func__);
+		return rc;
+	}
+
+	drm_mode_connector_update_edid_property(connector, NULL);
+	SDE_EDID_DEBUG("%s null edid -", __func__);
+	return rc;
+}
+
+bool sde_detect_hdmi_monitor(void *input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+	return drm_detect_hdmi_monitor(edid_ctrl->edid);
+}
+
+void sde_get_edid(struct drm_connector *connector,
+				  struct i2c_adapter *adapter, void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	edid_ctrl->edid = drm_get_edid(connector, adapter);
+	SDE_EDID_DEBUG("%s +\n", __func__);
+
+	if (!edid_ctrl->edid)
+		SDE_ERROR("EDID read failed\n");
+
+	if (edid_ctrl->edid) {
+		sde_edid_extract_vendor_id(edid_ctrl);
+		_sde_edid_extract_audio_data_blocks(edid_ctrl);
+		_sde_edid_extract_speaker_allocation_data(edid_ctrl);
+	}
+	SDE_EDID_DEBUG("%s -\n", __func__);
+}
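
The helpers above walk the CEA extension as tag/length records: the top three bits of a data block header are the tag, the low five bits the payload length, and the next block starts payload length + 1 bytes later, which is exactly what sde_for_each_cea_db() encodes. A standalone sketch of that walk over a fabricated block collection (illustrative only; the bytes are made up):

#include <stdio.h>
#include <stdint.h>

static int db_payload_len(const uint8_t *db) { return db[0] & 0x1f; }
static int db_tag(const uint8_t *db)         { return db[0] >> 5; }

int main(void)
{
	/* Fabricated data block collection: a 3-byte audio block (tag 1)
	 * followed by a 2-byte video block (tag 2).
	 */
	const uint8_t dbc[] = { 0x23, 0x09, 0x07, 0x07, 0x42, 0x10, 0x04 };
	int i, end = sizeof(dbc);

	for (i = 0; i < end && i + db_payload_len(&dbc[i]) < end;
	     i += db_payload_len(&dbc[i]) + 1)
		printf("tag %d, payload %d bytes at offset %d\n",
		       db_tag(&dbc[i]), db_payload_len(&dbc[i]), i);
	return 0;
}

The example prints the audio block (tag 1, 3 bytes) at offset 0 and the video block (tag 2, 2 bytes) at offset 4.
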
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
new file mode 100644
index 0000000..1143dc2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_EDID_PARSER_H_
+#define _SDE_EDID_PARSER_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+
+#define MAX_NUMBER_ADB 5
+#define MAX_AUDIO_DATA_BLOCK_SIZE 30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
+#define EDID_VENDOR_ID_SIZE     4
+
+#define SDE_CEA_EXT    0x02
+#define SDE_EXTENDED_TAG 0x07
+
+enum extended_data_block_types {
+	VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
+	VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
+	HDMI_VIDEO_DATA_BLOCK = 0x04,
+	HDR_STATIC_METADATA_DATA_BLOCK = 0x06,
+	Y420_VIDEO_DATA_BLOCK = 0x0E,
+	VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D,
+	Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F,
+	VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11,
+	INFOFRAME_DATA_BLOCK = 0x20,
+};
+
+#ifdef SDE_EDID_DEBUG_ENABLE
+#define SDE_EDID_DEBUG(fmt, args...)   SDE_ERROR(fmt, ##args)
+#else
+#define SDE_EDID_DEBUG(fmt, args...)   SDE_DEBUG(fmt, ##args)
+#endif
+
+/**
+ * struct sde_edid_hdr_data - HDR Static Metadata
+ * @eotf: Electro-Optical Transfer Function
+ * @metadata_type_one: Static Metadata Type 1 support
+ * @max_luminance: Desired Content Maximum Luminance
+ * @avg_luminance: Desired Content Frame-average Luminance
+ * @min_luminance: Desired Content Minimum Luminance
+ */
+struct sde_edid_hdr_data {
+	u32 eotf;
+	bool metadata_type_one;
+	u32 max_luminance;
+	u32 avg_luminance;
+	u32 min_luminance;
+};
+
+struct sde_edid_sink_caps {
+	u32 max_pclk_in_hz;
+	bool scdc_present;
+	bool scramble_support; /* scramble support for less than 340Mcsc */
+	bool read_req_support;
+	bool osd_disparity;
+	bool dual_view_support;
+	bool ind_view_support;
+};
+
+struct sde_edid_ctrl {
+	struct edid *edid;
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+	int adb_size;
+	u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+	int sadb_size;
+	bool hdr_supported;
+	char vendor_id[EDID_VENDOR_ID_SIZE];
+	struct sde_edid_sink_caps sink_caps;
+	struct sde_edid_hdr_data hdr_data;
+};
+
+/**
+ * sde_edid_init() - init edid structure.
+ *
+ * Return: handle to sde_edid_ctrl for the client.
+ */
+struct sde_edid_ctrl *sde_edid_init(void);
+
+/**
+ * sde_edid_deinit() - deinit edid structure.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_edid_deinit(void **edid_ctrl);
+
+/**
+ * sde_get_edid() - get edid info.
+ * @connector:   Handle to the drm_connector.
+ * @adapter:     handle to i2c adapter for DDC read
+ * @edid_ctrl:   Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_get_edid(struct drm_connector *connector,
+		struct i2c_adapter *adapter,
+		void **edid_ctrl);
+
+/**
+ * sde_free_edid() - free edid structure.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_free_edid(void **edid_ctrl);
+
+/**
+ * sde_detect_hdmi_monitor() - detect HDMI mode.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: true if the sink is an HDMI monitor, false otherwise.
+ */
+bool sde_detect_hdmi_monitor(void *edid_ctrl);
+
+/**
+ * _sde_edid_update_modes() - populate EDID modes.
+ * @connector:     Handle to the drm_connector.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ * Return: number of modes added to the connector.
+ */
+int _sde_edid_update_modes(struct drm_connector *connector,
+							void *edid_ctrl);
+
+#endif /* _SDE_EDID_PARSER_H_ */
+
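
Read together, the declarations above suggest the following call order for a connector driver: allocate the context, fetch the EDID over DDC, publish the modes, then tear down. A sketch of that lifecycle (illustrative only, not taken from the patch; in practice the context would live for the connector's lifetime rather than per call, and error handling is trimmed):

#include "sde_edid_parser.h"

/* Hypothetical connector glue showing the intended call order. */
static int example_connector_get_modes(struct drm_connector *connector,
		struct i2c_adapter *ddc)
{
	void *edid_ctrl = sde_edid_init();
	int num_modes;

	if (!edid_ctrl)
		return 0;

	/* reads the EDID and caches vendor/audio/speaker data */
	sde_get_edid(connector, ddc, &edid_ctrl);

	/* adds modes and tags them with RGB/Y420 support flags */
	num_modes = _sde_edid_update_modes(connector, edid_ctrl);

	if (sde_detect_hdmi_monitor(edid_ctrl))
		pr_debug("sink is an HDMI monitor\n");

	sde_edid_deinit(&edid_ctrl);
	return num_modes;
}
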
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 1e4f6b1..fb7f85c 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -333,6 +333,31 @@
 		return -EINVAL;
 	}
 
+	pdbus->ab_rt = ab_quota_rt;
+	pdbus->ib_rt = ib_quota_rt;
+	pdbus->ab_nrt = ab_quota_nrt;
+	pdbus->ib_nrt = ib_quota_nrt;
+
+	if (pdbus->enable) {
+		ab_quota_rt = max_t(u64, ab_quota_rt,
+				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
+		ib_quota_rt = max_t(u64, ib_quota_rt,
+				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+		ab_quota_nrt = max_t(u64, ab_quota_nrt,
+				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
+		ib_quota_nrt = max_t(u64, ib_quota_nrt,
+				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+	} else {
+		ab_quota_rt = max_t(u64, ab_quota_rt,
+				SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
+		ib_quota_rt = max_t(u64, ib_quota_rt,
+				SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA);
+		ab_quota_nrt = max_t(u64, ab_quota_nrt,
+				SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
+		ib_quota_nrt = max_t(u64, ib_quota_nrt,
+				SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA);
+	}
+
 	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt)  {
 		new_uc_idx = 0;
 	} else {
@@ -571,19 +596,12 @@
 							bool enable)
 {
 	int rc = 0;
-	u64 ab_quota_rt, ab_quota_nrt;
-	u64 ib_quota_rt, ib_quota_nrt;
 
-	ab_quota_rt = ab_quota_nrt = enable ?
-			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA :
-			SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA;
-	ib_quota_rt = ib_quota_nrt = enable ?
-			SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA :
-			SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA;
+	pdbus->enable = enable;
 
 	if (pdbus->data_bus_hdl)
-		rc = _sde_power_data_bus_set_quota(pdbus, ab_quota_rt,
-				ab_quota_nrt, ib_quota_rt, ib_quota_nrt);
+		rc = _sde_power_data_bus_set_quota(pdbus, pdbus->ab_rt,
+				pdbus->ab_nrt, pdbus->ib_rt, pdbus->ib_nrt);
 
 	if (rc)
 		pr_err("failed to set data bus vote rc=%d enable:%d\n",
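
The hunk above caches the raw ab/ib votes and then clamps them against the enable/disable floors from sde_power_handle.h before voting. A small worked sketch of that clamp (illustrative; the requested value is hypothetical, the 2000000 floor is the new SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA):

#include <stdio.h>
#include <stdint.h>

#define ENABLE_BUS_AB_QUOTA	2000000ULL	/* floor while the bus is enabled */
#define DISABLE_BUS_AB_QUOTA	0ULL

static uint64_t max_u64(uint64_t a, uint64_t b)
{
	return a > b ? a : b;
}

int main(void)
{
	uint64_t requested_ab = 1200000;	/* hypothetical client vote */
	int bus_enabled = 1;

	uint64_t min_ab = bus_enabled ? ENABLE_BUS_AB_QUOTA
				      : DISABLE_BUS_AB_QUOTA;
	uint64_t voted_ab = max_u64(requested_ab, min_ab);

	printf("requested %llu -> voted %llu\n",
	       (unsigned long long)requested_ab,
	       (unsigned long long)voted_ab);
	return 0;
}
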
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 38bf21f..c526b71 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,9 +16,9 @@
 
 #define MAX_CLIENT_NAME_LEN 128
 
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	6000000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	2000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	6000000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	2000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
 #include <linux/sde_io_util.h>
@@ -93,6 +93,11 @@
  * @bus_channels: number of memory bus channels
  * @curr_bw_uc_idx: current use case index of data bus
  * @ao_bw_uc_idx: active only use case index of data bus
+ * @ab_rt: realtime ab quota
+ * @ib_rt: realtime ib quota
+ * @ab_nrt: non-realtime ab quota
+ * @ib_nrt: non-realtime ib quota
+ * @enable: true if bus is enabled
  */
 struct sde_power_data_bus_handle {
 	struct msm_bus_scale_pdata *data_bus_scale_table;
@@ -102,6 +107,11 @@
 	u32 bus_channels;
 	u32 curr_bw_uc_idx;
 	u32 ao_bw_uc_idx;
+	u64 ab_rt;
+	u64 ib_rt;
+	u64 ab_nrt;
+	u64 ib_nrt;
+	bool enable;
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 46d1df8..7bf2211 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -220,6 +220,39 @@
 }
 EXPORT_SYMBOL(sde_rsc_unregister_event);
 
+bool is_sde_rsc_available(int rsc_index)
+{
+	if (rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index:%d\n", rsc_index);
+		return false;
+	} else if (!rsc_prv_list[rsc_index]) {
+		pr_err("rsc idx:%d not probed yet or not available\n",
+								rsc_index);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(is_sde_rsc_available);
+
+enum sde_rsc_state get_sde_rsc_current_state(int rsc_index)
+{
+	struct sde_rsc_priv *rsc;
+
+	if (rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index:%d\n", rsc_index);
+		return SDE_RSC_IDLE_STATE;
+	} else if (!rsc_prv_list[rsc_index]) {
+		pr_err("rsc idx:%d not probed yet or not available\n",
+								rsc_index);
+		return SDE_RSC_IDLE_STATE;
+	}
+
+	rsc = rsc_prv_list[rsc_index];
+	return rsc->current_state;
+}
+EXPORT_SYMBOL(get_sde_rsc_current_state);
+
 static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
 	struct sde_power_client *pclient, bool enable)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 8894fee..cbacbb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -130,7 +130,7 @@
 		poll = false;
 	}
 
-	if (list_empty(&therm->alarm.head) && poll)
+	if (poll)
 		nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
 	spin_unlock_irqrestore(&therm->lock, flags);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 91198d7..e2fecce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -83,7 +83,7 @@
 	spin_unlock_irqrestore(&fan->lock, flags);
 
 	/* schedule next fan update, if not at target speed already */
-	if (list_empty(&fan->alarm.head) && target != duty) {
+	if (target != duty) {
 		u16 bump_period = fan->bios.bump_period;
 		u16 slow_down_period = fan->bios.slow_down_period;
 		u64 delay;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 59701b7..ff9fbe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -53,7 +53,7 @@
 	duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
 	nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
 
-	if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+	if (percent != (duty * 100)) {
 		u64 next_change = (percent * fan->period_us) / 100;
 		if (!duty)
 			next_change = fan->period_us - next_change;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index b9703c0..9a79e91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -185,7 +185,7 @@
 	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
 	/* schedule the next poll in one second */
-	if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+	if (therm->func->temp_get(therm) >= 0)
 		nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index 07dc82b..f2a86ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -36,23 +36,29 @@
 	unsigned long flags;
 	LIST_HEAD(exec);
 
-	/* move any due alarms off the pending list */
+	/* Process pending alarms. */
 	spin_lock_irqsave(&tmr->lock, flags);
 	list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
-		if (alarm->timestamp <= nvkm_timer_read(tmr))
-			list_move_tail(&alarm->head, &exec);
+		/* Have we hit the earliest alarm that hasn't gone off? */
+		if (alarm->timestamp > nvkm_timer_read(tmr)) {
+			/* Schedule it.  If we didn't race, we're done. */
+			tmr->func->alarm_init(tmr, alarm->timestamp);
+			if (alarm->timestamp > nvkm_timer_read(tmr))
+				break;
+		}
+
+		/* Move to completed list.  We'll drop the lock before
+		 * executing the callback so it can reschedule itself.
+		 */
+		list_move_tail(&alarm->head, &exec);
 	}
 
-	/* reschedule interrupt for next alarm time */
-	if (!list_empty(&tmr->alarms)) {
-		alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
-		tmr->func->alarm_init(tmr, alarm->timestamp);
-	} else {
+	/* Shut down interrupt if no more pending alarms. */
+	if (list_empty(&tmr->alarms))
 		tmr->func->alarm_fini(tmr);
-	}
 	spin_unlock_irqrestore(&tmr->lock, flags);
 
-	/* execute any pending alarm handlers */
+	/* Execute completed callbacks. */
 	list_for_each_entry_safe(alarm, atemp, &exec, head) {
 		list_del_init(&alarm->head);
 		alarm->func(alarm);
@@ -65,24 +71,37 @@
 	struct nvkm_alarm *list;
 	unsigned long flags;
 
-	alarm->timestamp = nvkm_timer_read(tmr) + nsec;
-
-	/* append new alarm to list, in soonest-alarm-first order */
+	/* Remove alarm from pending list.
+	 *
+	 * This both protects against the corruption of the list,
+	 * and implements alarm rescheduling/cancellation.
+	 */
 	spin_lock_irqsave(&tmr->lock, flags);
-	if (!nsec) {
-		if (!list_empty(&alarm->head))
-			list_del(&alarm->head);
-	} else {
+	list_del_init(&alarm->head);
+
+	if (nsec) {
+		/* Insert into pending list, ordered earliest to latest. */
+		alarm->timestamp = nvkm_timer_read(tmr) + nsec;
 		list_for_each_entry(list, &tmr->alarms, head) {
 			if (list->timestamp > alarm->timestamp)
 				break;
 		}
+
 		list_add_tail(&alarm->head, &list->head);
+
+		/* Update HW if this is now the earliest alarm. */
+		list = list_first_entry(&tmr->alarms, typeof(*list), head);
+		if (list == alarm) {
+			tmr->func->alarm_init(tmr, alarm->timestamp);
+			/* This shouldn't happen if callers aren't stupid.
+			 *
+			 * Worst case scenario is that it'll take roughly
+			 * 4 seconds for the next alarm to trigger.
+			 */
+			WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
+		}
 	}
 	spin_unlock_irqrestore(&tmr->lock, flags);
-
-	/* process pending alarms */
-	nvkm_timer_alarm_trigger(tmr);
 }
 
 void
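
The reworked nvkm_timer_alarm() above keeps the pending list ordered earliest-to-latest and reprograms the hardware comparator only when the new entry ends up at the head. A userspace sketch of that ordered insert on a plain singly linked list (illustrative only):

#include <stdio.h>
#include <stdint.h>

struct alarm {
	uint64_t timestamp;
	struct alarm *next;
};

/* Insert ordered earliest-to-latest; returns nonzero when @a becomes the
 * new head, i.e. the point where the hardware would need reprogramming.
 */
static int alarm_insert(struct alarm **head, struct alarm *a)
{
	struct alarm **pos = head;

	while (*pos && (*pos)->timestamp <= a->timestamp)
		pos = &(*pos)->next;
	a->next = *pos;
	*pos = a;
	return pos == head;
}

int main(void)
{
	struct alarm *head = NULL;
	struct alarm a = { 300, NULL }, b = { 100, NULL }, c = { 200, NULL };

	printf("a new head? %d\n", alarm_insert(&head, &a));	/* 1 */
	printf("b new head? %d\n", alarm_insert(&head, &b));	/* 1: earlier */
	printf("c new head? %d\n", alarm_insert(&head, &c));	/* 0 */
	return 0;
}

Inserting 300 and then 100 reports a new head both times (the comparator would be reprogrammed), while inserting 200 does not.
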
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7b9ce87..7f48249 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -76,8 +76,8 @@
 	u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
 
 	if (stat & 0x00000001) {
-		nvkm_timer_alarm_trigger(tmr);
 		nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
+		nvkm_timer_alarm_trigger(tmr);
 		stat &= ~0x00000001;
 	}
 
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index e982afe..1f76233a 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -860,6 +860,9 @@
 #define A6XX_GMU_AHB_FENCE_RANGE_0		0x23B11
 #define A6XX_GMU_AHB_FENCE_RANGE_1		0x23B12
 
+/* GPUCC registers */
+#define A6XX_GPU_CC_GX_GDSCR                   0x24403
+
 /* GPU RSC sequencer registers */
 #define	A6XX_RSCC_PDC_SEQ_START_ADDR			0x23408
 #define A6XX_RSCC_PDC_MATCH_VALUE_LO			0x23409
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 6e025c8..9a56bec 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -1007,6 +1007,7 @@
 #define SPTPRAC_POWEROFF_STATUS_MASK	BIT(2)
 #define SPTPRAC_POWERON_STATUS_MASK	BIT(3)
 #define SPTPRAC_CTRL_TIMEOUT		10 /* ms */
+#define A6XX_RETAIN_FF_ENABLE_ENABLE_MASK BIT(11)
 
 /*
  * a6xx_sptprac_enable() - Power on SPTPRAC
@@ -1047,6 +1048,10 @@
 	if (!gmu->pdev)
 		return;
 
+	/* Ensure that retention is on */
+	kgsl_gmu_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
+			A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);
+
 	kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
 			SPTPRAC_POWEROFF_CTRL_MASK);
 
@@ -1101,6 +1106,10 @@
 	if (!regulator_is_enabled(gmu->gx_gdsc))
 		return 0;
 
+	/* Ensure that retention is on */
+	kgsl_gmu_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
+			A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);
+
 	clk_disable_unprepare(pwr->grp_clks[0]);
 
 	clk_set_rate(pwr->grp_clks[0],
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 7354e82..f72b3fa 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1543,8 +1543,6 @@
 
 	if (gmu->reg_virt) {
 		devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
-		devm_release_mem_region(&gmu->pdev->dev,
-				gmu->reg_phys, gmu->reg_len);
 		gmu->reg_virt = NULL;
 	}
 
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index b5beea53..ab646a9 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -217,7 +217,15 @@
 	if (ret < 0 || value < 0)
 		ret = -EINVAL;
 
-	return ret;
+	ret = sensor_hub_get_feature(st->hsdev,
+				     st->poll.report_id,
+				     st->poll.index, sizeof(value), &value);
+	if (ret < 0 || value < 0)
+		return -EINVAL;
+
+	st->poll_interval = value;
+
+	return 0;
 }
 EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);
 
@@ -259,7 +267,16 @@
 	if (ret < 0 || value < 0)
 		ret = -EINVAL;
 
-	return ret;
+	ret = sensor_hub_get_feature(st->hsdev,
+				     st->sensitivity.report_id,
+				     st->sensitivity.index, sizeof(value),
+				     &value);
+	if (ret < 0 || value < 0)
+		return -EINVAL;
+
+	st->raw_hystersis = value;
+
+	return 0;
 }
 EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
 
@@ -355,6 +372,9 @@
 	/* Default unit of measure is milliseconds */
 	if (st->poll.units == 0)
 		st->poll.units = HID_USAGE_SENSOR_UNITS_MILLISECOND;
+
+	st->poll_interval = -1;
+
 	return 0;
 
 }
@@ -377,6 +397,8 @@
 					HID_USAGE_SENSOR_PROY_POWER_STATE,
 					&st->power_state);
 
+	st->raw_hystersis = -1;
+
 	sensor_hub_input_get_attribute_info(hsdev,
 			HID_FEATURE_REPORT, usage_id,
 			HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS,
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index ecf592d..6082934 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,6 +51,8 @@
 			st->report_state.report_id,
 			st->report_state.index,
 			HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
+
+		poll_value = hid_sensor_read_poll_value(st);
 	} else {
 		int val;
 
@@ -87,9 +89,7 @@
 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
 			       st->power_state.index,
 			       sizeof(state_val), &state_val);
-	if (state)
-		poll_value = hid_sensor_read_poll_value(st);
-	if (poll_value > 0)
+	if (state && poll_value)
 		msleep_interruptible(poll_value * 2);
 
 	return 0;
@@ -127,6 +127,20 @@
 	struct hid_sensor_common *attrb = container_of(work,
 						       struct hid_sensor_common,
 						       work);
+
+	if (attrb->poll_interval >= 0)
+		sensor_hub_set_feature(attrb->hsdev, attrb->poll.report_id,
+				       attrb->poll.index,
+				       sizeof(attrb->poll_interval),
+				       &attrb->poll_interval);
+
+	if (attrb->raw_hystersis >= 0)
+		sensor_hub_set_feature(attrb->hsdev,
+				       attrb->sensitivity.report_id,
+				       attrb->sensitivity.index,
+				       sizeof(attrb->raw_hystersis),
+				       &attrb->raw_hystersis);
+
 	_hid_sensor_power_state(attrb, true);
 }
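
The two hid-sensor hunks above follow one pattern: after writing a feature report, read it back, cache the value the device actually accepted (poll interval, hysteresis), and replay the cached values when the sensor is powered up again. A minimal sketch of that write/read-back/cache idiom (illustrative; set_feature() and get_feature() are made-up stand-ins for the sensor hub calls):

#include <stdio.h>

static int hw_value = 50;		/* pretend device-side feature value */

/* Hypothetical stand-ins for the sensor hub set/get feature calls. */
static int set_feature(int value)
{
	/* the device may clamp or quantize; emulate a clamp at 100 */
	hw_value = value > 100 ? 100 : value;
	return 0;
}

static int get_feature(int *value)
{
	*value = hw_value;
	return 0;
}

int main(void)
{
	int cached = -1;		/* -1 means "nothing to restore" */
	int readback;

	set_feature(250);
	if (!get_feature(&readback) && readback >= 0)
		cached = readback;	/* cache what the device accepted */

	printf("cached for restore after power-up: %d\n", cached);
	return 0;
}
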
 
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index e690dd1..4b0f942 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -184,9 +184,9 @@
 	.address = (chan),					\
 	.scan_type = {						\
 		.sign = 'u',					\
-		.realbits = '8',				\
-		.storagebits = '8',				\
-		.shift = '0',					\
+		.realbits = 8,					\
+		.storagebits = 8,				\
+		.shift = 0,					\
 	},							\
 	.ext_info = ad7303_ext_info,				\
 }
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index e5a533c..f762eb8 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -175,11 +175,12 @@
 	}
 	H6 = sign_extend32(tmp, 7);
 
-	var = ((s32)data->t_fine) - 76800;
-	var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15)
-		* (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10)
-		+ 2097152) * H2 + 8192) >> 14);
-	var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4;
+	var = ((s32)data->t_fine) - (s32)76800;
+	var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var))
+		+ (s32)16384) >> 15) * (((((((var * H6) >> 10)
+		* (((var * (s32)H3) >> 11) + (s32)32768)) >> 10)
+		+ (s32)2097152) * H2 + 8192) >> 14);
+	var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4;
 
 	return var >> 12;
 };
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 5656deb..0204595 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -50,7 +50,6 @@
 #define AS3935_TUNE_CAP		0x08
 #define AS3935_CALIBRATE	0x3D
 
-#define AS3935_WRITE_DATA	BIT(15)
 #define AS3935_READ_DATA	BIT(14)
 #define AS3935_ADDRESS(x)	((x) << 8)
 
@@ -105,7 +104,7 @@
 {
 	u8 *buf = st->buf;
 
-	buf[0] = (AS3935_WRITE_DATA | AS3935_ADDRESS(reg)) >> 8;
+	buf[0] = AS3935_ADDRESS(reg) >> 8;
 	buf[1] = val;
 
 	return spi_write(st->spi, buf, 2);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0f58f46..8fd108d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -444,8 +444,8 @@
 	fl6.saddr = src_in->sin6_addr;
 	fl6.flowi6_oif = addr->bound_dev_if;
 
-	dst = ip6_route_output(addr->net, NULL, &fl6);
-	if ((ret = dst->error))
+	ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
+	if (ret < 0)
 		goto put;
 
 	rt = (struct rt6_info *)dst;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index bd786b7..bb72976 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -751,6 +751,9 @@
 	/* release the cpu */
 	hfi1_put_proc_affinity(fdata->rec_cpu_num);
 
+	/* clean up rcv side */
+	hfi1_user_exp_rcv_free(fdata);
+
 	/*
 	 * Clear any left over, unhandled events so the next process that
 	 * gets this context doesn't get confused.
@@ -790,7 +793,7 @@
 
 	dd->rcd[uctxt->ctxt] = NULL;
 
-	hfi1_user_exp_rcv_free(fdata);
+	hfi1_user_exp_rcv_grp_free(uctxt);
 	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
 
 	uctxt->rcvwait_to = 0;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index e3b5bc9..34cfd34 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1757,6 +1757,7 @@
 			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
 				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
 					   rcd->ctxt);
+				ret = -ENOMEM;
 				goto bail_rcvegrbuf_phys;
 			}
 
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 64d2652..db0f140 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -250,36 +250,40 @@
 	return ret;
 }
 
+void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt)
+{
+	struct tid_group *grp, *gptr;
+
+	list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
+				 list) {
+		list_del_init(&grp->list);
+		kfree(grp);
+	}
+	hfi1_clear_tids(uctxt);
+}
+
 int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
 {
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
-	struct tid_group *grp, *gptr;
 
-	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
-		return 0;
 	/*
 	 * The notifier would have been removed when the process'es mm
 	 * was freed.
 	 */
-	if (fd->handler)
+	if (fd->handler) {
 		hfi1_mmu_rb_unregister(fd->handler);
-
-	kfree(fd->invalid_tids);
-
-	if (!uctxt->cnt) {
+	} else {
 		if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
 			unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
 		if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
 			unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
-		list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
-					 list) {
-			list_del_init(&grp->list);
-			kfree(grp);
-		}
-		hfi1_clear_tids(uctxt);
 	}
 
+	kfree(fd->invalid_tids);
+	fd->invalid_tids = NULL;
+
 	kfree(fd->entry_to_rb);
+	fd->entry_to_rb = NULL;
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 9bc8d9f..d1d7d3d 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -70,6 +70,7 @@
 		(tid) |= EXP_TID_SET(field, (value));			\
 	} while (0)
 
+void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt);
 int hfi1_user_exp_rcv_init(struct file *);
 int hfi1_user_exp_rcv_free(struct hfi1_filedata *);
 int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index be2d02b..1fb31a4 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1828,7 +1828,7 @@
 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
 		klms[i].key = cpu_to_be32(lkey);
-		mr->ibmr.length += sg_dma_len(sg);
+		mr->ibmr.length += sg_dma_len(sg) - sg_offset;
 
 		sg_offset = 0;
 	}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b9e50c1..87fcbf7 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2049,11 +2049,14 @@
 	if (context_copied(context)) {
 		u16 did_old = context_domain_id(context);
 
-		if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+		if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
 			iommu->flush.flush_context(iommu, did_old,
 						   (((u16)bus) << 8) | devfn,
 						   DMA_CCMD_MASK_NOBIT,
 						   DMA_CCMD_DEVICE_INVL);
+			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+						 DMA_TLB_DSI_FLUSH);
+		}
 	}
 
 	pgd = domain->pgd;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 3b53f34..e7b8f49 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -357,6 +357,7 @@
 config DM_RAID
        tristate "RAID 1/4/5/6/10 target"
        depends on BLK_DEV_DM
+       select MD_RAID0
        select MD_RAID1
        select MD_RAID10
        select MD_RAID456
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 125aedc..8bf9667 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -215,7 +215,7 @@
  * Buffers are freed after this timeout
  */
 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 
 static unsigned long dm_bufio_peak_allocated;
 static unsigned long dm_bufio_allocated_kmem_cache;
@@ -923,10 +923,11 @@
 {
 	unsigned long buffers;
 
-	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
-		mutex_lock(&dm_bufio_clients_lock);
-		__cache_size_refresh();
-		mutex_unlock(&dm_bufio_clients_lock);
+	if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+		if (mutex_trylock(&dm_bufio_clients_lock)) {
+			__cache_size_refresh();
+			mutex_unlock(&dm_bufio_clients_lock);
+		}
 	}
 
 	buffers = dm_bufio_cache_size_per_client >>
@@ -1540,10 +1541,10 @@
 	return true;
 }
 
-static unsigned get_retain_buffers(struct dm_bufio_client *c)
+static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-        unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
-        return retain_bytes / c->block_size;
+        unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+        return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
 }
 
 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1553,7 +1554,7 @@
 	struct dm_buffer *b, *tmp;
 	unsigned long freed = 0;
 	unsigned long count = nr_to_scan;
-	unsigned retain_target = get_retain_buffers(c);
+	unsigned long retain_target = get_retain_buffers(c);
 
 	for (l = 0; l < LIST_SIZE; l++) {
 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1779,11 +1780,19 @@
 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 {
 	struct dm_buffer *b, *tmp;
-	unsigned retain_target = get_retain_buffers(c);
-	unsigned count;
+	unsigned long retain_target = get_retain_buffers(c);
+	unsigned long count;
+	LIST_HEAD(write_list);
 
 	dm_bufio_lock(c);
 
+	__check_watermark(c, &write_list);
+	if (unlikely(!list_empty(&write_list))) {
+		dm_bufio_unlock(c);
+		__flush_write_list(&write_list);
+		dm_bufio_lock(c);
+	}
+
 	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
 		if (count <= retain_target)
@@ -1808,6 +1817,8 @@
 
 	mutex_lock(&dm_bufio_clients_lock);
 
+	__cache_size_refresh();
+
 	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
 		__evict_old_buffers(c, max_age_hz);
 
@@ -1930,7 +1941,7 @@
 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
 
-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
 
 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
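
The dm-bufio change above converts the byte-based retain_bytes parameter into a buffer count with a shift by (sectors_per_block_bits + SECTOR_SHIFT), i.e. a divide by the block size, now carried out in unsigned long. A worked sketch of that arithmetic (illustrative; the 256 MiB and 64 KiB figures are hypothetical):

#include <stdio.h>

#define SECTOR_SHIFT 9				/* 512-byte sectors */

int main(void)
{
	unsigned long retain_bytes = 256UL * 1024 * 1024;	/* 256 MiB */
	unsigned sectors_per_block_bits = 7;	/* 128 sectors = 64 KiB blocks */

	/* block size = 1 << (bits + SECTOR_SHIFT) = 64 KiB here */
	unsigned long retain_buffers =
		retain_bytes >> (sectors_per_block_bits + SECTOR_SHIFT);

	printf("retain %lu buffers of %lu bytes\n", retain_buffers,
	       1UL << (sectors_per_block_bits + SECTOR_SHIFT));
	return 0;
}

This prints 4096 buffers of 65536 bytes, i.e. the same result as the old byte division but without truncating large retain_bytes values to unsigned int.
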
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6955778..6937ca4 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1383,17 +1383,19 @@
 
 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
 {
-	int r;
+	int r = -EINVAL;
 	flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
 				 clear_clean_shutdown);
 
 	WRITE_LOCK(cmd);
+	if (cmd->fail_io)
+		goto out;
+
 	r = __commit_transaction(cmd, mutator);
 	if (r)
 		goto out;
 
 	r = __begin_transaction(cmd);
-
 out:
 	WRITE_UNLOCK(cmd);
 	return r;
@@ -1405,7 +1407,8 @@
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_free(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
@@ -1417,7 +1420,8 @@
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e477af8..ac8235b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -119,7 +119,8 @@
 
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void trigger_event(struct work_struct *work);
-static void activate_path(struct work_struct *work);
+static void activate_or_offline_path(struct pgpath *pgpath);
+static void activate_path_work(struct work_struct *work);
 static void process_queued_bios(struct work_struct *work);
 
 /*-----------------------------------------------
@@ -144,7 +145,7 @@
 
 	if (pgpath) {
 		pgpath->is_active = true;
-		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
+		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
 	}
 
 	return pgpath;
@@ -1515,10 +1516,8 @@
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
-static void activate_path(struct work_struct *work)
+static void activate_or_offline_path(struct pgpath *pgpath)
 {
-	struct pgpath *pgpath =
-		container_of(work, struct pgpath, activate_path.work);
 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
 
 	if (pgpath->is_active && !blk_queue_dying(q))
@@ -1527,6 +1526,14 @@
 		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
 }
 
+static void activate_path_work(struct work_struct *work)
+{
+	struct pgpath *pgpath =
+		container_of(work, struct pgpath, activate_path.work);
+
+	activate_or_offline_path(pgpath);
+}
+
 static int noretry_error(int error)
 {
 	switch (error) {
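The dm-mpath change above is the common refactoring of a work handler into a thin workqueue entry point plus a directly callable core, with the container recovered via container_of(). A self-contained user-space sketch of that shape (the struct and function names are illustrative, not the dm-mpath ones):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct path {
	int is_active;
	struct work activate_work;	/* embedded, like activate_path.work */
};

/* Core logic, callable directly (the activate_or_offline_path analogue). */
static void activate_or_offline(struct path *p)
{
	printf("path %p: %s\n", (void *)p,
	       p->is_active ? "activating" : "marking offline");
}

/* Thin workqueue entry point: recover the container and delegate. */
static void activate_work_fn(struct work *w)
{
	struct path *p = container_of(w, struct path, activate_work);

	activate_or_offline(p);
}

int main(void)
{
	struct path p = { .is_active = 1 };

	activate_work_fn(&p.activate_work);	/* as a workqueue would call it */
	return 0;
}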
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index a15091a..4477bf9 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -485,11 +485,11 @@
 	if (r < 0)
 		return r;
 
-	r = save_sm_roots(pmd);
+	r = dm_tm_pre_commit(pmd->tm);
 	if (r < 0)
 		return r;
 
-	r = dm_tm_pre_commit(pmd->tm);
+	r = save_sm_roots(pmd);
 	if (r < 0)
 		return r;
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 24925f2..eddd360 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6752,6 +6752,7 @@
 	void __user *argp = (void __user *)arg;
 	struct mddev *mddev = NULL;
 	int ro;
+	bool did_set_md_closing = false;
 
 	if (!md_ioctl_valid(cmd))
 		return -ENOTTY;
@@ -6841,7 +6842,9 @@
 			err = -EBUSY;
 			goto out;
 		}
+		WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
 		set_bit(MD_CLOSING, &mddev->flags);
+		did_set_md_closing = true;
 		mutex_unlock(&mddev->open_mutex);
 		sync_blockdev(bdev);
 	}
@@ -7041,6 +7044,8 @@
 		mddev->hold_active = 0;
 	mddev_unlock(mddev);
 out:
+	if (did_set_md_closing)
+		clear_bit(MD_CLOSING, &mddev->flags);
 	return err;
 }
 #ifdef CONFIG_COMPAT
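The did_set_md_closing flag above follows the usual "record what this call changed so the shared exit path can undo exactly that" pattern. A small illustrative sketch of the shape (hypothetical names, not the md ioctl itself):

#include <stdio.h>
#include <stdbool.h>

static unsigned long flags;
#define CLOSING_BIT 0x1UL

static int do_ioctl(int want_close, int fail)
{
	int err = 0;
	bool did_set_closing = false;

	if (want_close) {
		flags |= CLOSING_BIT;		/* remember that we set it */
		did_set_closing = true;
	}

	if (fail) {				/* any later failure... */
		err = -1;
		goto out;
	}
	/* ... normal work ... */
out:
	if (did_set_closing)			/* only undo what this call set */
		flags &= ~CLOSING_BIT;
	return err;
}

int main(void)
{
	do_ioctl(1, 1);
	printf("flags after failed ioctl: %#lx\n", flags);
	return 0;
}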
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 20a4032..7a75b50 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -897,8 +897,12 @@
 		else
 			*result_key = le64_to_cpu(ro_node(s)->keys[0]);
 
-		if (next_block || flags & INTERNAL_NODE)
-			block = value64(ro_node(s), i);
+		if (next_block || flags & INTERNAL_NODE) {
+			if (find_highest)
+				block = value64(ro_node(s), i);
+			else
+				block = value64(ro_node(s), 0);
+		}
 
 	} while (flags & INTERNAL_NODE);
 
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index ebb280a..32adf6b 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -142,10 +142,23 @@
 
 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
+	int r;
+	uint32_t old_count;
 	enum allocation_event ev;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-	return sm_ll_dec(&smd->ll, b, &ev);
+	r = sm_ll_dec(&smd->ll, b, &ev);
+	if (!r && (ev == SM_FREE)) {
+		/*
+		 * It's only free if it's also free in the last
+		 * transaction.
+		 */
+		r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+		if (!r && !old_count)
+			smd->nr_allocated_this_transaction--;
+	}
+
+	return r;
 }
 
 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cce6057b..f34ad2b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2253,6 +2253,10 @@
 		err = -ENOMEM;
 
 	mutex_unlock(&conf->cache_size_mutex);
+
+	conf->slab_cache = sc;
+	conf->active_name = 1-conf->active_name;
+
 	/* Step 4, return new stripes to service */
 	while(!list_empty(&newstripes)) {
 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2270,8 +2274,6 @@
 	}
 	/* critical section pass, GFP_NOIO no longer needed */
 
-	conf->slab_cache = sc;
-	conf->active_name = 1-conf->active_name;
 	if (!err)
 		conf->pool_size = newsize;
 	return err;
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 5afb9c5..fd0f25e 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3852,7 +3852,9 @@
 			FE_CAN_MUTE_TS |
 			FE_CAN_2G_MODULATION,
 		.frequency_min = 42000000,
-		.frequency_max = 1002000000
+		.frequency_max = 1002000000,
+		.symbol_rate_min = 870000,
+		.symbol_rate_max = 11700000
 	},
 	.init = cxd2841er_init_tc,
 	.sleep = cxd2841er_sleep_tc,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 49085d7..4b2db07 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -3017,7 +3017,7 @@
 
 	/* Create Worker for ife_hw_mgr with 10 tasks */
 	rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
-			&g_ife_hw_mgr.workq);
+			&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ);
 
 	if (rc < 0) {
 		pr_err("%s: Unable to create worker\n", __func__);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index b97593b..ed251eb 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1813,7 +1813,8 @@
 	/* Create worker for current link */
 	snprintf(buf, sizeof(buf), "%x-%x",
 		link_info->session_hdl, link->link_hdl);
-	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS, &link->workq);
+	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
+		&link->workq, CRM_WORKQ_USAGE_NON_IRQ);
 	if (rc < 0) {
 		CRM_ERR("FATAL: unable to create worker");
 		__cam_req_mgr_destroy_link_info(link);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index b026b7c..38dcb42 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -12,16 +12,30 @@
 
 #include "cam_req_mgr_workq.h"
 
+#define WORKQ_ACQUIRE_LOCK(workq, flags) {\
+	if ((workq)->in_irq) \
+		spin_lock_irqsave(&(workq)->lock_bh, (flags)); \
+	else \
+		spin_lock_bh(&(workq)->lock_bh); \
+}
+
+#define WORKQ_RELEASE_LOCK(workq, flags) {\
+	if ((workq)->in_irq) \
+		spin_unlock_irqrestore(&(workq)->lock_bh, (flags)); \
+	else	\
+		spin_unlock_bh(&(workq)->lock_bh); \
+}
 
 struct crm_workq_task *cam_req_mgr_workq_get_task(
 	struct cam_req_mgr_core_workq *workq)
 {
 	struct crm_workq_task *task = NULL;
+	unsigned long flags = 0;
 
 	if (!workq)
 		return NULL;
 
-	spin_lock_bh(&workq->lock_bh);
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	if (list_empty(&workq->task.empty_head))
 		goto end;
 
@@ -33,7 +47,8 @@
 	}
 
 end:
-	spin_unlock_bh(&workq->lock_bh);
+	WORKQ_RELEASE_LOCK(workq, flags);
+
 	return task;
 }
 
@@ -41,8 +56,9 @@
 {
 	struct cam_req_mgr_core_workq *workq =
 		(struct cam_req_mgr_core_workq *)task->parent;
+	unsigned long flags = 0;
 
-	spin_lock_bh(&workq->lock_bh);
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_del_init(&task->entry);
 	task->cancel = 0;
 	task->process_cb = NULL;
@@ -50,7 +66,7 @@
 	list_add_tail(&task->entry,
 		&workq->task.empty_head);
 	atomic_add(1, &workq->task.free_cnt);
-	spin_unlock_bh(&workq->lock_bh);
+	WORKQ_RELEASE_LOCK(workq, flags);
 }
 
 /**
@@ -131,6 +147,7 @@
 {
 	int rc = 0;
 	struct cam_req_mgr_core_workq *workq = NULL;
+	unsigned long flags = 0;
 
 	if (!task) {
 		CRM_WARN("NULL task pointer can not schedule");
@@ -159,10 +176,10 @@
 		(prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
 		? prio : CRM_TASK_PRIORITY_0;
 
-	spin_lock_bh(&workq->lock_bh);
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_add_tail(&task->entry,
 		&workq->task.process_head[task->priority]);
-	spin_unlock_bh(&workq->lock_bh);
+	WORKQ_RELEASE_LOCK(workq, flags);
 
 	atomic_add(1, &workq->task.pending_cnt);
 	CRM_DBG("enq task %pK pending_cnt %d",
@@ -175,7 +192,7 @@
 }
 
 int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
-	struct cam_req_mgr_core_workq **workq)
+	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq)
 {
 	int32_t i;
 	struct crm_workq_task  *task;
@@ -210,6 +227,7 @@
 		for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
 			INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
 		INIT_LIST_HEAD(&crm_workq->task.empty_head);
+		crm_workq->in_irq = in_irq;
 		crm_workq->task.num_task = num_tasks;
 		crm_workq->task.pool = (struct crm_workq_task *)
 			kzalloc(sizeof(struct crm_workq_task) *
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
index 7d8ca59..eb3b804 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
@@ -25,9 +25,16 @@
 
 /* Task priorities, lower the number higher the priority*/
 enum crm_task_priority {
-	CRM_TASK_PRIORITY_0 = 0,
-	CRM_TASK_PRIORITY_1 = 1,
-	CRM_TASK_PRIORITY_MAX = 2,
+	CRM_TASK_PRIORITY_0,
+	CRM_TASK_PRIORITY_1,
+	CRM_TASK_PRIORITY_MAX,
+};
+
+/* workqueue will be used from irq context or not */
+enum crm_workq_context {
+	CRM_WORKQ_USAGE_NON_IRQ,
+	CRM_WORKQ_USAGE_IRQ,
+	CRM_WORKQ_USAGE_INVALID,
 };
 
 /** struct crm_workq_task
@@ -58,8 +65,9 @@
  * @work       : work token used by workqueue
  * @job        : workqueue internal job struct
  * task -
- * @lock       : lock for task structs
- * @free_cnt   :  num of free/available tasks
+ * @lock_bh    : lock for task structs
+ * @in_irq     : set true if workqueue can be used in irq context
+ * @free_cnt   : num of free/available tasks
  * @empty_head : list head of available tasks which can be used
  *               or acquired in order to enqueue a task to workq
  * @pool       : pool of tasks used for handling events in workq context
@@ -70,6 +78,7 @@
 	struct work_struct         work;
 	struct workqueue_struct   *job;
 	spinlock_t                 lock_bh;
+	uint32_t                   in_irq;
 
 	/* tasks */
 	struct {
@@ -91,11 +100,12 @@
  *             of session handle and link handle
  * @num_task : Num_tasks to be allocated for workq
  * @workq    : Double pointer worker
+ * @in_irq   : Pass CRM_WORKQ_USAGE_IRQ if the workq might be used in irq context
  * This function will allocate and create workqueue and pass
  * the workq pointer to caller.
  */
 int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
-	struct cam_req_mgr_core_workq **workq);
+	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq);
 
 /**
  * cam_req_mgr_workq_destroy()
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 2fa39c8..140542b 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -1904,14 +1904,14 @@
 	}
 
 	rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
-					&icp_hw_mgr.cmd_work);
+		&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ);
 	if (rc < 0) {
 		pr_err("unable to create a worker\n");
 		goto cmd_work_failed;
 	}
 
 	rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
-					&icp_hw_mgr.msg_work);
+		&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ);
 	if (rc < 0) {
 		pr_err("unable to create a worker\n");
 		goto msg_work_failed;
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index e7e9278..88250e1 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1523,14 +1523,6 @@
 			sizeof(struct hfi_vui_timing_info);
 		break;
 	}
-	case HAL_CONFIG_VPE_DEINTERLACE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_CONFIG_VPE_DEINTERLACE,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
-		break;
-	}
 	case HAL_PARAM_VENC_GENERATE_AUDNAL:
 	{
 		create_pkt_enable(pkt->rg_property_data,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 499d851..6253632 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -171,6 +171,12 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES:
 		msm_vidc_ctrl_get_range(ctrl, &inst->capability.bframe);
 		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.slice_mbs);
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.slice_bytes);
+		break;
 	default:
 		rc = -EINVAL;
 	}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index a8f776f..22491f6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -186,7 +186,7 @@
 	__debugfs_create(bool, "clock_scaling",
 			&msm_vidc_clock_scaling) &&
 	__debugfs_create(bool, "debug_timeout",
-			&msm_vidc_debug_timeout);
+			&msm_vidc_debug_timeout) &&
 	__debugfs_create(bool, "disable_video_syscache",
 			&msm_vidc_syscache_disable);
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index cc35bb3..a2f076b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -157,7 +157,6 @@
 	HAL_CONFIG_VPE_OPERATIONS,
 	HAL_PARAM_VENC_INTRA_REFRESH,
 	HAL_PARAM_VENC_MULTI_SLICE_CONTROL,
-	HAL_CONFIG_VPE_DEINTERLACE,
 	HAL_SYS_DEBUG_CONFIG,
 	HAL_CONFIG_BUFFER_REQUIREMENTS,
 	HAL_CONFIG_PRIORITY,
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 0df4812..2d4a573 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -349,8 +349,6 @@
 	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
 #define  HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE		\
 	(HFI_PROPERTY_CONFIG_COMMON_START + 0x010)
-#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE				\
-	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x001)
 #define HFI_PROPERTY_CONFIG_VPE_OPERATIONS				\
 	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
 
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 27e7cf6..7c24da5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -206,6 +206,7 @@
 		}
 		s5p_mfc_clock_on();
 		ret = s5p_mfc_init_hw(dev);
+		s5p_mfc_clock_off();
 		if (ret)
 			mfc_err("Failed to reinit FW\n");
 	}
@@ -663,9 +664,9 @@
 				break;
 			}
 			s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
-			wake_up_ctx(ctx, reason, err);
 			WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
 			s5p_mfc_clock_off();
+			wake_up_ctx(ctx, reason, err);
 			s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
 		} else {
 			s5p_mfc_handle_frame(ctx, reason, err);
@@ -679,15 +680,11 @@
 	case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
 		ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
 		ctx->state = MFCINST_GOT_INST;
-		clear_work_bit(ctx);
-		wake_up(&ctx->queue);
 		goto irq_cleanup_hw;
 
 	case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
-		clear_work_bit(ctx);
 		ctx->inst_no = MFC_NO_INSTANCE_SET;
 		ctx->state = MFCINST_FREE;
-		wake_up(&ctx->queue);
 		goto irq_cleanup_hw;
 
 	case S5P_MFC_R2H_CMD_SYS_INIT_RET:
@@ -697,9 +694,9 @@
 		if (ctx)
 			clear_work_bit(ctx);
 		s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
-		wake_up_dev(dev, reason, err);
 		clear_bit(0, &dev->hw_lock);
 		clear_bit(0, &dev->enter_suspend);
+		wake_up_dev(dev, reason, err);
 		break;
 
 	case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
@@ -714,9 +711,7 @@
 		break;
 
 	case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
-		clear_work_bit(ctx);
 		ctx->state = MFCINST_RUNNING;
-		wake_up(&ctx->queue);
 		goto irq_cleanup_hw;
 
 	default:
@@ -735,6 +730,8 @@
 		mfc_err("Failed to unlock hw\n");
 
 	s5p_mfc_clock_off();
+	clear_work_bit(ctx);
+	wake_up(&ctx->queue);
 
 	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
 	spin_unlock(&dev->irqlock);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 4f8c7ef..db525cd 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1332,8 +1332,8 @@
 			}
 		}
 	}
-	if (ep_in == NULL) {
-		dev_dbg(&intf->dev, "inbound and/or endpoint not found");
+	if (!ep_in || !ep_out) {
+		dev_dbg(&intf->dev, "required endpoints not found\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 8263c4b..bf4b3ca 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -674,10 +674,8 @@
 
 	spin_lock_init(&adev->slock);
 	err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm);
-	if (err < 0) {
-		snd_card_free(card);
-		return err;
-	}
+	if (err < 0)
+		goto err_free_card;
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
 			&snd_cx231xx_pcm_capture);
@@ -691,10 +689,9 @@
 	INIT_WORK(&dev->wq_trigger, audio_trigger);
 
 	err = snd_card_register(card);
-	if (err < 0) {
-		snd_card_free(card);
-		return err;
-	}
+	if (err < 0)
+		goto err_free_card;
+
 	adev->sndcard = card;
 	adev->udev = dev->udev;
 
@@ -704,6 +701,11 @@
 					    hs_config_info[0].interface_info.
 					    audio_index + 1];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+		err = -ENODEV;
+		goto err_free_card;
+	}
+
 	adev->end_point_addr =
 	    uif->altsetting[0].endpoint[isoc_pipe].desc.
 			bEndpointAddress;
@@ -713,13 +715,20 @@
 		"audio EndPoint Addr 0x%x, Alternate settings: %i\n",
 		adev->end_point_addr, adev->num_alt);
 	adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
-
-	if (adev->alt_max_pkt_size == NULL)
-		return -ENOMEM;
+	if (!adev->alt_max_pkt_size) {
+		err = -ENOMEM;
+		goto err_free_card;
+	}
 
 	for (i = 0; i < adev->num_alt; i++) {
-		u16 tmp =
-		    le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+			err = -ENODEV;
+			goto err_free_pkt_size;
+		}
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
 				wMaxPacketSize);
 		adev->alt_max_pkt_size[i] =
 		    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -729,6 +738,13 @@
 	}
 
 	return 0;
+
+err_free_pkt_size:
+	kfree(adev->alt_max_pkt_size);
+err_free_card:
+	snd_card_free(card);
+
+	return err;
 }
 
 static int cx231xx_audio_fini(struct cx231xx *dev)
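The bNumEndpoints checks added to cx231xx (and to several other USB drivers below) all guard the same thing: an interface altsetting advertises how many endpoint descriptors it carries, and indexing endpoint[isoc_pipe] without checking that count lets a malformed or malicious device trigger an out-of-bounds read. A toy user-space sketch of the guard (the struct below is a stand-in, not the real usb_host_interface):

#include <stdio.h>

/* Toy stand-in for a USB altsetting descriptor */
struct toy_altsetting {
	int bNumEndpoints;	/* number of endpoint descriptors present */
	int endpoint[16];	/* stand-in for the endpoint descriptor array */
};

/* Fetch endpoint 'idx', refusing (like the -ENODEV returns above) when
 * the altsetting does not actually provide that many endpoints. */
static int get_endpoint(const struct toy_altsetting *alt, int idx, int *out)
{
	if (alt->bNumEndpoints < idx + 1)
		return -1;
	*out = alt->endpoint[idx];
	return 0;
}

int main(void)
{
	struct toy_altsetting alt = { .bNumEndpoints = 1, .endpoint = { 0x81 } };
	int ep;

	if (get_endpoint(&alt, 1, &ep))		/* asks for a second endpoint */
		printf("rejected: altsetting has too few endpoints\n");
	return 0;
}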
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 36bc254..be9e333 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1397,6 +1397,9 @@
 
 	uif = udev->actconfig->interface[idx];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+		return -ENODEV;
+
 	dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;
 	dev->video_mode.num_alt = uif->num_altsetting;
 
@@ -1410,7 +1413,12 @@
 		return -ENOMEM;
 
 	for (i = 0; i < dev->video_mode.num_alt; i++) {
-		u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+			return -ENODEV;
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
 		dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
 		dev_dbg(dev->dev,
 			"Alternate setting %i, max size= %i\n", i,
@@ -1427,6 +1435,9 @@
 	}
 	uif = udev->actconfig->interface[idx];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+		return -ENODEV;
+
 	dev->vbi_mode.end_point_addr =
 	    uif->altsetting[0].endpoint[isoc_pipe].desc.
 			bEndpointAddress;
@@ -1443,8 +1454,12 @@
 		return -ENOMEM;
 
 	for (i = 0; i < dev->vbi_mode.num_alt; i++) {
-		u16 tmp =
-		    le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+			return -ENODEV;
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
 				desc.wMaxPacketSize);
 		dev->vbi_mode.alt_max_pkt_size[i] =
 		    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1464,6 +1479,9 @@
 	}
 	uif = udev->actconfig->interface[idx];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+		return -ENODEV;
+
 	dev->sliced_cc_mode.end_point_addr =
 	    uif->altsetting[0].endpoint[isoc_pipe].desc.
 			bEndpointAddress;
@@ -1478,7 +1496,12 @@
 		return -ENOMEM;
 
 	for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
-		u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+			return -ENODEV;
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
 				desc.wMaxPacketSize);
 		dev->sliced_cc_mode.alt_max_pkt_size[i] =
 		    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1647,6 +1670,11 @@
 		}
 		uif = udev->actconfig->interface[idx];
 
+		if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+			retval = -ENODEV;
+			goto err_video_alt;
+		}
+
 		dev->ts1_mode.end_point_addr =
 		    uif->altsetting[0].endpoint[isoc_pipe].
 				desc.bEndpointAddress;
@@ -1664,7 +1692,14 @@
 		}
 
 		for (i = 0; i < dev->ts1_mode.num_alt; i++) {
-			u16 tmp = le16_to_cpu(uif->altsetting[i].
+			u16 tmp;
+
+			if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+				retval = -ENODEV;
+				goto err_video_alt;
+			}
+
+			tmp = le16_to_cpu(uif->altsetting[i].
 						endpoint[isoc_pipe].desc.
 						wMaxPacketSize);
 			dev->ts1_mode.alt_max_pkt_size[i] =
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 47ce9d5..563f690 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -812,6 +812,9 @@
 
 	/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
 
+	if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
+		return -ENODEV;
+
 	purb = usb_alloc_urb(0, GFP_KERNEL);
 	if (purb == NULL)
 		return -ENOMEM;
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
index d66f56c..1f7bce6 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -12,6 +12,8 @@
 #include <linux/kconfig.h>
 #include "dibusb.h"
 
+MODULE_LICENSE("GPL");
+
 /* 3000MC/P stuff */
 // Config Adjacent channels  Perf -cal22
 static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 4284f69..475a3c0 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -33,6 +33,9 @@
 
 	wo = (rbuf == NULL || rlen == 0); /* write-only */
 
+	if (wlen > 4 || rlen > 4)
+		return -EIO;
+
 	memset(st->sndbuf, 0, 7);
 	memset(st->rcvbuf, 0, 7);
 
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index c3e6734..4a0cc54 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -205,6 +205,20 @@
 
 	switch (num) {
 	case 2:
+		if (msg[0].len != 1) {
+			warn("i2c rd: len=%d is not 1!\n",
+			     msg[0].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+
+		if (2 + msg[1].len > sizeof(buf6)) {
+			warn("i2c rd: len=%d is too big!\n",
+			     msg[1].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+
 		/* read si2109 register by number */
 		buf6[0] = msg[0].addr << 1;
 		buf6[1] = msg[0].len;
@@ -220,6 +234,13 @@
 	case 1:
 		switch (msg[0].addr) {
 		case 0x68:
+			if (2 + msg[0].len > sizeof(buf6)) {
+				warn("i2c wr: len=%d is too big!\n",
+				     msg[0].len);
+				num = -EOPNOTSUPP;
+				break;
+			}
+
 			/* write to si2109 register */
 			buf6[0] = msg[0].addr << 1;
 			buf6[1] = msg[0].len;
@@ -263,6 +284,13 @@
 		/* first write first register number */
 		u8 ibuf[MAX_XFER_SIZE], obuf[3];
 
+		if (2 + msg[0].len != sizeof(obuf)) {
+			warn("i2c rd: len=%d is not 1!\n",
+			     msg[0].len);
+			ret = -EOPNOTSUPP;
+			goto unlock;
+		}
+
 		if (2 + msg[1].len > sizeof(ibuf)) {
 			warn("i2c rd: len=%d is too big!\n",
 			     msg[1].len);
@@ -463,6 +491,12 @@
 		/* first write first register number */
 		u8 ibuf[MAX_XFER_SIZE], obuf[3];
 
+		if (2 + msg[0].len != sizeof(obuf)) {
+			warn("i2c rd: len=%d is not 1!\n",
+			     msg[0].len);
+			ret = -EOPNOTSUPP;
+			goto unlock;
+		}
 		if (2 + msg[1].len > sizeof(ibuf)) {
 			warn("i2c rd: len=%d is too big!\n",
 			     msg[1].len);
@@ -697,6 +731,13 @@
 			msg[0].buf[0] = state->data[1];
 			break;
 		default:
+			if (3 + msg[0].len > sizeof(state->data)) {
+				warn("i2c wr: len=%d is too big!\n",
+				     msg[0].len);
+				num = -EOPNOTSUPP;
+				break;
+			}
+
 			/* always i2c write*/
 			state->data[0] = 0x08;
 			state->data[1] = msg[0].addr;
@@ -712,6 +753,19 @@
 		break;
 	case 2:
 		/* always i2c read */
+		if (4 + msg[0].len > sizeof(state->data)) {
+			warn("i2c rd: len=%d is too big!\n",
+			     msg[0].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+		if (1 + msg[1].len > sizeof(state->data)) {
+			warn("i2c rd: len=%d is too big!\n",
+			     msg[1].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+
 		state->data[0] = 0x09;
 		state->data[1] = msg[0].len;
 		state->data[2] = msg[1].len;
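The dw2102 hunks above all add the same defence: before an i2c message is packed into a fixed-size staging buffer, its length plus the header bytes in front of it is checked against sizeof() of that buffer, so a userspace-supplied I2C_RDWR request cannot overflow it. A minimal sketch of the check-then-copy shape (buffer and header sizes here are illustrative):

#include <stdio.h>
#include <string.h>

#define HDR_LEN 2			/* header bytes written before the payload */

/* Pack a header plus payload into buf; refuse instead of overflowing. */
static int pack_msg(unsigned char *buf, size_t buf_size,
		    const unsigned char *payload, size_t len)
{
	if (HDR_LEN + len > buf_size)
		return -1;		/* the driver returns -EOPNOTSUPP here */

	buf[0] = 0xd0;			/* illustrative header bytes */
	buf[1] = (unsigned char)len;
	memcpy(buf + HDR_LEN, payload, len);
	return 0;
}

int main(void)
{
	unsigned char buf[6];
	unsigned char payload[16] = { 0 };

	if (pack_msg(buf, sizeof(buf), payload, sizeof(payload)))
		printf("rejected: payload too big for staging buffer\n");
	return 0;
}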
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index ecc207f..9e0d6a4 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -78,6 +78,9 @@
 	u8 *s, *r = NULL;
 	int ret = 0;
 
+	if (4 + rlen > 64)
+		return -EIO;
+
 	s = kzalloc(wlen+4, GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
@@ -381,6 +384,22 @@
 		write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
 		read = msg[i].flags & I2C_M_RD;
 
+		if (3 + msg[i].len > sizeof(obuf)) {
+			err("i2c wr len=%d too high", msg[i].len);
+			break;
+		}
+		if (write_read) {
+			if (3 + msg[i+1].len > sizeof(ibuf)) {
+				err("i2c rd len=%d too high", msg[i+1].len);
+				break;
+			}
+		} else if (read) {
+			if (3 + msg[i].len > sizeof(ibuf)) {
+				err("i2c rd len=%d too high", msg[i].len);
+				break;
+			}
+		}
+
 		obuf[0] = (msg[i].addr << 1) | (write_read | read);
 		if (read)
 			obuf[1] = 0;
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 40aaaa9..78542ff 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -188,6 +188,9 @@
 		return -EIO;
 	}
 
+	if (alt->desc.bNumEndpoints < 2)
+		return -ENODEV;
+
 	packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
 
 	n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index c8b4eb2..bfdf723 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1506,7 +1506,14 @@
 	}
 
 	for (i = 0; i < usbvision->num_alt; i++) {
-		u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < 2) {
+			ret = -ENODEV;
+			goto err_pkt;
+		}
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
 				      wMaxPacketSize);
 		usbvision->alt_max_pkt_size[i] =
 			(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index cc128db..e3735bf 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -604,6 +604,14 @@
 	ptr = pdest = frm->lpvbits;
 
 	if (frm->ulState == ZR364XX_READ_IDLE) {
+		if (purb->actual_length < 128) {
+			/* header incomplete */
+			dev_info(&cam->udev->dev,
+				 "%s: buffer (%d bytes) too small to hold jpeg header. Discarding.\n",
+				 __func__, purb->actual_length);
+			return -EINVAL;
+		}
+
 		frm->ulState = ZR364XX_READ_FRAME;
 		frm->cur_size = 0;
 
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index dd99b06..fa4fe02 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1496,8 +1496,6 @@
 	if ((rc = cxl_native_register_psl_err_irq(adapter)))
 		goto err;
 
-	/* Release the context lock as adapter is configured */
-	cxl_adapter_context_unlock(adapter);
 	return 0;
 
 err:
@@ -1596,6 +1594,9 @@
 	if ((rc = cxl_sysfs_adapter_add(adapter)))
 		goto err_put1;
 
+	/* Release the context lock as adapter is configured */
+	cxl_adapter_context_unlock(adapter);
+
 	return adapter;
 
 err_put1:
@@ -1778,7 +1779,7 @@
 {
 	struct cxl *adapter = pci_get_drvdata(pdev);
 	struct cxl_afu *afu;
-	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
 	int i;
 
 	/* At this point, we could still have an interrupt pending.
@@ -1883,16 +1884,26 @@
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
-		result = cxl_vphb_error_detected(afu, state);
-
-		/* Only continue if everyone agrees on NEED_RESET */
-		if (result != PCI_ERS_RESULT_NEED_RESET)
-			return result;
+		afu_result = cxl_vphb_error_detected(afu, state);
 
 		cxl_context_detach_all(afu);
 		cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
 		pci_deconfigure_afu(afu);
+
+		/* Disconnect trumps all, NONE trumps NEED_RESET */
+		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+			result = PCI_ERS_RESULT_DISCONNECT;
+		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
+			 (result == PCI_ERS_RESULT_NEED_RESET))
+			result = PCI_ERS_RESULT_NONE;
 	}
+
+	/* Take the context lock so no new contexts attach while we deconfigure */
+	if (cxl_adapter_context_lock(adapter) != 0)
+		dev_warn(&adapter->dev,
+			 "Couldn't take context lock with %d active-contexts\n",
+			 atomic_read(&adapter->contexts_num));
+
 	cxl_deconfigure_adapter(adapter);
 
 	return result;
@@ -1911,6 +1922,13 @@
 	if (cxl_configure_adapter(adapter, pdev))
 		goto err;
 
+	/*
+	 * Unlock context activation for the adapter. Ideally this should be
+	 * done in cxl_pci_resume but cxlflash module tries to activate the
+	 * master context as part of slot_reset callback.
+	 */
+	cxl_adapter_context_unlock(adapter);
+
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
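The cxl change above replaces an early return with a fold over all AFU results, where PCI_ERS_RESULT_DISCONNECT dominates everything and PCI_ERS_RESULT_NONE dominates NEED_RESET. A small sketch of that precedence fold (the enum values are illustrative stand-ins for the pci_ers_result_t constants):

#include <stdio.h>

enum ers_result { ERS_NONE, ERS_NEED_RESET, ERS_DISCONNECT };

/* Combine one slice's verdict into the running result:
 * DISCONNECT trumps all, NONE trumps NEED_RESET. */
static enum ers_result combine(enum ers_result result, enum ers_result slice)
{
	if (slice == ERS_DISCONNECT)
		return ERS_DISCONNECT;
	if (slice == ERS_NONE && result == ERS_NEED_RESET)
		return ERS_NONE;
	return result;
}

int main(void)
{
	enum ers_result slices[] = { ERS_NEED_RESET, ERS_NONE, ERS_NEED_RESET };
	enum ers_result result = ERS_NEED_RESET;
	unsigned int i;

	for (i = 0; i < sizeof(slices) / sizeof(slices[0]); i++)
		result = combine(result, slices[i]);

	printf("aggregate result: %d\n", result);	/* 0, i.e. ERS_NONE */
	return 0;
}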
 
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 127a052..871040e 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -50,7 +50,8 @@
 
 #define UID_STATE_TOTAL_CURR	2
 #define UID_STATE_TOTAL_LAST	3
-#define UID_STATE_SIZE		4
+#define UID_STATE_DEAD_TASKS	4
+#define UID_STATE_SIZE		5
 
 struct uid_entry {
 	uid_t uid;
@@ -215,35 +216,44 @@
 	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
 }
 
-static void add_uid_io_curr_stats(struct uid_entry *uid_entry,
-			struct task_struct *task)
+static void add_uid_io_stats(struct uid_entry *uid_entry,
+			struct task_struct *task, int slot)
 {
-	struct io_stats *io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+	struct io_stats *io_slot = &uid_entry->io[slot];
 
-	io_curr->read_bytes += task->ioac.read_bytes;
-	io_curr->write_bytes += compute_write_bytes(task);
-	io_curr->rchar += task->ioac.rchar;
-	io_curr->wchar += task->ioac.wchar;
-	io_curr->fsync += task->ioac.syscfs;
+	io_slot->read_bytes += task->ioac.read_bytes;
+	io_slot->write_bytes += compute_write_bytes(task);
+	io_slot->rchar += task->ioac.rchar;
+	io_slot->wchar += task->ioac.wchar;
+	io_slot->fsync += task->ioac.syscfs;
 }
 
-static void clean_uid_io_last_stats(struct uid_entry *uid_entry,
-			struct task_struct *task)
+static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
+					struct io_stats *io_curr,
+					struct io_stats *io_last,
+					struct io_stats *io_dead)
 {
-	struct io_stats *io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+	io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes -
+		io_last->read_bytes;
+	io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes -
+		io_last->write_bytes;
+	io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar;
+	io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar;
+	io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync;
 
-	io_last->read_bytes -= task->ioac.read_bytes;
-	io_last->write_bytes -= compute_write_bytes(task);
-	io_last->rchar -= task->ioac.rchar;
-	io_last->wchar -= task->ioac.wchar;
-	io_last->fsync -= task->ioac.syscfs;
+	io_last->read_bytes = io_curr->read_bytes;
+	io_last->write_bytes = io_curr->write_bytes;
+	io_last->rchar = io_curr->rchar;
+	io_last->wchar = io_curr->wchar;
+	io_last->fsync = io_curr->fsync;
+
+	memset(io_dead, 0, sizeof(struct io_stats));
 }
 
 static void update_io_stats_all_locked(void)
 {
 	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
-	struct io_stats *io_bucket, *io_curr, *io_last;
 	struct user_namespace *user_ns = current_user_ns();
 	unsigned long bkt;
 	uid_t uid;
@@ -258,70 +268,38 @@
 		uid_entry = find_or_register_uid(uid);
 		if (!uid_entry)
 			continue;
-		add_uid_io_curr_stats(uid_entry, task);
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
 	} while_each_thread(temp, task);
 	rcu_read_unlock();
 
 	hash_for_each(hash_table, bkt, uid_entry, hash) {
-		io_bucket = &uid_entry->io[uid_entry->state];
-		io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
-		io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
-		io_bucket->read_bytes +=
-			io_curr->read_bytes - io_last->read_bytes;
-		io_bucket->write_bytes +=
-			io_curr->write_bytes - io_last->write_bytes;
-		io_bucket->rchar += io_curr->rchar - io_last->rchar;
-		io_bucket->wchar += io_curr->wchar - io_last->wchar;
-		io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
-		io_last->read_bytes = io_curr->read_bytes;
-		io_last->write_bytes = io_curr->write_bytes;
-		io_last->rchar = io_curr->rchar;
-		io_last->wchar = io_curr->wchar;
-		io_last->fsync = io_curr->fsync;
+		compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+					&uid_entry->io[UID_STATE_TOTAL_CURR],
+					&uid_entry->io[UID_STATE_TOTAL_LAST],
+					&uid_entry->io[UID_STATE_DEAD_TASKS]);
 	}
 }
 
-static void update_io_stats_uid_locked(uid_t target_uid)
+static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
 {
-	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
-	struct io_stats *io_bucket, *io_curr, *io_last;
 	struct user_namespace *user_ns = current_user_ns();
 
-	uid_entry = find_or_register_uid(target_uid);
-	if (!uid_entry)
-		return;
-
 	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
 		sizeof(struct io_stats));
 
 	rcu_read_lock();
 	do_each_thread(temp, task) {
-		if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
+		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
 			continue;
-		add_uid_io_curr_stats(uid_entry, task);
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
 	} while_each_thread(temp, task);
 	rcu_read_unlock();
 
-	io_bucket = &uid_entry->io[uid_entry->state];
-	io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
-	io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
-	io_bucket->read_bytes +=
-		io_curr->read_bytes - io_last->read_bytes;
-	io_bucket->write_bytes +=
-		io_curr->write_bytes - io_last->write_bytes;
-	io_bucket->rchar += io_curr->rchar - io_last->rchar;
-	io_bucket->wchar += io_curr->wchar - io_last->wchar;
-	io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
-	io_last->read_bytes = io_curr->read_bytes;
-	io_last->write_bytes = io_curr->write_bytes;
-	io_last->rchar = io_curr->rchar;
-	io_last->wchar = io_curr->wchar;
-	io_last->fsync = io_curr->fsync;
+	compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+				&uid_entry->io[UID_STATE_TOTAL_CURR],
+				&uid_entry->io[UID_STATE_TOTAL_LAST],
+				&uid_entry->io[UID_STATE_DEAD_TASKS]);
 }
 
 static int uid_io_show(struct seq_file *m, void *v)
@@ -406,7 +384,7 @@
 		return count;
 	}
 
-	update_io_stats_uid_locked(uid);
+	update_io_stats_uid_locked(uid_entry);
 
 	uid_entry->state = state;
 
@@ -444,8 +422,7 @@
 	uid_entry->utime += utime;
 	uid_entry->stime += stime;
 
-	update_io_stats_uid_locked(uid);
-	clean_uid_io_last_stats(uid_entry, task);
+	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
 
 exit:
 	rt_mutex_unlock(&uid_lock);
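compute_uid_io_bucket_stats() above charges the delta since the last snapshot plus whatever exited tasks contributed through the new DEAD_TASKS slot, then rolls the snapshot forward and clears the dead-task accumulator. A worked sketch of that bookkeeping for a single counter (plain C, illustrative names):

#include <stdio.h>

struct io_slot { unsigned long long read_bytes; };

/* bucket += (curr - last) + dead; last = curr; dead = 0; */
static void fold(struct io_slot *bucket, struct io_slot *curr,
		 struct io_slot *last, struct io_slot *dead)
{
	bucket->read_bytes += curr->read_bytes + dead->read_bytes -
			      last->read_bytes;
	last->read_bytes = curr->read_bytes;
	dead->read_bytes = 0;
}

int main(void)
{
	struct io_slot bucket = { 0 }, curr = { 0 }, last = { 0 }, dead = { 0 };

	/* first pass: live tasks have read 100 bytes in total */
	curr.read_bytes = 100;
	fold(&bucket, &curr, &last, &dead);

	/* a task that had read 40 of those bytes exits; the live total drops,
	 * but its I/O was captured in the dead-task slot on exit */
	curr.read_bytes = 60;
	dead.read_bytes = 40;
	fold(&bucket, &curr, &last, &dead);

	printf("bucket=%llu (expected 100, no loss or double count)\n",
	       bucket.read_bytes);
	return 0;
}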
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3bde96a..f222f8a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -138,6 +138,74 @@
 };
 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
 
+/*
+ * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
+ * are placed at a fixed offset.
+ */
+static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	switch (mtd->oobsize) {
+	case 64:
+		oobregion->offset = 40;
+		break;
+	case 128:
+		oobregion->offset = 80;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	oobregion->length = ecc->total;
+	if (oobregion->offset + oobregion->length > mtd->oobsize)
+		return -ERANGE;
+
+	return 0;
+}
+
+static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int ecc_offset = 0;
+
+	if (section < 0 || section > 1)
+		return -ERANGE;
+
+	switch (mtd->oobsize) {
+	case 64:
+		ecc_offset = 40;
+		break;
+	case 128:
+		ecc_offset = 80;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (section == 0) {
+		oobregion->offset = 2;
+		oobregion->length = ecc_offset - 2;
+	} else {
+		oobregion->offset = ecc_offset + ecc->total;
+		oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+	.ecc = nand_ooblayout_ecc_lp_hamming,
+	.free = nand_ooblayout_free_lp_hamming,
+};
+
 static int check_offs_len(struct mtd_info *mtd,
 					loff_t ofs, uint64_t len)
 {
@@ -4565,7 +4633,7 @@
 			break;
 		case 64:
 		case 128:
-			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
 			break;
 		default:
 			WARN(1, "No oob scheme defined for oobsize %d\n",
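The nand_ooblayout_lp_hamming_ops added above place the 1-bit Hamming ECC bytes at a fixed offset that depends only on the OOB size (40 bytes in for 64-byte OOB, 80 for 128-byte OOB), with the free bytes split around them. A stand-alone sketch that mirrors that placement rule (plain C with a toy region struct, not the kernel mtd_ooblayout API):

#include <stdio.h>

struct oob_region { int offset; int length; };

/* Mirror nand_ooblayout_ecc_lp_hamming(): fixed ECC offset per OOB size. */
static int ecc_region(int oobsize, int ecc_total, struct oob_region *r)
{
	int off;

	switch (oobsize) {
	case 64:  off = 40; break;
	case 128: off = 80; break;
	default:  return -1;			/* unsupported OOB size */
	}
	if (off + ecc_total > oobsize)
		return -1;			/* ECC would not fit */
	r->offset = off;
	r->length = ecc_total;
	return 0;
}

int main(void)
{
	struct oob_region r;

	if (!ecc_region(64, 24, &r))		/* 64-byte OOB, 24 ECC bytes */
		printf("ECC at offset %d, length %d\n", r.offset, r.length);
	return 0;
}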
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 5513bfd9..c178cb0d 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1856,6 +1856,15 @@
 	nand_chip->ecc.priv	= NULL;
 	nand_set_flash_node(nand_chip, dev->of_node);
 
+	if (!mtd->name) {
+		mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+					   "omap2-nand.%d", info->gpmc_cs);
+		if (!mtd->name) {
+			dev_err(&pdev->dev, "Failed to set MTD name\n");
+			return -ENOMEM;
+		}
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(nand_chip->IO_ADDR_R))
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 40a7c4a..af2f091 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -23,6 +23,11 @@
 #include <asm/sizes.h>
 #include <linux/platform_data/mtd-orion_nand.h>
 
+struct orion_nand_info {
+	struct nand_chip chip;
+	struct clk *clk;
+};
+
 static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
 	struct nand_chip *nc = mtd_to_nand(mtd);
@@ -75,20 +80,21 @@
 
 static int __init orion_nand_probe(struct platform_device *pdev)
 {
+	struct orion_nand_info *info;
 	struct mtd_info *mtd;
 	struct nand_chip *nc;
 	struct orion_nand_data *board;
 	struct resource *res;
-	struct clk *clk;
 	void __iomem *io_base;
 	int ret = 0;
 	u32 val = 0;
 
-	nc = devm_kzalloc(&pdev->dev,
-			sizeof(struct nand_chip),
+	info = devm_kzalloc(&pdev->dev,
+			sizeof(struct orion_nand_info),
 			GFP_KERNEL);
-	if (!nc)
+	if (!info)
 		return -ENOMEM;
+	nc = &info->chip;
 	mtd = nand_to_mtd(nc);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -145,15 +151,13 @@
 	if (board->dev_ready)
 		nc->dev_ready = board->dev_ready;
 
-	platform_set_drvdata(pdev, mtd);
+	platform_set_drvdata(pdev, info);
 
 	/* Not all platforms can gate the clock, so it is not
 	   an error if the clock does not exist. */
-	clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(clk)) {
-		clk_prepare_enable(clk);
-		clk_put(clk);
-	}
+	info->clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(info->clk))
+		clk_prepare_enable(info->clk);
 
 	if (nand_scan(mtd, 1)) {
 		ret = -ENXIO;
@@ -170,26 +174,22 @@
 	return 0;
 
 no_dev:
-	if (!IS_ERR(clk)) {
-		clk_disable_unprepare(clk);
-		clk_put(clk);
-	}
+	if (!IS_ERR(info->clk))
+		clk_disable_unprepare(info->clk);
 
 	return ret;
 }
 
 static int orion_nand_remove(struct platform_device *pdev)
 {
-	struct mtd_info *mtd = platform_get_drvdata(pdev);
-	struct clk *clk;
+	struct orion_nand_info *info = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &info->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
 
 	nand_release(mtd);
 
-	clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(clk)) {
-		clk_disable_unprepare(clk);
-		clk_put(clk);
-	}
+	if (!IS_ERR(info->clk))
+		clk_disable_unprepare(info->clk);
 
 	return 0;
 }
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 8716b8c..6f3c805 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1077,7 +1077,7 @@
          * are "42101001.sb" or "42101002.sb"
          */
         sprintf(stir421x_fw_name, "4210%4X.sb",
-                self->usbdev->descriptor.bcdDevice);
+		le16_to_cpu(self->usbdev->descriptor.bcdDevice));
         ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
         if (ret < 0)
                 return ret;
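The irda-usb fix above exists because bcdDevice is a little-endian 16-bit field in the USB device descriptor; printing it without le16_to_cpu() builds the wrong firmware name on big-endian hosts. A toy sketch of the conversion (toy_le16_to_cpu is an illustrative stand-in, not the kernel helper):

#include <stdio.h>
#include <stdint.h>

/* Interpret a 16-bit little-endian field regardless of host endianness,
 * as le16_to_cpu() does for descriptor.bcdDevice above. */
static uint16_t toy_le16_to_cpu(const uint8_t raw[2])
{
	return (uint16_t)(raw[0] | (raw[1] << 8));
}

int main(void)
{
	uint8_t bcd_device[2] = { 0x02, 0x10 };	/* device release 10.02 in BCD */

	/* builds the firmware name the way the driver does */
	printf("4210%04X.sb\n", toy_le16_to_cpu(bcd_device));
	return 0;
}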
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index e1c338c..f15589c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -37,6 +37,7 @@
 	{ USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
 	{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
 	{ USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
+	{ USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */
 
 	{ USB_DEVICE(0x0cf3, 0x7015),
 	  .driver_info = AR9287_USB },  /* Atheros */
@@ -1218,6 +1219,9 @@
 	u8 bulk_out_ep;
 	int r;
 
+	if (iface_desc->desc.bNumEndpoints < 2)
+		return -ENODEV;
+
 	/* Find bulk out endpoint */
 	for (r = 1; r >= 0; r--) {
 		endpoint = &iface_desc->endpoint[r].desc;
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index d472e13..1afed52 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -26,6 +26,10 @@
 module_param(use_msi, bool, 0444);
 MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
 
+static bool ftm_mode;
+module_param(ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
+
 #ifdef CONFIG_PM
 #ifdef CONFIG_PM_SLEEP
 static int wil6210_pm_notify(struct notifier_block *notify_block,
@@ -36,13 +40,15 @@
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
+	const char *wil_fw_name;
 	u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 	u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
 			    RGF_USER_REVISION_ID_MASK);
 
 	bitmap_zero(wil->hw_capabilities, hw_capability_last);
 	bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
-	wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
+	wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
+			   WIL_FW_NAME_DEFAULT;
 	wil->chip_revision = chip_revision;
 
 	switch (jtag_id) {
@@ -51,9 +57,11 @@
 		case REVISION_ID_SPARROW_D0:
 			wil->hw_name = "Sparrow D0";
 			wil->hw_version = HW_VER_SPARROW_D0;
-			if (wil_fw_verify_file_exists(wil,
-						      WIL_FW_NAME_SPARROW_PLUS))
-				wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
+			wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_SPARROW_PLUS :
+				      WIL_FW_NAME_SPARROW_PLUS;
+
+			if (wil_fw_verify_file_exists(wil, wil_fw_name))
+				wil->wil_fw_name = wil_fw_name;
 			break;
 		case REVISION_ID_SPARROW_B0:
 			wil->hw_name = "Sparrow B0";
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index d05bb36..ba1c33b 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -38,8 +38,13 @@
 extern bool disable_ap_sme;
 
 #define WIL_NAME "wil6210"
-#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */
-#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
+
+#define WIL_FW_NAME_DEFAULT "wil6210.fw"
+#define WIL_FW_NAME_FTM_DEFAULT "wil6210_ftm.fw"
+
+#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
+#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
+
 #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
 
 #define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 16241d2..afdbbf5 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2512,9 +2512,11 @@
 			priv->random_mac[i] |= get_random_int() &
 					       ~(request->mac_addr_mask[i]);
 		}
+		ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
+	} else {
+		eth_zero_addr(priv->random_mac);
 	}
 
-	ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
 	user_scan_cfg->num_ssids = request->n_ssids;
 	user_scan_cfg->ssid_list = request->ssids;
 
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 7a310c4..1fdb86c 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -995,6 +995,7 @@
 	if (card && card->cmd_buf) {
 		mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
 					 PCI_DMA_TODEVICE);
+		dev_kfree_skb_any(card->cmd_buf);
 	}
 	return 0;
 }
@@ -1561,6 +1562,11 @@
 		return -1;
 
 	card->cmd_buf = skb;
+	/*
+	 * Need to keep a reference, since core driver might free up this
+	 * buffer before we've unmapped it.
+	 */
+	skb_get(skb);
 
 	/* To send a command, the driver will:
 		1. Write the 64bit physical address of the data buffer to
@@ -1658,6 +1664,7 @@
 	if (card->cmd_buf) {
 		mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
 					 PCI_DMA_TODEVICE);
+		dev_kfree_skb_any(card->cmd_buf);
 		card->cmd_buf = NULL;
 	}
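The mwifiex change above takes an extra reference on the command skb with skb_get() because, as the added comment says, the core driver might free the buffer before the PCIe code has unmapped it; the matching dev_kfree_skb_any() calls then drop that reference once the DMA mapping is gone. A user-space sketch of the same reference-count discipline (a toy refcounted buffer, not the kernel sk_buff API):

#include <stdio.h>
#include <stdlib.h>

struct buf { int refs; };

static struct buf *buf_get(struct buf *b)	{ b->refs++; return b; }

static void buf_put(struct buf *b)
{
	if (--b->refs == 0) {
		printf("buffer really freed\n");
		free(b);
	}
}

int main(void)
{
	struct buf *cmd = malloc(sizeof(*cmd));
	struct buf *card_ref;

	if (!cmd)
		return 1;
	cmd->refs = 1;
	card_ref = buf_get(cmd);	/* driver keeps its own reference */

	buf_put(cmd);			/* the core frees its copy early... */
	/* ...but the driver can still "unmap" card_ref safely here */
	buf_put(card_ref);		/* and drops its reference afterwards */
	return 0;
}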
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 5dad4021..a74fad6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -359,6 +359,107 @@
 	return rtl8821ae_phy_rf6052_config(hw);
 }
 
+static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp;
+
+	switch (rtlhal->rfe_type) {
+	case 3:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+		break;
+	case 4:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001);
+		break;
+	case 5:
+		rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+		rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		break;
+	case 1:
+		if (rtlpriv->btcoexist.bt_coexistence) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77777777);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+			break;
+		}
+	case 0:
+	case 2:
+	default:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		break;
+	}
+}
+
+static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp;
+
+	switch (rtlhal->rfe_type) {
+	case 0:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	case 1:
+		if (rtlpriv->btcoexist.bt_coexistence) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		} else {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		}
+		break;
+	case 3:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+		break;
+	case 5:
+		rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+		rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	case 2:
+	case 4:
+	default:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	}
+}
+
 u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8	band,
 			   u8 rf_path)
 {
@@ -553,14 +654,9 @@
 			/* 0x82C[1:0] = 2b'00 */
 			rtl_set_bbreg(hw, 0x82c, 0x3, 0);
 		}
-		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
-				      0x77777777);
-			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
-				      0x77777777);
-			rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
-			rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
-		}
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			_rtl8812ae_phy_set_rfe_reg_24g(hw);
 
 		rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1);
 		rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1);
@@ -615,14 +711,8 @@
 			/* 0x82C[1:0] = 2'b00 */
 			rtl_set_bbreg(hw, 0x82c, 0x3, 1);
 
-		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
-				      0x77337777);
-			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
-				      0x77337777);
-			rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
-			rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
-		}
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			_rtl8812ae_phy_set_rfe_reg_5g(hw);
 
 		rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
 		rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
index 1d6110f..ed69dbe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
@@ -2424,6 +2424,7 @@
 #define	BMASKH4BITS			0xf0000000
 #define BMASKOFDM_D			0xffc00000
 #define	BMASKCCK			0x3f3f3f3f
+#define BMASKRFEINV			0x3ff00000
 
 #define BRFREGOFFSETMASK		0xfffff
 
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 9d23692..e1c6f99 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -70,3 +70,11 @@
 source "drivers/nfc/s3fwrn5/Kconfig"
 source "drivers/nfc/st95hf/Kconfig"
 endmenu
+
+config NFC_NQ
+        tristate "QTI NCI based NFC Controller Driver for NQx"
+        depends on I2C
+        help
+          This enables the NFC driver for NQx based devices.
+          This is for the I2C-connected version. The NCI protocol logic
+          resides in user mode and has no other NFC dependencies.
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index bab8ef0..b691fd4 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -17,3 +17,4 @@
 obj-$(CONFIG_NFC_NXP_NCI)	+= nxp-nci/
 obj-$(CONFIG_NFC_S3FWRN5)	+= s3fwrn5/
 obj-$(CONFIG_NFC_ST95HF)	+= st95hf/
+obj-$(CONFIG_NFC_NQ)		+= nq-nci.o
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
new file mode 100644
index 0000000..baa4f94
--- /dev/null
+++ b/drivers/nfc/nq-nci.c
@@ -0,0 +1,1242 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/uaccess.h>
+#include "nq-nci.h"
+#include <linux/clk.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+struct nqx_platform_data {
+	unsigned int irq_gpio;
+	unsigned int en_gpio;
+	unsigned int clkreq_gpio;
+	unsigned int firm_gpio;
+	unsigned int ese_gpio;
+	const char *clk_src_name;
+};
+
+static const struct of_device_id msm_match_table[] = {
+	{.compatible = "qcom,nq-nci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_match_table);
+
+#define MAX_BUFFER_SIZE			(320)
+#define WAKEUP_SRC_TIMEOUT		(2000)
+#define MAX_RETRY_COUNT			3
+
+struct nqx_dev {
+	wait_queue_head_t	read_wq;
+	struct	mutex		read_mutex;
+	struct	i2c_client	*client;
+	struct	miscdevice	nqx_device;
+	union  nqx_uinfo	nqx_info;
+	/* NFC GPIO variables */
+	unsigned int		irq_gpio;
+	unsigned int		en_gpio;
+	unsigned int		firm_gpio;
+	unsigned int		clkreq_gpio;
+	unsigned int		ese_gpio;
+	/* NFC VEN pin state powered by Nfc */
+	bool			nfc_ven_enabled;
+	/* NFC_IRQ state */
+	bool			irq_enabled;
+	/* NFC_IRQ wake-up state */
+	bool			irq_wake_up;
+	spinlock_t		irq_enabled_lock;
+	unsigned int		count_irq;
+	/* Initial CORE RESET notification */
+	unsigned int		core_reset_ntf;
+	/* CLK control */
+	bool			clk_run;
+	struct	clk		*s_clk;
+	/* read buffer*/
+	size_t kbuflen;
+	u8 *kbuf;
+	struct nqx_platform_data *pdata;
+};
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			void *v);
+/*clock enable function*/
+static int nqx_clock_select(struct nqx_dev *nqx_dev);
+/*clock disable function*/
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev);
+static struct notifier_block nfcc_notifier = {
+	.notifier_call	= nfcc_reboot,
+	.next			= NULL,
+	.priority		= 0
+};
+
+static unsigned int disable_ctrl;
+
+static void nqx_init_stat(struct nqx_dev *nqx_dev)
+{
+	nqx_dev->count_irq = 0;
+}
+
+static void nqx_disable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (nqx_dev->irq_enabled) {
+		disable_irq_nosync(nqx_dev->client->irq);
+		nqx_dev->irq_enabled = false;
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+/**
+ * nqx_enable_irq()
+ *
+ * Enable the NFC interrupt if it is not already enabled
+ *
+ * Return: void
+ */
+static void nqx_enable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (!nqx_dev->irq_enabled) {
+		nqx_dev->irq_enabled = true;
+		enable_irq(nqx_dev->client->irq);
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
+{
+	struct nqx_dev *nqx_dev = dev_id;
+	unsigned long flags;
+
+	if (device_may_wakeup(&nqx_dev->client->dev))
+		pm_wakeup_event(&nqx_dev->client->dev, WAKEUP_SRC_TIMEOUT);
+
+	nqx_disable_irq(nqx_dev);
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	nqx_dev->count_irq++;
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+	wake_up(&nqx_dev->read_wq);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t nfc_read(struct file *filp, char __user *buf,
+					size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	unsigned char *tmp = NULL;
+	int ret;
+	int irq_gpio_val = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (count > nqx_dev->kbuflen)
+		count = nqx_dev->kbuflen;
+
+	dev_dbg(&nqx_dev->client->dev, "%s : reading %zu bytes.\n",
+			__func__, count);
+
+	mutex_lock(&nqx_dev->read_mutex);
+
+	irq_gpio_val = gpio_get_value(nqx_dev->irq_gpio);
+	if (irq_gpio_val == 0) {
+		if (filp->f_flags & O_NONBLOCK) {
+			dev_err(&nqx_dev->client->dev,
+			"f_flags has O_NONBLOCK, returning EAGAIN\n");
+			ret = -EAGAIN;
+			goto err;
+		}
+		while (1) {
+			ret = 0;
+			if (!nqx_dev->irq_enabled) {
+				nqx_dev->irq_enabled = true;
+				enable_irq(nqx_dev->client->irq);
+			}
+			if (!gpio_get_value(nqx_dev->irq_gpio)) {
+				ret = wait_event_interruptible(nqx_dev->read_wq,
+					!nqx_dev->irq_enabled);
+			}
+			if (ret)
+				goto err;
+			nqx_disable_irq(nqx_dev);
+
+			if (gpio_get_value(nqx_dev->irq_gpio))
+				break;
+			dev_err_ratelimited(&nqx_dev->client->dev,
+			"gpio is low, no need to read data\n");
+		}
+	}
+
+	tmp = nqx_dev->kbuf;
+	if (!tmp) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+	memset(tmp, 0x00, count);
+
+	/* Read data */
+	ret = i2c_master_recv(nqx_dev->client, tmp, count);
+	if (ret < 0) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: i2c_master_recv returned %d\n", __func__, ret);
+		goto err;
+	}
+	if (ret > count) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: received too many bytes from i2c (%d)\n",
+			__func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+#ifdef NFC_KERNEL_BU
+		dev_dbg(&nqx_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
+			__func__, tmp[0], tmp[1], tmp[2]);
+#endif
+	if (copy_to_user(buf, tmp, ret)) {
+		dev_warn(&nqx_dev->client->dev,
+			"%s : failed to copy to user space\n", __func__);
+		ret = -EFAULT;
+		goto err;
+	}
+	mutex_unlock(&nqx_dev->read_mutex);
+	return ret;
+
+err:
+	mutex_unlock(&nqx_dev->read_mutex);
+out:
+	return ret;
+}
+
+static ssize_t nfc_write(struct file *filp, const char __user *buf,
+				size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	char *tmp = NULL;
+	int ret = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (count > nqx_dev->kbuflen) {
+		dev_err(&nqx_dev->client->dev, "%s: out of memory\n",
+			__func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	tmp = memdup_user(buf, count);
+	if (IS_ERR(tmp)) {
+		dev_err(&nqx_dev->client->dev, "%s: memdup_user failed\n",
+			__func__);
+		ret = PTR_ERR(tmp);
+		goto out;
+	}
+
+	ret = i2c_master_send(nqx_dev->client, tmp, count);
+	if (ret != count) {
+		dev_err(&nqx_dev->client->dev,
+		"%s: failed to write %d\n", __func__, ret);
+		ret = -EIO;
+		goto out_free;
+	}
+#ifdef NFC_KERNEL_BU
+	dev_dbg(&nqx_dev->client->dev,
+			"%s : i2c-%d: NfcNciTx %x %x %x\n",
+			__func__, iminor(file_inode(filp)),
+			tmp[0], tmp[1], tmp[2]);
+#endif
+	usleep_range(1000, 1100);
+out_free:
+	kfree(tmp);
+out:
+	return ret;
+}
+
+/**
+ * nqx_standby_write()
+ * @nqx_dev:   NFC device context
+ * @buf:       pointer to data buffer
+ * @len:       number of bytes to transfer
+ *
+ * Write the data buffer over I2C, retrying if the NFCC is
+ * in standby mode.
+ *
+ * Return: number of bytes written or a negative value on error
+ */
+static int nqx_standby_write(struct nqx_dev *nqx_dev,
+				const unsigned char *buf, size_t len)
+{
+	int ret = -EINVAL;
+	int retry_cnt;
+
+	for (retry_cnt = 1; retry_cnt <= MAX_RETRY_COUNT; retry_cnt++) {
+		ret = i2c_master_send(nqx_dev->client, buf, len);
+		if (ret < 0) {
+			dev_err(&nqx_dev->client->dev,
+				"%s: write failed, Maybe in Standby Mode - Retry(%d)\n",
+				 __func__, retry_cnt);
+			usleep_range(1000, 1100);
+		} else if (ret == len)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Power management of the eSE
+ * NFC & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE OFF : NFC_EN low and eSE_pwr_req low.
+ */
+static int nqx_ese_pwr(struct nqx_dev *nqx_dev, unsigned long int arg)
+{
+	int r = -1;
+	const unsigned char svdd_off_cmd_warn[] =  {0x2F, 0x31, 0x01, 0x01};
+	const unsigned char svdd_off_cmd_done[] =  {0x2F, 0x31, 0x01, 0x00};
+
+	if (!gpio_is_valid(nqx_dev->ese_gpio)) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: ese_gpio is not valid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (arg == 0) {
+		/*
+		 * We want to power on the eSE and to do so we need the
+		 * eSE_pwr_req pin and the NFC_EN pin to be high
+		 */
+		if (gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is already high\n");
+			r = 0;
+		} else {
+			/**
+			 * Let's store the NFC_EN pin state
+			 * only if the eSE is not yet on
+			 */
+			nqx_dev->nfc_ven_enabled =
+					gpio_get_value(nqx_dev->en_gpio);
+			if (!nqx_dev->nfc_ven_enabled) {
+				gpio_set_value(nqx_dev->en_gpio, 1);
+				/* hardware dependent delay */
+				usleep_range(1000, 1100);
+			}
+			gpio_set_value(nqx_dev->ese_gpio, 1);
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "ese_gpio is enabled\n");
+				r = 0;
+			}
+		}
+	} else if (arg == 1) {
+		if (nqx_dev->nfc_ven_enabled &&
+			((nqx_dev->nqx_info.info.chip_type == NFCC_NQ_220) ||
+			(nqx_dev->nqx_info.info.chip_type == NFCC_PN66T))) {
+			/**
+			 * Let's inform the CLF we're
+			 * powering off the eSE
+			 */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_warn,
+						sizeof(svdd_off_cmd_warn));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_warn sent\n", __func__);
+
+			/* let's power down the eSE */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: nqx_dev->ese_gpio set to 0\n", __func__);
+
+			/**
+			 * Time needed for the SVDD capacitor
+			 * to get discharged
+			 */
+			usleep_range(8000, 8100);
+
+			/* Let's inform the CLF the eSE is now off */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_done,
+						sizeof(svdd_off_cmd_done));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_done sent\n", __func__);
+		} else {
+			/**
+			 * In case the NFC is off,
+			 * there's no need to send the i2c commands
+			 */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+		}
+
+		if (!gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is disabled\n");
+			r = 0;
+		}
+
+		if (!nqx_dev->nfc_ven_enabled) {
+			/* hardware dependent delay */
+			usleep_range(1000, 1100);
+			dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+		}
+	} else if (arg == 3) {
+		r = gpio_get_value(nqx_dev->ese_gpio);
+	}
+	return r;
+}
+
+static int nfc_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	struct nqx_dev *nqx_dev = container_of(filp->private_data,
+				struct nqx_dev, nqx_device);
+
+	filp->private_data = nqx_dev;
+	nqx_init_stat(nqx_dev);
+
+	dev_dbg(&nqx_dev->client->dev,
+			"%s: %d,%d\n", __func__, imajor(inode), iminor(inode));
+	return ret;
+}
+
+/*
+ * nfc_ioctl_power_states() - power control
+ * @filp:	pointer to the file descriptor
+ * @arg:	mode that we want to move to
+ *
+ * Device power control. Depending on the arg value, device moves to
+ * different states
+ * (arg = 0): NFC_ENABLE	GPIO = 0, FW_DL GPIO = 0
+ * (arg = 1): NFC_ENABLE	GPIO = 1, FW_DL GPIO = 0
+ * (arg = 2): FW_DL GPIO = 1
+ *
+ * Return: -ENOIOCTLCMD if arg is not supported, a negative value on
+ * other failures, 0 on success
+ */
+int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
+{
+	int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	if (arg == 0) {
+		/*
+		 * We are attempting a hardware reset so let us disable
+		 * interrupts to avoid spurious notifications to upper
+		 * layers.
+		 */
+		nqx_disable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value disable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio))
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (!gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+				gpio_set_value(nqx_dev->en_gpio, 0);
+			} else {
+				dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
+			}
+		} else {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio invalid, set en_gpio to low\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+		}
+		r = nqx_clock_deselect(nqx_dev);
+		if (r < 0)
+			dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
+		nqx_dev->nfc_ven_enabled = false;
+		/* hardware dependent delay */
+		msleep(100);
+	} else if (arg == 1) {
+		nqx_enable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value enable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio))
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		r = nqx_clock_select(nqx_dev);
+		if (r < 0)
+			dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
+		nqx_dev->nfc_ven_enabled = true;
+		msleep(20);
+	} else if (arg == 2) {
+		/*
+		 * We are switching to Download Mode, toggle the enable pin
+		 * in order to set the NFCC in the new mode
+		 */
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_err(&nqx_dev->client->dev,
+				"FW download forbidden while ese is on\n");
+				return -EBUSY; /* Device or resource busy */
+			}
+		}
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		msleep(20);
+		if (gpio_is_valid(nqx_dev->firm_gpio))
+			gpio_set_value(nqx_dev->firm_gpio, 1);
+		msleep(20);
+		gpio_set_value(nqx_dev->en_gpio, 0);
+		msleep(100);
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		msleep(20);
+	} else {
+		r = -ENOIOCTLCMD;
+	}
+
+	return r;
+}
+
+#ifdef CONFIG_COMPAT
+static long nfc_compat_ioctl(struct file *pfile, unsigned int cmd,
+				unsigned long arg)
+{
+	long r = 0;
+
+	arg = (compat_u64)arg;
+	switch (cmd) {
+	case NFC_SET_PWR:
+		nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	default:
+		r = -ENOTTY;
+	}
+	return r;
+}
+#endif
+
+/*
+ * nfc_ioctl_core_reset_ntf()
+ * @filp:       pointer to the file descriptor
+ *
+ * Allows callers to determine if a CORE_RESET_NTF has arrived
+ *
+ * Return: the value of variable core_reset_ntf
+ */
+int nfc_ioctl_core_reset_ntf(struct file *filp)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	dev_dbg(&nqx_dev->client->dev, "%s: returning = %d\n", __func__,
+		nqx_dev->core_reset_ntf);
+	return nqx_dev->core_reset_ntf;
+}
+
+/*
+ * nfc_ioctl_nfcc_info()
+ * @filp:       pointer to the file descriptor
+ *
+ * Return the NQ chipset and firmware version details
+ */
+unsigned int nfc_ioctl_nfcc_info(struct file *filp, unsigned long arg)
+{
+	unsigned int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	r = nqx_dev->nqx_info.i;
+	dev_dbg(&nqx_dev->client->dev,
+		"nqx nfc : nfc_ioctl_nfcc_info r = %d\n", r);
+
+	return r;
+}
+
+static long nfc_ioctl(struct file *pfile, unsigned int cmd,
+			unsigned long arg)
+{
+	int r = 0;
+
+	switch (cmd) {
+	case NFC_SET_PWR:
+		r = nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	case NFCC_INITIAL_CORE_RESET_NTF:
+		r = nfc_ioctl_core_reset_ntf(pfile);
+		break;
+	case NFCC_GET_INFO:
+		r = nfc_ioctl_nfcc_info(pfile, arg);
+		break;
+	default:
+		r = -ENOIOCTLCMD;
+	}
+	return r;
+}
+
+static const struct file_operations nfc_dev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read  = nfc_read,
+	.write = nfc_write,
+	.open = nfc_open,
+	.unlocked_ioctl = nfc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = nfc_compat_ioctl
+#endif
+};
+
+/* Check for availability of NQx NFC controller hardware */
+static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
+{
+	int ret = 0;
+
+	unsigned char raw_nci_reset_cmd[] =  {0x20, 0x00, 0x01, 0x00};
+	unsigned char raw_nci_init_cmd[] =   {0x20, 0x01, 0x00};
+	unsigned char nci_init_rsp[28];
+	unsigned char nci_reset_rsp[6];
+	unsigned char init_rsp_len = 0;
+	unsigned int enable_gpio = nqx_dev->en_gpio;
+	/* making sure that the NFCC starts in a clean state. */
+	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
+	/* hardware dependent delay */
+	msleep(20);
+	gpio_set_value(enable_gpio, 1);/* HPD : Enable*/
+	/* hardware dependent delay */
+	msleep(20);
+
+	/* send NCI CORE RESET CMD with Keep Config parameters */
+	ret = i2c_master_send(client, raw_nci_reset_cmd,
+						sizeof(raw_nci_reset_cmd));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+
+	/* Read Response of RESET command */
+	ret = i2c_master_recv(client, nci_reset_rsp,
+		sizeof(nci_reset_rsp));
+	dev_err(&client->dev,
+	"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+	__func__, nci_reset_rsp[0],
+	nci_reset_rsp[1], nci_reset_rsp[2]);
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	ret = i2c_master_send(client, raw_nci_init_cmd,
+		sizeof(raw_nci_init_cmd));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+	/* Read Response of INIT command */
+	ret = i2c_master_recv(client, nci_init_rsp,
+		sizeof(nci_init_rsp));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	init_rsp_len = 2 + nci_init_rsp[2]; /* header bytes + payload length */
+	if (init_rsp_len > PAYLOAD_HEADER_LENGTH) {
+		nqx_dev->nqx_info.info.chip_type =
+				nci_init_rsp[init_rsp_len - 3];
+		nqx_dev->nqx_info.info.rom_version =
+				nci_init_rsp[init_rsp_len - 2];
+		nqx_dev->nqx_info.info.fw_major =
+				nci_init_rsp[init_rsp_len - 1];
+		nqx_dev->nqx_info.info.fw_minor =
+				nci_init_rsp[init_rsp_len];
+	}
+	dev_dbg(&nqx_dev->client->dev, "NQ NFCC chip_type = %x\n",
+		nqx_dev->nqx_info.info.chip_type);
+	dev_dbg(&nqx_dev->client->dev, "NQ fw version = %x.%x.%x\n",
+		nqx_dev->nqx_info.info.rom_version,
+		nqx_dev->nqx_info.info.fw_major,
+		nqx_dev->nqx_info.info.fw_minor);
+
+	switch (nqx_dev->nqx_info.info.chip_type) {
+	case NFCC_NQ_210:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ210 ##\n", __func__);
+		break;
+	case NFCC_NQ_220:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ220 ##\n", __func__);
+		break;
+	case NFCC_NQ_310:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ310 ##\n", __func__);
+		break;
+	case NFCC_NQ_330:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ330 ##\n", __func__);
+		break;
+	case NFCC_PN66T:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == PN66T ##\n", __func__);
+		break;
+	default:
+		dev_err(&client->dev,
+		"%s: - NFCC HW not Supported\n", __func__);
+		break;
+	}
+
+	/* Disable NFC by default to save power on boot */
+	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
+	ret = 0;
+	goto done;
+
+err_nfcc_hw_check:
+	ret = -ENXIO;
+	dev_err(&client->dev,
+		"%s: - NFCC HW not available\n", __func__);
+done:
+	return ret;
+}
+
+/*
+ * Routine to enable the clock.
+ * This routine can be extended to select from multiple
+ * sources based on clk_src_name.
+ */
+static int nqx_clock_select(struct nqx_dev *nqx_dev)
+{
+	int r = 0;
+
+	nqx_dev->s_clk = clk_get(&nqx_dev->client->dev, "ref_clk");
+
+	if (nqx_dev->s_clk == NULL)
+		goto err_clk;
+
+	if (nqx_dev->clk_run == false)
+		r = clk_prepare_enable(nqx_dev->s_clk);
+
+	if (r)
+		goto err_clk;
+
+	nqx_dev->clk_run = true;
+
+	return r;
+
+err_clk:
+	r = -1;
+	return r;
+}
+
+/*
+ * Routine to disable clocks
+ */
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev)
+{
+	int r = -1;
+
+	if (nqx_dev->s_clk != NULL) {
+		if (nqx_dev->clk_run == true) {
+			clk_disable_unprepare(nqx_dev->s_clk);
+			nqx_dev->clk_run = false;
+		}
+		return 0;
+	}
+	return r;
+}
+
+static int nfc_parse_dt(struct device *dev, struct nqx_platform_data *pdata)
+{
+	int r = 0;
+	struct device_node *np = dev->of_node;
+
+	pdata->en_gpio = of_get_named_gpio(np, "qcom,nq-ven", 0);
+	if ((!gpio_is_valid(pdata->en_gpio)))
+		return -EINVAL;
+	disable_ctrl = pdata->en_gpio;
+
+	pdata->irq_gpio = of_get_named_gpio(np, "qcom,nq-irq", 0);
+	if ((!gpio_is_valid(pdata->irq_gpio)))
+		return -EINVAL;
+
+	pdata->firm_gpio = of_get_named_gpio(np, "qcom,nq-firm", 0);
+	if (!gpio_is_valid(pdata->firm_gpio)) {
+		dev_warn(dev,
+			"FIRM GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->firm_gpio = -EINVAL;
+	}
+
+	pdata->ese_gpio = of_get_named_gpio(np, "qcom,nq-esepwr", 0);
+	if (!gpio_is_valid(pdata->ese_gpio)) {
+		dev_warn(dev,
+			"ese GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->ese_gpio = -EINVAL;
+	}
+
+	r = of_property_read_string(np, "qcom,clk-src", &pdata->clk_src_name);
+
+	pdata->clkreq_gpio = of_get_named_gpio(np, "qcom,nq-clkreq", 0);
+
+	if (r)
+		return -EINVAL;
+	return r;
+}
+
+static inline int gpio_input_init(const struct device * const dev,
+			const int gpio, const char * const gpio_name)
+{
+	int r = gpio_request(gpio, gpio_name);
+
+	if (r) {
+		dev_err(dev, "unable to request gpio [%d]\n", gpio);
+		return r;
+	}
+
+	r = gpio_direction_input(gpio);
+	if (r)
+		dev_err(dev, "unable to set direction for gpio [%d]\n", gpio);
+
+	return r;
+}
+
+static int nqx_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int r = 0;
+	int irqn = 0;
+	struct nqx_platform_data *platform_data;
+	struct nqx_dev *nqx_dev;
+
+	dev_dbg(&client->dev, "%s: enter\n", __func__);
+	if (client->dev.of_node) {
+		platform_data = devm_kzalloc(&client->dev,
+			sizeof(struct nqx_platform_data), GFP_KERNEL);
+		if (!platform_data) {
+			r = -ENOMEM;
+			goto err_platform_data;
+		}
+		r = nfc_parse_dt(&client->dev, platform_data);
+		if (r)
+			goto err_free_data;
+	} else
+		platform_data = client->dev.platform_data;
+
+	dev_dbg(&client->dev,
+		"%s, inside nfc-nci flags = %x\n",
+		__func__, client->flags);
+
+	if (platform_data == NULL) {
+		dev_err(&client->dev, "%s: failed\n", __func__);
+		r = -ENODEV;
+		goto err_platform_data;
+	}
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "%s: need I2C_FUNC_I2C\n", __func__);
+		r = -ENODEV;
+		goto err_free_data;
+	}
+	nqx_dev = kzalloc(sizeof(*nqx_dev), GFP_KERNEL);
+	if (nqx_dev == NULL) {
+		r = -ENOMEM;
+		goto err_free_data;
+	}
+	nqx_dev->client = client;
+	nqx_dev->kbuflen = MAX_BUFFER_SIZE;
+	nqx_dev->kbuf = kzalloc(MAX_BUFFER_SIZE, GFP_KERNEL);
+	if (!nqx_dev->kbuf) {
+		dev_err(&client->dev,
+			"failed to allocate memory for nqx_dev->kbuf\n");
+		r = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	if (gpio_is_valid(platform_data->en_gpio)) {
+		r = gpio_request(platform_data->en_gpio, "nfc_reset_gpio");
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to request nfc reset gpio [%d]\n",
+				__func__,
+				platform_data->en_gpio);
+			goto err_mem;
+		}
+		r = gpio_direction_output(platform_data->en_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to set direction for nfc reset gpio [%d]\n",
+					__func__,
+					platform_data->en_gpio);
+			goto err_en_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+		"%s: nfc reset gpio not provided\n", __func__);
+		goto err_mem;
+	}
+
+	if (gpio_is_valid(platform_data->irq_gpio)) {
+		r = gpio_request(platform_data->irq_gpio, "nfc_irq_gpio");
+		if (r) {
+			dev_err(&client->dev, "%s: unable to request nfc irq gpio [%d]\n",
+				__func__, platform_data->irq_gpio);
+			goto err_en_gpio;
+		}
+		r = gpio_direction_input(platform_data->irq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to set direction for nfc irq gpio [%d]\n",
+				__func__,
+				platform_data->irq_gpio);
+			goto err_irq_gpio;
+		}
+		irqn = gpio_to_irq(platform_data->irq_gpio);
+		if (irqn < 0) {
+			r = irqn;
+			goto err_irq_gpio;
+		}
+		client->irq = irqn;
+	} else {
+		dev_err(&client->dev, "%s: irq gpio not provided\n", __func__);
+		goto err_en_gpio;
+	}
+	if (gpio_is_valid(platform_data->firm_gpio)) {
+		r = gpio_request(platform_data->firm_gpio,
+			"nfc_firm_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc firmware gpio [%d]\n",
+				__func__, platform_data->firm_gpio);
+			goto err_irq_gpio;
+		}
+		r = gpio_direction_output(platform_data->firm_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc firmware gpio [%d]\n",
+			__func__, platform_data->firm_gpio);
+			goto err_firm_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: firm gpio not provided\n", __func__);
+		goto err_irq_gpio;
+	}
+	if (gpio_is_valid(platform_data->ese_gpio)) {
+		r = gpio_request(platform_data->ese_gpio,
+				"nfc-ese_pwr");
+		if (r) {
+			nqx_dev->ese_gpio = -EINVAL;
+			dev_err(&client->dev,
+				"%s: unable to request nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+			/* ese gpio optional so we should continue */
+          This is for the I2C connected version. The NCI protocol logic
+          resides in user mode and has no other NFC dependencies.
+			r = gpio_direction_output(platform_data->ese_gpio, 0);
+			if (r) {
+				/*
+				 * free ese gpio and set invalid
+				 * to avoid further use
+				 */
+				gpio_free(platform_data->ese_gpio);
+				nqx_dev->ese_gpio = -EINVAL;
+				dev_err(&client->dev,
+					"%s: cannot set direction for nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+				/* ese gpio optional so we should continue */
+			}
+		}
+	} else {
+		nqx_dev->ese_gpio = -EINVAL;
+		dev_err(&client->dev,
+			"%s: ese gpio not provided\n", __func__);
+		/* ese gpio optional so we should continue */
+	}
+	if (gpio_is_valid(platform_data->clkreq_gpio)) {
+		r = gpio_request(platform_data->clkreq_gpio,
+			"nfc_clkreq_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc clkreq gpio [%d]\n",
+				__func__, platform_data->clkreq_gpio);
+			goto err_ese_gpio;
+		}
+		r = gpio_direction_input(platform_data->clkreq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc clkreq gpio [%d]\n",
+			__func__, platform_data->clkreq_gpio);
+			goto err_clkreq_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: clkreq gpio not provided\n", __func__);
+		goto err_ese_gpio;
+	}
+
+	nqx_dev->en_gpio = platform_data->en_gpio;
+	nqx_dev->irq_gpio = platform_data->irq_gpio;
+	nqx_dev->firm_gpio  = platform_data->firm_gpio;
+	nqx_dev->clkreq_gpio = platform_data->clkreq_gpio;
+	nqx_dev->pdata = platform_data;
+
+	/* init mutex and queues */
+	init_waitqueue_head(&nqx_dev->read_wq);
+	mutex_init(&nqx_dev->read_mutex);
+	spin_lock_init(&nqx_dev->irq_enabled_lock);
+
+	nqx_dev->nqx_device.minor = MISC_DYNAMIC_MINOR;
+	nqx_dev->nqx_device.name = "nq-nci";
+	nqx_dev->nqx_device.fops = &nfc_dev_fops;
+
+	r = misc_register(&nqx_dev->nqx_device);
+	if (r) {
+		dev_err(&client->dev, "%s: misc_register failed\n", __func__);
+		goto err_misc_register;
+	}
+
+	/* NFC_INT IRQ */
+	nqx_dev->irq_enabled = true;
+	r = request_irq(client->irq, nqx_dev_irq_handler,
+			  IRQF_TRIGGER_HIGH, client->name, nqx_dev);
+	if (r) {
+		dev_err(&client->dev, "%s: request_irq failed\n", __func__);
+		goto err_request_irq_failed;
+	}
+	nqx_disable_irq(nqx_dev);
+
+	/*
+	 * Check that the NFCC hardware is physically present before
+	 * attempting any further hardware initialisation.
+	 */
+	r = nfcc_hw_check(client, nqx_dev);
+	if (r) {
+		/* make sure NFCC is not enabled */
+		gpio_set_value(platform_data->en_gpio, 0);
+		/* there is no hardware switch to turn NFC off */
+		goto err_request_hw_check_failed;
+	}
+
+	/* Register reboot notifier here */
+	r = register_reboot_notifier(&nfcc_notifier);
+	if (r) {
+		dev_err(&client->dev,
+			"%s: cannot register reboot notifier(err = %d)\n",
+			__func__, r);
+		/*
+		 * nfcc_hw_check function not doing memory
+		 * allocation so using same goto target here
+		 */
+		goto err_request_hw_check_failed;
+	}
+
+#ifdef NFC_KERNEL_BU
+	r = nqx_clock_select(nqx_dev);
+	if (r < 0) {
+		dev_err(&client->dev,
+			"%s: nqx_clock_select failed\n", __func__);
+		goto err_clock_en_failed;
+	}
+	gpio_set_value(platform_data->en_gpio, 1);
+#endif
+	device_init_wakeup(&client->dev, true);
+	device_set_wakeup_capable(&client->dev, true);
+	i2c_set_clientdata(client, nqx_dev);
+	nqx_dev->irq_wake_up = false;
+
+	dev_err(&client->dev,
+	"%s: probing NFCC NQxxx exited successfully\n",
+		 __func__);
+	return 0;
+
+#ifdef NFC_KERNEL_BU
+err_clock_en_failed:
+	unregister_reboot_notifier(&nfcc_notifier);
+#endif
+err_request_hw_check_failed:
+	free_irq(client->irq, nqx_dev);
+err_request_irq_failed:
+	misc_deregister(&nqx_dev->nqx_device);
+err_misc_register:
+	mutex_destroy(&nqx_dev->read_mutex);
+err_clkreq_gpio:
+	gpio_free(platform_data->clkreq_gpio);
+err_ese_gpio:
+	/* optional gpio, may not have been configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(platform_data->ese_gpio);
+err_firm_gpio:
+	gpio_free(platform_data->firm_gpio);
+err_irq_gpio:
+	gpio_free(platform_data->irq_gpio);
+err_en_gpio:
+	gpio_free(platform_data->en_gpio);
+err_mem:
+	kfree(nqx_dev->kbuf);
+err_free_dev:
+	kfree(nqx_dev);
+err_free_data:
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, platform_data);
+err_platform_data:
+	dev_err(&client->dev,
+	"%s: probing nqxx failed, check hardware\n",
+		 __func__);
+	return r;
+}
+
+static int nqx_remove(struct i2c_client *client)
+{
+	int ret = 0;
+	struct nqx_dev *nqx_dev;
+
+	nqx_dev = i2c_get_clientdata(client);
+	if (!nqx_dev) {
+		dev_err(&client->dev,
+		"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	unregister_reboot_notifier(&nfcc_notifier);
+	free_irq(client->irq, nqx_dev);
+	misc_deregister(&nqx_dev->nqx_device);
+	mutex_destroy(&nqx_dev->read_mutex);
+	gpio_free(nqx_dev->clkreq_gpio);
+	/* optional gpio, may not have been configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(nqx_dev->ese_gpio);
+	gpio_free(nqx_dev->firm_gpio);
+	gpio_free(nqx_dev->irq_gpio);
+	gpio_free(nqx_dev->en_gpio);
+	kfree(nqx_dev->kbuf);
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, nqx_dev->pdata);
+
+	kfree(nqx_dev);
+err:
+	return ret;
+}
+
+static int nqx_suspend(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_enabled) {
+		if (!enable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = true;
+	}
+	return 0;
+}
+
+static int nqx_resume(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_wake_up) {
+		if (!disable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = false;
+	}
+	return 0;
+}
+
+static const struct i2c_device_id nqx_id[] = {
+	{"nqx-i2c", 0},
+	{}
+};
+
+static const struct dev_pm_ops nfc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(nqx_suspend, nqx_resume)
+};
+
+static struct i2c_driver nqx = {
+	.id_table = nqx_id,
+	.probe = nqx_probe,
+	.remove = nqx_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "nq-nci",
+		.of_match_table = msm_match_table,
+		.pm = &nfc_pm_ops,
+	},
+};
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			  void *v)
+{
+	gpio_set_value(disable_ctrl, 1);
+	return NOTIFY_OK;
+}
+
+/*
+ * module load/unload record keeping
+ */
+static int __init nqx_dev_init(void)
+{
+	return i2c_add_driver(&nqx);
+}
+module_init(nqx_dev_init);
+
+static void __exit nqx_dev_exit(void)
+{
+	unregister_reboot_notifier(&nfcc_notifier);
+	i2c_del_driver(&nqx);
+}
+module_exit(nqx_dev_exit);
+
+MODULE_DESCRIPTION("NFC nqx");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
new file mode 100644
index 0000000..87715c2
--- /dev/null
+++ b/drivers/nfc/nq-nci.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __NQ_NCI_H
+#define __NQ_NCI_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/nfcinfo.h>
+
+#define NFC_SET_PWR			_IOW(0xE9, 0x01, unsigned int)
+#define ESE_SET_PWR			_IOW(0xE9, 0x02, unsigned int)
+#define ESE_GET_PWR			_IOR(0xE9, 0x03, unsigned int)
+#define SET_RX_BLOCK			_IOW(0xE9, 0x04, unsigned int)
+#define SET_EMULATOR_TEST_POINT		_IOW(0xE9, 0x05, unsigned int)
+#define NFCC_INITIAL_CORE_RESET_NTF	_IOW(0xE9, 0x10, unsigned int)
+
+#define NFC_RX_BUFFER_CNT_START		(0x0)
+#define PAYLOAD_HEADER_LENGTH		(0x3)
+#define PAYLOAD_LENGTH_MAX		(256)
+#define BYTE				(0x8)
+#define NCI_IDENTIFIER			(0x10)
+
+enum nfcc_initial_core_reset_ntf {
+	TIMEDOUT_INITIAL_CORE_RESET_NTF = 0, /* 0 */
+	ARRIVED_INITIAL_CORE_RESET_NTF, /* 1 */
+	DEFAULT_INITIAL_CORE_RESET_NTF, /* 2 */
+};
+
+enum nfcc_chip_variant {
+	NFCC_NQ_210			= 0x48,	/**< NFCC NQ210 */
+	NFCC_NQ_220			= 0x58,	/**< NFCC NQ220 */
+	NFCC_NQ_310			= 0x40,	/**< NFCC NQ310 */
+	NFCC_NQ_330			= 0x51,	/**< NFCC NQ330 */
+	NFCC_PN66T			= 0x18,	/**< NFCC PN66T */
+	NFCC_NOT_SUPPORTED	        = 0xFF	/**< NFCC is not supported */
+};
+#endif
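
For orientation, here is a minimal user-space sketch of how the character
device added above is meant to be driven; it is an illustration only, not
part of the patch. It assumes the misc device registered as "nq-nci" shows
up at /dev/nq-nci, and the file name nq_nci_example.c is made up. The ioctl
encodings mirror the definitions in nq-nci.h: NFC_SET_PWR with arg 1 powers
the controller into normal NCI mode, reads return raw NCI packets, and
ESE_GET_PWR reports the eSE power GPIO. On the device-tree side,
nfc_parse_dt() looks up the qcom,nq-ven, qcom,nq-irq, qcom,nq-firm,
qcom,nq-esepwr and qcom,nq-clkreq GPIOs and the qcom,clk-src string on a
node compatible with "qcom,nq-nci" (only the eSE GPIO is treated as
optional by probe).

/* nq_nci_example.c - hypothetical user-space test, not part of this patch */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

/* same encodings as drivers/nfc/nq-nci.h */
#define NFC_SET_PWR	_IOW(0xE9, 0x01, unsigned int)
#define ESE_GET_PWR	_IOR(0xE9, 0x03, unsigned int)

int main(void)
{
	unsigned char buf[260];	/* driver caps reads at MAX_BUFFER_SIZE (320) */
	ssize_t n;
	int fd = open("/dev/nq-nci", O_RDWR);

	if (fd < 0) {
		perror("open /dev/nq-nci");
		return 1;
	}

	/* arg = 1: NFC_ENABLE GPIO high, FW_DL GPIO low (normal NCI mode) */
	if (ioctl(fd, NFC_SET_PWR, 1) < 0)
		perror("NFC_SET_PWR");

	/* blocking read of one NCI packet: 3-byte header plus payload */
	n = read(fd, buf, sizeof(buf));
	if (n >= 3)
		printf("NCI rx: %zd bytes, header %02x %02x %02x\n",
		       n, buf[0], buf[1], buf[2]);

	/* ESE_GET_PWR returns the current eSE power GPIO value */
	printf("eSE power state: %d\n", ioctl(fd, ESE_GET_PWR, 0));

	close(fd);
	return 0;
}
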
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 351bac8..0392eb8 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -218,7 +218,10 @@
 	if (cmd_rc < 0)
 		return cmd_rc;
 
-	nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
+	if (clear_err.cleared > 0)
+		nvdimm_clear_from_poison_list(nvdimm_bus, phys,
+					      clear_err.cleared);
+
 	return clear_err.cleared;
 }
 EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8a9c186..14eac73 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1384,6 +1384,11 @@
 	if (dev->cmb) {
 		iounmap(dev->cmb);
 		dev->cmb = NULL;
+		if (dev->cmbsz) {
+			sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+						     &dev_attr_cmb.attr, NULL);
+			dev->cmbsz = 0;
+		}
 	}
 }
 
@@ -1655,6 +1660,7 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
+	nvme_release_cmb(dev);
 	pci_free_irq_vectors(pdev);
 
 	if (pci_is_enabled(pdev)) {
@@ -1993,7 +1999,6 @@
 	nvme_dev_disable(dev, true);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
-	nvme_release_cmb(dev);
 	nvme_release_prp_pools(dev);
 	nvme_dev_unmap(dev);
 	nvme_put_ctrl(&dev->ctrl);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 3723f57..2c1b08a 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -263,7 +263,7 @@
 	if (!parser->range || parser->range + parser->np > parser->end)
 		return NULL;
 
-	range->pci_space = parser->range[0];
+	range->pci_space = be32_to_cpup(parser->range);
 	range->flags = of_bus_pci_get_flags(parser->range);
 	range->pci_addr = of_read_number(parser->range + 1, ns);
 	range->cpu_addr = of_translate_address(parser->node,
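
The one-line of/address.c change above is an endianness fix: device-tree
cells are stored big-endian, so the old raw assignment only happened to
work on big-endian CPUs. A small sketch of the pattern (an illustration
only; read_first_cell() is a made-up helper, not code from this patch):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical helper showing why be32_to_cpup() is needed for DT cells. */
static u32 read_first_cell(const __be32 *range)
{
	/*
	 * Wrong on little-endian hosts: copying the cell verbatim keeps the
	 * big-endian byte order, which is what the old assignment did.
	 *
	 *	return (__force u32)range[0];
	 */

	/* Correct: convert the big-endian DT cell to CPU byte order. */
	return be32_to_cpup(range);
}
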
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 8668808..66af185 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -505,6 +505,9 @@
 
 	/* Allocate memory for the expanded device tree */
 	mem = dt_alloc(size + 4, __alignof__(struct device_node));
+	if (!mem)
+		return NULL;
+
 	memset(mem, 0, size);
 
 	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index a53982a..2db1f7a 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -57,6 +57,8 @@
 		else
 			node_set(nid, numa_nodes_parsed);
 	}
+
+	of_node_put(cpus);
 }
 
 static int __init of_numa_parse_memory_nodes(void)
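
The of_numa change above simply drops the reference taken by
of_find_node_by_path() once the /cpus node has been walked. As a reminder
of that pattern (a generic sketch with a made-up function name, not code
from this patch):

#include <linux/errno.h>
#include <linux/of.h>

/* Hypothetical example of balancing an of_find_*() reference. */
static int example_count_cpu_nodes(void)
{
	struct device_node *cpus, *child;
	int n = 0;

	cpus = of_find_node_by_path("/cpus");
	if (!cpus)
		return -ENOENT;

	for_each_child_of_node(cpus, child)
		n++;

	/* of_find_node_by_path() took a reference; release it when done. */
	of_node_put(cpus);
	return n;
}
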
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 61fc349..dafb4cd 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -72,6 +72,7 @@
 	PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
 };
 
+#define CPU_AFFINITY_ALL	-1ULL
 #define PCI_CONFIG_MMIO_LENGTH	0x2000
 #define CFG_PAGE_OFFSET 0x1000
 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -868,7 +869,7 @@
 		hv_int_desc_free(hpdev, int_desc);
 	}
 
-	int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL);
+	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
 	if (!int_desc)
 		goto drop_reference;
 
@@ -889,9 +890,13 @@
 	 * processors because Hyper-V only supports 64 in a guest.
 	 */
 	affinity = irq_data_get_affinity_mask(data);
-	for_each_cpu_and(cpu, affinity, cpu_online_mask) {
-		int_pkt->int_desc.cpu_mask |=
-			(1ULL << vmbus_cpu_number_to_vp_number(cpu));
+	if (cpumask_weight(affinity) >= 32) {
+		int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+	} else {
+		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
+			int_pkt->int_desc.cpu_mask |=
+				(1ULL << vmbus_cpu_number_to_vp_number(cpu));
+		}
 	}
 
 	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index bcd10c7..1b07865 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -974,15 +974,19 @@
 int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
 		  enum pci_mmap_api mmap_api)
 {
-	unsigned long nr, start, size, pci_start;
+	unsigned long nr, start, size;
+	resource_size_t pci_start = 0, pci_end;
 
 	if (pci_resource_len(pdev, resno) == 0)
 		return 0;
 	nr = vma_pages(vma);
 	start = vma->vm_pgoff;
 	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
-	pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
-			pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+	if (mmap_api == PCI_MMAP_PROCFS) {
+		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+				     &pci_start, &pci_end);
+		pci_start >>= PAGE_SHIFT;
+	}
 	if (start >= pci_start && start < pci_start + size &&
 			start + nr <= pci_start + size)
 		return 1;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6922964..579c494 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1782,8 +1782,8 @@
 		}
 	}
 	if (!list_empty(&pci_pme_list))
-		schedule_delayed_work(&pci_pme_work,
-				      msecs_to_jiffies(PME_TIMEOUT));
+		queue_delayed_work(system_freezable_wq, &pci_pme_work,
+				   msecs_to_jiffies(PME_TIMEOUT));
 	mutex_unlock(&pci_pme_list_mutex);
 }
 
@@ -1848,8 +1848,9 @@
 			mutex_lock(&pci_pme_list_mutex);
 			list_add(&pme_dev->list, &pci_pme_list);
 			if (list_is_singular(&pci_pme_list))
-				schedule_delayed_work(&pci_pme_work,
-						      msecs_to_jiffies(PME_TIMEOUT));
+				queue_delayed_work(system_freezable_wq,
+						   &pci_pme_work,
+						   msecs_to_jiffies(PME_TIMEOUT));
 			mutex_unlock(&pci_pme_list_mutex);
 		} else {
 			mutex_lock(&pci_pme_list_mutex);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 2408abe..66c8863 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -231,24 +231,33 @@
 {
 	struct pci_dev *dev = PDE_DATA(file_inode(file));
 	struct pci_filp_private *fpriv = file->private_data;
-	int i, ret, write_combine;
+	int i, ret, write_combine = 0, res_bit;
 
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 
+	if (fpriv->mmap_state == pci_mmap_io)
+		res_bit = IORESOURCE_IO;
+	else
+		res_bit = IORESOURCE_MEM;
+
 	/* Make sure the caller is mapping a real resource for this device */
 	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
-		if (pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
+		if (dev->resource[i].flags & res_bit &&
+		    pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
 			break;
 	}
 
 	if (i >= PCI_ROM_RESOURCE)
 		return -ENODEV;
 
-	if (fpriv->mmap_state == pci_mmap_mem)
-		write_combine = fpriv->write_combine;
-	else
-		write_combine = 0;
+	if (fpriv->mmap_state == pci_mmap_mem &&
+	    fpriv->write_combine) {
+		if (dev->resource[i].flags & IORESOURCE_PREFETCH)
+			write_combine = 1;
+		else
+			return -EINVAL;
+	}
 	ret = pci_mmap_page_range(dev, vma,
 				  fpriv->mmap_state, write_combine);
 	if (ret < 0)
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 4672233..bcd602c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -80,6 +80,7 @@
 
 u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */
 static struct mutex ipa_to_apps_pipe_handle_guard;
+static struct mutex add_mux_channel_lock;
 static int wwan_add_ul_flt_rule_to_ipa(void);
 static int wwan_del_ul_flt_rule_to_ipa(void);
 static void ipa_wwan_msg_free_cb(void*, u32, u32);
@@ -1527,9 +1528,11 @@
 					rmnet_mux_val.mux_id);
 				return rc;
 			}
+			mutex_lock(&add_mux_channel_lock);
 			if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
 				IPAWANERR("Exceed mux_channel limit(%d)\n",
 				rmnet_index);
+				mutex_unlock(&add_mux_channel_lock);
 				return -EFAULT;
 			}
 			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
@@ -1558,6 +1561,7 @@
 					IPAWANERR("device %s reg IPA failed\n",
 						extend_ioctl_data.u.
 						rmnet_mux_val.vchannel_name);
+					mutex_unlock(&add_mux_channel_lock);
 					return -ENODEV;
 				}
 				mux_channel[rmnet_index].mux_channel_set = true;
@@ -1570,6 +1574,7 @@
 				mux_channel[rmnet_index].ul_flt_reg = false;
 			}
 			rmnet_index++;
+			mutex_unlock(&add_mux_channel_lock);
 			break;
 		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
 			IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
@@ -3084,6 +3089,7 @@
 	atomic_set(&is_ssr, 0);
 
 	mutex_init(&ipa_to_apps_pipe_handle_guard);
+	mutex_init(&add_mux_channel_lock);
 	ipa_to_apps_hdl = -1;
 
 	ipa_qmi_init();
@@ -3103,6 +3109,7 @@
 
 	ipa_qmi_cleanup();
 	mutex_destroy(&ipa_to_apps_pipe_handle_guard);
+	mutex_destroy(&add_mux_channel_lock);
 	ret = subsys_notif_unregister_notifier(subsys_notify_handle,
 					&ssr_notifier);
 	if (ret)
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index a15bd04..b198348 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -141,6 +141,7 @@
 	u32 apps_to_ipa3_hdl;
 	u32 ipa3_to_apps_hdl;
 	struct mutex pipe_handle_guard;
+	struct mutex add_mux_channel_lock;
 };
 
 static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -1636,10 +1637,13 @@
 					rmnet_mux_val.mux_id);
 				return rc;
 			}
+			mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
 			if (rmnet_ipa3_ctx->rmnet_index
 				>= MAX_NUM_OF_MUX_CHANNEL) {
 				IPAWANERR("Exceed mux_channel limit(%d)\n",
 				rmnet_ipa3_ctx->rmnet_index);
+				mutex_unlock(&rmnet_ipa3_ctx->
+					add_mux_channel_lock);
 				return -EFAULT;
 			}
 			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
@@ -1673,6 +1677,8 @@
 					IPAWANERR("device %s reg IPA failed\n",
 						extend_ioctl_data.u.
 						rmnet_mux_val.vchannel_name);
+					mutex_unlock(&rmnet_ipa3_ctx->
+						add_mux_channel_lock);
 					return -ENODEV;
 				}
 				mux_channel[rmnet_index].mux_channel_set = true;
@@ -1685,6 +1691,7 @@
 				mux_channel[rmnet_index].ul_flt_reg = false;
 			}
 			rmnet_ipa3_ctx->rmnet_index++;
+			mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock);
 			break;
 		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
 			rc = handle3_egress_format(dev, &extend_ioctl_data);
@@ -3204,6 +3211,7 @@
 	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
 
 	mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
+	mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock);
 	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
 	rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
 
@@ -3222,8 +3230,10 @@
 static void __exit ipa3_wwan_cleanup(void)
 {
 	int ret;
+
 	ipa3_qmi_cleanup();
 	mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
+	mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock);
 	ret = subsys_notif_unregister_notifier(
 		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
 	if (ret)
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3314bf2..dfa8d50 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -520,7 +520,7 @@
 		RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
 		BIT(0), 400),
 	RK8XX_DESC(RK818_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100,
-		RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+		RK818_LDO2_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
 		BIT(1), 400),
 	{
 		.name = "LDO_REG3",
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index d2c3d7c..5ca6d21 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -311,8 +311,7 @@
 
 	/* Enable setting output voltage by I2C */
 	regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
-					TPS65023_REG_CTRL2_CORE_ADJ,
-					TPS65023_REG_CTRL2_CORE_ADJ);
+			   TPS65023_REG_CTRL2_CORE_ADJ, 0);
 
 	return 0;
 }
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index c4fe95a..904422f 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1169,6 +1169,7 @@
 		cmd = list_first_entry_or_null(&vscsi->free_cmd,
 					       struct ibmvscsis_cmd, list);
 		if (cmd) {
+			cmd->flags &= ~(DELAY_SEND);
 			list_del(&cmd->list);
 			cmd->iue = iue;
 			cmd->type = UNSET_TYPE;
@@ -1748,45 +1749,79 @@
 static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 {
 	u64 msg_hi = 0;
-	/* note do not attmempt to access the IU_data_ptr with this pointer
+	/* note do not attempt to access the IU_data_ptr with this pointer
 	 * it is not valid
 	 */
 	struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
 	struct ibmvscsis_cmd *cmd, *nxt;
 	struct iu_entry *iue;
 	long rc = ADAPT_SUCCESS;
+	bool retry = false;
 
 	if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
-		list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
-			iue = cmd->iue;
+		do {
+			retry = false;
+			list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
+						 list) {
+				/*
+				 * Check to make sure abort cmd gets processed
+				 * prior to the abort tmr cmd
+				 */
+				if (cmd->flags & DELAY_SEND)
+					continue;
 
-			crq->valid = VALID_CMD_RESP_EL;
-			crq->format = cmd->rsp.format;
+				if (cmd->abort_cmd) {
+					retry = true;
+					cmd->abort_cmd->flags &= ~(DELAY_SEND);
+				}
 
-			if (cmd->flags & CMD_FAST_FAIL)
-				crq->status = VIOSRP_ADAPTER_FAIL;
+				/*
+				 * In the CMD_T_ABORTED w/o CMD_T_TAS case,
+				 * and in the case where LIO issued an
+				 * ABORT_TASK with TMR_TASK_DOES_NOT_EXIST,
+				 * don't send a response since one was
+				 * already sent.
+				 */
+				if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
+				    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
+					list_del(&cmd->list);
+					ibmvscsis_free_cmd_resources(vscsi,
+								     cmd);
+				} else {
+					iue = cmd->iue;
 
-			crq->IU_length = cpu_to_be16(cmd->rsp.len);
+					crq->valid = VALID_CMD_RESP_EL;
+					crq->format = cmd->rsp.format;
 
-			rc = h_send_crq(vscsi->dma_dev->unit_address,
-					be64_to_cpu(msg_hi),
-					be64_to_cpu(cmd->rsp.tag));
+					if (cmd->flags & CMD_FAST_FAIL)
+						crq->status = VIOSRP_ADAPTER_FAIL;
 
-			pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
-				 cmd, be64_to_cpu(cmd->rsp.tag), rc);
+					crq->IU_length = cpu_to_be16(cmd->rsp.len);
 
-			/* if all ok free up the command element resources */
-			if (rc == H_SUCCESS) {
-				/* some movement has occurred */
-				vscsi->rsp_q_timer.timer_pops = 0;
-				list_del(&cmd->list);
+					rc = h_send_crq(vscsi->dma_dev->unit_address,
+							be64_to_cpu(msg_hi),
+							be64_to_cpu(cmd->rsp.tag));
 
-				ibmvscsis_free_cmd_resources(vscsi, cmd);
-			} else {
-				srp_snd_msg_failed(vscsi, rc);
-				break;
+					pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+						 cmd, be64_to_cpu(cmd->rsp.tag), rc);
+
+					/* if all ok free up the command
+					 * element resources
+					 */
+					if (rc == H_SUCCESS) {
+						/* some movement has occurred */
+						vscsi->rsp_q_timer.timer_pops = 0;
+						list_del(&cmd->list);
+
+						ibmvscsis_free_cmd_resources(vscsi,
+									     cmd);
+					} else {
+						srp_snd_msg_failed(vscsi, rc);
+						break;
+					}
+				}
 			}
-		}
+		} while (retry);
 
 		if (!rc) {
 			/*
@@ -2707,6 +2742,7 @@
 
 	for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
 	     i++, cmd++) {
+		cmd->abort_cmd = NULL;
 		cmd->adapter = vscsi;
 		INIT_WORK(&cmd->work, ibmvscsis_scheduler);
 		list_add_tail(&cmd->list, &vscsi->free_cmd);
@@ -3578,9 +3614,20 @@
 {
 	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
 						 se_cmd);
+	struct scsi_info *vscsi = cmd->adapter;
 	struct iu_entry *iue = cmd->iue;
 	int rc;
 
+	/*
+	 * If CLIENT_FAILED or RESPONSE_Q_DOWN, just return success,
+	 * since LIO can't do anything about it and we don't want to
+	 * attempt an srp_transfer_data.
+	 */
+	if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
+		pr_err("write_pending failed since: %d\n", vscsi->flags);
+		return 0;
+	}
+
 	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
 			       1, 1);
 	if (rc) {
@@ -3659,11 +3706,28 @@
 	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
 						 se_cmd);
 	struct scsi_info *vscsi = cmd->adapter;
+	struct ibmvscsis_cmd *cmd_itr;
+	struct iu_entry *iue = cmd->iue;
+	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
+	u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
 	uint len;
 
 	pr_debug("queue_tm_rsp %p, status %d\n",
 		 se_cmd, (int)se_cmd->se_tmr_req->response);
 
+	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
+	    cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
+		spin_lock_bh(&vscsi->intr_lock);
+		list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
+			if (tag_to_abort == cmd_itr->se_cmd.tag) {
+				cmd_itr->abort_cmd = cmd;
+				cmd->flags |= DELAY_SEND;
+				break;
+			}
+		}
+		spin_unlock_bh(&vscsi->intr_lock);
+	}
+
 	srp_build_response(vscsi, cmd, &len);
 	cmd->rsp.format = SRP_FORMAT;
 	cmd->rsp.len = len;
@@ -3671,8 +3735,8 @@
 
 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
 {
-	/* TBD: What (if anything) should we do here? */
-	pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
+	pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
+		 se_cmd, se_cmd->tag);
 }
 
 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 98b0ca7..f5683af 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -167,10 +167,12 @@
 	struct iu_rsp rsp;
 	struct work_struct work;
 	struct scsi_info *adapter;
+	struct ibmvscsis_cmd *abort_cmd;
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
 	u64 init_time;
 #define CMD_FAST_FAIL	BIT(0)
+#define DELAY_SEND	BIT(1)
 	u32 flags;
 	char type;
 };
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2f6cd95..6418c11 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1523,6 +1523,7 @@
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+	hba->ufs_stats.clk_hold.ts = ktime_get();
 	return rc;
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -1627,6 +1628,7 @@
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+	hba->ufs_stats.clk_rel.ts = ktime_get();
 
 	hrtimer_start(&hba->clk_gating.gate_hrtimer,
 			ms_to_ktime(hba->clk_gating.delay_ms),
@@ -2073,8 +2075,10 @@
 
 	/* Exit from hibern8 */
 	if (ufshcd_is_link_hibern8(hba)) {
+		hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
 		ufshcd_hold(hba, false);
 		ret = ufshcd_uic_hibern8_exit(hba);
+		hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
 		ufshcd_release(hba, false);
 		if (!ret) {
 			spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2500,6 +2504,7 @@
 	int ret;
 	unsigned long flags;
 
+	hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
 	ufshcd_hold_all(hba);
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
@@ -2513,6 +2518,7 @@
 	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	mutex_unlock(&hba->uic_cmd_mutex);
 	ufshcd_release_all(hba);
+	hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
 
 	ufsdbg_error_inject_dispatcher(hba,
 		ERR_INJECT_UIC, 0, &ret);
@@ -2999,6 +3005,7 @@
 		goto out;
 	}
 
+	hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
 	err = ufshcd_hold(hba, true);
 	if (err) {
 		err = SCSI_MLQUEUE_HOST_BUSY;
@@ -3013,6 +3020,7 @@
 	if (err) {
 		clear_bit_unlock(tag, &hba->lrb_in_use);
 		err = SCSI_MLQUEUE_HOST_BUSY;
+		hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
 		ufshcd_release(hba, true);
 		goto out;
 	}
@@ -4392,8 +4400,10 @@
 	uic_cmd.command = UIC_CMD_DME_SET;
 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 	uic_cmd.argument3 = mode;
+	hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
 	ufshcd_hold_all(hba);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
 	ufshcd_release_all(hba);
 out:
 	return ret;
@@ -5580,6 +5590,7 @@
 			update_req_stats(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
+			hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
 			__ufshcd_release(hba, false);
 			__ufshcd_hibern8_release(hba, false);
 			if (cmd->request) {
@@ -6101,6 +6112,7 @@
 	if (unlikely((hba->clk_gating.state != CLKS_ON) &&
 	    ufshcd_is_auto_hibern8_supported(hba))) {
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
 		ufshcd_hold(hba, false);
 		spin_lock_irqsave(hba->host->host_lock, flags);
 		clks_enabled = true;
@@ -6245,8 +6257,10 @@
 
 	hba->silence_err_logs = false;
 
-	if (clks_enabled)
+	if (clks_enabled) {
 		__ufshcd_release(hba, false);
+		hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
+	}
 out:
 	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -6482,7 +6496,8 @@
 
 	spin_lock(hba->host->host_lock);
 	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-
+	hba->ufs_stats.last_intr_status = intr_status;
+	hba->ufs_stats.last_intr_ts = ktime_get();
 	/*
 	 * There could be max of hba->nutrs reqs in flight and in worst case
 	 * if the reqs get finished 1 by 1 after the interrupt status is
@@ -6561,6 +6576,7 @@
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
 	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+	hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
 	ufshcd_hold_all(hba);
 
 	spin_lock_irqsave(host->host_lock, flags);
@@ -6618,6 +6634,7 @@
 	clear_bit(free_slot, &hba->tm_condition);
 	ufshcd_put_tm_slot(hba, free_slot);
 	wake_up(&hba->tm_tag_wq);
+	hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
 
 	ufshcd_release_all(hba);
 	return err;
@@ -9635,6 +9652,7 @@
 	int ret = 0;
 
 	/* let's not get into low power until clock scaling is completed */
+	hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
 	ufshcd_hold_all(hba);
 
 	ret = ufshcd_clock_scaling_prepare(hba);
@@ -9698,6 +9716,7 @@
 clk_scaling_unprepare:
 	ufshcd_clock_scaling_unprepare(hba);
 out:
+	hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
 	ufshcd_release_all(hba);
 	return ret;
 }
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 6966aac..77ccc39 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -584,6 +584,22 @@
 };
 #endif
 
+enum ufshcd_ctx {
+	QUEUE_CMD,
+	ERR_HNDLR_WORK,
+	H8_EXIT_WORK,
+	UIC_CMD_SEND,
+	PWRCTL_CMD_SEND,
+	TM_CMD_SEND,
+	XFR_REQ_COMPL,
+	CLK_SCALE_WORK,
+};
+
+struct ufshcd_clk_ctx {
+	ktime_t ts;
+	enum ufshcd_ctx ctx;
+};
+
 /**
  * struct ufs_stats - keeps usage/err statistics
  * @enabled: enable tag stats for debugfs
@@ -612,6 +628,10 @@
 	int query_stats_arr[UPIU_QUERY_OPCODE_MAX][MAX_QUERY_IDN];
 
 #endif
+	u32 last_intr_status;
+	ktime_t last_intr_ts;
+	struct ufshcd_clk_ctx clk_hold;
+	struct ufshcd_clk_ctx clk_rel;
 	u32 hibern8_exit_cnt;
 	ktime_t last_hibern8_exit_tstamp;
 	struct ufs_uic_err_reg_hist pa_err;
diff --git a/drivers/soc/qcom/early_random.c b/drivers/soc/qcom/early_random.c
index 0c562ec..5156bc1 100644
--- a/drivers/soc/qcom/early_random.c
+++ b/drivers/soc/qcom/early_random.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2017, The Linux Foundation. All rights
+ * reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,7 +13,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/random.h>
+#include <linux/hw_random.h>
 #include <linux/io.h>
 
 #include <soc/qcom/scm.h>
@@ -57,7 +58,7 @@
 	if (!ret) {
 		dmac_inv_range(random_buffer, random_buffer +
 						RANDOM_BUFFER_SIZE);
-		add_device_randomness(random_buffer, SZ_512);
+		add_hwgenerator_randomness(random_buffer, SZ_512, SZ_512 << 3);
 	}
 }
 
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 69e0ebc..e3b5826 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -691,6 +691,8 @@
 		goto out;
 	}
 
+	memset(&ind_msg, 0, sizeof(ind_msg));
+
 	ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01;
 	ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN;
 	ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei;
@@ -3969,6 +3971,9 @@
 	    data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
 		return -EINVAL;
 
+	kfree(priv->diag_reg_read_buf);
+	priv->diag_reg_read_buf = NULL;
+
 	reg_buf = kzalloc(data_len, GFP_KERNEL);
 	if (!reg_buf)
 		return -ENOMEM;
@@ -4002,12 +4007,13 @@
 	.llseek         = seq_lseek,
 };
 
+#ifdef CONFIG_ICNSS_DEBUG
 static int icnss_debugfs_create(struct icnss_priv *priv)
 {
 	int ret = 0;
 	struct dentry *root_dentry;
 
-	root_dentry = debugfs_create_dir("icnss", 0);
+	root_dentry = debugfs_create_dir("icnss", NULL);
 
 	if (IS_ERR(root_dentry)) {
 		ret = PTR_ERR(root_dentry);
@@ -4017,19 +4023,40 @@
 
 	priv->root_dentry = root_dentry;
 
-	debugfs_create_file("fw_debug", 0644, root_dentry, priv,
+	debugfs_create_file("fw_debug", 0600, root_dentry, priv,
 			    &icnss_fw_debug_fops);
 
-	debugfs_create_file("stats", 0644, root_dentry, priv,
+	debugfs_create_file("stats", 0600, root_dentry, priv,
 			    &icnss_stats_fops);
 	debugfs_create_file("reg_read", 0600, root_dentry, priv,
 			    &icnss_regread_fops);
-	debugfs_create_file("reg_write", 0644, root_dentry, priv,
+	debugfs_create_file("reg_write", 0600, root_dentry, priv,
 			    &icnss_regwrite_fops);
 
 out:
 	return ret;
 }
+#else
+static int icnss_debugfs_create(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct dentry *root_dentry;
+
+	root_dentry = debugfs_create_dir("icnss", NULL);
+
+	if (IS_ERR(root_dentry)) {
+		ret = PTR_ERR(root_dentry);
+		icnss_pr_err("Unable to create debugfs %d\n", ret);
+		return ret;
+	}
+
+	priv->root_dentry = root_dentry;
+
+	debugfs_create_file("stats", 0600, root_dentry, priv,
+			    &icnss_stats_fops);
+	return 0;
+}
+#endif
 
 static void icnss_debugfs_destroy(struct icnss_priv *priv)
 {
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index e7c2bb2..21f3580 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -481,17 +481,21 @@
 	mutex_unlock(&subsys_list_lock);
 }
 
-static void for_each_subsys_device(struct subsys_device **list,
+static int for_each_subsys_device(struct subsys_device **list,
 		unsigned int count, void *data,
-		void (*fn)(struct subsys_device *, void *))
+		int (*fn)(struct subsys_device *, void *))
 {
+	int ret;
 	while (count--) {
 		struct subsys_device *dev = *list++;
 
 		if (!dev)
 			continue;
-		fn(dev, data);
+		ret = fn(dev, data);
+		if (ret)
+			return ret;
 	}
+	return 0;
 }
 
 static void notify_each_subsys_device(struct subsys_device **list,
@@ -593,21 +597,31 @@
 	return 0;
 }
 
-static void subsystem_shutdown(struct subsys_device *dev, void *data)
+static int subsystem_shutdown(struct subsys_device *dev, void *data)
 {
 	const char *name = dev->desc->name;
+	int ret;
 
 	pr_info("[%s:%d]: Shutting down %s\n",
 			current->comm, current->pid, name);
-	if (dev->desc->shutdown(dev->desc, true) < 0)
-		panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
-			current->comm, current->pid, name);
+	ret = dev->desc->shutdown(dev->desc, true);
+	if (ret < 0) {
+		if (!dev->desc->ignore_ssr_failure) {
+			panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
+				current->comm, current->pid, name);
+		} else {
+			pr_err("Shutdown failure on %s\n", name);
+			return ret;
+		}
+	}
 	dev->crash_count++;
 	subsys_set_state(dev, SUBSYS_OFFLINE);
 	disable_all_irqs(dev);
+
+	return 0;
 }
 
-static void subsystem_ramdump(struct subsys_device *dev, void *data)
+static int subsystem_ramdump(struct subsys_device *dev, void *data)
 {
 	const char *name = dev->desc->name;
 
@@ -616,15 +630,17 @@
 			pr_warn("%s[%s:%d]: Ramdump failed.\n",
 				name, current->comm, current->pid);
 	dev->do_ramdump_on_put = false;
+	return 0;
 }
 
-static void subsystem_free_memory(struct subsys_device *dev, void *data)
+static int subsystem_free_memory(struct subsys_device *dev, void *data)
 {
 	if (dev->desc->free_memory)
 		dev->desc->free_memory(dev->desc);
+	return 0;
 }
 
-static void subsystem_powerup(struct subsys_device *dev, void *data)
+static int subsystem_powerup(struct subsys_device *dev, void *data)
 {
 	const char *name = dev->desc->name;
 	int ret;
@@ -632,11 +648,17 @@
 	pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name);
 	init_completion(&dev->err_ready);
 
-	if (dev->desc->powerup(dev->desc) < 0) {
+	ret = dev->desc->powerup(dev->desc);
+	if (ret < 0) {
 		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
 								NULL);
-		panic("[%s:%d]: Powerup error: %s!",
-			current->comm, current->pid, name);
+		if (!dev->desc->ignore_ssr_failure) {
+			panic("[%s:%d]: Powerup error: %s!",
+				current->comm, current->pid, name);
+		} else {
+			pr_err("Powerup failure on %s\n", name);
+			return ret;
+		}
 	}
 	enable_all_irqs(dev);
 
@@ -644,11 +666,16 @@
 	if (ret) {
 		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
 								NULL);
-		panic("[%s:%d]: Timed out waiting for error ready: %s!",
-			current->comm, current->pid, name);
+		if (!dev->desc->ignore_ssr_failure)
+			panic("[%s:%d]: Timed out waiting for error ready: %s!",
+				current->comm, current->pid, name);
+		else
+			return ret;
 	}
 	subsys_set_state(dev, SUBSYS_ONLINE);
 	subsys_set_crash_status(dev, CRASH_STATUS_NO_CRASH);
+
+	return 0;
 }
 
 static int __find_subsys(struct device *dev, void *data)
@@ -910,6 +937,7 @@
 	struct subsys_tracking *track;
 	unsigned int count;
 	unsigned long flags;
+	int ret;
 
 	/*
 	 * It's OK to not take the registration lock at this point.
@@ -957,7 +985,9 @@
 	pr_debug("[%s:%d]: Starting restart sequence for %s\n",
 			current->comm, current->pid, desc->name);
 	notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
-	for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	ret = for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	if (ret)
+		goto err;
 	notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
 
 	notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
@@ -973,12 +1003,19 @@
 	for_each_subsys_device(list, count, NULL, subsystem_free_memory);
 
 	notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
-	for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	ret = for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	if (ret)
+		goto err;
 	notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
 
 	pr_info("[%s:%d]: Restart sequence for %s completed.\n",
 			current->comm, current->pid, desc->name);
 
+err:
+	/* Reset subsys count */
+	if (ret)
+		dev->count = 0;
+
 	mutex_unlock(&soc_order_reg_lock);
 	mutex_unlock(&track->lock);
 
@@ -1470,6 +1507,9 @@
 			desc->generic_irq = ret;
 	}
 
+	desc->ignore_ssr_failure = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ignore-ssr-failure");
+
 	order = ssr_parse_restart_orders(desc);
 	if (IS_ERR(order)) {
 		pr_err("Could not initialize SSR restart order, err = %ld\n",
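for_each_subsys_device() and its shutdown/powerup callbacks now return int, so a failure on a subsystem that sets qcom,ignore-ssr-failure aborts the restart sequence (and resets dev->count) instead of panicking. A stand-alone sketch of that error-propagating iterator shape; the types and names here are illustrative, not the driver's.

#include <stdio.h>

struct subsys { const char *name; int fails; };

/* Run fn() on every non-NULL entry; stop at the first error. */
static int for_each_subsys(struct subsys **list, unsigned int count,
			   void *data, int (*fn)(struct subsys *, void *))
{
	int ret;

	while (count--) {
		struct subsys *s = *list++;

		if (!s)
			continue;
		ret = fn(s, data);
		if (ret)
			return ret;	/* abort the whole sequence */
	}
	return 0;
}

static int shutdown(struct subsys *s, void *data)
{
	printf("shutting down %s\n", s->name);
	return s->fails ? -1 : 0;
}

int main(void)
{
	struct subsys a = { "modem", 0 }, b = { "adsp", 1 }, c = { "wlan", 0 };
	struct subsys *list[] = { &a, NULL, &b, &c };

	/* "wlan" is never reached because "adsp" reports a failure. */
	return for_each_subsys(list, 4, NULL, shutdown) ? 1 : 0;
}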
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 7a784aa..8bf5659 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -136,6 +136,8 @@
 		return 0;
 	__raw_writel(1, wdog_dd->base + WDT0_RST);
 	if (wdog_dd->wakeup_irq_enable) {
+		/* Make sure register write is complete before proceeding */
+		mb();
 		wdog_dd->last_pet = sched_clock();
 		return 0;
 	}
@@ -151,8 +153,15 @@
 {
 	struct msm_watchdog_data *wdog_dd =
 			(struct msm_watchdog_data *)dev_get_drvdata(dev);
-	if (!enable || wdog_dd->wakeup_irq_enable)
+	if (!enable)
 		return 0;
+	if (wdog_dd->wakeup_irq_enable) {
+		__raw_writel(1, wdog_dd->base + WDT0_RST);
+		/* Make sure register write is complete before proceeding */
+		mb();
+		wdog_dd->last_pet = sched_clock();
+		return 0;
+	}
 	__raw_writel(1, wdog_dd->base + WDT0_EN);
 	__raw_writel(1, wdog_dd->base + WDT0_RST);
 	/* Make sure watchdog is reset before setting enable */
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 8cc77c1..08eb00a 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -73,11 +73,11 @@
 #define SPI_CS_DEASSERT		(9)
 #define SPI_SCK_ONLY		(10)
 /* M_CMD params for SPI */
-#define SPI_PRE_CMD_DELAY	(0)
-#define TIMESTAMP_BEFORE	(1)
-#define FRAGMENTATION		(2)
-#define TIMESTAMP_AFTER		(3)
-#define POST_CMD_DELAY		(4)
+#define SPI_PRE_CMD_DELAY	BIT(0)
+#define TIMESTAMP_BEFORE	BIT(1)
+#define FRAGMENTATION		BIT(2)
+#define TIMESTAMP_AFTER		BIT(3)
+#define POST_CMD_DELAY		BIT(4)
 
 #define SPI_CORE2X_VOTE		(10000)
 
@@ -172,15 +172,13 @@
 	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
 	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
 	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
-	u32 demux_sel = geni_read_reg(mas->base, SE_SPI_DEMUX_SEL);
-	u32 demux_output_inv =
-			geni_read_reg(mas->base, SE_SPI_DEMUX_OUTPUT_INV);
+	u32 demux_sel = 0;
+	u32 demux_output_inv = 0;
 	int ret = 0;
 
 	loopback_cfg &= ~LOOPBACK_MSK;
 	cpol &= ~CPOL;
 	cpha &= ~CPHA;
-	demux_output_inv &= ~BIT(spi_slv->chip_select);
 
 	if (mode & SPI_LOOP)
 		loopback_cfg |= LOOPBACK_ENABLE;
@@ -194,7 +192,7 @@
 	if (spi_slv->mode & SPI_CS_HIGH)
 		demux_output_inv |= BIT(spi_slv->chip_select);
 
-	demux_sel |= BIT(spi_slv->chip_select);
+	demux_sel = spi_slv->chip_select;
 	mas->cur_speed_hz = spi_slv->max_speed_hz;
 	mas->cur_word_len = spi_slv->bits_per_word;
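The M_CMD parameters are individual flag bits that get OR-ed into one parameter word, so they have to be BIT(n) masks; the old plain values 0 through 4 collide once combined (1 | 2 equals 3, which was the old TIMESTAMP_AFTER). A small stand-alone illustration, with BIT() redefined locally for user space.

#include <stdio.h>

#define BIT(n)			(1U << (n))

#define SPI_PRE_CMD_DELAY	BIT(0)
#define TIMESTAMP_BEFORE	BIT(1)
#define FRAGMENTATION		BIT(2)
#define TIMESTAMP_AFTER		BIT(3)
#define POST_CMD_DELAY		BIT(4)

int main(void)
{
	/* Flags combine without ambiguity: 0x01 | 0x08 = 0x09. */
	unsigned int params = SPI_PRE_CMD_DELAY | TIMESTAMP_AFTER;

	printf("params = 0x%02x\n", params);
	printf("fragmentation requested? %s\n",
	       (params & FRAGMENTATION) ? "yes" : "no");
	return 0;
}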
 
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index a5bfeab..9cc85ee 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -584,6 +584,11 @@
 			id = ffs(status) - 1;
 			status &= ~BIT(id);
 			apid = id + i * 32;
+			if (apid < pa->min_apid || apid > pa->max_apid) {
+				WARN_ONCE(true, "spurious spmi irq received for apid=%d\n",
+					apid);
+				continue;
+			}
 			enable = readl_relaxed(pa->intr +
 					pa->ver_ops->acc_enable(apid));
 			if (enable & SPMI_PIC_ACC_ENABLE_BIT)
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 8d6bca6..591f274 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -97,8 +97,9 @@
 
 	switch (variable) {
 	case HW_VAR_BSSID:
-		rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]);
-		rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
+		/* BSSIDR 2 byte alignment */
+		rtl92e_writew(dev, BSSIDR, *(u16 *)val);
+		rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
 		break;
 
 	case HW_VAR_MEDIA_STATUS:
@@ -626,7 +627,7 @@
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
-	curCR = rtl92e_readl(dev, EPROM_CMD);
+	curCR = rtl92e_readw(dev, EPROM_CMD);
 	RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
 		 curCR);
 	priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
@@ -963,8 +964,8 @@
 	rtl92e_config_rate(dev, &rate_config);
 	priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
 	 priv->basic_rate = rate_config &= 0x15f;
-	rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]);
-	rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
+	rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
+	rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
 
 	if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
 		rtl92e_writew(dev, ATIMWND, 2);
@@ -1184,8 +1185,7 @@
 			  struct cb_desc *cb_desc, struct sk_buff *skb)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
-	dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
-			 PCI_DMA_TODEVICE);
+	dma_addr_t mapping;
 	struct tx_fwinfo_8190pci *pTxFwInfo;
 
 	pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
@@ -1196,8 +1196,6 @@
 	pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
 						  pTxFwInfo->TxRate, cb_desc);
 
-	if (pci_dma_mapping_error(priv->pdev, mapping))
-		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
 	if (cb_desc->bAMPDUEnable) {
 		pTxFwInfo->AllowAggregation = 1;
 		pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1232,6 +1230,14 @@
 	}
 
 	memset((u8 *)pdesc, 0, 12);
+
+	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
+				 PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(priv->pdev, mapping)) {
+		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
+		return;
+	}
+
 	pdesc->LINIP = 0;
 	pdesc->CmdInit = 1;
 	pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
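Two fixes above are worth spelling out: the BSSID is now programmed as a 16-bit write followed by a 32-bit write at offset 2, which keeps each MMIO access naturally aligned when the register offset itself is only 2-byte aligned, and the DMA mapping is created only after the descriptor is cleared and is abandoned on error rather than used anyway. The sketch below covers just the alignment arithmetic; the 0x2e2 offset is an assumed example, not taken from the driver.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* 6-byte BSSID, e.g. aa:bb:cc:dd:ee:ff */
	const uint8_t bssid[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
	unsigned int offset = 0x2e2;	/* illustrative register offset */
	uint16_t lo;
	uint32_t hi;

	/* memcpy() avoids unaligned/aliasing pitfalls on the CPU side. */
	memcpy(&lo, bssid, sizeof(lo));
	memcpy(&hi, bssid + 2, sizeof(hi));

	/*
	 * If offset % 4 == 2, a 32-bit access at offset is misaligned but
	 * one at offset + 2 is 4-byte aligned, hence writew(offset) then
	 * writel(offset + 2).
	 */
	printf("offset %% 4 = %u, (offset + 2) %% 4 = %u\n",
	       offset % 4, (offset + 2) % 4);
	printf("16-bit part = 0x%04x, 32-bit part = 0x%08x\n",
	       lo, (unsigned int)hi);
	return 0;
}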
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index a966a8e..4615a6f 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -306,11 +306,6 @@
 	pTsCommonInfo->TClasNum = TCLAS_Num;
 }
 
-static bool IsACValid(unsigned int tid)
-{
-	return tid < 7;
-}
-
 bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
 	   u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
 {
@@ -328,12 +323,6 @@
 	if (ieee->current_network.qos_data.supported == 0) {
 		UP = 0;
 	} else {
-		if (!IsACValid(TID)) {
-			netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
-				    __func__, TID);
-			return false;
-		}
-
 		switch (TID) {
 		case 0:
 		case 3:
@@ -351,6 +340,10 @@
 		case 7:
 			UP = 7;
 			break;
+		default:
+			netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
+				    __func__, TID);
+			return false;
 		}
 	}
 
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 7da9211..355d013 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -620,6 +620,7 @@
 	if (cpufreq_device->cpufreq_state == state)
 		return 0;
 
+	cpufreq_device->cpufreq_state = state;
 	/* If state is the last, isolate the CPU */
 	if (state == cpufreq_device->max_level)
 		return sched_isolate_cpu(cpu);
@@ -627,7 +628,6 @@
 		sched_unisolate_cpu(cpu);
 
 	clip_freq = cpufreq_device->freq_table[state];
-	cpufreq_device->cpufreq_state = state;
 	cpufreq_device->clipped_freq = clip_freq;
 
 	/* Check if the device has a platform mitigation function that
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index c5ff13f..a876d47 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -311,6 +311,12 @@
 		break;
 
 	case USB_CDC_NOTIFY_SERIAL_STATE:
+		if (le16_to_cpu(dr->wLength) != 2) {
+			dev_dbg(&acm->control->dev,
+				"%s - malformed serial state\n", __func__);
+			break;
+		}
+
 		newctrl = get_unaligned_le16(data);
 
 		if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
@@ -347,11 +353,10 @@
 
 	default:
 		dev_dbg(&acm->control->dev,
-			"%s - unknown notification %d received: index %d "
-			"len %d data0 %d data1 %d\n",
+			"%s - unknown notification %d received: index %d len %d\n",
 			__func__,
-			dr->bNotificationType, dr->wIndex,
-			dr->wLength, data[0], data[1]);
+			dr->bNotificationType, dr->wIndex, dr->wLength);
+
 		break;
 	}
 exit:
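get_unaligned_le16() reads two bytes no matter how many actually arrived, so the serial-state notification is rejected unless wLength is exactly 2. A stand-alone sketch of the same validate-then-parse step; the helper mimics what get_unaligned_le16() does.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Portable little-endian 16-bit read, like get_unaligned_le16(). */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static int parse_serial_state(const uint8_t *data, size_t len)
{
	if (len != 2) {
		fprintf(stderr, "malformed serial state (len=%zu)\n", len);
		return -1;
	}
	printf("newctrl = 0x%04x\n", get_le16(data));
	return 0;
}

int main(void)
{
	const uint8_t good[2] = { 0x03, 0x00 };
	const uint8_t bad[1] = { 0x03 };

	parse_serial_state(good, sizeof(good));
	parse_serial_state(bad, sizeof(bad));	/* rejected */
	return 0;
}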
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4016dae..840930b0 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -481,11 +481,11 @@
 
 	if (userurb) {		/* Async */
 		if (when == SUBMIT)
-			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
 					"length %u\n",
 					userurb, ep, t, d, length);
 		else
-			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
 					"actual_length %u status %d\n",
 					userurb, ep, t, d, length,
 					timeout_or_status);
@@ -1905,7 +1905,7 @@
 	if (as) {
 		int retval;
 
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl(as, (void __user * __user *)arg);
 		free_async(as);
 		return retval;
@@ -1922,7 +1922,7 @@
 
 	as = async_getcompleted(ps);
 	if (as) {
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl(as, (void __user * __user *)arg);
 		free_async(as);
 	} else {
@@ -2053,7 +2053,7 @@
 	if (as) {
 		int retval;
 
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl_compat(as, (void __user * __user *)arg);
 		free_async(as);
 		return retval;
@@ -2070,7 +2070,7 @@
 
 	as = async_getcompleted(ps);
 	if (as) {
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl_compat(as, (void __user * __user *)arg);
 		free_async(as);
 	} else {
@@ -2499,7 +2499,7 @@
 #endif
 
 	case USBDEVFS_DISCARDURB:
-		snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p);
+		snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
 		ret = proc_unlinkurb(ps, p);
 		break;
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ff45ebf..32f99da 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1723,7 +1723,7 @@
 		if (retval == 0)
 			retval = -EINPROGRESS;
 		else if (retval != -EIDRM && retval != -EBUSY)
-			dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
+			dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
 					urb, retval);
 		usb_put_dev(udev);
 	}
@@ -1890,7 +1890,7 @@
 		/* kick hcd */
 		unlink1(hcd, urb, -ESHUTDOWN);
 		dev_dbg (hcd->self.controller,
-			"shutdown urb %p ep%d%s%s\n",
+			"shutdown urb %pK ep%d%s%s\n",
 			urb, usb_endpoint_num(&ep->desc),
 			is_in ? "in" : "out",
 			({	char *s;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 7388f73..fcbaa61 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -365,7 +365,8 @@
 }
 
 /* USB 2.0 spec Section 11.24.4.5 */
-static int get_hub_descriptor(struct usb_device *hdev, void *data)
+static int get_hub_descriptor(struct usb_device *hdev,
+		struct usb_hub_descriptor *desc)
 {
 	int i, ret, size;
 	unsigned dtype;
@@ -381,10 +382,18 @@
 	for (i = 0; i < 3; i++) {
 		ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
 			USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
-			dtype << 8, 0, data, size,
+			dtype << 8, 0, desc, size,
 			USB_CTRL_GET_TIMEOUT);
-		if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2))
+		if (hub_is_superspeed(hdev)) {
+			if (ret == size)
+				return ret;
+		} else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) {
+			/* Make sure we have the DeviceRemovable field. */
+			size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1;
+			if (ret < size)
+				return -EMSGSIZE;
 			return ret;
+		}
 	}
 	return -EINVAL;
 }
@@ -1322,7 +1331,7 @@
 	}
 	mutex_init(&hub->status_mutex);
 
-	hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
+	hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL);
 	if (!hub->descriptor) {
 		ret = -ENOMEM;
 		goto fail;
@@ -1330,7 +1339,7 @@
 
 	/* Request the entire hub descriptor.
 	 * hub->descriptor can handle USB_MAXCHILDREN ports,
-	 * but the hub can/will return fewer bytes here.
+	 * but a (non-SS) hub can/will return fewer bytes here.
 	 */
 	ret = get_hub_descriptor(hdev, hub->descriptor);
 	if (ret < 0) {
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index a903969..5133ab9 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -333,7 +333,7 @@
 	if (!urb || !urb->complete)
 		return -EINVAL;
 	if (urb->hcpriv) {
-		WARN_ONCE(1, "URB %p submitted while active\n", urb);
+		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
 		return -EBUSY;
 	}
 
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 62574bf..026ff6c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -990,11 +990,16 @@
 	}
 
 	/* always enable Continue on Short Packet */
-	trb->ctrl |= DWC3_TRB_CTRL_CSP;
+	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
+		trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+		if (req->request.short_not_ok)
+			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+	}
 
 	if ((!req->request.no_interrupt && !chain) ||
 			(dwc3_calc_trbs_left(dep) == 0))
-		trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
+		trb->ctrl |= DWC3_TRB_CTRL_IOC;
 
 	if (chain)
 		trb->ctrl |= DWC3_TRB_CTRL_CHN;
@@ -3419,13 +3424,13 @@
 	trace_dwc3_event(event->raw);
 	/* skip event processing in absence of vbus */
 	if (!dwc->vbus_active) {
-		dev_err(dwc->dev, "SKIP EVT:%x", event->raw);
+		dbg_event(0xFF, "SKIP_EVT", event->raw);
 		return;
 	}
 
 	/* If run/stop is cleared don't process any more events */
 	if (!dwc->pullups_connected) {
-		dev_err(dwc->dev, "SKIP_EVT_PULLUP:%x", event->raw);
+		dbg_event(0xFF, "SKIP_EVT_PULLUP", event->raw);
 		return;
 	}
 
@@ -3549,6 +3554,15 @@
 
 	evt = dwc->ev_buf;
 
+	/*
+	 * With PCIe legacy interrupt, test shows that top-half irq handler can
+	 * be called again after HW interrupt deassertion. Check if bottom-half
+	 * irq event handler completes before caching new event to prevent
+	 * losing events.
+	 */
+	if (evt->flags & DWC3_EVENT_PENDING)
+		return IRQ_HANDLED;
+
 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
 	count &= DWC3_GEVNTCOUNT_MASK;
 	if (!count)
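As the new comment explains, a legacy PCIe interrupt can fire again before the threaded handler has drained the cached events, so the top half now returns early while DWC3_EVENT_PENDING is still set instead of overwriting the cached count. A tiny single-threaded model of that guard; the names are illustrative, not the controller's.

#include <stdio.h>

#define EVENT_PENDING	(1U << 0)

struct evt_buf {
	unsigned int flags;
	unsigned int count;	/* bytes cached for the bottom half */
};

/* Top half: cache new events unless the previous batch is still pending. */
static int top_half(struct evt_buf *evt, unsigned int hw_count)
{
	if (evt->flags & EVENT_PENDING)
		return 0;		/* handled; do not clobber the cache */

	evt->count = hw_count;
	evt->flags |= EVENT_PENDING;
	return 1;			/* wake the bottom half */
}

/* Bottom half: consume the cached events and clear the pending flag. */
static void bottom_half(struct evt_buf *evt)
{
	printf("processing %u bytes of events\n", evt->count);
	evt->count = 0;
	evt->flags &= ~EVENT_PENDING;
}

int main(void)
{
	struct evt_buf evt = { 0, 0 };

	top_half(&evt, 16);	/* caches 16 bytes */
	top_half(&evt, 32);	/* spurious re-fire: ignored, 16 preserved */
	bottom_half(&evt);	/* prints 16, not 32 */
	return 0;
}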
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 86612ac..f6c7a27 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -231,7 +231,8 @@
 
 		/* Start up the I/O watchdog timer, if it's not running */
 		if (!timer_pending(&ohci->io_watchdog) &&
-				list_empty(&ohci->eds_in_use)) {
+				list_empty(&ohci->eds_in_use) &&
+				!(ohci->flags & OHCI_QUIRK_QEMU)) {
 			ohci->prev_frame_no = ohci_frame_no(ohci);
 			mod_timer(&ohci->io_watchdog,
 					jiffies + IO_WATCHDOG_DELAY);
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index bb15096..a84aebe 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -164,6 +164,15 @@
 	return 0;
 }
 
+static int ohci_quirk_qemu(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+	ohci->flags |= OHCI_QUIRK_QEMU;
+	ohci_dbg(ohci, "enabled qemu quirk\n");
+	return 0;
+}
+
 /* List of quirks for OHCI */
 static const struct pci_device_id ohci_pci_quirks[] = {
 	{
@@ -214,6 +223,13 @@
 		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
 		.driver_data = (unsigned long)ohci_quirk_amd700,
 	},
+	{
+		.vendor		= PCI_VENDOR_ID_APPLE,
+		.device		= 0x003f,
+		.subvendor	= PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+		.subdevice	= PCI_SUBDEVICE_ID_QEMU,
+		.driver_data	= (unsigned long)ohci_quirk_qemu,
+	},
 
 	/* FIXME for some of the early AMD 760 southbridges, OHCI
 	 * won't work at all.  blacklist them.
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 37f1725..a51b189 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -418,6 +418,7 @@
 #define	OHCI_QUIRK_AMD_PLL	0x200			/* AMD PLL quirk*/
 #define	OHCI_QUIRK_AMD_PREFETCH	0x400			/* pre-fetch for ISO transfer */
 #define	OHCI_QUIRK_GLOBAL_SUSPEND	0x800		/* must suspend ports */
+#define	OHCI_QUIRK_QEMU		0x1000			/* relax timing expectations */
 
 	// there are also chip quirks/bugs in init logic
 
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 40504c8..d680eb3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -56,7 +56,7 @@
 	}
 
 	if (max_packet) {
-		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+		seg->bounce_buf = kzalloc(max_packet, flags);
 		if (!seg->bounce_buf) {
 			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
 			kfree(seg);
@@ -1726,7 +1726,7 @@
 	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
-		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+		void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
 				flags);
 		if (!buf)
 			goto fail_sp5;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 93f566c..e7d6752 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -52,6 +52,7 @@
 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
 #define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8
+#define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -166,7 +167,8 @@
 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
-		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
+		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -175,7 +177,8 @@
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
-	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
 		xhci->quirks |= XHCI_MISSING_CAS;
 
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index a0bc61f..6cb5ab3 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -210,7 +210,7 @@
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
-		return -ENODEV;
+		return irq;
 
 	/*
 	 * sysdev must point to a device that is known to the system firmware
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 6ddd08a..efecb87 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -194,7 +194,7 @@
 
 	dev->in_ep = in_ep;
 
-	if (udev->descriptor.idVendor != ALEA_VENDOR_ID)
+	if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
 		dev->reads_started = 1;
 
 	dev->size = size;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 37c63cb..0ef29d2 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -554,7 +554,7 @@
 			info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice);
 
 			/* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */
-			info.speed = le16_to_cpu(dev->udev->speed);
+			info.speed = dev->udev->speed;
 			info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber;
 			info.report_size = dev->report_size;
 
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index c8fbe7b..c2e2b2e 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -317,9 +317,16 @@
 	int subminor;
 	int retval = 0;
 	struct usb_interface *interface;
-	struct tower_reset_reply reset_reply;
+	struct tower_reset_reply *reset_reply;
 	int result;
 
+	reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
+
+	if (!reset_reply) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	nonseekable_open(inode, file);
 	subminor = iminor(inode);
 
@@ -364,8 +371,8 @@
 				  USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
 				  0,
 				  0,
-				  &reset_reply,
-				  sizeof(reset_reply),
+				  reset_reply,
+				  sizeof(*reset_reply),
 				  1000);
 	if (result < 0) {
 		dev_err(&dev->udev->dev,
@@ -406,6 +413,7 @@
 	mutex_unlock(&dev->lock);
 
 exit:
+	kfree(reset_reply);
 	return retval;
 }
 
@@ -808,7 +816,7 @@
 	struct lego_usb_tower *dev = NULL;
 	struct usb_host_interface *iface_desc;
 	struct usb_endpoint_descriptor* endpoint;
-	struct tower_get_version_reply get_version_reply;
+	struct tower_get_version_reply *get_version_reply = NULL;
 	int i;
 	int retval = -ENOMEM;
 	int result;
@@ -886,6 +894,13 @@
 	dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
 	dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
 
+	get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
+
+	if (!get_version_reply) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
 	/* get the firmware version and log it */
 	result = usb_control_msg (udev,
 				  usb_rcvctrlpipe(udev, 0),
@@ -893,18 +908,19 @@
 				  USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
 				  0,
 				  0,
-				  &get_version_reply,
-				  sizeof(get_version_reply),
+				  get_version_reply,
+				  sizeof(*get_version_reply),
 				  1000);
 	if (result < 0) {
 		dev_err(idev, "LEGO USB Tower get version control request failed\n");
 		retval = result;
 		goto error;
 	}
-	dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
-		 "build %d\n", get_version_reply.major,
-		 get_version_reply.minor,
-		 le16_to_cpu(get_version_reply.build_no));
+	dev_info(&interface->dev,
+		 "LEGO USB Tower firmware version is %d.%d build %d\n",
+		 get_version_reply->major,
+		 get_version_reply->minor,
+		 le16_to_cpu(get_version_reply->build_no));
 
 	/* we can register the device now, as it is ready */
 	usb_set_intfdata (interface, dev);
@@ -925,9 +941,11 @@
 		 USB_MAJOR, dev->minor);
 
 exit:
+	kfree(get_version_reply);
 	return retval;
 
 error:
+	kfree(get_version_reply);
 	tower_delete(dev);
 	return retval;
 }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8064514..99beda9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2780,10 +2780,11 @@
 	int ret;
 	struct usb_hcd *hcd = musb->hcd;
 
-	MUSB_HST_MODE(musb);
-	musb->xceiv->otg->default_a = 1;
-	musb->xceiv->otg->state = OTG_STATE_A_IDLE;
-
+	if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+		MUSB_HST_MODE(musb);
+		musb->xceiv->otg->default_a = 1;
+		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+	}
 	otg_set_host(musb->xceiv->otg, &hcd->self);
 	hcd->self.otg_port = 1;
 	musb->xceiv->otg->host = &hcd->self;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e6959cc..4047426 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -220,6 +220,7 @@
 	u32				dma_remaining;
 	int				src_burst, dst_burst;
 	u16				csr;
+	u32				psize;
 	int				ch;
 	s8				dmareq;
 	s8				sync_dev;
@@ -391,15 +392,19 @@
 
 	if (chdat->tx) {
 		/* Send transfer_packet_sz packets at a time */
-		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
-			chdat->transfer_packet_sz);
+		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+		psize &= ~0x7ff;
+		psize |= chdat->transfer_packet_sz;
+		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
 
 		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
 			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
 	} else {
 		/* Receive transfer_packet_sz packets at a time */
-		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
-			chdat->transfer_packet_sz << 16);
+		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+		psize &= ~(0x7ff << 16);
+		psize |= (chdat->transfer_packet_sz << 16);
+		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
 
 		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
 			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1dc75db..1939496 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -809,10 +809,10 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
 	{ USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
-		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
-		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) },
 	{ USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
@@ -1508,9 +1508,9 @@
 					(new_serial.flags & ASYNC_FLAGS));
 	priv->custom_divisor = new_serial.custom_divisor;
 
+check_and_exit:
 	write_latency_timer(port);
 
-check_and_exit:
 	if ((old_priv.flags & ASYNC_SPD_MASK) !=
 	     (priv->flags & ASYNC_SPD_MASK)) {
 		if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 71fb9e5..4fcf1ce 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -882,6 +882,8 @@
 /* Olimex */
 #define OLIMEX_VID			0x15BA
 #define OLIMEX_ARM_USB_OCD_PID		0x0003
+#define OLIMEX_ARM_USB_TINY_PID	0x0004
+#define OLIMEX_ARM_USB_TINY_H_PID	0x002a
 #define OLIMEX_ARM_USB_OCD_H_PID	0x002b
 
 /*
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index f1a8fdc..e98532f 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2349,8 +2349,11 @@
 	if (!baud) {
 		/* pick a default, any default... */
 		baud = 9600;
-	} else
+	} else {
+		/* Avoid a zero divisor. */
+		baud = min(baud, 461550);
 		tty_encode_baud_rate(tty, baud, baud);
+	}
 
 	edge_port->baud_rate = baud;
 	config->wBaudRate = (__u16)((461550L + baud/2) / baud);
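wBaudRate is (461550 + baud/2) / baud in integer arithmetic, which truncates to zero once baud climbs a little past 923100, and a later calculation divides by wBaudRate, hence the reported division by zero; clamping baud to 461550 keeps the result at 1 or more. A stand-alone check of the numbers.

#include <stdio.h>

static unsigned int wbaudrate(unsigned int baud)
{
	return (461550 + baud / 2) / baud;	/* integer division */
}

int main(void)
{
	unsigned int baud = 3000000;

	printf("unclamped: baud=%u -> wBaudRate=%u\n", baud, wbaudrate(baud));
	if (baud > 461550)
		baud = 461550;		/* baud = min(baud, 461550) */
	printf("clamped:   baud=%u -> wBaudRate=%u\n", baud, wbaudrate(baud));
	return 0;
}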
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index edbc81f..70f346f 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -189,7 +189,7 @@
 		return -ENOMEM;
 
 	divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
-	put_unaligned_le32(cpu_to_le32(divisor), buf);
+	put_unaligned_le32(divisor, buf);
 	rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 				MCT_U232_SET_BAUD_RATE_REQUEST,
 				MCT_U232_SET_REQUEST_TYPE,
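put_unaligned_le32() already converts from CPU order to little-endian as it stores, so wrapping the divisor in cpu_to_le32() as well swaps it twice on big-endian machines and sends a scrambled value; on little-endian machines both spellings coincide, which is how the bug went unnoticed. A stand-alone demonstration of the single-conversion rule, with a local byte swap standing in for what cpu_to_le32() does on a big-endian host.

#include <stdio.h>
#include <stdint.h>

/* Store a host-order value as little-endian bytes (put_unaligned_le32()). */
static void put_le32(uint32_t val, uint8_t *buf)
{
	buf[0] = val & 0xff;
	buf[1] = (val >> 8) & 0xff;
	buf[2] = (val >> 16) & 0xff;
	buf[3] = (val >> 24) & 0xff;
}

/* Byte swap, standing in for cpu_to_le32() on a big-endian host. */
static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

int main(void)
{
	uint8_t once[4], twice[4];
	uint32_t divisor = 0x0001c200;		/* example value: 115200 */

	put_le32(divisor, once);		/* correct: converted once */
	put_le32(bswap32(divisor), twice);	/* double swap: wrong bytes */

	printf("once:  %02x %02x %02x %02x\n",
	       once[0], once[1], once[2], once[3]);
	printf("twice: %02x %02x %02x %02x\n",
	       twice[0], twice[1], twice[2], twice[3]);
	return 0;
}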
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index af67a0d..3bf61ac 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -281,6 +281,7 @@
 #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
 #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
 #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+#define TELIT_PRODUCT_ME910			0x1100
 #define TELIT_PRODUCT_LE920			0x1200
 #define TELIT_PRODUCT_LE910			0x1201
 #define TELIT_PRODUCT_LE910_USBCFG4		0x1206
@@ -640,6 +641,11 @@
 	.reserved = BIT(5) | BIT(6),
 };
 
+static const struct option_blacklist_info telit_me910_blacklist = {
+	.sendsetup = BIT(0),
+	.reserved = BIT(1) | BIT(3),
+};
+
 static const struct option_blacklist_info telit_le910_blacklist = {
 	.sendsetup = BIT(0),
 	.reserved = BIT(1) | BIT(2),
@@ -1235,6 +1241,8 @@
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+		.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 38b3f0d..fd509ed6c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@
 	{DEVICE_SWI(0x1199, 0x9071)},	/* Sierra Wireless MC74xx */
 	{DEVICE_SWI(0x1199, 0x9078)},	/* Sierra Wireless EM74xx */
 	{DEVICE_SWI(0x1199, 0x9079)},	/* Sierra Wireless EM74xx */
+	{DEVICE_SWI(0x1199, 0x907a)},	/* Sierra Wireless EM74xx QDL */
+	{DEVICE_SWI(0x1199, 0x907b)},	/* Sierra Wireless EM74xx */
 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 02bdaa9..4340b49 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -446,6 +446,10 @@
 #define SD_BLOCK_LEN  9
 
 struct ene_ub6250_info {
+
+	/* I/O bounce buffer */
+	u8		*bbuf;
+
 	/* for 6250 code */
 	struct SD_STATUS	SD_Status;
 	struct MS_STATUS	MS_Status;
@@ -493,8 +497,11 @@
 
 static void ene_ub6250_info_destructor(void *extra)
 {
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra;
+
 	if (!extra)
 		return;
+	kfree(info->bbuf);
 }
 
 static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
@@ -860,8 +867,9 @@
 		u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 	int result;
-	u8 ExtBuf[4];
 	u32 bn = PhyBlockAddr * 0x20 + PageNum;
 
 	result = ene_load_bincode(us, MS_RW_PATTERN);
@@ -901,7 +909,7 @@
 	bcb->CDB[2]     = (unsigned char)(PhyBlockAddr>>16);
 	bcb->CDB[6]     = 0x01;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
@@ -910,9 +918,9 @@
 	ExtraDat->status0  = 0x10;  /* Not yet,fireware support */
 
 	ExtraDat->status1  = 0x00;  /* Not yet,fireware support */
-	ExtraDat->ovrflg   = ExtBuf[0];
-	ExtraDat->mngflg   = ExtBuf[1];
-	ExtraDat->logadr   = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+	ExtraDat->ovrflg   = bbuf[0];
+	ExtraDat->mngflg   = bbuf[1];
+	ExtraDat->logadr   = memstick_logaddr(bbuf[2], bbuf[3]);
 
 	return USB_STOR_TRANSPORT_GOOD;
 }
@@ -1332,8 +1340,9 @@
 				u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 	int result;
-	u8 ExtBuf[4];
 
 	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -1347,7 +1356,7 @@
 	bcb->CDB[2]     = (unsigned char)(PhyBlock>>16);
 	bcb->CDB[6]     = 0x01;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
@@ -1355,9 +1364,9 @@
 	ExtraDat->intr     = 0x80;  /* Not yet, waiting for fireware support */
 	ExtraDat->status0  = 0x10;  /* Not yet, waiting for fireware support */
 	ExtraDat->status1  = 0x00;  /* Not yet, waiting for fireware support */
-	ExtraDat->ovrflg   = ExtBuf[0];
-	ExtraDat->mngflg   = ExtBuf[1];
-	ExtraDat->logadr   = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+	ExtraDat->ovrflg   = bbuf[0];
+	ExtraDat->mngflg   = bbuf[1];
+	ExtraDat->logadr   = memstick_logaddr(bbuf[2], bbuf[3]);
 
 	return USB_STOR_TRANSPORT_GOOD;
 }
@@ -1558,9 +1567,9 @@
 	u16 PhyBlock, newblk, i;
 	u16 LogStart, LogEnde;
 	struct ms_lib_type_extdat extdat;
-	u8 buf[0x200];
 	u32 count = 0, index = 0;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 
 	for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
 		ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);
@@ -1574,14 +1583,16 @@
 			}
 
 			if (count == PhyBlock) {
-				ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf);
+				ms_lib_read_extrablock(us, PhyBlock, 0, 0x80,
+						bbuf);
 				count += 0x80;
 			}
 			index = (PhyBlock % 0x80) * 4;
 
-			extdat.ovrflg = buf[index];
-			extdat.mngflg = buf[index+1];
-			extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]);
+			extdat.ovrflg = bbuf[index];
+			extdat.mngflg = bbuf[index+1];
+			extdat.logadr = memstick_logaddr(bbuf[index+2],
+					bbuf[index+3]);
 
 			if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
 				ms_lib_setacquired_errorblock(us, PhyBlock);
@@ -2064,9 +2075,9 @@
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int result;
-	u8 buf[0x200];
 	u16 MSP_BlockSize, MSP_UserAreaBlocks;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 
 	printk(KERN_INFO "transport --- ENE_MSInit\n");
 
@@ -2085,13 +2096,13 @@
 	bcb->CDB[0]     = 0xF1;
 	bcb->CDB[1]     = 0x01;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD) {
 		printk(KERN_ERR "Execution MS Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 	/* the same part to test ENE */
-	info->MS_Status = *(struct MS_STATUS *)&buf[0];
+	info->MS_Status = *(struct MS_STATUS *) bbuf;
 
 	if (info->MS_Status.Insert && info->MS_Status.Ready) {
 		printk(KERN_INFO "Insert     = %x\n", info->MS_Status.Insert);
@@ -2100,15 +2111,15 @@
 		printk(KERN_INFO "IsMSPHG    = %x\n", info->MS_Status.IsMSPHG);
 		printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
 		if (info->MS_Status.IsMSPro) {
-			MSP_BlockSize      = (buf[6] << 8) | buf[7];
-			MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
+			MSP_BlockSize      = (bbuf[6] << 8) | bbuf[7];
+			MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
 			info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
 		} else {
 			ms_card_init(us); /* Card is MS (to ms.c)*/
 		}
 		usb_stor_dbg(us, "MS Init Code OK !!\n");
 	} else {
-		usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]);
+		usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
@@ -2118,9 +2129,9 @@
 static int ene_sd_init(struct us_data *us)
 {
 	int result;
-	u8  buf[0x200];
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 
 	usb_stor_dbg(us, "transport --- ENE_SDInit\n");
 	/* SD Init Part-1 */
@@ -2154,17 +2165,17 @@
 	bcb->Flags              = US_BULK_FLAG_IN;
 	bcb->CDB[0]             = 0xF1;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD) {
 		usb_stor_dbg(us, "Execution SD Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	info->SD_Status =  *(struct SD_STATUS *)&buf[0];
+	info->SD_Status =  *(struct SD_STATUS *) bbuf;
 	if (info->SD_Status.Insert && info->SD_Status.Ready) {
 		struct SD_STATUS *s = &info->SD_Status;
 
-		ene_get_card_status(us, (unsigned char *)&buf);
+		ene_get_card_status(us, bbuf);
 		usb_stor_dbg(us, "Insert     = %x\n", s->Insert);
 		usb_stor_dbg(us, "Ready      = %x\n", s->Ready);
 		usb_stor_dbg(us, "IsMMC      = %x\n", s->IsMMC);
@@ -2172,7 +2183,7 @@
 		usb_stor_dbg(us, "HiSpeed    = %x\n", s->HiSpeed);
 		usb_stor_dbg(us, "WtP        = %x\n", s->WtP);
 	} else {
-		usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]);
+		usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 	return USB_STOR_TRANSPORT_GOOD;
@@ -2182,13 +2193,15 @@
 static int ene_init(struct us_data *us)
 {
 	int result;
-	u8  misc_reg03 = 0;
+	u8  misc_reg03;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
+	u8 *bbuf = info->bbuf;
 
-	result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
+	result = ene_get_card_type(us, REG_CARD_STATUS, bbuf);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
+	misc_reg03 = bbuf[0];
 	if (misc_reg03 & 0x01) {
 		if (!info->SD_Status.Ready) {
 			result = ene_sd_init(us);
@@ -2305,8 +2318,9 @@
 			 const struct usb_device_id *id)
 {
 	int result;
-	u8  misc_reg03 = 0;
+	u8  misc_reg03;
 	struct us_data *us;
+	struct ene_ub6250_info *info;
 
 	result = usb_stor_probe1(&us, intf, id,
 		   (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list,
@@ -2315,11 +2329,16 @@
 		return result;
 
 	/* FIXME: where should the code alloc extra buf ? */
-	if (!us->extra) {
-		us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
-		if (!us->extra)
-			return -ENOMEM;
-		us->extra_destructor = ene_ub6250_info_destructor;
+	us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
+	if (!us->extra)
+		return -ENOMEM;
+	us->extra_destructor = ene_ub6250_info_destructor;
+
+	info = (struct ene_ub6250_info *)(us->extra);
+	info->bbuf = kmalloc(512, GFP_KERNEL);
+	if (!info->bbuf) {
+		kfree(us->extra);
+		return -ENOMEM;
 	}
 
 	us->transport_name = "ene_ub6250";
@@ -2331,12 +2350,13 @@
 		return result;
 
 	/* probe card type */
-	result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
+	result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf);
 	if (result != USB_STOR_XFER_GOOD) {
 		usb_stor_disconnect(intf);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
+	misc_reg03 = info->bbuf[0];
 	if (!(misc_reg03 & 0x01)) {
 		pr_info("ums_eneub6250: This driver only supports SD/MS cards. "
 			"It does not support SM cards.\n");
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 6345e85..a50cf45 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -341,6 +341,7 @@
 static
 int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
 {
+	struct usb_device *udev = interface_to_usbdev(iface);
 	struct i1480_usb *i1480_usb;
 	struct i1480 *i1480;
 	struct device *dev = &iface->dev;
@@ -352,8 +353,8 @@
 			iface->cur_altsetting->desc.bInterfaceNumber);
 		goto error;
 	}
-	if (iface->num_altsetting > 1
-	    && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
+	if (iface->num_altsetting > 1 &&
+			le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
 		/* Need altsetting #1 [HW QUIRK] or EP1 won't work */
 		result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
 		if (result < 0)
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 1626892..1cf907e 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -533,6 +533,10 @@
 		return "4:3";
 	case HDMI_PICTURE_ASPECT_16_9:
 		return "16:9";
+	case HDMI_PICTURE_ASPECT_64_27:
+		return "64:27";
+	case HDMI_PICTURE_ASPECT_256_135:
+		return "256:135";
 	case HDMI_PICTURE_ASPECT_RESERVED:
 		return "Reserved";
 	}
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 99ebf6e..5615f40 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -630,6 +630,9 @@
 		return -ENODEV;
 	}
 
+	if (iface_desc->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	/* check out the endpoint: it has to be Interrupt & IN */
 	endpoint = &iface_desc->endpoint[0].desc;
 
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 80bb956..d1bbdc9 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -300,7 +300,7 @@
 	} else {
 		memset(buf, 0, 8);
 	}
-	memcpy(buf + 8, iname->name + iname->len - 16, 16);
+	memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
 	oname->name[0] = '_';
 	oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
 	return 0;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index c4a389a..423a21c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1255,9 +1255,9 @@
 	if (unlikely(!name)) {
 		if (fname->usr_fname->name[0] == '_') {
 			int ret;
-			if (de->name_len < 16)
+			if (de->name_len <= 32)
 				return 0;
-			ret = memcmp(de->name + de->name_len - 16,
+			ret = memcmp(de->name + ((de->name_len - 17) & ~15),
 				     fname->crypto_buf.name + 8, 16);
 			return (ret == 0) ? 1 : 0;
 		}
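(len - 17) & ~15 rounds len - 17 down to a multiple of 16, so the digested form of a long encrypted name embeds a fixed, 16-byte-aligned slice of the ciphertext instead of simply its last 16 bytes. fs/crypto (which builds the user-visible name), ext4 above and f2fs below (which compare directory entries against it) must all compute the same offset, and the <= 32 / > 32 checks reflect that names whose ciphertext fits in 32 bytes are shown in full rather than digested. A small table of the resulting offsets; the helper is illustrative only.

#include <stdio.h>

/* Offset of the 16-byte ciphertext slice embedded in a digested name. */
static unsigned int digest_offset(unsigned int len)
{
	return (len - 17) & ~15u;
}

int main(void)
{
	unsigned int lens[] = { 33, 40, 48, 49, 64, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("ciphertext len %3u -> bytes [%2u..%2u]\n", lens[i],
		       digest_offset(lens[i]), digest_offset(lens[i]) + 15);
	return 0;
}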
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index ebdc90f..11f3717 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -130,19 +130,29 @@
 			continue;
 		}
 
-		/* encrypted case */
+		if (de->hash_code != namehash)
+			goto not_match;
+
 		de_name.name = d->filename[bit_pos];
 		de_name.len = le16_to_cpu(de->name_len);
 
-		/* show encrypted name */
-		if (fname->hash) {
-			if (de->hash_code == fname->hash)
-				goto found;
-		} else if (de_name.len == name->len &&
-			de->hash_code == namehash &&
-			!memcmp(de_name.name, name->name, name->len))
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+		if (unlikely(!name->name)) {
+			if (fname->usr_fname->name[0] == '_') {
+				if (de_name.len > 32 &&
+					!memcmp(de_name.name + ((de_name.len - 17) & ~15),
+						fname->crypto_buf.name + 8, 16))
+					goto found;
+				goto not_match;
+			}
+			name->name = fname->crypto_buf.name;
+			name->len = fname->crypto_buf.len;
+		}
+#endif
+		if (de_name.len == name->len &&
+				!memcmp(de_name.name, name->name, name->len))
 			goto found;
-
+not_match:
 		if (max_slots && max_len > *max_slots)
 			*max_slots = max_len;
 		max_len = 0;
@@ -170,12 +180,7 @@
 	struct f2fs_dir_entry *de = NULL;
 	bool room = false;
 	int max_slots;
-	f2fs_hash_t namehash;
-
-	if(fname->hash)
-		namehash = cpu_to_le32(fname->hash);
-	else
-		namehash = f2fs_dentry_hash(&name);
+	f2fs_hash_t namehash = f2fs_dentry_hash(&name, fname);
 
 	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
 	nblock = bucket_blocks(level);
@@ -539,7 +544,7 @@
 
 	level = 0;
 	slots = GET_DENTRY_SLOTS(new_name->len);
-	dentry_hash = f2fs_dentry_hash(new_name);
+	dentry_hash = f2fs_dentry_hash(new_name, NULL);
 
 	current_depth = F2FS_I(dir)->i_current_depth;
 	if (F2FS_I(dir)->chash == dentry_hash) {
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 3a1640b..c12f695 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2016,7 +2016,8 @@
 /*
  * hash.c
  */
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+				struct fscrypt_name *fname);
 
 /*
  * node.c
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index 71b7206..eb2e031 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -70,7 +70,8 @@
 		*buf++ = pad;
 }
 
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+				struct fscrypt_name *fname)
 {
 	__u32 hash;
 	f2fs_hash_t f2fs_hash;
@@ -79,6 +80,10 @@
 	const unsigned char *name = name_info->name;
 	size_t len = name_info->len;
 
+	/* encrypted bigname case */
+	if (fname && !fname->disk_name.name)
+		return cpu_to_le32(fname->hash);
+
 	if (is_dot_dotdot(name_info))
 		return 0;
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 1427db9..e14edc9 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -312,7 +312,7 @@
 		return NULL;
 	}
 
-	namehash = f2fs_dentry_hash(&name);
+	namehash = f2fs_dentry_hash(&name, fname);
 
 	inline_dentry = inline_data_addr(ipage);
 
@@ -549,7 +549,7 @@
 
 	f2fs_wait_on_page_writeback(ipage, NODE, true);
 
-	name_hash = f2fs_dentry_hash(new_name);
+	name_hash = f2fs_dentry_hash(new_name, NULL);
 	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
 	f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4e894d3..fc9b049 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2385,8 +2385,10 @@
 		if (status != 0)
 			return status;
 	}
-	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
+	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
+		nfs4_sequence_free_slot(&o_res->seq_res);
 		nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
+	}
 	return 0;
 }
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 965db47..142a74f 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,13 +29,14 @@
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;
 
-static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
+static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount,
+					gfp_t gfp_flags)
 {
 	p->npages = pagecount;
 	if (pagecount <= ARRAY_SIZE(p->page_array))
 		p->pagevec = p->page_array;
 	else {
-		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+		p->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
 		if (!p->pagevec)
 			p->npages = 0;
 	}
@@ -681,6 +682,7 @@
 {
 	struct nfs_pgio_mirror *new;
 	int i;
+	gfp_t gfp_flags = GFP_KERNEL;
 
 	desc->pg_moreio = 0;
 	desc->pg_inode = inode;
@@ -700,8 +702,10 @@
 	if (pg_ops->pg_get_mirror_count) {
 		/* until we have a request, we don't have an lseg and no
 		 * idea how many mirrors there will be */
+		if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
+			gfp_flags = GFP_NOIO;
 		new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
-			      sizeof(struct nfs_pgio_mirror), GFP_KERNEL);
+			      sizeof(struct nfs_pgio_mirror), gfp_flags);
 		desc->pg_mirrors_dynamic = new;
 		desc->pg_mirrors = new;
 
@@ -755,9 +759,12 @@
 	struct list_head *head = &mirror->pg_list;
 	struct nfs_commit_info cinfo;
 	unsigned int pagecount, pageused;
+	gfp_t gfp_flags = GFP_KERNEL;
 
 	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
-	if (!nfs_pgarray_set(&hdr->page_array, pagecount)) {
+	if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
+		gfp_flags = GFP_NOIO;
+	if (!nfs_pgarray_set(&hdr->page_array, pagecount, gfp_flags)) {
 		nfs_pgio_error(hdr);
 		desc->pg_error = -ENOMEM;
 		return desc->pg_error;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5321183..e4772a8 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -548,9 +548,9 @@
 {
 	nfs_unlock_request(req);
 	nfs_end_page_writeback(req);
-	nfs_release_request(req);
 	generic_error_remove_page(page_file_mapping(req->wb_page),
 				  req->wb_page);
+	nfs_release_request(req);
 }
 
 /*
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index abb09b5..650226f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1273,7 +1273,8 @@
 		return NULL;
 	}
 
-	if (!(exp->ex_layout_types & (1 << layout_type))) {
+	if (layout_type >= LAYOUT_TYPE_MAX ||
+	    !(exp->ex_layout_types & (1 << layout_type))) {
 		dprintk("%s: layout type %d not supported\n",
 			__func__, layout_type);
 		return NULL;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c2d2895..2ee80e1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -4081,8 +4081,7 @@
 		struct nfsd4_getdeviceinfo *gdev)
 {
 	struct xdr_stream *xdr = &resp->xdr;
-	const struct nfsd4_layout_ops *ops =
-		nfsd4_layout_ops[gdev->gd_layout_type];
+	const struct nfsd4_layout_ops *ops;
 	u32 starting_len = xdr->buf->len, needed_len;
 	__be32 *p;
 
@@ -4099,6 +4098,7 @@
 
 	/* If maxcount is 0 then just update notifications */
 	if (gdev->gd_maxcount != 0) {
+		ops = nfsd4_layout_ops[gdev->gd_layout_type];
 		nfserr = ops->encode_getdeviceinfo(xdr, gdev);
 		if (nfserr) {
 			/*
@@ -4151,8 +4151,7 @@
 		struct nfsd4_layoutget *lgp)
 {
 	struct xdr_stream *xdr = &resp->xdr;
-	const struct nfsd4_layout_ops *ops =
-		nfsd4_layout_ops[lgp->lg_layout_type];
+	const struct nfsd4_layout_ops *ops;
 	__be32 *p;
 
 	dprintk("%s: err %d\n", __func__, nfserr);
@@ -4175,6 +4174,7 @@
 	*p++ = cpu_to_be32(lgp->lg_seg.iomode);
 	*p++ = cpu_to_be32(lgp->lg_layout_type);
 
+	ops = nfsd4_layout_ops[lgp->lg_layout_type];
 	nfserr = ops->encode_layoutget(xdr, lgp);
 out:
 	kfree(lgp->lg_content);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 7f99c96..cef9885 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -294,27 +294,37 @@
 		}
 
 		ret = copy_event_to_user(group, kevent, buf);
+		if (unlikely(ret == -EOPENSTALE)) {
+			/*
+			 * We cannot report events with stale fd so drop it.
+			 * Setting ret to 0 will continue the event loop and
+			 * do the right thing if there are no more events to
+			 * read (i.e. return bytes read, -EAGAIN or wait).
+			 */
+			ret = 0;
+		}
+
 		/*
 		 * Permission events get queued to wait for response.  Other
 		 * events can be destroyed now.
 		 */
 		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
 			fsnotify_destroy_event(group, kevent);
-			if (ret < 0)
-				break;
 		} else {
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-			if (ret < 0) {
+			if (ret <= 0) {
 				FANOTIFY_PE(kevent)->response = FAN_DENY;
 				wake_up(&group->fanotify_data.access_waitq);
-				break;
+			} else {
+				spin_lock(&group->notification_lock);
+				list_add_tail(&kevent->list,
+					&group->fanotify_data.access_list);
+				spin_unlock(&group->notification_lock);
 			}
-			spin_lock(&group->notification_lock);
-			list_add_tail(&kevent->list,
-				      &group->fanotify_data.access_list);
-			spin_unlock(&group->notification_lock);
 #endif
 		}
+		if (ret < 0)
+			break;
 		buf += ret;
 		count -= ret;
 	}
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5f2dc20..6047471 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -471,6 +471,7 @@
 		ent->data = NULL;
 		ent->proc_fops = NULL;
 		ent->proc_iops = NULL;
+		parent->nlink++;
 		if (proc_register(parent, ent) < 0) {
 			kfree(ent);
 			parent->nlink--;
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index a231681..e9426a6 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -109,14 +109,16 @@
 		goto out;
 
 	/* If our top's inode is gone, we may be out of date */
-	inode = d_inode(dentry);
+	inode = igrab(d_inode(dentry));
 	if (inode) {
 		data = top_data_get(SDCARDFS_I(inode));
-		if (data->abandoned) {
+		if (!data || data->abandoned) {
 			d_drop(dentry);
 			err = 0;
 		}
-		data_put(data);
+		if (data)
+			data_put(data);
+		iput(inode);
 	}
 
 out:
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 9701f2d..a5696c1 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,6 +144,7 @@
 	struct drm_crtc *ptr;
 	struct drm_crtc_state *state;
 	struct drm_crtc_commit *commit;
+	s32 __user *out_fence_ptr;
 };
 
 struct __drm_connnectors_state {
@@ -316,6 +317,8 @@
 			      struct drm_crtc *crtc);
 void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
 				 struct drm_framebuffer *fb);
+void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+				    struct fence *fence);
 int __must_check
 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
 				  struct drm_crtc *crtc);
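
The new drm_atomic_set_fence_for_plane() helper is meant to be called from a driver's .prepare_fb hook so that plane_state->fence is never written directly. A minimal sketch of such a hook, assuming a hypothetical my_fb_get_resv() accessor for the framebuffer's reservation object (the drm_fb_cma_prepare_fb() helper declared further down plays this role for CMA drivers):

static int my_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *state)
{
	struct fence *fence;

	if (!state->fb)
		return 0;

	/* my_fb_get_resv() is a placeholder for however the driver reaches
	 * the buffer's reservation_object; the reference returned here is
	 * handed over to the atomic state. */
	fence = reservation_object_get_excl_rcu(my_fb_get_resv(state->fb));
	drm_atomic_set_fence_for_plane(state, fence);
	return 0;
}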
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ac9d7d8..1c12875 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -531,6 +531,20 @@
  * @audio_latency: audio latency info from ELD, if found
  * @null_edid_counter: track sinks that give us all zeros for the EDID
  * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @max_tmds_char: indicates the maximum TMDS Character Rate supported
+ * @scdc_present: when set the sink supports SCDC functionality
+ * @rr_capable: when set the sink is capable of initiating an SCDC read request
+ * @supports_scramble: when set the sink supports less than 340Mcsc scrambling
+ * @flags_3d: 3D view(s) supported by the sink, see drm_edid.h (DRM_EDID_3D_*)
+ * @pt_scan_info: PT scan info obtained from the VCDB of EDID
+ * @it_scan_info: IT scan info obtained from the VCDB of EDID
+ * @ce_scan_info: CE scan info obtained from the VCDB of EDID
+ * @hdr_eotf: Electro optical transfer function obtained from HDR block
+ * @hdr_metadata_type_one: Metadata type one obtained from HDR block
+ * @hdr_max_luminance: desired max luminance obtained from HDR block
+ * @hdr_avg_luminance: desired avg luminance obtained from HDR block
+ * @hdr_min_luminance: desired min luminance obtained from HDR block
+ * @hdr_supported: does the sink support HDR content
  * @edid_corrupt: indicates whether the last read EDID was corrupt
  * @debugfs_entry: debugfs directory for this connector
  * @state: current atomic state for this connector
@@ -665,6 +679,22 @@
 	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
 	unsigned bad_edid_counter;
 
+	/* EDID bits HDMI 2.0 */
+	int max_tmds_char;	/* in Mcsc */
+	bool scdc_present;
+	bool rr_capable;
+	bool supports_scramble;
+	int flags_3d;
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u32 hdr_eotf;
+	bool hdr_metadata_type_one;
+	u32 hdr_max_luminance;
+	u32 hdr_avg_luminance;
+	u32 hdr_min_luminance;
+	bool hdr_supported;
+
 	/* Flag for raw EDID header corruption - used in Displayport
 	 * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
 	 */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 0aa2925..f3d58c7 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -680,6 +680,35 @@
 	 * context.
 	 */
 	struct drm_modeset_acquire_ctx *acquire_ctx;
+
+	/**
+	 * @fence_context:
+	 *
+	 * timeline context used for fence operations.
+	 */
+	unsigned int fence_context;
+
+	/**
+	 * @fence_lock:
+	 *
+	 * spinlock to protect the fences in the fence_context.
+	 */
+
+	spinlock_t fence_lock;
+	/**
+	 * @fence_seqno:
+	 *
+	 * Seqno variable used as monotonic counter for the fences
+	 * created on the CRTC's timeline.
+	 */
+	unsigned long fence_seqno;
+
+	/**
+	 * @timeline_name:
+	 *
+	 * The name of the CRTC's fence timeline.
+	 */
+	char timeline_name[32];
 };
 
 /**
@@ -1160,6 +1189,17 @@
 	 */
 	struct drm_property *prop_fb_id;
 	/**
+	 * @prop_in_fence_fd: Sync File fd representing the incoming fences
+	 * for a Plane.
+	 */
+	struct drm_property *prop_in_fence_fd;
+	/**
+	 * @prop_out_fence_ptr: Sync File fd pointer representing the
+	 * outgoing fences for a CRTC. Userspace should provide a pointer to a
+	 * value of type s32, and then cast that pointer to u64.
+	 */
+	struct drm_property *prop_out_fence_ptr;
+	/**
 	 * @prop_crtc_id: Default atomic plane property to specify the
 	 * &drm_crtc.
 	 */
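
On the userspace side, the contract described for @prop_out_fence_ptr boils down to passing the address of an s32 as a u64 property value. A rough sketch using libdrm's atomic helpers, assuming req, drm_fd, crtc_id and an already looked-up out_fence_prop_id:

int32_t out_fence_fd = -1;
int ret;

/* the kernel writes a sync_file fd through this pointer on commit */
drmModeAtomicAddProperty(req, crtc_id, out_fence_prop_id,
			 (uint64_t)(uintptr_t)&out_fence_fd);
ret = drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
if (!ret && out_fence_fd >= 0) {
	/* wait on or hand off the fence, then close it */
	close(out_fence_fd);
}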
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c3a7d44..32bd104 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -269,6 +269,11 @@
 
 #define DRM_ELD_CEA_SAD(mnl, sad)	(20 + (mnl) + 3 * (sad))
 
+/* HDMI 2.0 */
+#define DRM_EDID_3D_INDEPENDENT_VIEW	(1 << 2)
+#define DRM_EDID_3D_DUAL_VIEW		(1 << 1)
+#define DRM_EDID_3D_OSD_DISPARITY	(1 << 0)
+
 struct edid {
 	u8 header[8];
 	/* Vendor & product info */
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index f313211..3b00f64 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -12,6 +12,8 @@
 struct drm_device;
 struct drm_file;
 struct drm_mode_fb_cmd2;
+struct drm_plane;
+struct drm_plane_state;
 
 struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
 	unsigned int preferred_bpp, unsigned int num_crtc,
@@ -41,6 +43,9 @@
 struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
 	unsigned int plane);
 
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+			  struct drm_plane_state *state);
+
 #ifdef CONFIG_DEBUG_FS
 struct seq_file;
 
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8b4dc62..952ef84 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -65,7 +65,7 @@
 
 	struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
 	struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
-	struct fence *fence;
+	struct fence *fence; /* do not write directly, use drm_atomic_set_fence_for_plane() */
 
 	/* Signed dest location allows it to be partially off screen */
 	int32_t crtc_x, crtc_y;
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
index 915ac08..e773848 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -83,19 +83,20 @@
 #define GCC_SPMI_FETCHER_CLK					65
 #define GCC_SPMI_FETCHER_CLK_SRC				66
 #define GCC_SYS_NOC_CPUSS_AHB_CLK				67
-#define GCC_USB30_MASTER_CLK					68
-#define GCC_USB30_MASTER_CLK_SRC				69
-#define GCC_USB30_MOCK_UTMI_CLK					70
-#define GCC_USB30_MOCK_UTMI_CLK_SRC				71
-#define GCC_USB30_SLEEP_CLK					72
-#define GCC_USB3_PRIM_CLKREF_CLK				73
-#define GCC_USB3_PHY_AUX_CLK					74
-#define GCC_USB3_PHY_AUX_CLK_SRC				75
-#define GCC_USB3_PHY_PIPE_CLK					76
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				77
-#define GCC_XO_DIV4_CLK						78
-#define GPLL0							79
-#define GPLL0_OUT_EVEN						80
+#define GCC_SYS_NOC_USB3_CLK					68
+#define GCC_USB30_MASTER_CLK					69
+#define GCC_USB30_MASTER_CLK_SRC				70
+#define GCC_USB30_MOCK_UTMI_CLK					71
+#define GCC_USB30_MOCK_UTMI_CLK_SRC				72
+#define GCC_USB30_SLEEP_CLK					73
+#define GCC_USB3_PRIM_CLKREF_CLK				74
+#define GCC_USB3_PHY_AUX_CLK					75
+#define GCC_USB3_PHY_AUX_CLK_SRC				76
+#define GCC_USB3_PHY_PIPE_CLK					77
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				78
+#define GCC_XO_DIV4_CLK						79
+#define GPLL0							80
+#define GPLL0_OUT_EVEN						81
 
 /* GDSCs */
 #define PCIE_GDSC						0
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
new file mode 100644
index 0000000..ec80d0c
--- /dev/null
+++ b/include/linux/bpf-cgroup.h
@@ -0,0 +1,79 @@
+#ifndef _BPF_CGROUP_H
+#define _BPF_CGROUP_H
+
+#include <linux/bpf.h>
+#include <linux/jump_label.h>
+#include <uapi/linux/bpf.h>
+
+struct sock;
+struct cgroup;
+struct sk_buff;
+
+#ifdef CONFIG_CGROUP_BPF
+
+extern struct static_key_false cgroup_bpf_enabled_key;
+#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+
+struct cgroup_bpf {
+	/*
+	 * Store two sets of bpf_prog pointers, one for programs that are
+	 * pinned directly to this cgroup, and one for those that are effective
+	 * when this cgroup is accessed.
+	 */
+	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
+	struct bpf_prog *effective[MAX_BPF_ATTACH_TYPE];
+};
+
+void cgroup_bpf_put(struct cgroup *cgrp);
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
+
+void __cgroup_bpf_update(struct cgroup *cgrp,
+			 struct cgroup *parent,
+			 struct bpf_prog *prog,
+			 enum bpf_attach_type type);
+
+/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
+void cgroup_bpf_update(struct cgroup *cgrp,
+		       struct bpf_prog *prog,
+		       enum bpf_attach_type type);
+
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type);
+
+/* Wrappers for __cgroup_bpf_run_filter() guarded by cgroup_bpf_enabled. */
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb)			\
+({									\
+	int __ret = 0;							\
+	if (cgroup_bpf_enabled)						\
+		__ret = __cgroup_bpf_run_filter(sk, skb,		\
+						BPF_CGROUP_INET_INGRESS); \
+									\
+	__ret;								\
+})
+
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb)				\
+({									\
+	int __ret = 0;							\
+	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		\
+		typeof(sk) __sk = sk_to_full_sk(sk);			\
+		if (sk_fullsock(__sk))					\
+			__ret = __cgroup_bpf_run_filter(__sk, skb,	\
+						BPF_CGROUP_INET_EGRESS); \
+	}								\
+	__ret;								\
+})
+
+#else
+
+struct cgroup_bpf {};
+static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
+				      struct cgroup *parent) {}
+
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
+
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif /* _BPF_CGROUP_H */
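
These wrappers are intended to be dropped into the protocol receive and transmit paths; the net/core/filter.c and net/ipv4/ip_output.c hunks later in this patch do exactly that. The call pattern, in sketch form:

	/* receive side: a non-zero return (-EPERM) means an attached
	 * cgroup program rejected the skb */
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	/* transmit side: the caller owns the skb and must free it */
	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}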
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 0f4548c..b008a33 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,6 +16,7 @@
 #include <linux/percpu-refcount.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
+#include <linux/bpf-cgroup.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -300,6 +301,9 @@
 	/* used to schedule release agent */
 	struct work_struct release_agent_work;
 
+	/* used to store eBPF programs */
+	struct cgroup_bpf bpf;
+
 	/* ids of the ancestors at each level including self */
 	int ancestor_ids[];
 };
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 8d7265f..fd9b89f 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -185,6 +185,16 @@
 void fence_free(struct fence *fence);
 
 /**
+ * fence_put - decreases refcount of the fence
+ * @fence:	[in]	fence to reduce refcount of
+ */
+static inline void fence_put(struct fence *fence)
+{
+	if (fence)
+		kref_put(&fence->refcount, fence_release);
+}
+
+/**
  * fence_get - increases refcount of the fence
  * @fence:	[in]	fence to increase refcount of
  *
@@ -212,13 +222,49 @@
 }
 
 /**
- * fence_put - decreases refcount of the fence
- * @fence:	[in]	fence to reduce refcount of
+ * fence_get_rcu_safe  - acquire a reference to an RCU tracked fence
+ * @fence:	[in]	pointer to fence to increase refcount of
+ *
+ * Function returns the fence on success, or NULL if no refcount could be
+ * obtained.
+ * This function handles acquiring a reference to a fence that may be
+ * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * so long as the caller is using RCU on the pointer to the fence.
+ *
+ * An alternative mechanism is to employ a seqlock to protect a bunch of
+ * fences, such as used by struct reservation_object. When using a seqlock,
+ * the seqlock must be taken before and checked after a reference to the
+ * fence is acquired (as shown here).
+ *
+ * The caller is required to hold the RCU read lock.
  */
-static inline void fence_put(struct fence *fence)
+static inline struct fence *fence_get_rcu_safe(struct fence * __rcu *fencep)
 {
-	if (fence)
-		kref_put(&fence->refcount, fence_release);
+	do {
+		struct fence *fence;
+
+		fence = rcu_dereference(*fencep);
+		if (!fence || !fence_get_rcu(fence))
+			return NULL;
+
+		/* The atomic_inc_not_zero() inside fence_get_rcu()
+		 * provides a full memory barrier upon success (such as now).
+		 * This is paired with the write barrier from assigning
+		 * to the __rcu protected fence pointer so that if that
+		 * pointer still matches the current fence, we know we
+		 * have successfully acquired a reference to it. If it no
+		 * longer matches, we are holding a reference to some other
+		 * reallocated pointer. This is possible if the allocator
+		 * is using a freelist like SLAB_DESTROY_BY_RCU where the
+		 * fence remains valid for the RCU grace period, but it
+		 * may be reallocated. When using such allocators, we are
+		 * responsible for ensuring the reference we get is to
+		 * the right fence, as below.
+		 */
+		if (fence == rcu_access_pointer(*fencep))
+			return rcu_pointer_handoff(fence);
+
+		fence_put(fence);
+	} while (1);
 }
 
 int fence_signal(struct fence *fence);
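
The comment above points at a seqlock-based alternative; the reservation_object code replaced later in this patch is the pattern it has in mind. In sketch form, for an object whose __rcu fence pointer is guarded by a seqcount named seq:

	struct fence *fence;
	unsigned int seq;

retry:
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	fence = rcu_dereference(obj->fence_excl);
	if (fence && !fence_get_rcu(fence))
		fence = NULL;
	rcu_read_unlock();
	/* the seqcount is checked after the reference was taken */
	if (fence && read_seqcount_retry(&obj->seq, seq)) {
		fence_put(fence);
		goto retry;
	}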
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index e974420..bc38b99a 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -35,6 +35,7 @@
 };
 
 #define HDMI_IEEE_OUI 0x000c03
+#define HDMI_IEEE_OUI_HF	0xc45dd8
 #define HDMI_INFOFRAME_HEADER_SIZE  4
 #define HDMI_AVI_INFOFRAME_SIZE    13
 #define HDMI_SPD_INFOFRAME_SIZE    25
@@ -78,6 +79,8 @@
 	HDMI_PICTURE_ASPECT_NONE,
 	HDMI_PICTURE_ASPECT_4_3,
 	HDMI_PICTURE_ASPECT_16_9,
+	HDMI_PICTURE_ASPECT_64_27,
+	HDMI_PICTURE_ASPECT_256_135,
 	HDMI_PICTURE_ASPECT_RESERVED,
 };
 
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index dd85f35..039e6ab 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -231,6 +231,8 @@
 	unsigned usage_id;
 	atomic_t data_ready;
 	atomic_t user_requested_state;
+	int poll_interval;
+	int raw_hystersis;
 	struct iio_trigger *trigger;
 	struct hid_sensor_hub_attribute_info poll;
 	struct hid_sensor_hub_attribute_info report_state;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8f68490..e233925 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -330,7 +330,9 @@
 					     int write, void __user *buffer,
 					     size_t *length, loff_t *ppos);
 #endif
-
+extern void wait_for_kprobe_optimizer(void);
+#else
+static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
diff --git a/include/linux/nfcinfo.h b/include/linux/nfcinfo.h
new file mode 100644
index 0000000..b67a65f
--- /dev/null
+++ b/include/linux/nfcinfo.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NFCINFO_H
+#define _NFCINFO_H
+
+#include <uapi/linux/nfc/nfcinfo.h>
+
+#endif
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index b0f305e..bad7710 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -177,17 +177,14 @@
 reservation_object_get_excl_rcu(struct reservation_object *obj)
 {
 	struct fence *fence;
-	unsigned seq;
-retry:
-	seq = read_seqcount_begin(&obj->seq);
+
+	if (!rcu_access_pointer(obj->fence_excl))
+		return NULL;
+
 	rcu_read_lock();
-	fence = rcu_dereference(obj->fence_excl);
-	if (read_seqcount_retry(&obj->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
-	fence = fence_get(fence);
+	fence = fence_get_rcu_safe(&obj->fence_excl);
 	rcu_read_unlock();
+
 	return fence;
 }
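
A typical caller of the rewritten helper, in sketch form: take the exclusive fence, wait on it, and drop the reference the helper handed back.

	struct fence *excl;

	excl = reservation_object_get_excl_rcu(obj);
	if (excl) {
		/* interruptible wait; the helper returned a reference */
		fence_wait(excl, true);
		fence_put(excl);
	}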
 
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index f3fa9e6..f921909 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -206,6 +206,23 @@
  */
 void sde_rsc_unregister_event(struct sde_rsc_event *event);
 
+/**
+ * is_sde_rsc_available - check if the display rsc is available.
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * Returns: true if rsc is available; false in all other cases
+ */
+bool is_sde_rsc_available(int rsc_index);
+
+/**
+ * get_sde_rsc_current_state - gets the current state of sde rsc.
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * Returns: current state if rsc is available; SDE_RSC_IDLE_STATE for
+ *          all other cases
+ */
+enum sde_rsc_state get_sde_rsc_current_state(int rsc_index);
+
 #else
 
 static inline struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index,
@@ -242,6 +259,15 @@
 {
 }
 
+static inline bool is_sde_rsc_available(int rsc_index)
+{
+	return false;
+}
+
+static inline enum sde_rsc_state get_sde_rsc_current_state(int rsc_index)
+{
+	return SDE_RSC_IDLE_STATE;
+}
 #endif /* CONFIG_DRM_SDE_RSC */
 
 #endif /* _SDE_RSC_H_ */
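
A minimal sketch of how a display client is expected to combine the two new queries (SDE_RSC_INDEX being the only valid index, per the comments above); with CONFIG_DRM_SDE_RSC disabled the stubs yield the same result:

	enum sde_rsc_state state = SDE_RSC_IDLE_STATE;

	if (is_sde_rsc_available(SDE_RSC_INDEX))
		state = get_sde_rsc_current_state(SDE_RSC_INDEX);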
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index a0596ca0..a2f8109 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -24,6 +24,7 @@
 void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 
+u64 sock_gen_cookie(struct sock *sk);
 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
 void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
 
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index 5478417..9a0a53e 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -56,6 +56,8 @@
  * @sysmon_shutdown_ret: Return value for the call to sysmon_send_shutdown
  * @system_debug: If "set", triggers a device restart when the
  * subsystem's wdog bite handler is invoked.
+ * @ignore_ssr_failure: SSR failures are usually fatal and result in a panic. If
+ * set, the failure is ignored instead.
  * @edge: GLINK logical name of the subsystem
  */
 struct subsys_desc {
@@ -91,6 +93,7 @@
 	u32 sysmon_pid;
 	int sysmon_shutdown_ret;
 	bool system_debug;
+	bool ignore_ssr_failure;
 	const char *edge;
 };
 
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index f66ba9c..9c927a5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -77,7 +77,8 @@
 #define  DRM_MODE_FLAG_3D_TOP_AND_BOTTOM	(7<<14)
 #define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF	(8<<14)
 #define  DRM_MODE_FLAG_SEAMLESS			(1<<19)
-
+#define  DRM_MODE_FLAG_SUPPORTS_RGB		(1<<20)
+#define  DRM_MODE_FLAG_SUPPORTS_YUV		(1<<21)
 
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 7cf7779..cd758c2 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -525,3 +525,4 @@
 header-y += msm_dsps.h
 header-y += msm-core-interface.h
 header-y += msm_rotator.h
+header-y += nfc/
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f09c70b..14eaf2d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -73,6 +73,8 @@
 	BPF_PROG_LOAD,
 	BPF_OBJ_PIN,
 	BPF_OBJ_GET,
+	BPF_PROG_ATTACH,
+	BPF_PROG_DETACH,
 };
 
 enum bpf_map_type {
@@ -96,8 +98,17 @@
 	BPF_PROG_TYPE_TRACEPOINT,
 	BPF_PROG_TYPE_XDP,
 	BPF_PROG_TYPE_PERF_EVENT,
+	BPF_PROG_TYPE_CGROUP_SKB,
 };
 
+enum bpf_attach_type {
+	BPF_CGROUP_INET_INGRESS,
+	BPF_CGROUP_INET_EGRESS,
+	__MAX_BPF_ATTACH_TYPE
+};
+
+#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -141,6 +152,12 @@
 		__aligned_u64	pathname;
 		__u32		bpf_fd;
 	};
+
+	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
+		__u32		target_fd;	/* container object to attach to */
+		__u32		attach_bpf_fd;	/* eBPF program to attach */
+		__u32		attach_type;
+	};
 } __attribute__((aligned(8)));
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -426,6 +443,67 @@
 	 */
 	BPF_FUNC_set_hash_invalid,
 
+	/**
+	 * int bpf_get_numa_node_id()
+	 *     Return: Id of current NUMA node.
+	 */
+	BPF_FUNC_get_numa_node_id,
+
+	/**
+	 * int bpf_skb_change_head()
+	 *     Grows headroom of skb and adjusts MAC header offset accordingly.
+	 *     Will extend/reallocate as required automatically.
+	 *     May change skb data pointer and will thus invalidate any check
+	 *     performed for direct packet access.
+	 *     @skb: pointer to skb
+	 *     @len: length of header to be pushed in front
+	 *     @flags: Flags (unused for now)
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_head,
+
+	/**
+	 * int bpf_xdp_adjust_head(xdp_md, delta)
+	 *     Adjust the xdp_md.data by delta
+	 *     @xdp_md: pointer to xdp_md
+	 *     @delta: A positive/negative integer to be added to xdp_md.data
+	 *     Return: 0 on success or negative on error
+	 */
+	BPF_FUNC_xdp_adjust_head,
+
+	/**
+	 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+	 *     Copy a NUL terminated string from unsafe address. In case the string
+	 *     length is smaller than size, the target is not padded with further NUL
+	 *     bytes. In case the string length is larger than size, just size-1
+	 *     bytes are copied and the last byte is set to NUL.
+	 *     @dst: destination address
+	 *     @size: maximum number of bytes to copy, including the trailing NUL
+	 *     @unsafe_ptr: unsafe address
+	 *     Return:
+	 *       > 0 length of the string including the trailing NUL on success
+	 *       < 0 error
+	 */
+	BPF_FUNC_probe_read_str,
+
+	/**
+	 * u64 bpf_get_socket_cookie(skb)
+	 *     Get the cookie for the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: 8-byte non-decreasing number on success or 0 if the socket
+	 *     field is missing inside sk_buff
+	 */
+	BPF_FUNC_get_socket_cookie,
+
+	/**
+	 * u32 bpf_get_socket_uid(skb)
+	 *     Get the owner uid of the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: uid of the socket owner on success or overflowuid if the
+	 *     socket pointer inside sk_buff is NULL or not a full socket
+	 */
+	BPF_FUNC_get_socket_uid,
+
 	__BPF_FUNC_MAX_ID,
 };
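
The two new socket helpers are reachable from the new BPF_PROG_TYPE_CGROUP_SKB programs (see the func_proto changes in net/core/filter.c below). A restricted-C sketch in the samples/bpf style, where helpers are declared as function pointers cast from their BPF_FUNC_* ids; the section name and build conventions are assumptions, illustrative only:

#include <uapi/linux/bpf.h>

static unsigned int (*bpf_get_socket_uid)(void *skb) =
	(void *) BPF_FUNC_get_socket_uid;

__attribute__((section("cgroup/skb"), used))
int allow_only_root(struct __sk_buff *skb)
{
	/* returning 0 makes __cgroup_bpf_run_filter() report -EPERM */
	return bpf_get_socket_uid(skb) == 0 ? 1 : 0;
}

char _license[] __attribute__((section("license"), used)) = "GPL";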
 
diff --git a/include/uapi/linux/nfc/Kbuild b/include/uapi/linux/nfc/Kbuild
new file mode 100644
index 0000000..9071015
--- /dev/null
+++ b/include/uapi/linux/nfc/Kbuild
@@ -0,0 +1,2 @@
+#UAPI export list
+header-y += nfcinfo.h
diff --git a/include/uapi/linux/nfc/nfcinfo.h b/include/uapi/linux/nfc/nfcinfo.h
new file mode 100644
index 0000000..df178e2
--- /dev/null
+++ b/include/uapi/linux/nfc/nfcinfo.h
@@ -0,0 +1,21 @@
+#ifndef _UAPI_NFCINFO_H_
+#define _UAPI_NFCINFO_H_
+
+#include <linux/ioctl.h>
+
+#define NFCC_MAGIC 0xE9
+#define NFCC_GET_INFO _IOW(NFCC_MAGIC, 0x09, unsigned int)
+
+struct nqx_devinfo {
+	unsigned char chip_type;
+	unsigned char rom_version;
+	unsigned char fw_major;
+	unsigned char fw_minor;
+};
+
+union nqx_uinfo {
+	unsigned int i;
+	struct nqx_devinfo info;
+};
+
+#endif
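
A userspace sketch of the new query; the device node name is an assumption (NQ NCI drivers typically expose something like /dev/nq-nci), and the union layout above suggests the driver returns the packed version word from the ioctl:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nfc/nfcinfo.h>

int main(void)
{
	union nqx_uinfo ver;
	int fd = open("/dev/nq-nci", O_RDONLY);	/* device node: assumption */

	if (fd < 0)
		return 1;

	ver.i = ioctl(fd, NFCC_GET_INFO, 0);
	printf("chip %u, rom %u, fw %u.%u\n", ver.info.chip_type,
	       ver.info.rom_version, ver.info.fw_major, ver.info.fw_minor);

	close(fd);
	return 0;
}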
diff --git a/init/Kconfig b/init/Kconfig
index bdfcc0f..d8a5868 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1248,6 +1248,19 @@
 
 	  Say N if unsure.
 
+config CGROUP_BPF
+	bool "Support for eBPF programs attached to cgroups"
+	depends on BPF_SYSCALL
+	select SOCK_CGROUP_DATA
+	help
+	  Allow attaching eBPF programs to a cgroup using the bpf(2)
+	  syscall command BPF_PROG_ATTACH.
+
+	  In which context these programs are accessed depends on the type
+	  of attachment. For instance, programs that are attached using
+	  BPF_CGROUP_INET_INGRESS will be executed on the ingress path of
+	  inet sockets.
+
 config CGROUP_DEBUG
 	bool "Example controller"
 	default n
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index eed911d..b22256b 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -5,3 +5,4 @@
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
+obj-$(CONFIG_CGROUP_BPF) += cgroup.o
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
new file mode 100644
index 0000000..d05c292
--- /dev/null
+++ b/kernel/bpf/cgroup.c
@@ -0,0 +1,170 @@
+/*
+ * Functions to manage eBPF programs attached to cgroups
+ *
+ * Copyright (c) 2016 Daniel Mack
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License.  See the file COPYING in the main directory of the
+ * Linux distribution for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/cgroup.h>
+#include <linux/slab.h>
+#include <linux/bpf.h>
+#include <linux/bpf-cgroup.h>
+#include <net/sock.h>
+
+DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
+EXPORT_SYMBOL(cgroup_bpf_enabled_key);
+
+/**
+ * cgroup_bpf_put() - put references of all bpf programs
+ * @cgrp: the cgroup to modify
+ */
+void cgroup_bpf_put(struct cgroup *cgrp)
+{
+	unsigned int type;
+
+	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.prog); type++) {
+		struct bpf_prog *prog = cgrp->bpf.prog[type];
+
+		if (prog) {
+			bpf_prog_put(prog);
+			static_branch_dec(&cgroup_bpf_enabled_key);
+		}
+	}
+}
+
+/**
+ * cgroup_bpf_inherit() - inherit effective programs from parent
+ * @cgrp: the cgroup to modify
+ * @parent: the parent to inherit from
+ */
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
+{
+	unsigned int type;
+
+	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.effective); type++) {
+		struct bpf_prog *e;
+
+		e = rcu_dereference_protected(parent->bpf.effective[type],
+					      lockdep_is_held(&cgroup_mutex));
+		rcu_assign_pointer(cgrp->bpf.effective[type], e);
+	}
+}
+
+/**
+ * __cgroup_bpf_update() - Update the pinned program of a cgroup, and
+ *                         propagate the change to descendants
+ * @cgrp: The cgroup which descendants to traverse
+ * @parent: The parent of @cgrp, or %NULL if @cgrp is the root
+ * @prog: A new program to pin
+ * @type: Type of pinning operation (ingress/egress)
+ *
+ * Each cgroup has a set of two pointers for bpf programs; one for the eBPF
+ * program it owns, and one for the program that is effective for execution.
+ *
+ * If @prog is not %NULL, this function attaches a new program to the cgroup and
+ * releases the one that is currently attached, if any. @prog is then made
+ * the effective program of type @type in that cgroup.
+ *
+ * If @prog is %NULL, the currently attached program of type @type is released,
+ * and the effective program of the parent cgroup (if any) is inherited to
+ * @cgrp.
+ *
+ * Then, the descendants of @cgrp are walked and the effective program for
+ * each of them is set to the effective program of @cgrp unless the
+ * descendant has its own program attached, in which case the subbranch is
+ * skipped. This ensures that delegated subcgroups with their own programs are left
+ * untouched.
+ *
+ * Must be called with cgroup_mutex held.
+ */
+void __cgroup_bpf_update(struct cgroup *cgrp,
+			 struct cgroup *parent,
+			 struct bpf_prog *prog,
+			 enum bpf_attach_type type)
+{
+	struct bpf_prog *old_prog, *effective;
+	struct cgroup_subsys_state *pos;
+
+	old_prog = xchg(cgrp->bpf.prog + type, prog);
+
+	effective = (!prog && parent) ?
+		rcu_dereference_protected(parent->bpf.effective[type],
+					  lockdep_is_held(&cgroup_mutex)) :
+		prog;
+
+	css_for_each_descendant_pre(pos, &cgrp->self) {
+		struct cgroup *desc = container_of(pos, struct cgroup, self);
+
+		/* skip the subtree if the descendant has its own program */
+		if (desc->bpf.prog[type] && desc != cgrp)
+			pos = css_rightmost_descendant(pos);
+		else
+			rcu_assign_pointer(desc->bpf.effective[type],
+					   effective);
+	}
+
+	if (prog)
+		static_branch_inc(&cgroup_bpf_enabled_key);
+
+	if (old_prog) {
+		bpf_prog_put(old_prog);
+		static_branch_dec(&cgroup_bpf_enabled_key);
+	}
+}
+
+/**
+ * __cgroup_bpf_run_filter() - Run a program for packet filtering
+ * @sk: The socket sending or receiving traffic
+ * @skb: The skb that is being sent or received
+ * @type: The type of program to be executed
+ *
+ * If no socket is passed, or the socket is not of type INET or INET6,
+ * this function does nothing and returns 0.
+ *
+ * The program type passed in via @type must be suitable for network
+ * filtering. No further check is performed to assert that.
+ *
+ * This function will return %-EPERM if an attached program was found
+ * and if it returned != 1 during execution. In all other cases, 0 is returned.
+ */
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type)
+{
+	struct bpf_prog *prog;
+	struct cgroup *cgrp;
+	int ret = 0;
+
+	if (!sk || !sk_fullsock(sk))
+		return 0;
+
+	if (sk->sk_family != AF_INET &&
+	    sk->sk_family != AF_INET6)
+		return 0;
+
+	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+
+	rcu_read_lock();
+
+	prog = rcu_dereference(cgrp->bpf.effective[type]);
+	if (prog) {
+		unsigned int offset = skb->data - skb_network_header(skb);
+		struct sock *save_sk = skb->sk;
+
+		skb->sk = sk;
+		__skb_push(skb, offset);
+		ret = bpf_prog_run_save_cb(prog, skb) == 1 ? 0 : -EPERM;
+		__skb_pull(skb, offset);
+		skb->sk = save_sk;
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(__cgroup_bpf_run_filter);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 237f3d6..e13157f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -824,6 +824,77 @@
 	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
 }
 
+#ifdef CONFIG_CGROUP_BPF
+
+#define BPF_PROG_ATTACH_LAST_FIELD attach_type
+
+static int bpf_prog_attach(const union bpf_attr *attr)
+{
+	struct bpf_prog *prog;
+	struct cgroup *cgrp;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (CHECK_ATTR(BPF_PROG_ATTACH))
+		return -EINVAL;
+
+	switch (attr->attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+		prog = bpf_prog_get_type(attr->attach_bpf_fd,
+					 BPF_PROG_TYPE_CGROUP_SKB);
+		if (IS_ERR(prog))
+			return PTR_ERR(prog);
+
+		cgrp = cgroup_get_from_fd(attr->target_fd);
+		if (IS_ERR(cgrp)) {
+			bpf_prog_put(prog);
+			return PTR_ERR(cgrp);
+		}
+
+		cgroup_bpf_update(cgrp, prog, attr->attach_type);
+		cgroup_put(cgrp);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define BPF_PROG_DETACH_LAST_FIELD attach_type
+
+static int bpf_prog_detach(const union bpf_attr *attr)
+{
+	struct cgroup *cgrp;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (CHECK_ATTR(BPF_PROG_DETACH))
+		return -EINVAL;
+
+	switch (attr->attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+		cgrp = cgroup_get_from_fd(attr->target_fd);
+		if (IS_ERR(cgrp))
+			return PTR_ERR(cgrp);
+
+		cgroup_bpf_update(cgrp, NULL, attr->attach_type);
+		cgroup_put(cgrp);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_CGROUP_BPF */
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
 	union bpf_attr attr = {};
@@ -890,6 +961,16 @@
 	case BPF_OBJ_GET:
 		err = bpf_obj_get(&attr);
 		break;
+
+#ifdef CONFIG_CGROUP_BPF
+	case BPF_PROG_ATTACH:
+		err = bpf_prog_attach(&attr);
+		break;
+	case BPF_PROG_DETACH:
+		err = bpf_prog_detach(&attr);
+		break;
+#endif
+
 	default:
 		err = -EINVAL;
 		break;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 6670008..0fab276 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5096,6 +5096,8 @@
 		if (cgrp->kn)
 			RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
 					 NULL);
+
+		cgroup_bpf_put(cgrp);
 	}
 
 	mutex_unlock(&cgroup_mutex);
@@ -5308,6 +5310,9 @@
 	if (!cgroup_on_dfl(cgrp))
 		cgrp->subtree_control = cgroup_control(cgrp);
 
+	if (parent)
+		cgroup_bpf_inherit(cgrp, parent);
+
 	cgroup_propagate_control(cgrp);
 
 	return cgrp;
@@ -6514,6 +6519,19 @@
 }
 subsys_initcall(cgroup_namespaces_init);
 
+#ifdef CONFIG_CGROUP_BPF
+void cgroup_bpf_update(struct cgroup *cgrp,
+		       struct bpf_prog *prog,
+		       enum bpf_attach_type type)
+{
+	struct cgroup *parent = cgroup_parent(cgrp);
+
+	mutex_lock(&cgroup_mutex);
+	__cgroup_bpf_update(cgrp, parent, prog, type);
+	mutex_unlock(&cgroup_mutex);
+}
+#endif /* CONFIG_CGROUP_BPF */
+
 #ifdef CONFIG_CGROUP_DEBUG
 static struct cgroup_subsys_state *
 debug_css_alloc(struct cgroup_subsys_state *parent_css)
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index b7b997f..bc615c6 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -19,6 +19,7 @@
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
 CONFIG_CP15_BARRIER_EMULATION=y
 CONFIG_DEFAULT_SECURITY_SELINUX=y
 CONFIG_EMBEDDED=y
diff --git a/kernel/fork.c b/kernel/fork.c
index cb4faae..33663b0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -521,7 +521,7 @@
 	set_task_stack_end_magic(tsk);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
-	tsk->stack_canary = get_random_int();
+	tsk->stack_canary = get_random_long();
 #endif
 
 	/*
@@ -1774,11 +1774,13 @@
 	*/
 	recalc_sigpending();
 	if (signal_pending(current)) {
-		spin_unlock(&current->sighand->siglock);
-		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
 		goto bad_fork_cancel_cgroup;
 	}
+	if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
+		retval = -ENOMEM;
+		goto bad_fork_cancel_cgroup;
+	}
 
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -1829,6 +1831,8 @@
 	return p;
 
 bad_fork_cancel_cgroup:
+	spin_unlock(&current->sighand->siglock);
+	write_unlock_irq(&tasklist_lock);
 	cgroup_cancel_fork(p);
 bad_fork_free_pid:
 	threadgroup_change_end(current);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index be3c34e..077c87f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -877,8 +877,8 @@
 	if (!desc)
 		return;
 
-	__irq_do_set_handler(desc, handle, 1, NULL);
 	desc->irq_common_data.handler_data = data;
+	__irq_do_set_handler(desc, handle, 1, NULL);
 
 	irq_put_desc_busunlock(desc, flags);
 }
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d630954..a1a07cf 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -563,7 +563,7 @@
 }
 
 /* Wait for completing optimization and unoptimization */
-static void wait_for_kprobe_optimizer(void)
+void wait_for_kprobe_optimizer(void)
 {
 	mutex_lock(&kprobe_mutex);
 
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index eef2ce9..3976dd5 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -274,7 +274,7 @@
 	 * if reparented.
 	 */
 	for (;;) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
+		set_current_state(TASK_INTERRUPTIBLE);
 		if (pid_ns->nr_hashed == init_pids)
 			break;
 		schedule();
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 42630ec..c42380a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -24,6 +24,7 @@
 	struct gov_attr_set attr_set;
 	unsigned int rate_limit_us;
 	unsigned int hispeed_freq;
+	bool pl;
 };
 
 struct sugov_policy {
@@ -224,7 +225,8 @@
 	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
 		*util = *max;
 
-	*util = max(*util, sg_cpu->walt_load.pl);
+	if (sg_policy->tunables->pl)
+		*util = max(*util, sg_cpu->walt_load.pl);
 }
 
 static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -450,12 +452,32 @@
 	return count;
 }
 
+static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return sprintf(buf, "%u\n", tunables->pl);
+}
+
+static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
+				   size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	if (kstrtobool(buf, &tunables->pl))
+		return -EINVAL;
+
+	return count;
+}
+
 static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
 static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
+static struct governor_attr pl = __ATTR_RW(pl);
 
 static struct attribute *sugov_attributes[] = {
 	&rate_limit_us.attr,
 	&hispeed_freq.attr,
+	&pl.attr,
 	NULL
 };
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index eb6c9f1..8d2b4d8 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1484,6 +1484,11 @@
 
 end:
 	release_all_trace_kprobes();
+	/*
+	 * Wait for the optimizer work to finish. Otherwise it might fiddle
+	 * with probes in already freed __init text.
+	 */
+	wait_for_kprobe_optimizer();
 	if (warn)
 		pr_cont("NG: Some tests are failed. Please check them.\n");
 	else
diff --git a/net/core/filter.c b/net/core/filter.c
index b391209..2cb4f0f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -26,6 +26,7 @@
 #include <linux/mm.h>
 #include <linux/fcntl.h>
 #include <linux/socket.h>
+#include <linux/sock_diag.h>
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/netdevice.h>
@@ -78,6 +79,10 @@
 	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
 		return -ENOMEM;
 
+	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
+	if (err)
+		return err;
+
 	err = security_sock_rcv_skb(sk, skb);
 	if (err)
 		return err;
@@ -85,8 +90,13 @@
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter) {
-		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
+		struct sock *save_sk = skb->sk;
+		unsigned int pkt_len;
+
+		skb->sk = sk;
+		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
 		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
+		skb->sk = save_sk;
 	}
 	rcu_read_unlock();
 
@@ -2530,6 +2540,36 @@
 	.arg5_type	= ARG_CONST_STACK_SIZE,
 };
 
+BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
+{
+	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
+}
+
+static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
+	.func           = bpf_get_socket_cookie,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
+{
+	struct sock *sk = sk_to_full_sk(skb->sk);
+	kuid_t kuid;
+
+	if (!sk || !sk_fullsock(sk))
+		return overflowuid;
+	kuid = sock_net_uid(sock_net(sk), sk);
+	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
+}
+
+static const struct bpf_func_proto bpf_get_socket_uid_proto = {
+	.func           = bpf_get_socket_uid,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -2551,6 +2591,10 @@
 	case BPF_FUNC_trace_printk:
 		if (capable(CAP_SYS_ADMIN))
 			return bpf_get_trace_printk_proto();
+	case BPF_FUNC_get_socket_cookie:
+		return &bpf_get_socket_cookie_proto;
+	case BPF_FUNC_get_socket_uid:
+		return &bpf_get_socket_uid_proto;
 	default:
 		return NULL;
 	}
@@ -2628,6 +2672,17 @@
 	}
 }
 
+static const struct bpf_func_proto *
+cg_skb_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_skb_load_bytes:
+		return &bpf_skb_load_bytes_proto;
+	default:
+		return sk_filter_func_proto(func_id);
+	}
+}
+
 static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 {
 	if (off < 0 || off >= sizeof(struct __sk_buff))
@@ -2990,6 +3045,12 @@
 	.convert_ctx_access	= xdp_convert_ctx_access,
 };
 
+static const struct bpf_verifier_ops cg_skb_ops = {
+	.get_func_proto		= cg_skb_func_proto,
+	.is_valid_access	= sk_filter_is_valid_access,
+	.convert_ctx_access	= sk_filter_convert_ctx_access,
+};
+
 static struct bpf_prog_type_list sk_filter_type __read_mostly = {
 	.ops	= &sk_filter_ops,
 	.type	= BPF_PROG_TYPE_SOCKET_FILTER,
@@ -3010,12 +3071,18 @@
 	.type	= BPF_PROG_TYPE_XDP,
 };
 
+static struct bpf_prog_type_list cg_skb_type __read_mostly = {
+	.ops	= &cg_skb_ops,
+	.type	= BPF_PROG_TYPE_CGROUP_SKB,
+};
+
 static int __init register_sk_filter_ops(void)
 {
 	bpf_register_prog_type(&sk_filter_type);
 	bpf_register_prog_type(&sched_cls_type);
 	bpf_register_prog_type(&sched_act_type);
 	bpf_register_prog_type(&xdp_type);
+	bpf_register_prog_type(&cg_skb_type);
 
 	return 0;
 }
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 6b10573..acd2a6c 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -19,7 +19,7 @@
 static DEFINE_MUTEX(sock_diag_table_mutex);
 static struct workqueue_struct *broadcast_wq;
 
-static u64 sock_gen_cookie(struct sock *sk)
+u64 sock_gen_cookie(struct sock *sk)
 {
 	while (1) {
 		u64 res = atomic64_read(&sk->sk_cookie);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 0bd3efe..2c18bcf 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -74,6 +74,7 @@
 #include <net/checksum.h>
 #include <net/inetpeer.h>
 #include <net/lwtunnel.h>
+#include <linux/bpf-cgroup.h>
 #include <linux/igmp.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_bridge.h>
@@ -287,6 +288,13 @@
 static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	unsigned int mtu;
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
 
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 	/* Policy lookup after SNAT yielded a new policy */
@@ -305,6 +313,20 @@
 	return ip_finish_output2(net, sk, skb);
 }
 
+static int ip_mc_finish_output(struct net *net, struct sock *sk,
+			       struct sk_buff *skb)
+{
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
+
+	return dev_loopback_xmit(net, sk, skb);
+}
+
 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct rtable *rt = skb_rtable(skb);
@@ -342,7 +364,7 @@
 			if (newskb)
 				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 					net, sk, newskb, NULL, newskb->dev,
-					dev_loopback_xmit);
+					ip_mc_finish_output);
 		}
 
 		/* Multicasts with ttl 0 must not go beyond the host */
@@ -358,7 +380,7 @@
 		if (newskb)
 			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 				net, sk, newskb, NULL, newskb->dev,
-				dev_loopback_xmit);
+				ip_mc_finish_output);
 	}
 
 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index e27b8fd..a215802 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 
+#include <linux/bpf-cgroup.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 
@@ -131,6 +132,14 @@
 
 static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
+
 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 	    dst_allfrag(skb_dst(skb)) ||
 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 48d0dc89b..e735f78 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1168,11 +1168,10 @@
 		sipx->sipx_network	= ipxif->if_netnum;
 		memcpy(sipx->sipx_node, ipxif->if_node,
 			sizeof(sipx->sipx_node));
-		rc = -EFAULT;
-		if (copy_to_user(arg, &ifr, sizeof(ifr)))
-			break;
-		ipxitf_put(ipxif);
 		rc = 0;
+		if (copy_to_user(arg, &ifr, sizeof(ifr)))
+			rc = -EFAULT;
+		ipxitf_put(ipxif);
 		break;
 	}
 	case SIOCAIPXITFCRT:
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 72c5867..b2cdced 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -22,6 +22,7 @@
 hostprogs-y += map_perf_test
 hostprogs-y += test_overhead
 hostprogs-y += test_cgrp2_array_pin
+hostprogs-y += test_cgrp2_attach
 hostprogs-y += xdp1
 hostprogs-y += xdp2
 hostprogs-y += test_current_task_under_cgroup
@@ -50,6 +51,7 @@
 map_perf_test-objs := bpf_load.o libbpf.o map_perf_test_user.o
 test_overhead-objs := bpf_load.o libbpf.o test_overhead_user.o
 test_cgrp2_array_pin-objs := libbpf.o test_cgrp2_array_pin.o
+test_cgrp2_attach-objs := libbpf.o test_cgrp2_attach.o
 xdp1-objs := bpf_load.o libbpf.o xdp1_user.o
 # reuse xdp1 source intentionally
 xdp2-objs := bpf_load.o libbpf.o xdp1_user.o
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
index 9969e35..9ce707b 100644
--- a/samples/bpf/libbpf.c
+++ b/samples/bpf/libbpf.c
@@ -104,6 +104,27 @@
 	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 }
 
+int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
+{
+	union bpf_attr attr = {
+		.target_fd = target_fd,
+		.attach_bpf_fd = prog_fd,
+		.attach_type = type,
+	};
+
+	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
+}
+
+int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
+{
+	union bpf_attr attr = {
+		.target_fd = target_fd,
+		.attach_type = type,
+	};
+
+	return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
+}
+
 int bpf_obj_pin(int fd, const char *pathname)
 {
 	union bpf_attr attr = {
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index ac6edb6..d0a799a 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -15,6 +15,9 @@
 		  const struct bpf_insn *insns, int insn_len,
 		  const char *license, int kern_version);
 
+int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
+int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
+
 int bpf_obj_pin(int fd, const char *pathname);
 int bpf_obj_get(const char *pathname);
 
diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c
new file mode 100644
index 0000000..63ef208
--- /dev/null
+++ b/samples/bpf/test_cgrp2_attach.c
@@ -0,0 +1,147 @@
+/* eBPF example program:
+ *
+ * - Creates an array map in the kernel with 4-byte keys and 8-byte values
+ *
+ * - Loads eBPF program
+ *
+ *   The eBPF program accesses the map passed in to store two pieces of
+ *   information. The number of invocations of the program, which maps
+ *   to the number of packets received, is stored to key 0. Key 1 is
+ *   incremented on each iteration by the number of bytes stored in
+ *   the skb.
+ *
+ * - Detaches any eBPF program previously attached to the cgroup
+ *
+ * - Attaches the new program to a cgroup using BPF_PROG_ATTACH
+ *
+ * - Every second, reads map[0] and map[1] to see how many bytes and
+ *   packets were seen on any socket of tasks in the given cgroup.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <linux/bpf.h>
+
+#include "libbpf.h"
+
+enum {
+	MAP_KEY_PACKETS,
+	MAP_KEY_BYTES,
+};
+
+static int prog_load(int map_fd, int verdict)
+{
+	struct bpf_insn prog[] = {
+		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), /* save r6 so it's not clobbered by BPF_CALL */
+
+		/* Count packets */
+		BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_PACKETS), /* r0 = 0 */
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+		BPF_LD_MAP_FD(BPF_REG_1, map_fd), /* load map fd to r1 */
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+		BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
+		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+		/* Count bytes */
+		BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
+		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+		BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
+		BPF_EXIT_INSN(),
+	};
+
+	return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB,
+			     prog, sizeof(prog), "GPL", 0);
+}
+
+static int usage(const char *argv0)
+{
+	printf("Usage: %s <cg-path> <egress|ingress> [drop]\n", argv0);
+	return EXIT_FAILURE;
+}
+
+int main(int argc, char **argv)
+{
+	int cg_fd, map_fd, prog_fd, key, ret;
+	long long pkt_cnt, byte_cnt;
+	enum bpf_attach_type type;
+	int verdict = 1;
+
+	if (argc < 3)
+		return usage(argv[0]);
+
+	if (strcmp(argv[2], "ingress") == 0)
+		type = BPF_CGROUP_INET_INGRESS;
+	else if (strcmp(argv[2], "egress") == 0)
+		type = BPF_CGROUP_INET_EGRESS;
+	else
+		return usage(argv[0]);
+
+	if (argc > 3 && strcmp(argv[3], "drop") == 0)
+		verdict = 0;
+
+	cg_fd = open(argv[1], O_DIRECTORY | O_RDONLY);
+	if (cg_fd < 0) {
+		printf("Failed to open cgroup path: '%s'\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY,
+				sizeof(key), sizeof(byte_cnt),
+				256, 0);
+	if (map_fd < 0) {
+		printf("Failed to create map: '%s'\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	prog_fd = prog_load(map_fd, verdict);
+	printf("Output from kernel verifier:\n%s\n-------\n", bpf_log_buf);
+
+	if (prog_fd < 0) {
+		printf("Failed to load prog: '%s'\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	ret = bpf_prog_detach(cg_fd, type);
+	printf("bpf_prog_detach() returned '%s' (%d)\n", strerror(errno), errno);
+
+	ret = bpf_prog_attach(prog_fd, cg_fd, type);
+	if (ret < 0) {
+		printf("Failed to attach prog to cgroup: '%s'\n",
+		       strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	while (1) {
+		key = MAP_KEY_PACKETS;
+		assert(bpf_lookup_elem(map_fd, &key, &pkt_cnt) == 0);
+
+		key = MAP_KEY_BYTES;
+		assert(bpf_lookup_elem(map_fd, &key, &byte_cnt) == 0);
+
+		printf("cgroup received %lld packets, %lld bytes\n",
+		       pkt_cnt, byte_cnt);
+		sleep(1);
+	}
+
+	return EXIT_SUCCESS;
+}
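
For readers less fluent in raw BPF_*_INSN macros, the verdict program built in prog_load() above corresponds roughly to this restricted-C sketch (assuming samples/bpf's bpf_helpers.h map and helper conventions; not part of the patch):

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") count_map = {
	.type		= BPF_MAP_TYPE_ARRAY,
	.key_size	= sizeof(unsigned int),
	.value_size	= sizeof(unsigned long long),
	.max_entries	= 256,
};

SEC("cgroup/skb")
int count_traffic(struct __sk_buff *skb)
{
	unsigned int key;
	unsigned long long *val;

	key = 0;				/* MAP_KEY_PACKETS */
	val = bpf_map_lookup_elem(&count_map, &key);
	if (val)
		__sync_fetch_and_add(val, 1);

	key = 1;				/* MAP_KEY_BYTES */
	val = bpf_map_lookup_elem(&count_map, &key);
	if (val)
		__sync_fetch_and_add(val, skb->len);

	return 1;				/* 1 = allow, 0 = drop */
}

char _license[] SEC("license") = "GPL";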
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 389325a..0974598 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -204,10 +204,11 @@
 
 		cause = "missing-hash";
 		status = INTEGRITY_NOLABEL;
-		if (opened & FILE_CREATED) {
+		if (opened & FILE_CREATED)
 			iint->flags |= IMA_NEW_FILE;
+		if ((iint->flags & IMA_NEW_FILE) &&
+		    !(iint->flags & IMA_DIGSIG_REQUIRED))
 			status = INTEGRITY_PASS;
-		}
 		goto out;
 	}
 
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 0430658..0f41257 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -106,7 +106,11 @@
 	/* disable ringbuffer DMAs */
 	snd_hdac_chip_writeb(bus, RIRBCTL, 0);
 	snd_hdac_chip_writeb(bus, CORBCTL, 0);
+	spin_unlock_irq(&bus->reg_lock);
+
 	hdac_wait_for_cmd_dmas(bus);
+
+	spin_lock_irq(&bus->reg_lock);
 	/* disable unsolicited responses */
 	snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, 0);
 	spin_unlock_irq(&bus->reg_lock);
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 8c0f3b8..e78b5f0 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -498,7 +498,7 @@
 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 
 	if (gpio_is_valid(cs4271->gpio_nreset)) {
-		gpio_set_value(cs4271->gpio_nreset, 0);
+		gpio_direction_output(cs4271->gpio_nreset, 0);
 		mdelay(1);
 		gpio_set_value(cs4271->gpio_nreset, 1);
 		mdelay(1);
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 9e5fc16..42dfbeb 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -375,6 +375,105 @@
 	 */
 	BPF_FUNC_probe_write_user,
 
+	/**
+	 * int bpf_skb_change_tail(skb, len, flags)
+	 *     The helper will resize the skb to the given new size, to be used e.g.
+	 *     with control messages.
+	 *     @skb: pointer to skb
+	 *     @len: new skb length
+	 *     @flags: reserved
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_tail,
+
+	/**
+	 * int bpf_skb_pull_data(skb, len)
+	 *     The helper will pull in non-linear data in case the skb is non-linear
+	 *     and not all of len is part of the linear section. Only needed for
+	 *     read/write with direct packet access.
+	 *     @skb: pointer to skb
+	 *     @len: len to make read/writeable
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_pull_data,
+
+	/**
+	 * s64 bpf_csum_update(skb, csum)
+	 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+	 *     @skb: pointer to skb
+	 *     @csum: csum to add
+	 *     Return: csum on success or negative error
+	 */
+	BPF_FUNC_csum_update,
+
+	/**
+	 * void bpf_set_hash_invalid(skb)
+	 *     Invalidate current skb->hash.
+	 *     @skb: pointer to skb
+	 */
+	BPF_FUNC_set_hash_invalid,
+
+	/**
+	 * int bpf_get_numa_node_id()
+	 *     Return: Id of current NUMA node.
+	 */
+	BPF_FUNC_get_numa_node_id,
+
+	/**
+	 * int bpf_skb_change_head()
+	 *     Grows headroom of skb and adjusts MAC header offset accordingly.
+	 *     Will extend/reallocate as required automatically.
+	 *     May change skb data pointer and will thus invalidate any check
+	 *     performed for direct packet access.
+	 *     @skb: pointer to skb
+	 *     @len: length of header to be pushed in front
+	 *     @flags: Flags (unused for now)
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_head,
+
+	/**
+	 * int bpf_xdp_adjust_head(xdp_md, delta)
+	 *     Adjust the xdp_md.data by delta
+	 *     @xdp_md: pointer to xdp_md
+	 *     @delta: A positive/negative integer to be added to xdp_md.data
+	 *     Return: 0 on success or negative on error
+	 */
+	BPF_FUNC_xdp_adjust_head,
+
+	/**
+	 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+	 *     Copy a NUL terminated string from unsafe address. In case the string
+	 *     length is smaller than size, the target is not padded with further NUL
+	 *     bytes. In case the string length is larger than size, just size-1
+	 *     bytes are copied and the last byte is set to NUL.
+	 *     @dst: destination address
+	 *     @size: maximum number of bytes to copy, including the trailing NUL
+	 *     @unsafe_ptr: unsafe address
+	 *     Return:
+	 *       > 0 length of the string including the trailing NUL on success
+	 *       < 0 error
+	 */
+	BPF_FUNC_probe_read_str,
+
+	/**
+	 * u64 bpf_get_socket_cookie(skb)
+	 *     Get the cookie for the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: 8-byte non-decreasing number on success or 0 if the
+	 *     socket field is missing inside sk_buff
+	 */
+	BPF_FUNC_get_socket_cookie,
+
+	/**
+	 * u32 bpf_get_socket_uid(skb)
+	 *     Get the owner uid of the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: uid of the socket owner on success or overflowuid on failure.
+	 */
+	BPF_FUNC_get_socket_uid,
+
 	__BPF_FUNC_MAX_ID,
 };