Merge "power: reset: Fix compilation error when CONFIG_QCOM_DLOAD_MODE=n" into msm-4.8
diff --git a/CREDITS b/CREDITS
index 8373676..d7ebdfb 100644
--- a/CREDITS
+++ b/CREDITS
@@ -9,7 +9,7 @@
 			Linus
 ----------
 
-M: Matt Mackal
+N: Matt Mackal
 E: mpm@selenic.com
 D: SLOB slab allocator
 
@@ -1910,7 +1910,7 @@
 
 N: Andi Kleen
 E: andi@firstfloor.org
-U: http://www.halobates.de
+W: http://www.halobates.de
 D: network, x86, NUMA, various hacks
 S: Schwalbenstr. 96
 S: 85551 Ottobrunn
@@ -2089,8 +2089,8 @@
 D: Synopsys Designware PCI host bridge driver
 
 N: Gabor Kuti
-M: seasons@falcon.sch.bme.hu
-M: seasons@makosteszta.sote.hu
+E: seasons@falcon.sch.bme.hu
+E: seasons@makosteszta.sote.hu
 D: Original author of software suspend
 
 N: Jaroslav Kysela
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index bb6924f..0cebc4d 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -89,6 +89,9 @@
 - MSMSKUNK
   compatible = "qcom,msmskunk"
 
+- SDMBAT
+  compatible = "qcom,sdmbat"
+
 - MSM8952
   compatible = "qcom,msm8952"
 
@@ -260,6 +263,10 @@
 compatible = "qcom,msmskunk-cdp"
 compatible = "qcom,msmskunk-mtp"
 compatible = "qcom,msmskunk-mtp"
+compatible = "qcom,sdmbat-sim"
+compatible = "qcom,sdmbat-rumi"
+compatible = "qcom,sdmbat-cdp"
+compatible = "qcom,sdmbat-mtp"
 compatible = "qcom,msm8952-rumi"
 compatible = "qcom,msm8952-sim"
 compatible = "qcom,msm8952-qrd"
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index e1d7681..0515095 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -9,10 +9,26 @@
 - max-speed: number, specifies maximum speed in Mbit/s supported by the device;
 - max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
   the maximum frame size (there's contradiction in ePAPR).
-- phy-mode: string, operation mode of the PHY interface; supported values are
-  "mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
-  "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii", "trgmii"; this is now a
-  de-facto standard property;
+- phy-mode: string, operation mode of the PHY interface. This is now a de-facto
+  standard property; supported values are:
+  * "mii"
+  * "gmii"
+  * "sgmii"
+  * "qsgmii"
+  * "tbi"
+  * "rev-mii"
+  * "rmii"
+  * "rgmii" (RX and TX delays are added by the MAC when required)
+  * "rgmii-id" (RGMII with internal RX and TX delays provided by the PHY, the
+     MAC should not add the RX or TX delays in this case)
+  * "rgmii-rxid" (RGMII with internal RX delay provided by the PHY, the MAC
+     should not add an RX delay in this case)
+  * "rgmii-txid" (RGMII with internal TX delay provided by the PHY, the MAC
+     should not add a TX delay in this case)
+  * "rtbi"
+  * "smii"
+  * "xgmii"
+  * "trgmii"
 - phy-connection-type: the same as "phy-mode" property but described in ePAPR;
 - phy-handle: phandle, specifies a reference to a node representing a PHY
   device; this property is described in ePAPR and so preferred;
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 399e4e8..433b672 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -62,10 +62,13 @@
 	protocols.
 
 nf_conntrack_helper - BOOLEAN
-	0 - disabled
-	not 0 - enabled (default)
+	0 - disabled (default)
+	not 0 - enabled
 
 	Enable automatic conntrack helper assignment.
+	If disabled it is required to set up iptables rules to assign
+	helpers to connections.  See the CT target description in the
+	iptables-extensions(8) man page for further information.
 
 nf_conntrack_icmp_timeout - INTEGER (seconds)
 	default 30
diff --git a/MAINTAINERS b/MAINTAINERS
index 8d414840..63cefa6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9257,11 +9257,12 @@
 F:	drivers/pci/host/*layerscape*
 
 PCI DRIVER FOR IMX6
-M:	Richard Zhu <Richard.Zhu@freescale.com>
+M:	Richard Zhu <hongxing.zhu@nxp.com>
 M:	Lucas Stach <l.stach@pengutronix.de>
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+F:	Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
 F:	drivers/pci/host/*imx6*
 
 PCI DRIVER FOR TI KEYSTONE
@@ -9320,17 +9321,11 @@
 
 PCI DRIVER FOR SYNOPSIS DESIGNWARE
 M:	Jingoo Han <jingoohan1@gmail.com>
-M:	Pratyush Anand <pratyush.anand@gmail.com>
-L:	linux-pci@vger.kernel.org
-S:	Maintained
-F:	drivers/pci/host/*designware*
-
-PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
-M:	Jose Abreu <Jose.Abreu@synopsys.com>
+M:	Joao Pinto <Joao.Pinto@synopsys.com>
 L:	linux-pci@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/pci/designware-pcie.txt
-F:	drivers/pci/host/pcie-designware-plat.c
+F:	drivers/pci/host/*designware*
 
 PCI DRIVER FOR GENERIC OF HOSTS
 M:	Will Deacon <will.deacon@arm.com>
diff --git a/Makefile b/Makefile
index 569beb5..fb2f21b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -611,6 +611,13 @@
 include/config/auto.conf: ;
 endif # $(dot-config)
 
+# For the kernel to actually contain only the needed exported symbols,
+# we have to build modules as well to determine what those symbols are.
+# (this can be evaluated only once include/config/auto.conf has been included)
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+  KBUILD_MODULES := 1
+endif
+
 # The all: target is the default when no target is given on the
 # command line.
 # This allow a user to issue only 'make' to build a kernel including modules
@@ -948,7 +955,7 @@
 endif
 ifdef CONFIG_TRIM_UNUSED_KSYMS
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
-	  "$(MAKE) KBUILD_MODULES=1 -f $(srctree)/Makefile vmlinux_prereq"
+	  "$(MAKE) -f $(srctree)/Makefile vmlinux"
 endif
 
 # standalone target for easier testing
@@ -1023,8 +1030,6 @@
 prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
                    include/config/auto.conf
 	$(cmd_crmodverdir)
-	$(Q)test -e include/generated/autoksyms.h || \
-	    touch   include/generated/autoksyms.h
 
 archprepare: archheaders archscripts prepare1 scripts_basic
 
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 08e7e2a..a36e860 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -22,10 +22,11 @@
 static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(
-	"	lp  1f	\n"
-	"	nop	\n"
-	"1:		\n"
-	: "+l"(loops));
+	"	mov lp_count, %0	\n"
+	"	lp  1f			\n"
+	"	nop			\n"
+	"1:				\n"
+	: : "r"(loops));
 }
 
 extern void __bad_udelay(void);
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 89eeb37..e94ca72 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -280,7 +280,7 @@
 
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))
 
 /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 2b96cfc..50d7169 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
 
 static int l2_line_sz;
 static int ioc_exists;
-int slc_enable = 1, ioc_enable = 1;
+int slc_enable = 1, ioc_enable = 0;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
 
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 39f5705..5af3ec1 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -745,7 +745,6 @@
 	sun4i-a10-pcduino2.dtb \
 	sun4i-a10-pov-protab2-ips9.dtb
 dtb-$(CONFIG_MACH_SUN5I) += \
-	ntc-gr8-evb.dtb \
 	sun5i-a10s-auxtek-t003.dtb \
 	sun5i-a10s-auxtek-t004.dtb \
 	sun5i-a10s-mk802.dtb \
@@ -761,6 +760,7 @@
 	sun5i-a13-olinuxino-micro.dtb \
 	sun5i-a13-q8-tablet.dtb \
 	sun5i-a13-utoo-p66.dtb \
+	sun5i-gr8-evb.dtb \
 	sun5i-r8-chip.dtb
 dtb-$(CONFIG_MACH_SUN6I) += \
 	sun6i-a31-app4-evb1.dtb \
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 91096a4..8f79b41 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -283,6 +283,8 @@
 			clock-frequency = <400000>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_i2c0_default>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
 			status = "disabled";
 		};
@@ -296,6 +298,8 @@
 			clock-frequency = <400000>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_i2c1_default>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
 			status = "disabled";
 		};
@@ -309,6 +313,8 @@
 			clock-frequency = <400000>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_i2c2_default>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
 			status = "disabled";
 		};
@@ -322,6 +328,8 @@
 			clock-frequency = <400000>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_i2c3_default>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
 			status = "disabled";
 		};
@@ -335,6 +343,8 @@
 			clock-frequency = <400000>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_i2c4_default>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
 			status = "disabled";
 		};
@@ -348,6 +358,8 @@
 			clock-frequency = <400000>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_i2c5_default>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
 			status = "disabled";
 		};
@@ -363,6 +375,8 @@
 			clock-frequency = <400000>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_i2c10_default>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
 			status = "disabled";
 		};
@@ -376,6 +390,8 @@
 			clock-frequency = <400000>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_i2c11_default>;
+			#address-cells = <1>;
+			#size-cells = <0>;
 
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/ntc-gr8-evb.dts b/arch/arm/boot/dts/sun5i-gr8-evb.dts
similarity index 99%
rename from arch/arm/boot/dts/ntc-gr8-evb.dts
rename to arch/arm/boot/dts/sun5i-gr8-evb.dts
index 4b622f3..714381f 100644
--- a/arch/arm/boot/dts/ntc-gr8-evb.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-evb.dts
@@ -44,7 +44,7 @@
  */
 
 /dts-v1/;
-#include "ntc-gr8.dtsi"
+#include "sun5i-gr8.dtsi"
 #include "sunxi-common-regulators.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm/boot/dts/ntc-gr8.dtsi b/arch/arm/boot/dts/sun5i-gr8.dtsi
similarity index 100%
rename from arch/arm/boot/dts/ntc-gr8.dtsi
rename to arch/arm/boot/dts/sun5i-gr8.dtsi
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 2f36b15..8a3d7cd 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -127,6 +127,15 @@
 	  This enables support for the MSMSKUNK chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
+config ARCH_SDMBAT
+	bool "Enable Support for Qualcomm Technologies Inc. SDMBAT"
+	depends on ARCH_QCOM
+	select COMMON_CLK_QCOM
+	select QCOM_GDSC
+	help
+	  This enables support for the SDMBAT chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 config ARCH_ROCKCHIP
 	bool "Rockchip Platforms"
 	select ARCH_HAS_RESET_CONTROLLER
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 334271a..7d3a2ac 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -393,7 +393,7 @@
 		#address-cells = <3>;
 		#size-cells = <2>;
 		dma-coherent;
-		ranges = <0x01000000 0x00 0x5f800000 0x00 0x5f800000 0x0 0x00800000>,
+		ranges = <0x01000000 0x00 0x00000000 0x00 0x5f800000 0x0 0x00800000>,
 			 <0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>,
 			 <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>;
 		#interrupt-cells = <1>;
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index 123a58b..f0b857d 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -76,7 +76,7 @@
 				compatible = "arm,idle-state";
 				arm,psci-suspend-param = <0x1010000>;
 				local-timer-stop;
-				entry-latency-us = <300>;
+				entry-latency-us = <400>;
 				exit-latency-us = <1200>;
 				min-residency-us = <2500>;
 			};
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index 007be82..26aaa6a 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -76,7 +76,7 @@
 				compatible = "arm,idle-state";
 				arm,psci-suspend-param = <0x1010000>;
 				local-timer-stop;
-				entry-latency-us = <300>;
+				entry-latency-us = <400>;
 				exit-latency-us = <1200>;
 				min-residency-us = <2500>;
 			};
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index a7270ef..6e154d9 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -76,7 +76,7 @@
 				compatible = "arm,idle-state";
 				arm,psci-suspend-param = <0x1010000>;
 				local-timer-stop;
-				entry-latency-us = <300>;
+				entry-latency-us = <400>;
 				exit-latency-us = <1200>;
 				min-residency-us = <2500>;
 			};
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index c5b5060..3ed0b06 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -2,8 +2,15 @@
 dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
 
-dtb-$(CONFIG_ARCH_QCOM) += msmskunk-sim.dtb
-dtb-$(CONFIG_ARCH_QCOM) += msmskunk-rumi.dtb
+dtb-$(CONFIG_ARCH_MSMSKUNK) += msmskunk-sim.dtb \
+	msmskunk-rumi.dtb \
+	msmskunk-mtp.dtb \
+	msmskunk-cdp.dtb
+
+dtb-$(CONFIG_ARCH_SDMBAT) += sdmbat-sim.dtb \
+	sdmbat-rumi.dtb \
+	sdmbat-mtp.dtb \
+	sdmbat-cdp.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dts b/arch/arm64/boot/dts/qcom/msmskunk-cdp.dts
new file mode 100644
index 0000000..b1dd404
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-cdp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "msmskunk.dtsi"
+#include "msmskunk-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM skunk v1 CDP";
+	compatible = "qcom,msmskunk-cdp", "qcom,msmskunk", "qcom,cdp";
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
new file mode 100644
index 0000000..930c8de
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-cdp.dtsi
@@ -0,0 +1,14 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msmskunk-pinctrl.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dts b/arch/arm64/boot/dts/qcom/msmskunk-mtp.dts
new file mode 100644
index 0000000..d6a6ffb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-mtp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "msmskunk.dtsi"
+#include "msmskunk-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM skunk v1 MTP";
+	compatible = "qcom,msmskunk-mtp", "qcom,msmskunk", "qcom,mtp";
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-mtp.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-mtp.dtsi
new file mode 100644
index 0000000..930c8de
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-mtp.dtsi
@@ -0,0 +1,14 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msmskunk-pinctrl.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/msmskunk.dtsi b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
index e50103d..ed4377f 100644
--- a/arch/arm64/boot/dts/qcom/msmskunk.dtsi
+++ b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
@@ -1142,6 +1142,30 @@
 			0x0	/* apps_v6_rt_nhash_size; */
 		>;
 	};
+
+	qcom,chd_sliver {
+		compatible = "qcom,core-hang-detect";
+		label = "silver";
+		qcom,threshold-arr = <0x17e00058 0x17e10058
+		0x17e20058 0x17e30058>;
+		qcom,config-arr = <0x17e00060 0x17e10060
+		0x17e20060 0x17e30060>;
+	};
+
+	qcom,chd_gold {
+		compatible = "qcom,core-hang-detect";
+		label = "gold";
+		qcom,threshold-arr = <0x17e40058 0x17e50058
+		0x17e60058 0x17e70058>;
+		qcom,config-arr = <0x17e40060 0x17e50060
+		0x17e60060 0x17e70060>;
+	};
+
+	qcom,ghd {
+		compatible = "qcom,gladiator-hang-detect";
+		qcom,threshold-arr = <0x1799041c 0x17990420>;
+		qcom,config-reg = <0x17990434>;
+	};
 };
 
 &pcie_0_gdsc {
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts b/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
new file mode 100644
index 0000000..f8f916e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat-cdp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdmbat.dtsi"
+#include "sdmbat-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM bat v1 CDP";
+	compatible = "qcom,sdmbat-cdp", "qcom,sdmbat", "qcom,cdp";
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
new file mode 100644
index 0000000..1003478
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat-cdp.dtsi
@@ -0,0 +1,14 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msmskunk-cdp.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts b/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
new file mode 100644
index 0000000..fb8e85a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat-mtp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdmbat.dtsi"
+#include "sdmbat-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM bat v1 MTP";
+	compatible = "qcom,sdmbat-mtp", "qcom,sdmbat", "qcom,mtp";
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
new file mode 100644
index 0000000..ad26a14
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat-mtp.dtsi
@@ -0,0 +1,14 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msmskunk-mtp.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dts b/arch/arm64/boot/dts/qcom/sdmbat-rumi.dts
new file mode 100644
index 0000000..2bf868e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat-rumi.dts
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/memreserve/ 0x90000000 0x00000100;
+
+#include "sdmbat.dtsi"
+#include "sdmbat-rumi.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM BAT RUMI";
+	compatible = "qcom,sdmbat-rumi", "qcom,sdmbat", "qcom,rumi";
+	qcom,board-id = <15 0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdmbat-rumi.dtsi
new file mode 100644
index 0000000..11901f1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat-rumi.dtsi
@@ -0,0 +1,20 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. Common device definitions should be placed inside the
+ * msmskunk-rumi.dtsi file.
+ */
+
+ #include "msmskunk-rumi.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-sim.dts b/arch/arm64/boot/dts/qcom/sdmbat-sim.dts
new file mode 100644
index 0000000..216b3d0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat-sim.dts
@@ -0,0 +1,25 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/memreserve/ 0x90000000 0x00000100;
+
+#include "sdmbat.dtsi"
+#include "sdmbat-sim.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM BAT SIM";
+	compatible = "qcom,sdmbat-sim", "qcom,sdmbat", "qcom,sim";
+	qcom,board-id = <16 0>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/sdmbat-sim.dtsi b/arch/arm64/boot/dts/qcom/sdmbat-sim.dtsi
new file mode 100644
index 0000000..560ad45
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat-sim.dtsi
@@ -0,0 +1,20 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. Common device definitions should be placed inside the
+ * msmskunk-sim.dtsi file.
+ */
+
+ #include "msmskunk-sim.dtsi"
+
diff --git a/arch/arm64/boot/dts/qcom/sdmbat.dtsi b/arch/arm64/boot/dts/qcom/sdmbat.dtsi
new file mode 100644
index 0000000..950d130
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdmbat.dtsi
@@ -0,0 +1,33 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * As a general rule, only version-specific property overrides should be placed
+ * inside this file. Common device definitions should be placed inside the
+ * msmskunk.dtsi file.
+ */
+
+ #include "msmskunk.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM BAT";
+	compatible = "qcom,sdmbat";
+	qcom,msm-id = <328 0x0>;
+
+};
+
+&soc {
+	qcom,llcc@1300000 {
+		status = "disabled";
+	};
+};
+
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 9669fc7..74f4c66 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1436,13 +1436,6 @@
 				"ahci: MRSM is on, fallback to single MSI\n");
 			pci_free_irq_vectors(pdev);
 		}
-
-		/*
-		 * -ENOSPC indicated we don't have enough vectors.  Don't bother
-		 * trying a single vectors for any other error:
-		 */
-		if (nvec < 0 && nvec != -ENOSPC)
-			return nvec;
 	}
 
 	/*
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9cceb4a..c4eb4ae 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1088,7 +1088,7 @@
 		desc[1] = tf->command; /* status */
 		desc[2] = tf->device;
 		desc[3] = tf->nsect;
-		desc[0] = 0;
+		desc[7] = 0;
 		if (tf->flags & ATA_TFLAG_LBA48)  {
 			desc[8] |= 0x80;
 			if (tf->hob_nsect)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 04365b1..5163c8f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1403,7 +1403,8 @@
 	zram = idr_find(&zram_index_idr, dev_id);
 	if (zram) {
 		ret = zram_remove(zram);
-		idr_remove(&zram_index_idr, dev_id);
+		if (!ret)
+			idr_remove(&zram_index_idr, dev_id);
 	} else {
 		ret = -ENODEV;
 	}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index dcc0973..438c907 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -48,6 +48,8 @@
 
 source "drivers/tty/serial/Kconfig"
 
+source "drivers/char/diag/Kconfig"
+
 config TTY_PRINTK
 	tristate "TTY driver to output user messages via printk"
 	depends on EXPERT && TTY
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 6e6c244..60653fd 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -58,5 +58,6 @@
 js-rtc-y = rtc.o
 
 obj-$(CONFIG_TILE_SROM)		+= tile-srom.o
+obj-$(CONFIG_DIAG_CHAR)		+= diag/
 obj-$(CONFIG_XILLYBUS)		+= xillybus/
 obj-$(CONFIG_POWERNV_OP_PANEL)	+= powernv-op-panel.o
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
new file mode 100644
index 0000000..e309241
--- /dev/null
+++ b/drivers/char/diag/Kconfig
@@ -0,0 +1,37 @@
+menu "Diag Support"
+
+config DIAG_CHAR
+	tristate "DIAG CHAR Interface Core"
+	default m
+	depends on USB_CONFIGFS_F_DIAG || USB_FUNCTION_DIAG || USB_QCOM_MAEMO
+	depends on ARCH_QCOM
+	depends on POWER_RESET_QCOM
+	select CRC_CCITT
+	help
+	  Char driver interface for SoC Diagnostic information. The DIAG Char
+	  driver provides diag forwarding to user space and SoC Peripherals.
+	  This enables diagchar for maemo usb gadget or android usb gadget
+	  based on config selected.
+
+config DIAG_OVER_USB
+	bool "Enable DIAG traffic to go over USB"
+	depends on DIAG_CHAR
+	depends on ARCH_QCOM
+	default y
+	help
+	  Diag over USB enables sending DIAG traffic over a USB transport. When
+	  the USB endpoints become available, the DIAG driver will enable Diag
+	  traffic over USB. This allows for host side tools to parse and display
+	  Diag traffic from the USB endpoint.
+
+config DIAGFWD_BRIDGE_CODE
+	bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
+	depends on DIAG_CHAR
+	depends on USB_QCOM_DIAG_BRIDGE || MSM_MHI
+	default y
+	help
+	  SMUX/HSIC Transport Layer for DIAG Router. When the MHI/SMUX endpoints
+	  become available, this bridge driver enables DIAG traffic over MHI
+	  and SMUX.
+
+endmenu
diff --git a/drivers/char/diag/Makefile b/drivers/char/diag/Makefile
new file mode 100644
index 0000000..b61aae8
--- /dev/null
+++ b/drivers/char/diag/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_DIAG_CHAR) := diagchar.o
+obj-$(CONFIG_DIAGFWD_BRIDGE_CODE) += diagfwd_bridge.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_smux.o
+obj-$(CONFIG_MSM_MHI) += diagfwd_mhi.o
+diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_glink.o diagfwd_peripheral.o diagfwd_socket.o diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
new file mode 100644
index 0000000..ea684fc
--- /dev/null
+++ b/drivers/char/diag/diag_dci.c
@@ -0,0 +1,3169 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include <linux/reboot.h>
+#include <asm/current.h>
+#include <soc/qcom/restart.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+static struct timer_list dci_drain_timer;
+static int dci_timer_in_progress;
+static struct work_struct dci_data_drain_work;
+
+struct diag_dci_partial_pkt_t partial_pkt;
+
+unsigned int dci_max_reg = 100;
+unsigned int dci_max_clients = 10;
+struct mutex dci_log_mask_mutex;
+struct mutex dci_event_mask_mutex;
+
+/*
+ * DCI_HANDSHAKE_RETRY_TIME: Time to wait (in microseconds) before checking the
+ * connection status again.
+ *
+ * DCI_HANDSHAKE_WAIT_TIME: Timeout (in milliseconds) to check for dci
+ * connection status
+ */
+#define DCI_HANDSHAKE_RETRY_TIME	500000
+#define DCI_HANDSHAKE_WAIT_TIME		200
+
+spinlock_t ws_lock;
+unsigned long ws_lock_flags;
+
+struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC] = {
+	{
+		.ctx = 0,
+		.send_log_mask = diag_send_dci_log_mask,
+		.send_event_mask = diag_send_dci_event_mask,
+		.peripheral_status = 0,
+		.mempool = 0,
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.ctx = DIAGFWD_MDM_DCI,
+		.send_log_mask = diag_send_dci_log_mask_remote,
+		.send_event_mask = diag_send_dci_event_mask_remote,
+		.peripheral_status = 0,
+		.mempool = POOL_TYPE_MDM_DCI_WRITE,
+	}
+#endif
+};
+
+struct dci_channel_status_t dci_channel_status[NUM_DCI_PROC] = {
+	{
+		.id = 0,
+		.open = 0,
+		.retry_count = 0
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = DIAGFWD_MDM_DCI,
+		.open = 0,
+		.retry_count = 0
+	}
+#endif
+};
+
+/* Number of milliseconds anticipated to process the DCI data */
+#define DCI_WAKEUP_TIMEOUT 1
+
+#define DCI_CAN_ADD_BUF_TO_LIST(buf)					\
+	(buf && buf->data && !buf->in_busy && buf->data_len > 0)	\
+
+#ifdef CONFIG_DEBUG_FS
+struct diag_dci_data_info *dci_traffic;
+struct mutex dci_stat_mutex;
+/*
+ * Record one DCI read into the circular debugfs statistics buffer
+ * (dci_traffic, DIAG_DCI_DEBUG_CNT slots). No-op when the buffer has
+ * not been allocated.
+ */
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+			     uint8_t peripheral, uint8_t proc)
+{
+	static int curr_dci_data;
+	static unsigned long iteration;
+	struct diag_dci_data_info *temp_data = dci_traffic;
+
+	if (!temp_data)
+		return;
+	mutex_lock(&dci_stat_mutex);
+	/* wrap around once every slot has been used */
+	if (curr_dci_data == DIAG_DCI_DEBUG_CNT)
+		curr_dci_data = 0;
+	temp_data += curr_dci_data;
+	temp_data->iteration = iteration + 1;
+	temp_data->data_size = read_bytes;
+	temp_data->peripheral = peripheral;
+	temp_data->ch_type = ch_type;
+	temp_data->proc = proc;
+	diag_get_timestamp(temp_data->time_stamp);
+	curr_dci_data++;
+	iteration++;
+	mutex_unlock(&dci_stat_mutex);
+}
+#else
+/* Stub used when debugfs support is compiled out */
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+			     uint8_t peripheral, uint8_t proc) { }
+#endif
+/*
+ * Fill 'mask' with the hard coded DCI log mask layout: for each of the
+ * DCI_MAX_LOG_CODES equipment ids, one byte of equip id, one byte of
+ * dirty flag, then DCI_MAX_ITEMS_PER_LOG_CODE zeroed mask bytes.
+ */
+static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
+{
+	unsigned char *temp = mask;
+	uint8_t i;
+
+	if (!mask)
+		return;
+
+	/* create hard coded table for log mask with 16 categories */
+	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+		*temp = i;
+		temp++;
+		*temp = dirty ? 1 : 0;
+		temp++;
+		memset(temp, 0, DCI_MAX_ITEMS_PER_LOG_CODE);
+		temp += DCI_MAX_ITEMS_PER_LOG_CODE;
+	}
+}
+
+/* Clear a DCI event mask table (all events disabled) */
+static void create_dci_event_mask_tbl(unsigned char *tbl_buf)
+{
+	if (tbl_buf)
+		memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
+}
+
+/* dci_drain_timer callback: defer the actual drain to the DCI workqueue */
+void dci_drain_data(unsigned long data)
+{
+	queue_work(driver->diag_dci_wq, &dci_data_drain_work);
+}
+
+/*
+ * Arm the 200 ms drain timer unless one is already pending.
+ * dci_timer_in_progress is cleared again by dci_data_drain_work_fn().
+ */
+static void dci_check_drain_timer(void)
+{
+	if (!dci_timer_in_progress) {
+		dci_timer_in_progress = 1;
+		mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(200));
+	}
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * Poll the remote DCI channel's connection state after a handshake
+ * packet was sent. Retries up to max_retries times (re-arming the
+ * wait_time timer each attempt) and closes the bridge channel when the
+ * handshake never completes.
+ */
+static void dci_handshake_work_fn(struct work_struct *work)
+{
+	int err = 0;
+	int max_retries = 5;
+
+	struct dci_channel_status_t *status = container_of(work,
+						struct dci_channel_status_t,
+						handshake_work);
+
+	if (status->open) {
+		pr_debug("diag: In %s, remote dci channel is open, index: %d\n",
+			 __func__, status->id);
+		return;
+	}
+
+	if (status->retry_count == max_retries) {
+		status->retry_count = 0;
+		pr_info("diag: dci channel connection handshake timed out, id: %d\n",
+			status->id);
+		err = diagfwd_bridge_close(TOKEN_TO_BRIDGE(status->id));
+		if (err) {
+			pr_err("diag: In %s, unable to close dci channel id: %d, err: %d\n",
+			       __func__, status->id, err);
+		}
+		return;
+	}
+	status->retry_count++;
+	/*
+	 * Sleep for sometime to check for the connection status again. The
+	 * value should be optimum to include a roundabout time for a small
+	 * packet to the remote processor.
+	 */
+	usleep_range(DCI_HANDSHAKE_RETRY_TIME, DCI_HANDSHAKE_RETRY_TIME + 100);
+	mod_timer(&status->wait_time,
+		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+}
+
+/*
+ * wait_time timer callback: requeue the handshake work for the DCI
+ * processor index encoded in 'data'.
+ */
+static void dci_chk_handshake(unsigned long data)
+{
+	int index = (int)data;
+
+	if (index < 0 || index >= NUM_DCI_PROC)
+		return;
+
+	queue_work(driver->diag_dci_wq,
+		   &dci_channel_status[index].handshake_work);
+}
+#endif
+
+/*
+ * Initialise a DCI buffer's storage and bookkeeping for the given
+ * type. Primary and command buffers are allocated here; secondary
+ * buffers get their data pointer later from the DCI mempool (see
+ * diag_dci_get_buffer). Returns 0 on success, -EINVAL on bad
+ * arguments/type, -ENOMEM on allocation failure.
+ */
+static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
+{
+	if (!buffer || buffer->data)
+		return -EINVAL;
+
+	switch (type) {
+	case DCI_BUF_PRIMARY:
+		buffer->capacity = IN_BUF_SIZE;
+		buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
+		if (!buffer->data)
+			return -ENOMEM;
+		break;
+	case DCI_BUF_SECONDARY:
+		buffer->data = NULL;
+		buffer->capacity = IN_BUF_SIZE;
+		break;
+	case DCI_BUF_CMD:
+		buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE;
+		buffer->data = kzalloc(buffer->capacity, GFP_KERNEL);
+		if (!buffer->data)
+			return -ENOMEM;
+		break;
+	default:
+		pr_err("diag: In %s, unknown type %d", __func__, type);
+		return -EINVAL;
+	}
+
+	buffer->data_len = 0;
+	buffer->in_busy = 0;
+	buffer->buf_type = type;
+	mutex_init(&buffer->data_mutex);
+
+	return 0;
+}
+
+/*
+ * Return 1 if 'buf' is idle and has room for 'len' more bytes,
+ * 0 otherwise, -EINVAL for a NULL buffer.
+ */
+static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
+{
+	if (!buf)
+		return -EINVAL;
+
+	/* Return 1 if the buffer is not busy and can hold new data */
+	if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Queue a filled DCI buffer on the client's write list so it can be
+ * delivered to userspace. Marks the buffer busy and in-list and takes
+ * a wakeup-source reference sized by the pending data.
+ */
+static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
+				   struct diag_dci_buffer_t *buf)
+{
+	if (!buf || !client || !buf->data)
+		return;
+
+	/* already queued, or nothing to deliver */
+	if (buf->in_list || buf->data_len == 0)
+		return;
+
+	mutex_lock(&client->write_buf_mutex);
+	list_add_tail(&buf->buf_track, &client->list_write_buf);
+	/*
+	 * In the case of DCI, there can be multiple packets in one read. To
+	 * calculate the wakeup source reference count, we must account for each
+	 * packet in a single read.
+	 */
+	diag_ws_on_read(DIAG_WS_DCI, buf->data_len);
+	mutex_lock(&buf->data_mutex);
+	buf->in_busy = 1;
+	buf->in_list = 1;
+	mutex_unlock(&buf->data_mutex);
+	mutex_unlock(&client->write_buf_mutex);
+}
+
+/*
+ * Ensure client->buffers[data_source].buf_curr can take 'len' more
+ * bytes: keep the current buffer if it fits, otherwise retire it to
+ * the client's write list and fall back to the primary buffer, or to a
+ * freshly allocated secondary buffer backed by the DCI mempool.
+ * Returns 0 on success, a negative errno on failure.
+ */
+static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
+			       int data_source, int len)
+{
+	struct diag_dci_buffer_t *buf_primary = NULL;
+	struct diag_dci_buffer_t *buf_temp = NULL;
+	struct diag_dci_buffer_t *curr = NULL;
+
+	if (!client)
+		return -EINVAL;
+	if (len < 0 || len > IN_BUF_SIZE)
+		return -EINVAL;
+
+	curr = client->buffers[data_source].buf_curr;
+	buf_primary = client->buffers[data_source].buf_primary;
+
+	if (curr && diag_dci_check_buffer(curr, len) == 1)
+		return 0;
+
+	/* current buffer is full or busy; queue it for delivery */
+	dci_add_buffer_to_list(client, curr);
+	client->buffers[data_source].buf_curr = NULL;
+
+	if (diag_dci_check_buffer(buf_primary, len) == 1) {
+		client->buffers[data_source].buf_curr = buf_primary;
+		return 0;
+	}
+
+	/*
+	 * NOTE(review): kzalloc failure below returns -EIO rather than
+	 * -ENOMEM; copy_dci_event() counts only -ENOMEM as a dropped
+	 * event — confirm this distinction is intentional.
+	 */
+	buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
+	if (!buf_temp)
+		return -EIO;
+
+	if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
+		buf_temp->data = diagmem_alloc(driver, IN_BUF_SIZE,
+					       POOL_TYPE_DCI);
+		if (!buf_temp->data) {
+			kfree(buf_temp);
+			buf_temp = NULL;
+			return -ENOMEM;
+		}
+		client->buffers[data_source].buf_curr = buf_temp;
+		return 0;
+	}
+
+	kfree(buf_temp);
+	buf_temp = NULL;
+	return -EIO;
+}
+
+/*
+ * Wake every DCI client that has queued write buffers and is not
+ * already servicing a read, by signalling its sleeping process.
+ */
+void diag_dci_wakeup_clients(void)
+{
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+
+		/*
+		 * Don't wake up the client when there is no pending buffer to
+		 * write or when it is writing to user space
+		 */
+		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+			mutex_lock(&entry->write_buf_mutex);
+			entry->in_service = 1;
+			mutex_unlock(&entry->write_buf_mutex);
+			diag_update_sleeping_process(entry->client->tgid,
+						     DCI_DATA_TYPE);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+}
+
+/*
+ * Workqueue handler armed via dci_check_drain_timer(): for every
+ * client, push each non-empty, idle buffer (primary, command and
+ * current) onto its write list, wake the client if needed, then allow
+ * the drain timer to be re-armed.
+ */
+void dci_data_drain_work_fn(struct work_struct *work)
+{
+	int i;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	struct diag_dci_buffer_t *buf_temp = NULL;
+
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		for (i = 0; i < entry->num_buffers; i++) {
+			proc_buf = &entry->buffers[i];
+
+			mutex_lock(&proc_buf->buf_mutex);
+			buf_temp = proc_buf->buf_primary;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+				dci_add_buffer_to_list(entry, buf_temp);
+
+			buf_temp = proc_buf->buf_cmd;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
+				dci_add_buffer_to_list(entry, buf_temp);
+
+			/* the current buffer is retired once queued */
+			buf_temp = proc_buf->buf_curr;
+			if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
+				dci_add_buffer_to_list(entry, buf_temp);
+				proc_buf->buf_curr = NULL;
+			}
+			mutex_unlock(&proc_buf->buf_mutex);
+		}
+		if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
+			mutex_lock(&entry->write_buf_mutex);
+			entry->in_service = 1;
+			mutex_unlock(&entry->write_buf_mutex);
+			diag_update_sleeping_process(entry->client->tgid,
+						     DCI_DATA_TYPE);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+	dci_timer_in_progress = 0;
+}
+
+/*
+ * Dispatch one complete DCI packet to the extractor matching its
+ * first byte (the command code). Returns 0 on success, -EIO on bad
+ * input, -EINVAL for an unknown command code.
+ */
+static int diag_process_single_dci_pkt(unsigned char *buf, int len,
+				       int data_source, int token)
+{
+	uint8_t cmd_code = 0;
+
+	if (!buf || len < 0) {
+		pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+			__func__, buf, len);
+		return -EIO;
+	}
+
+	cmd_code = *(uint8_t *)buf;
+
+	switch (cmd_code) {
+	case LOG_CMD_CODE:
+		extract_dci_log(buf, len, data_source, token, NULL);
+		break;
+	case EVENT_CMD_CODE:
+		extract_dci_events(buf, len, data_source, token, NULL);
+		break;
+	case EXT_HDR_CMD_CODE:
+		extract_dci_ext_pkt(buf, len, data_source, token);
+		break;
+	case DCI_PKT_RSP_CODE:
+	case DCI_DELAYED_RSP_CODE:
+		extract_dci_pkt_rsp(buf, len, data_source, token);
+		break;
+	case DCI_CONTROL_PKT_CODE:
+		extract_dci_ctrl_pkt(buf, len, token);
+		break;
+	default:
+		pr_err("diag: Unable to process single DCI packet, cmd_code: %d, data_source: %d",
+			cmd_code, data_source);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Process the data read from an apps userspace client: validate the
+ * data type, run the packet through the common DCI parser, then wake
+ * any clients that now have pending data.
+ */
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
+{
+	int err = 0;
+
+	if (!buf) {
+		pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
+		return;
+	}
+
+	if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
+						&& data_type != DCI_PKT_TYPE) {
+		pr_err("diag: In %s, unsupported data_type: 0x%x\n",
+				__func__, (unsigned int)data_type);
+		return;
+	}
+
+	err = diag_process_single_dci_pkt(buf, recd_bytes, APPS_DATA,
+					  DCI_LOCAL_PROC);
+	if (err)
+		return;
+
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+}
+
+/*
+ * Reassemble and process DCI data arriving from a remote (bridge)
+ * processor. Packets may be split across reads: any pending partial
+ * packet in 'partial_pkt' is drained first, then the remaining
+ * complete packets in 'buf' are walked one by one.
+ */
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes)
+{
+	int read_bytes = 0, err = 0;
+	uint16_t dci_pkt_len;
+	struct diag_dci_header_t *header = NULL;
+	int header_len = sizeof(struct diag_dci_header_t);
+	int token = BRIDGE_TO_TOKEN(index);
+
+	if (!buf)
+		return;
+
+	diag_dci_record_traffic(recd_bytes, 0, 0, token);
+
+	if (!partial_pkt.processing)
+		goto start;
+
+	if (partial_pkt.remaining > recd_bytes) {
+		/* still not enough bytes: accumulate the whole read */
+		if ((partial_pkt.read_len + recd_bytes) >
+							(MAX_DCI_PACKET_SZ)) {
+			pr_err("diag: Invalid length %d, %d received in %s\n",
+			       partial_pkt.read_len, recd_bytes, __func__);
+			goto end;
+		}
+		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+								recd_bytes);
+		read_bytes += recd_bytes;
+		buf += read_bytes;
+		partial_pkt.read_len += recd_bytes;
+		partial_pkt.remaining -= recd_bytes;
+	} else {
+		/* this read completes the pending packet */
+		if ((partial_pkt.read_len + partial_pkt.remaining) >
+							(MAX_DCI_PACKET_SZ)) {
+			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+			       partial_pkt.read_len,
+			       partial_pkt.remaining, __func__);
+			goto end;
+		}
+		memcpy(partial_pkt.data + partial_pkt.read_len, buf,
+						partial_pkt.remaining);
+		read_bytes += partial_pkt.remaining;
+		buf += read_bytes;
+		partial_pkt.read_len += partial_pkt.remaining;
+		partial_pkt.remaining = 0;
+	}
+
+	if (partial_pkt.remaining == 0) {
+		/*
+		 * Retrieve from the DCI control packet after the header = start
+		 * (1 byte) + version (1 byte) + length (2 bytes)
+		 */
+		diag_process_single_dci_pkt(partial_pkt.data + 4,
+				partial_pkt.read_len - header_len,
+				DCI_REMOTE_DATA, token);
+		partial_pkt.read_len = 0;
+		partial_pkt.total_len = 0;
+		partial_pkt.processing = 0;
+		goto start;
+	}
+	goto end;
+
+start:
+	while (read_bytes < recd_bytes) {
+		header = (struct diag_dci_header_t *)buf;
+		dci_pkt_len = header->length;
+
+		/* with no clients, skip everything but control packets */
+		if (header->cmd_code != DCI_CONTROL_PKT_CODE &&
+			driver->num_dci_client == 0) {
+			read_bytes += header_len + dci_pkt_len;
+			buf += header_len + dci_pkt_len;
+			continue;
+		}
+
+		if (dci_pkt_len + header_len > MAX_DCI_PACKET_SZ) {
+			pr_err("diag: Invalid length in the dci packet field %d\n",
+								dci_pkt_len);
+			break;
+		}
+
+		/* packet is truncated: stash it and wait for the next read */
+		if ((dci_pkt_len + header_len) > (recd_bytes - read_bytes)) {
+			partial_pkt.read_len = recd_bytes - read_bytes;
+			partial_pkt.total_len = dci_pkt_len + header_len;
+			partial_pkt.remaining = partial_pkt.total_len -
+						partial_pkt.read_len;
+			partial_pkt.processing = 1;
+			memcpy(partial_pkt.data, buf, partial_pkt.read_len);
+			break;
+		}
+		/*
+		 * Retrieve from the DCI control packet after the header = start
+		 * (1 byte) + version (1 byte) + length (2 bytes)
+		 *
+		 * NOTE(review): the partial-packet path above passes 'token'
+		 * here while this path passes DCI_MDM_PROC — confirm these
+		 * are equivalent for every bridge index.
+		 */
+		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+						 DCI_REMOTE_DATA, DCI_MDM_PROC);
+		if (err)
+			break;
+		read_bytes += header_len + dci_pkt_len;
+		buf += header_len + dci_pkt_len; /* advance to next DCI pkt */
+	}
+end:
+	if (err)
+		return;
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+}
+
+/*
+ * Process the data read from the peripheral dci channels. Each packet
+ * is framed as start byte (1) + version (1) + length (2) + payload +
+ * end byte (1); the payload of each frame is handed to
+ * diag_process_single_dci_pkt().
+ */
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+				      int recd_bytes)
+{
+	int read_bytes = 0, err = 0;
+	uint16_t dci_pkt_len;
+	struct diag_dci_pkt_header_t *header = NULL;
+	uint8_t recv_pkt_cmd_code;
+
+	if (!buf || !p_info)
+		return;
+
+	/*
+	 * Release wakeup source when there are no more clients to
+	 * process DCI data
+	 */
+	if (driver->num_dci_client == 0) {
+		diag_ws_reset(DIAG_WS_DCI);
+		return;
+	}
+
+	diag_dci_record_traffic(recd_bytes, p_info->type, p_info->peripheral,
+				DCI_LOCAL_PROC);
+	while (read_bytes < recd_bytes) {
+		header = (struct diag_dci_pkt_header_t *)buf;
+		recv_pkt_cmd_code = header->pkt_code;
+		dci_pkt_len = header->len;
+
+		/*
+		 * Check if the length of the current packet is lesser than the
+		 * remaining bytes in the received buffer. This includes space
+		 * for the Start byte (1), Version byte (1), length bytes (2)
+		 * and End byte (1)
+		 */
+		if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
+			pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
+				__func__, recd_bytes, dci_pkt_len);
+			diag_ws_release();
+			return;
+		}
+		/*
+		 * Retrieve from the DCI control packet after the header = start
+		 * (1 byte) + version (1 byte) + length (2 bytes)
+		 */
+		err = diag_process_single_dci_pkt(buf + 4, dci_pkt_len,
+						  (int)p_info->peripheral,
+						  DCI_LOCAL_PROC);
+		if (err) {
+			diag_ws_release();
+			break;
+		}
+		read_bytes += 5 + dci_pkt_len;
+		buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
+	}
+
+	if (err)
+		return;
+	/* wake up all sleeping DCI clients which have some data */
+	diag_dci_wakeup_clients();
+	dci_check_drain_timer();
+}
+
+/*
+ * Return 1 if 'log_code' is enabled in the client's DCI log mask,
+ * 0 if it is disabled or the arguments are invalid.
+ */
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+			    uint16_t log_code)
+{
+	uint16_t item_num;
+	uint8_t equip_id, *log_mask_ptr, byte_mask;
+	int byte_index, offset;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
+	}
+
+	equip_id = LOG_GET_EQUIP_ID(log_code);
+	item_num = LOG_GET_ITEM_NUM(log_code);
+	/* 2-byte (equip id, dirty) prefix precedes each item bitmap */
+	byte_index = item_num/8 + 2;
+	byte_mask = 0x01 << (item_num % 8);
+	offset = equip_id * 514;
+
+	/*
+	 * Valid indices into the mask are 0..DCI_LOG_MASK_SIZE-1, so an
+	 * index equal to DCI_LOG_MASK_SIZE must be rejected too; the
+	 * previous '>' check allowed a one-byte out-of-bounds read.
+	 */
+	if (offset + byte_index >= DCI_LOG_MASK_SIZE) {
+		pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
+				__func__, offset, log_code, byte_index);
+		return 0;
+	}
+
+	log_mask_ptr = entry->dci_log_mask;
+	log_mask_ptr = log_mask_ptr + offset + byte_index;
+	return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+
+}
+
+/*
+ * Return 1 if 'event_id' is enabled in the client's DCI event mask,
+ * 0 if it is disabled or the arguments are invalid.
+ */
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+			      uint16_t event_id)
+{
+	uint8_t *event_mask_ptr, byte_mask;
+	int byte_index, bit_index;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return 0;
+	}
+
+	byte_index = event_id/8;
+	bit_index = event_id % 8;
+	byte_mask = 0x1 << bit_index;
+
+	/*
+	 * Valid indices into the mask are 0..DCI_EVENT_MASK_SIZE-1, so
+	 * an index equal to DCI_EVENT_MASK_SIZE must be rejected too;
+	 * the previous '>' check allowed a one-byte out-of-bounds read.
+	 */
+	if (byte_index >= DCI_EVENT_MASK_SIZE) {
+		pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
+				__func__, event_id, byte_index);
+		return 0;
+	}
+
+	event_mask_ptr = entry->dci_event_mask;
+	event_mask_ptr = event_mask_ptr + byte_index;
+	return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
+}
+
+/*
+ * Return 1 if the request is a mask/preset configuration command that
+ * DCI clients must not be allowed to send, 0 if it may pass through.
+ * NOTE(review): a NULL header yields -ENOMEM, which is also non-zero —
+ * confirm callers treat any non-zero value as "filtered".
+ */
+static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
+{
+	if (!header)
+		return -ENOMEM;
+
+	switch (header->cmd_code) {
+	case 0x7d: /* Msg Mask Configuration */
+	case 0x73: /* Log Mask Configuration */
+	case 0x81: /* Event Mask Configuration */
+	case 0x82: /* Event Mask Change */
+	case 0x60: /* Event Mask Toggle */
+		return 1;
+	}
+
+	/* subsystem dispatch (0x4b) to the diag subsystem (0x12) */
+	if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
+		switch (header->subsys_cmd_code) {
+		case 0x60: /* Extended Event Mask Config */
+		case 0x61: /* Extended Msg Mask Config */
+		case 0x62: /* Extended Log Mask Config */
+		case 0x20C: /* Set current Preset ID */
+		case 0x20D: /* Get current Preset ID */
+		case 0x218: /* HDLC Disabled Command */
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Allocate and enqueue a tracking entry for an outstanding DCI command
+ * so its response can be routed back to the requesting client. The new
+ * entry carries a fresh, driver-wide unique tag.
+ * NOTE(review): driver->dci_tag and dci_req_list are shared state —
+ * confirm callers hold driver->dci_mutex.
+ */
+static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid,
+								 int client_id)
+{
+	struct dci_pkt_req_entry_t *entry = NULL;
+
+	entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	driver->dci_tag++;
+	entry->client_id = client_id;
+	entry->uid = uid;
+	entry->tag = driver->dci_tag;
+	pr_debug("diag: Registering DCI cmd req, client_id: %d, uid: %d, tag:%d\n",
+				entry->client_id, entry->uid, entry->tag);
+	list_add_tail(&entry->track, &driver->dci_req_list);
+
+	return entry;
+}
+
+/* Find the pending DCI request entry matching 'tag', or NULL */
+static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
+{
+	struct list_head *start, *temp;
+	struct dci_pkt_req_entry_t *entry = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_req_list) {
+		entry = list_entry(start, struct dci_pkt_req_entry_t, track);
+		if (entry->tag == tag)
+			return entry;
+	}
+	return NULL;
+}
+
+/*
+ * Decide whether a response completes its pending request. Returns 1
+ * when the request entry was removed and freed (immediate response, or
+ * final delayed response), 0 when more delayed responses are expected,
+ * and a negative errno on invalid input.
+ */
+static int diag_dci_remove_req_entry(unsigned char *buf, int len,
+				     struct dci_pkt_req_entry_t *entry)
+{
+	uint16_t rsp_count = 0, delayed_rsp_id = 0;
+
+	if (!buf || len <= 0 || !entry) {
+		pr_err("diag: In %s, invalid input buf: %pK, len: %d, entry: %pK\n",
+			__func__, buf, len, entry);
+		return -EIO;
+	}
+
+	/* It is an immediate response, delete it from the table */
+	if (*buf != 0x80) {
+		list_del(&entry->track);
+		kfree(entry);
+		return 1;
+	}
+
+	/* It is a delayed response. Check if the length is valid */
+	if (len < MIN_DELAYED_RSP_LEN) {
+		pr_err("diag: Invalid delayed rsp packet length %d\n", len);
+		return -EINVAL;
+	}
+
+	/*
+	 * If the delayed response id field (uint16_t at byte 8) is 0 then
+	 * there is only one response and we can remove the request entry.
+	 */
+	delayed_rsp_id = *(uint16_t *)(buf + 8);
+	if (delayed_rsp_id == 0) {
+		list_del(&entry->track);
+		kfree(entry);
+		return 1;
+	}
+
+	/*
+	 * Check the response count field (uint16 at byte 10). The request
+	 * entry can be deleted if it is the last response in the sequence.
+	 * It is the last response in the sequence if the response count
+	 * is 1 or if the signed bit gets dropped.
+	 */
+	rsp_count = *(uint16_t *)(buf + 10);
+	if (rsp_count > 0 && rsp_count < 0x1000) {
+		list_del(&entry->track);
+		kfree(entry);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Process a DCI connection status control packet: for each
+ * (peripheral, status) pair in the payload, notify interested DCI
+ * clients that the peripheral's DCI channel went up or down.
+ */
+static void dci_process_ctrl_status(unsigned char *buf, int len, int token)
+{
+	struct diag_ctrl_dci_status *header = NULL;
+	unsigned char *temp = buf;
+	uint32_t read_len = 0;
+	uint8_t i;
+	int peripheral_mask, status;
+
+	if (!buf || (len < sizeof(struct diag_ctrl_dci_status))) {
+		pr_err("diag: In %s, invalid buf %pK or length: %d\n",
+		       __func__, buf, len);
+		return;
+	}
+
+	if (!VALID_DCI_TOKEN(token)) {
+		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+		return;
+	}
+
+	header = (struct diag_ctrl_dci_status *)temp;
+	temp += sizeof(struct diag_ctrl_dci_status);
+	read_len += sizeof(struct diag_ctrl_dci_status);
+
+	for (i = 0; i < header->count; i++) {
+		/*
+		 * Each entry consumes two more bytes (peripheral id and
+		 * status); reject the packet if they are not fully present.
+		 * The previous 'read_len > len' check still permitted a
+		 * two-byte out-of-bounds read on a truncated packet.
+		 */
+		if ((read_len + 2) > len) {
+			pr_err("diag: In %s, Invalid length len: %d\n",
+			       __func__, len);
+			return;
+		}
+
+		switch (*(uint8_t *)temp) {
+		case PERIPHERAL_MODEM:
+			peripheral_mask = DIAG_CON_MPSS;
+			break;
+		case PERIPHERAL_LPASS:
+			peripheral_mask = DIAG_CON_LPASS;
+			break;
+		case PERIPHERAL_WCNSS:
+			peripheral_mask = DIAG_CON_WCNSS;
+			break;
+		case PERIPHERAL_SENSORS:
+			peripheral_mask = DIAG_CON_SENSORS;
+			break;
+		default:
+			pr_err("diag: In %s, unknown peripheral, peripheral: %d\n",
+				__func__, *(uint8_t *)temp);
+			return;
+		}
+		temp += sizeof(uint8_t);
+		read_len += sizeof(uint8_t);
+
+		status = (*(uint8_t *)temp) ? DIAG_STATUS_OPEN :
+							DIAG_STATUS_CLOSED;
+		temp += sizeof(uint8_t);
+		read_len += sizeof(uint8_t);
+		diag_dci_notify_client(peripheral_mask, status, token);
+	}
+}
+
+/*
+ * Handle a DCI handshake control packet from a remote processor: when
+ * the magic value matches, mark the channel open and push the current
+ * log and event masks to it.
+ */
+static void dci_process_ctrl_handshake_pkt(unsigned char *buf, int len,
+					   int token)
+{
+	struct diag_ctrl_dci_handshake_pkt *header = NULL;
+	unsigned char *temp = buf;
+	int err = 0;
+
+	if (!buf || (len < sizeof(struct diag_ctrl_dci_handshake_pkt)))
+		return;
+
+	if (!VALID_DCI_TOKEN(token))
+		return;
+
+	header = (struct diag_ctrl_dci_handshake_pkt *)temp;
+	if (header->magic == DCI_MAGIC) {
+		dci_channel_status[token].open = 1;
+		err = dci_ops_tbl[token].send_log_mask(token);
+		if (err) {
+			pr_err("diag: In %s, unable to send log mask to token: %d, err: %d\n",
+			       __func__, token, err);
+		}
+		err = dci_ops_tbl[token].send_event_mask(token);
+		if (err) {
+			pr_err("diag: In %s, unable to send event mask to token: %d, err: %d\n",
+			       __func__, token, err);
+		}
+	}
+}
+
+/*
+ * Entry point for DCI control packets: skip the control command code,
+ * then dispatch on the 4-byte control packet id (connection status or
+ * handshake). Unknown ids are logged at debug level and ignored.
+ */
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token)
+{
+	unsigned char *temp = buf;
+	uint32_t ctrl_pkt_id;
+
+	diag_ws_on_read(DIAG_WS_DCI, len);
+	if (!buf) {
+		pr_err("diag: Invalid buffer in %s\n", __func__);
+		goto err;
+	}
+
+	if (len < (sizeof(uint8_t) + sizeof(uint32_t))) {
+		pr_err("diag: In %s, invalid length %d\n", __func__, len);
+		goto err;
+	}
+
+	/* Skip the Control packet command code */
+	temp += sizeof(uint8_t);
+	len -= sizeof(uint8_t);
+	ctrl_pkt_id = *(uint32_t *)temp;
+	switch (ctrl_pkt_id) {
+	case DIAG_CTRL_MSG_DCI_CONNECTION_STATUS:
+		dci_process_ctrl_status(temp, len, token);
+		break;
+	case DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT:
+		dci_process_ctrl_handshake_pkt(temp, len, token);
+		break;
+	default:
+		pr_debug("diag: In %s, unknown control pkt %d\n",
+			 __func__, ctrl_pkt_id);
+		break;
+	}
+
+err:
+	/*
+	 * DCI control packets are not consumed by the clients. Mimic client
+	 * consumption by setting and clearing the wakeup source copy_count
+	 * explicitly.
+	 */
+	diag_ws_on_copy_fail(DIAG_WS_DCI);
+}
+
+/*
+ * Match a DCI command response (immediate or delayed) to its pending
+ * request via the embedded tag, strip the DCI framing, and queue the
+ * response on the owning client's command response buffer for
+ * delivery to userspace.
+ */
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+			 int token)
+{
+	int tag;
+	struct diag_dci_client_tbl *entry = NULL;
+	void *temp_buf = NULL;
+	uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
+	uint32_t rsp_len = 0;
+	struct diag_dci_buffer_t *rsp_buf = NULL;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	unsigned char *temp = buf;
+	int save_req_uid = 0;
+	struct diag_dci_pkt_rsp_header_t pkt_rsp_header;
+
+	if (!buf) {
+		pr_err("diag: Invalid pointer in %s\n", __func__);
+		return;
+	}
+	/* immediate responses use a 1-byte code, delayed ones 4 bytes */
+	dci_cmd_code = *(uint8_t *)(temp);
+	if (dci_cmd_code == DCI_PKT_RSP_CODE) {
+		cmd_code_len = sizeof(uint8_t);
+	} else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
+		cmd_code_len = sizeof(uint32_t);
+	} else {
+		pr_err("diag: In %s, invalid command code %d\n", __func__,
+								dci_cmd_code);
+		return;
+	}
+	temp += cmd_code_len;
+	tag = *(int *)temp;
+	temp += sizeof(int);
+
+	/*
+	 * The size of the response is (total length) - (length of the command
+	 * code, the tag (int)
+	 */
+	rsp_len = len - (cmd_code_len + sizeof(int));
+	if ((rsp_len == 0) || (rsp_len > (len - 5))) {
+		pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d",
+						__func__, len, rsp_len);
+		return;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	req_entry = diag_dci_get_request_entry(tag);
+	if (!req_entry) {
+		pr_err_ratelimited("diag: No matching client for DCI data\n");
+		mutex_unlock(&driver->dci_mutex);
+		return;
+	}
+
+	entry = diag_dci_get_client_entry(req_entry->client_id);
+	if (!entry) {
+		pr_err("diag: In %s, couldn't find client entry, id:%d\n",
+						__func__, req_entry->client_id);
+		mutex_unlock(&driver->dci_mutex);
+		return;
+	}
+
+	/* req_entry may be freed below; keep the uid for the header */
+	save_req_uid = req_entry->uid;
+	/* Remove the headers and send only the response to this function */
+	delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
+	if (delete_flag < 0) {
+		mutex_unlock(&driver->dci_mutex);
+		return;
+	}
+
+	mutex_lock(&entry->buffers[data_source].buf_mutex);
+	rsp_buf = entry->buffers[data_source].buf_cmd;
+
+	mutex_lock(&rsp_buf->data_mutex);
+	/*
+	 * Check if we can fit the data in the rsp buffer. The total length of
+	 * the rsp is the rsp length (write_len) + DCI_PKT_RSP_TYPE header (int)
+	 * + field for length (int) + delete_flag (uint8_t)
+	 */
+	if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
+		pr_alert("diag: create capacity for pkt rsp\n");
+		rsp_buf->capacity += 9 + rsp_len;
+		temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
+				    GFP_KERNEL);
+		if (!temp_buf) {
+			pr_err("diag: DCI realloc failed\n");
+			mutex_unlock(&rsp_buf->data_mutex);
+			mutex_unlock(&entry->buffers[data_source].buf_mutex);
+			mutex_unlock(&driver->dci_mutex);
+			return;
+		}
+		rsp_buf->data = temp_buf;
+	}
+
+	/* Fill in packet response header information */
+	pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
+	/* Packet Length = Response Length + Length of uid field (int) */
+	pkt_rsp_header.length = rsp_len + sizeof(int);
+	pkt_rsp_header.delete_flag = delete_flag;
+	pkt_rsp_header.uid = save_req_uid;
+	memcpy(rsp_buf->data + rsp_buf->data_len, &pkt_rsp_header,
+		sizeof(struct diag_dci_pkt_rsp_header_t));
+	rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
+	memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
+	rsp_buf->data_len += rsp_len;
+	rsp_buf->data_source = data_source;
+
+	mutex_unlock(&rsp_buf->data_mutex);
+
+	/*
+	 * Add directly to the list for writing responses to the
+	 * userspace as these shouldn't be buffered and shouldn't wait
+	 * for log and event buffers to be full
+	 */
+	dci_add_buffer_to_list(entry, rsp_buf);
+	mutex_unlock(&entry->buffers[data_source].buf_mutex);
+	mutex_unlock(&driver->dci_mutex);
+}
+
+/*
+ * Append a DCI_EXT_HDR_TYPE record (type int + EXT_HDR_LEN bytes of
+ * extended header) to the client buffer. Callers hold the buffer's
+ * data_mutex and have already reserved capacity (see copy_dci_event).
+ */
+static void copy_ext_hdr(struct diag_dci_buffer_t *data_buffer, void *ext_hdr)
+{
+	if (!data_buffer) {
+		pr_err("diag: In %s, data buffer is NULL", __func__);
+		return;
+	}
+
+	*(int *)(data_buffer->data + data_buffer->data_len) =
+			DCI_EXT_HDR_TYPE;
+	data_buffer->data_len += sizeof(int);
+	memcpy(data_buffer->data + data_buffer->data_len, ext_hdr,
+			EXT_HDR_LEN);
+	data_buffer->data_len += EXT_HDR_LEN;
+}
+
+/*
+ * Copy one event record (optionally preceded by an extended header)
+ * into the client's current DCI buffer, updating the client's health
+ * statistics on receipt or drop.
+ */
+static void copy_dci_event(unsigned char *buf, int len,
+			struct diag_dci_client_tbl *client, int data_source,
+			void *ext_hdr)
+{
+	struct diag_dci_buffer_t *data_buffer = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	int err = 0, total_len = 0;
+
+	if (!buf || !client) {
+		pr_err("diag: Invalid pointers in %s", __func__);
+		return;
+	}
+
+	/* int type tag + payload, plus optional (tag + ext header) */
+	total_len = sizeof(int) + len;
+	if (ext_hdr)
+		total_len += sizeof(int) + EXT_HDR_LEN;
+
+	proc_buf = &client->buffers[data_source];
+	mutex_lock(&proc_buf->buf_mutex);
+	mutex_lock(&proc_buf->health_mutex);
+	err = diag_dci_get_buffer(client, data_source, total_len);
+	if (err) {
+		if (err == -ENOMEM)
+			proc_buf->health.dropped_events++;
+		else
+			pr_err("diag: In %s, invalid packet\n", __func__);
+		mutex_unlock(&proc_buf->health_mutex);
+		mutex_unlock(&proc_buf->buf_mutex);
+		return;
+	}
+
+	data_buffer = proc_buf->buf_curr;
+
+	proc_buf->health.received_events++;
+	mutex_unlock(&proc_buf->health_mutex);
+	mutex_unlock(&proc_buf->buf_mutex);
+
+	mutex_lock(&data_buffer->data_mutex);
+	if (ext_hdr)
+		copy_ext_hdr(data_buffer, ext_hdr);
+
+	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
+	data_buffer->data_len += sizeof(int);
+	memcpy(data_buffer->data + data_buffer->data_len, buf, len);
+	data_buffer->data_len += len;
+	data_buffer->data_source = data_source;
+	mutex_unlock(&data_buffer->data_mutex);
+
+}
+
+/*
+ * extract_dci_events - parse an incoming event report packet and copy each
+ * event that a DCI client has subscribed to into that client's buffer.
+ *
+ * @buf: start of the event packet (1 byte cmd code, 2 byte series length,
+ *       then the event records)
+ * @len: number of valid bytes in @buf
+ * @data_source: peripheral the data arrived from
+ * @token: processor token; only clients registered for it are considered
+ * @ext_hdr: optional extended header to prepend to each copied event
+ *
+ * Fix: the original implementation read the event id, timestamp and payload
+ * from @buf and only afterwards compared the running offset against @len,
+ * allowing an out-of-bounds read on a truncated/malformed packet. Every
+ * read is now bounds-checked against @len first.
+ */
+void extract_dci_events(unsigned char *buf, int len, int data_source,
+		int token, void *ext_hdr)
+{
+	uint16_t event_id, event_id_packet, length, temp_len;
+	uint8_t payload_len, payload_len_field;
+	uint8_t timestamp[8] = {0}, timestamp_len;
+	unsigned char event_data[MAX_EVENT_SIZE];
+	unsigned int total_event_len;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	/* Need at least the cmd code (1) and the series length field (2) */
+	if (!buf || len < 3) {
+		pr_err("diag: In %s invalid packet, len: %d\n", __func__, len);
+		return;
+	}
+
+	length = *(uint16_t *)(buf + 1); /* total length of event series */
+	if (length == 0) {
+		pr_err("diag: Incoming dci event length is invalid\n");
+		return;
+	}
+	/*
+	 * Move directly to the start of the event series. 1 byte for
+	 * event code and 2 bytes for the length field.
+	 * The length field indicates the total length removing the cmd_code
+	 * and the length field. The event parsing in that case should happen
+	 * till the end.
+	 */
+	temp_len = 3;
+	while (temp_len < length) {
+		/* The 2 byte event id must lie fully within the packet */
+		if ((temp_len + sizeof(uint16_t)) > len) {
+			pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, temp_len);
+			return;
+		}
+		event_id_packet = *(uint16_t *)(buf + temp_len);
+		event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
+		if (event_id_packet & 0x8000) {
+			/* The packet has the two smallest byte of the
+			 * timestamp
+			 */
+			timestamp_len = 2;
+		} else {
+			/* The packet has the full timestamp. The first event
+			 * will always have full timestamp. Save it in the
+			 * timestamp buffer and use it for subsequent events if
+			 * necessary.
+			 */
+			timestamp_len = 8;
+			if ((temp_len + 2 + timestamp_len) > len) {
+				pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, temp_len);
+				return;
+			}
+			memcpy(timestamp, buf + temp_len + 2, timestamp_len);
+		}
+		/* 13th and 14th bit represent the payload length */
+		if (((event_id_packet & 0x6000) >> 13) == 3) {
+			payload_len_field = 1;
+			/* Payload length byte must be inside the packet */
+			if ((temp_len + 2 + timestamp_len + 1) > len) {
+				pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, temp_len);
+				return;
+			}
+			payload_len = *(uint8_t *)
+					(buf + temp_len + 2 + timestamp_len);
+			if (payload_len >= (MAX_EVENT_SIZE - 13)) {
+				pr_err("diag: event > %d, payload_len = %d\n",
+					(MAX_EVENT_SIZE - 13), payload_len);
+				return;
+			}
+			/* The payload itself must be inside the packet */
+			if ((temp_len + 2 + timestamp_len + 1 + payload_len) >
+									len) {
+				pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, temp_len);
+				return;
+			}
+			/* copy the payload length and the payload */
+			memcpy(event_data + 12, buf + temp_len + 2 +
+						timestamp_len, 1);
+			memcpy(event_data + 13, buf + temp_len + 2 +
+				timestamp_len + 1, payload_len);
+		} else {
+			payload_len_field = 0;
+			payload_len = (event_id_packet & 0x6000) >> 13;
+			if ((temp_len + 2 + timestamp_len + payload_len) >
+									len) {
+				pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, temp_len);
+				return;
+			}
+			/* copy the payload */
+			memcpy(event_data + 12, buf + temp_len + 2 +
+						timestamp_len, payload_len);
+		}
+
+		/*
+		 * Advance past this record: 2 bytes (uint16_t) for the
+		 * event_id_packet, then timestamp, optional length byte
+		 * and payload. All reads above were already bounds-checked.
+		 */
+		temp_len += sizeof(uint16_t) + timestamp_len +
+						payload_len_field + payload_len;
+
+		/* 2 bytes for the event id & timestamp len is hard coded to 8,
+		 * as individual events have full timestamp.
+		 */
+		*(uint16_t *)(event_data) = 10 +
+					payload_len_field + payload_len;
+		*(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
+		memcpy(event_data + 4, timestamp, 8);
+		/* 2 bytes for the event length field which is added to
+		 * the event data.
+		 */
+		total_event_len = 2 + 10 + payload_len_field + payload_len;
+		/* parse through event mask tbl of each client and check mask */
+		mutex_lock(&driver->dci_mutex);
+		list_for_each_safe(start, temp, &driver->dci_client_list) {
+			entry = list_entry(start, struct diag_dci_client_tbl,
+									track);
+			if (entry->client_info.token != token)
+				continue;
+			if (diag_dci_query_event_mask(entry, event_id)) {
+				/* copy to client buffer */
+				copy_dci_event(event_data, total_event_len,
+					       entry, data_source, ext_hdr);
+			}
+		}
+		mutex_unlock(&driver->dci_mutex);
+	}
+}
+
+/*
+ * copy_dci_log - copy one log packet into a DCI client's buffer.
+ *
+ * @buf: raw log packet; the log payload length lives at offset 2 and the
+ *       payload itself starts at offset 4 (after cmd code + packet length)
+ * @len: number of valid bytes in @buf
+ * @client: destination DCI client
+ * @data_source: peripheral index, used to pick the per-peripheral buffer
+ * @ext_hdr: optional extended header copied ahead of the log record
+ *
+ * On allocation failure the drop is accounted in the client's health
+ * stats; other errors are only logged.
+ */
+static void copy_dci_log(unsigned char *buf, int len,
+			 struct diag_dci_client_tbl *client, int data_source,
+			 void *ext_hdr)
+{
+	uint16_t log_length = 0;
+	struct diag_dci_buffer_t *data_buffer = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	int err = 0, total_len = 0;
+
+	if (!buf || !client) {
+		pr_err("diag: Invalid pointers in %s", __func__);
+		return;
+	}
+
+	log_length = *(uint16_t *)(buf + 2);
+	/* Guard the later "+ 4" arithmetic against uint16_t wrap-around */
+	if (log_length > USHRT_MAX - 4) {
+		pr_err("diag: Integer overflow in %s, log_len: %d",
+				__func__, log_length);
+		return;
+	}
+	/* DCI_LOG_TYPE tag (int) precedes the payload in the client buffer */
+	total_len = sizeof(int) + log_length;
+	if (ext_hdr)
+		total_len += sizeof(int) + EXT_HDR_LEN;
+
+	/* Check if we are within the len. The check should include the
+	 * first 4 bytes for the Log code(2) and the length bytes (2)
+	 */
+	if ((log_length + sizeof(uint16_t) + 2) > len) {
+		pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
+						__func__, log_length, len);
+		return;
+	}
+
+	/* health_mutex nests inside buf_mutex; keep this order everywhere */
+	proc_buf = &client->buffers[data_source];
+	mutex_lock(&proc_buf->buf_mutex);
+	mutex_lock(&proc_buf->health_mutex);
+	err = diag_dci_get_buffer(client, data_source, total_len);
+	if (err) {
+		if (err == -ENOMEM)
+			proc_buf->health.dropped_logs++;
+		else
+			pr_err("diag: In %s, invalid packet\n", __func__);
+		mutex_unlock(&proc_buf->health_mutex);
+		mutex_unlock(&proc_buf->buf_mutex);
+		return;
+	}
+
+	data_buffer = proc_buf->buf_curr;
+	proc_buf->health.received_logs++;
+	mutex_unlock(&proc_buf->health_mutex);
+	mutex_unlock(&proc_buf->buf_mutex);
+
+	mutex_lock(&data_buffer->data_mutex);
+	if (!data_buffer->data) {
+		mutex_unlock(&data_buffer->data_mutex);
+		return;
+	}
+	if (ext_hdr)
+		copy_ext_hdr(data_buffer, ext_hdr);
+
+	/* Layout: DCI_LOG_TYPE tag, then payload (skipping the 4 hdr bytes) */
+	*(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
+	data_buffer->data_len += sizeof(int);
+	memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
+	       log_length);
+	data_buffer->data_len += log_length;
+	data_buffer->data_source = data_source;
+	mutex_unlock(&data_buffer->data_mutex);
+}
+
+/*
+ * extract_dci_log - route an incoming log packet to every DCI client of
+ * @token whose log mask covers its log code.
+ *
+ * @buf: raw log packet as received from the peripheral/apps
+ * @len: number of valid bytes in @buf
+ * @data_source: peripheral index the packet arrived from
+ * @token: processor token to match against registered clients
+ * @ext_hdr: optional extended header forwarded to copy_dci_log()
+ */
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
+			void *ext_hdr)
+{
+	uint16_t log_code, read_bytes = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	if (!buf) {
+		pr_err("diag: In %s buffer is NULL\n", __func__);
+		return;
+	}
+
+	/* The first six bytes for the incoming log packet contains
+	 * Command code (2), the length of the packet (2) and the length
+	 * of the log (2)
+	 */
+	/*
+	 * NOTE(review): the log code is read from buf + 6 before the length
+	 * check below; a caller passing len < 8 would cause a small
+	 * over-read here — confirm all callers guarantee len >= 8.
+	 */
+	log_code = *(uint16_t *)(buf + 6);
+	read_bytes += sizeof(uint16_t) + 6;
+	if (read_bytes > len) {
+		pr_err("diag: Invalid length in %s, len: %d, read: %d",
+						__func__, len, read_bytes);
+		return;
+	}
+
+	/* parse through log mask table of each client and check mask */
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		if (diag_dci_query_log_mask(entry, log_code)) {
+			pr_debug("\t log code %x needed by client %d",
+				 log_code, entry->client->tgid);
+			/* copy to client buffer */
+			copy_dci_log(buf, len, entry, data_source, ext_hdr);
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+}
+
+/*
+ * extract_dci_ext_pkt - strip the extended DCI header and dispatch the
+ * inner packet to the log or event extractor.
+ *
+ * @buf: packet starting at the extended header (cmd code byte first)
+ * @len: number of valid bytes in @buf
+ * @data_source: peripheral index the packet arrived from
+ * @token: processor token forwarded to the extractors
+ *
+ * Fixes:
+ *  - The version was computed as "*(uint8_t *)buf + 1", which by operator
+ *    precedence is (first byte) + 1, not the version field at offset 1.
+ *    Read the second byte of the header instead.
+ *  - The inner cmd code was read at buf + EXT_HDR_LEN before the length
+ *    was validated; validate first (and require at least one payload
+ *    byte) so no out-of-bounds read can occur.
+ */
+void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
+		int token)
+{
+	uint8_t version, pkt_cmd_code = 0;
+	unsigned char *pkt = NULL;
+
+	if (!buf) {
+		pr_err("diag: In %s buffer is NULL\n", __func__);
+		return;
+	}
+
+	/* Version is the second byte of the extended header */
+	version = *(uint8_t *)(buf + 1);
+	if (version < EXT_HDR_VERSION)  {
+		pr_err("diag: %s, Extended header with invalid version: %d\n",
+			__func__, version);
+		return;
+	}
+
+	/* Need the full extended header plus at least the inner cmd code */
+	len -= EXT_HDR_LEN;
+	if (len <= 0) {
+		pr_err("diag: %s, Invalid length len: %d\n", __func__, len);
+		return;
+	}
+	pkt = buf + EXT_HDR_LEN;
+	pkt_cmd_code = *(uint8_t *)pkt;
+
+	switch (pkt_cmd_code) {
+	case LOG_CMD_CODE:
+		extract_dci_log(pkt, len, data_source, token, buf);
+		break;
+	case EVENT_CMD_CODE:
+		extract_dci_events(pkt, len, data_source, token, buf);
+		break;
+	default:
+		pr_err("diag: %s unsupported cmd_code: %d, data_source: %d\n",
+			__func__, pkt_cmd_code, data_source);
+		return;
+	}
+}
+
+/*
+ * diag_dci_channel_open_work - worker run when a peripheral DCI channel
+ * comes up; re-synchronizes the DCI log and event masks.
+ *
+ * Builds a per-range dirty map from every local client's log mask, folds
+ * the dirty flags into the composite mask, then pushes the updated log
+ * and event masks to userspace clients and to the peripherals.
+ */
+void diag_dci_channel_open_work(struct work_struct *work)
+{
+	int i, j;
+	char dirty_bits[16];
+	uint8_t *client_log_mask_ptr;
+	uint8_t *log_mask_ptr;
+	int ret;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	/* Update apps and peripheral(s) with the dci log and event masks */
+	memset(dirty_bits, 0, 16 * sizeof(uint8_t));
+
+	/*
+	 * From each log entry used by each client, determine
+	 * which log entries in the cumulative logs that need
+	 * to be updated on the peripheral.
+	 */
+	mutex_lock(&driver->dci_mutex);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != DCI_LOCAL_PROC)
+			continue;
+		client_log_mask_ptr = entry->dci_log_mask;
+		for (j = 0; j < 16; j++) {
+			/* Byte 1 of each 514-byte range is its dirty flag
+			 * (set in diag_process_dci_transaction).
+			 */
+			if (*(client_log_mask_ptr+1))
+				dirty_bits[j] = 1;
+			/* 514-byte stride per equipment-id range */
+			client_log_mask_ptr += 514;
+		}
+	}
+	mutex_unlock(&driver->dci_mutex);
+
+	mutex_lock(&dci_log_mask_mutex);
+	/* Update the appropriate dirty bits in the cumulative mask */
+	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+	for (i = 0; i < 16; i++) {
+		if (dirty_bits[i])
+			*(log_mask_ptr+1) = dirty_bits[i];
+
+		log_mask_ptr += 514;
+	}
+	mutex_unlock(&dci_log_mask_mutex);
+
+	/* Send updated mask to userspace clients */
+	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	/* Send updated log mask to peripherals; errors are best-effort here */
+	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_log_mask(DCI_LOCAL_PROC);
+
+	/* Send updated event mask to userspace clients */
+	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	/* Send updated event mask to peripheral */
+	ret = dci_ops_tbl[DCI_LOCAL_PROC].send_event_mask(DCI_LOCAL_PROC);
+}
+
+/*
+ * diag_dci_notify_client - signal DCI clients about a peripheral channel
+ * state change.
+ *
+ * @peripheral_mask: bitmask identifying the peripheral(s) that changed
+ * @data: DIAG_STATUS_OPEN or DIAG_STATUS_CLOSED style status value
+ * @proc: processor token whose clients should be notified
+ *
+ * Updates the per-proc peripheral status bitmap, then sends each
+ * subscribed client its registered signal with (mask | status) as the
+ * signal payload.
+ *
+ * Fix: @stat was read uninitialized by the corrupted-client pr_err branch
+ * (send_sig_info had never run on that path); initialize it to 0.
+ */
+void diag_dci_notify_client(int peripheral_mask, int data, int proc)
+{
+	int stat = 0;
+	struct siginfo info;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	memset(&info, 0, sizeof(struct siginfo));
+	info.si_code = SI_QUEUE;
+	info.si_int = (peripheral_mask | data);
+	if (data == DIAG_STATUS_OPEN)
+		dci_ops_tbl[proc].peripheral_status |= peripheral_mask;
+	else
+		dci_ops_tbl[proc].peripheral_status &= ~peripheral_mask;
+
+	/* Notify the DCI process that the peripheral DCI Channel is up */
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != proc)
+			continue;
+		if (entry->client_info.notification_list & peripheral_mask) {
+			info.si_signo = entry->client_info.signal_type;
+			if (entry->client &&
+				entry->tgid == entry->client->tgid) {
+				DIAG_LOG(DIAG_DEBUG_DCI,
+					"entry tgid = %d, dci client tgid = %d\n",
+					entry->tgid, entry->client->tgid);
+				stat = send_sig_info(
+					entry->client_info.signal_type,
+					&info, entry->client);
+				if (stat)
+					pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
+							info.si_int, stat);
+			} else
+				pr_err("diag: client data is corrupted, signal data: 0x%x, stat: %d\n",
+						info.si_int, stat);
+		}
+	}
+}
+
+/*
+ * diag_send_dci_pkt - frame a DCI request and send it to the processor
+ * that registered the command.
+ *
+ * @entry: command registration entry (identifies the owning processor)
+ * @buf: raw request payload
+ * @len: payload length; must be in [1, DIAG_MAX_REQ_SIZE]
+ * @tag: transaction tag echoed back in the response
+ *
+ * The packet is built in driver->apps_dci_buf as:
+ * header (start, version, len, pkt_code, tag) + payload + CONTROL_CHAR.
+ * Returns DIAG_DCI_NO_ERROR on success, -EIO on bad arguments, or a
+ * DIAG_DCI_* error code.
+ *
+ * Fix: both length-error messages printed limits that did not match the
+ * checks they report on (DCI_REQ_BUF_SIZE vs DIAG_MAX_REQ_SIZE, and
+ * DIAG_MAX_REQ_SIZE vs DCI_BUF_SIZE); they now print the actual bound.
+ */
+static int diag_send_dci_pkt(struct diag_cmd_reg_t *entry,
+			     unsigned char *buf, int len, int tag)
+{
+	int i, status = DIAG_DCI_NO_ERROR;
+	uint32_t write_len = 0;
+	struct diag_dci_pkt_header_t header;
+
+	if (!entry)
+		return -EIO;
+
+	if (len < 1 || len > DIAG_MAX_REQ_SIZE) {
+		pr_err("diag: dci: In %s, invalid length %d, max_length: %d\n",
+		       __func__, len, DIAG_MAX_REQ_SIZE);
+		return -EIO;
+	}
+
+	/* Framed packet (header + payload + terminator) must fit the buf */
+	if ((len + sizeof(header) + sizeof(uint8_t)) > DCI_BUF_SIZE) {
+		pr_err("diag: dci: In %s, invalid length %d for apps_dci_buf, max_length: %d\n",
+		       __func__, len,
+		       (int)(DCI_BUF_SIZE - sizeof(header) - sizeof(uint8_t)));
+		return -EIO;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	/* prepare DCI packet */
+	header.start = CONTROL_CHAR;
+	header.version = 1;
+	/* len covers payload + tag (int) + trailing CONTROL_CHAR (uint8_t) */
+	header.len = len + sizeof(int) + sizeof(uint8_t);
+	header.pkt_code = DCI_PKT_RSP_CODE;
+	header.tag = tag;
+	memcpy(driver->apps_dci_buf, &header, sizeof(header));
+	write_len += sizeof(header);
+	memcpy(driver->apps_dci_buf + write_len, buf, len);
+	write_len += len;
+	*(uint8_t *)(driver->apps_dci_buf + write_len) = CONTROL_CHAR;
+	write_len += sizeof(uint8_t);
+
+	/* This command is registered locally on the Apps */
+	if (entry->proc == APPS_DATA) {
+		diag_update_pkt_buffer(driver->apps_dci_buf, write_len,
+				       DCI_PKT_TYPE);
+		diag_update_sleeping_process(entry->pid, DCI_PKT_TYPE);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	/* Otherwise the owner must be one of the known peripherals */
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		if (entry->proc == i) {
+			status = 1;
+			break;
+		}
+
+	if (status) {
+		status = diag_dci_write_proc(entry->proc,
+					     DIAG_DATA_TYPE,
+					     driver->apps_dci_buf,
+					     write_len);
+	} else {
+		pr_err("diag: Cannot send packet to peripheral %d",
+		       entry->proc);
+		status = DIAG_DCI_SEND_DATA_FAIL;
+	}
+	mutex_unlock(&driver->dci_mutex);
+	return status;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * dci_get_buffer_from_bridge - allocate a bridge buffer for @token,
+ * polling the mempool a few times before giving up.
+ *
+ * Returns the buffer on success, NULL if the pool stayed exhausted.
+ */
+unsigned char *dci_get_buffer_from_bridge(int token)
+{
+	uint8_t attempt;
+	unsigned char *buf = NULL;
+
+	/* Up to 3 attempts, sleeping ~5ms after each failed one */
+	for (attempt = 0; attempt < 3; attempt++) {
+		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
+				    dci_ops_tbl[token].mempool);
+		if (buf)
+			break;
+		usleep_range(5000, 5100);
+	}
+
+	return buf;
+}
+
+/* Forward a DCI payload to the bridge device mapped to @token. */
+int diag_dci_write_bridge(int token, unsigned char *buf, int len)
+{
+	int bridge_index = TOKEN_TO_BRIDGE(token);
+
+	return diagfwd_bridge_write(bridge_index, buf, len);
+}
+
+/*
+ * diag_dci_write_done_bridge - completion callback for a bridge write;
+ * returns the buffer to the token's mempool.
+ *
+ * Returns 0 on success, -EINVAL if @index maps to no valid DCI token.
+ */
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
+{
+	int token = BRIDGE_TO_TOKEN(index);
+
+	if (VALID_DCI_TOKEN(token)) {
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		return 0;
+	}
+
+	pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
+	return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * diag_send_dci_pkt_remote - frame a DCI request and ship it over the
+ * bridge to a remote processor.
+ *
+ * @data: raw request payload
+ * @len: payload length in bytes
+ * @tag: transaction tag placed after the header for response matching
+ * @token: remote processor token selecting the bridge/mempool
+ *
+ * Wire layout: dci_header + tag (int) + payload + CONTROL_CHAR.
+ * Returns DIAG_DCI_NO_ERROR on success, -EIO/-EAGAIN or the bridge
+ * write error otherwise. On write failure the bridge buffer is freed
+ * here; on success the completion callback frees it.
+ */
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+				    int token)
+{
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int ret = DIAG_DCI_NO_ERROR;
+	uint32_t write_len = 0;
+
+	if (!data)
+		return -EIO;
+
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		return -EAGAIN;
+	}
+
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	/*
+	 * The Length of the DCI packet = length of the command + tag (int) +
+	 * the command code size (uint8_t)
+	 */
+	dci_header.length = len + sizeof(int) + sizeof(uint8_t);
+	dci_header.cmd_code = DCI_PKT_RSP_CODE;
+
+	memcpy(buf + write_len, &dci_header, dci_header_size);
+	write_len += dci_header_size;
+	*(int *)(buf + write_len) = tag;
+	write_len += sizeof(int);
+	memcpy(buf + write_len, data, len);
+	write_len += len;
+	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+	write_len += sizeof(uint8_t);
+
+	ret = diag_dci_write_bridge(token, buf, write_len);
+	if (ret) {
+		pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
+			token, ret);
+		/* Write never queued; reclaim the buffer ourselves */
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+	} else {
+		ret = DIAG_DCI_NO_ERROR;
+	}
+
+	return ret;
+}
+#else
+/* Bridge support compiled out (CONFIG_DIAGFWD_BRIDGE_CODE=n): no-op
+ * that reports success so callers need no conditional logic.
+ */
+static int diag_send_dci_pkt_remote(unsigned char *data, int len, int tag,
+				    int token)
+{
+	return DIAG_DCI_NO_ERROR;
+}
+#endif
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * diag_dci_send_handshake_pkt - send the DCI handshake control packet to
+ * the remote processor behind bridge @index.
+ *
+ * Wire layout: dci_header (cmd DCI_CONTROL_PKT_CODE) + handshake ctrl
+ * packet (id, data len, version, magic) + CONTROL_CHAR. On success a
+ * timer is armed to detect a missing handshake response.
+ *
+ * Returns 0 on success, -EINVAL for a bad index, -EAGAIN if no bridge
+ * buffer is available, or the bridge write error.
+ */
+int diag_dci_send_handshake_pkt(int index)
+{
+	int err = 0;
+	int token = BRIDGE_TO_TOKEN(index);
+	int write_len = 0;
+	struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+
+	if (!VALID_DCI_TOKEN(token)) {
+		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
+		return -EINVAL;
+	}
+
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		return -EAGAIN;
+	}
+
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	/* Include the cmd code (uint8_t) in the length */
+	dci_header.length = sizeof(ctrl_pkt) + sizeof(uint8_t);
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+	memcpy(buf, &dci_header, sizeof(dci_header));
+	write_len += sizeof(dci_header);
+
+	ctrl_pkt.ctrl_pkt_id = DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT;
+	/*
+	 *  The control packet data length accounts for the version (uint32_t)
+	 *  of the packet and the magic number (uint32_t).
+	 */
+	ctrl_pkt.ctrl_pkt_data_len = 2 * sizeof(uint32_t);
+	ctrl_pkt.version = 1;
+	ctrl_pkt.magic = DCI_MAGIC;
+	memcpy(buf + write_len, &ctrl_pkt, sizeof(ctrl_pkt));
+	write_len += sizeof(ctrl_pkt);
+
+	*(uint8_t *)(buf + write_len) = CONTROL_CHAR;
+	write_len += sizeof(uint8_t);
+
+	err = diag_dci_write_bridge(token, buf, write_len);
+	if (err) {
+		pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
+		       token, err);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		return err;
+	}
+
+	/* Expect a handshake response before this timer expires */
+	mod_timer(&(dci_channel_status[token].wait_time),
+		  jiffies + msecs_to_jiffies(DCI_HANDSHAKE_WAIT_TIME));
+
+	return 0;
+}
+#else
+/* Bridge support compiled out: handshake is not needed, report success. */
+int diag_dci_send_handshake_pkt(int index)
+{
+	return 0;
+}
+#endif
+
+/*
+ * diag_dci_process_apps_pkt - handle DCI request commands that the Apps
+ * processor answers itself (version, build id, polling, STM, log-on-
+ * demand, wrap controls, download mode).
+ *
+ * @pkt_header: parsed command header (cmd code, subsys id, subsys cmd)
+ * @req_buf: full raw request, passed through to helpers that need it
+ * @req_len: length of @req_buf
+ * @tag: DCI transaction tag echoed in the response
+ *
+ * On a recognized command the response is assembled after the DCI packet
+ * header in driver->apps_dci_buf and handed to
+ * diag_process_apps_dci_read_data(). Returns DIAG_DCI_NO_ERROR when a
+ * response was produced, DIAG_DCI_TABLE_ERR when the command is not an
+ * apps command, -EIO/-ENOMEM on bad arguments or overflow.
+ */
+static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
+				     unsigned char *req_buf, int req_len,
+				     int tag)
+{
+	uint8_t cmd_code, subsys_id, i, goto_download = 0;
+	uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
+	uint16_t ss_cmd_code;
+	uint32_t write_len = 0;
+	unsigned char *dest_buf = driver->apps_dci_buf;
+	unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
+	struct diag_dci_pkt_header_t dci_header;
+
+	if (!pkt_header || !req_buf || req_len <= 0 || tag < 0)
+		return -EIO;
+
+	cmd_code = pkt_header->cmd_code;
+	subsys_id = pkt_header->subsys_id;
+	ss_cmd_code = pkt_header->subsys_cmd_code;
+
+	if (cmd_code == DIAG_CMD_DOWNLOAD) {
+		/* Ack first, reboot into download mode after fill_buffer */
+		*payload_ptr = DIAG_CMD_DOWNLOAD;
+		write_len = sizeof(uint8_t);
+		goto_download = 1;
+		goto fill_buffer;
+	} else if (cmd_code == DIAG_CMD_VERSION) {
+		if (chk_polling_response()) {
+			/* 55 zero bytes: empty version response payload */
+			for (i = 0; i < 55; i++, write_len++, payload_ptr++)
+				*(payload_ptr) = 0;
+			goto fill_buffer;
+		}
+	} else if (cmd_code == DIAG_CMD_EXT_BUILD) {
+		if (chk_polling_response()) {
+			*payload_ptr = DIAG_CMD_EXT_BUILD;
+			write_len = sizeof(uint8_t);
+			payload_ptr += sizeof(uint8_t);
+			/* 8 zero bytes of padding before the build id */
+			for (i = 0; i < 8; i++, write_len++, payload_ptr++)
+				*(payload_ptr) = 0;
+			*(int *)(payload_ptr) = chk_config_get_id();
+			write_len += sizeof(int);
+			goto fill_buffer;
+		}
+	} else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
+		write_len = diag_cmd_log_on_demand(req_buf, req_len,
+						   payload_ptr,
+						   APPS_BUF_SIZE - header_len);
+		goto fill_buffer;
+	} else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
+		/* Not an apps command; let the caller route it elsewhere */
+		return DIAG_DCI_TABLE_ERR;
+	}
+
+	if (subsys_id == DIAG_SS_DIAG) {
+		if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
+			/* Echo the header, then report the max request size */
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(uint32_t *)(payload_ptr + write_len) =
+							DIAG_MAX_REQ_SIZE;
+			write_len += sizeof(uint32_t);
+		} else if (ss_cmd_code == DIAG_DIAG_STM) {
+			write_len = diag_process_stm_cmd(req_buf, payload_ptr);
+		}
+	} else if (subsys_id == DIAG_SS_PARAMS) {
+		if (ss_cmd_code == DIAG_DIAG_POLL) {
+			if (chk_polling_response()) {
+				memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+				write_len = sizeof(struct diag_pkt_header_t);
+				payload_ptr += write_len;
+				/* 12 zero bytes: empty poll response body */
+				for (i = 0; i < 12; i++, write_len++) {
+					*(payload_ptr) = 0;
+					payload_ptr++;
+				}
+			}
+		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(int *)(payload_ptr + write_len) = wrap_enabled;
+			write_len += sizeof(int);
+		} else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
+			wrap_enabled = true;
+			memcpy(payload_ptr, pkt_header,
+					sizeof(struct diag_pkt_header_t));
+			write_len = sizeof(struct diag_pkt_header_t);
+			*(uint16_t *)(payload_ptr + write_len) = wrap_count;
+			write_len += sizeof(uint16_t);
+		} else if (ss_cmd_code == DIAG_EXT_MOBILE_ID) {
+			write_len = diag_cmd_get_mobile_id(req_buf, req_len,
+						   payload_ptr,
+						   APPS_BUF_SIZE - header_len);
+		}
+	}
+
+fill_buffer:
+	if (write_len > 0) {
+		/* Check if we are within the range of the buffer*/
+		if (write_len + header_len > DIAG_MAX_REQ_SIZE) {
+			pr_err("diag: In %s, invalid length %d\n", __func__,
+						write_len + header_len);
+			return -ENOMEM;
+		}
+		dci_header.start = CONTROL_CHAR;
+		dci_header.version = 1;
+		/*
+		 * Length of the rsp pkt = actual data len + pkt rsp code
+		 * (uint8_t) + tag (int)
+		 */
+		dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
+		dci_header.pkt_code = DCI_PKT_RSP_CODE;
+		dci_header.tag = tag;
+		driver->in_busy_dcipktdata = 1;
+		memcpy(dest_buf, &dci_header, header_len);
+		/*
+		 * NOTE(review): "+ 4" presumably skips the start/version/len
+		 * prefix of the header so the read starts at pkt_code — TODO
+		 * confirm against struct diag_dci_pkt_header_t's layout.
+		 */
+		diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
+						dci_header.len);
+		driver->in_busy_dcipktdata = 0;
+
+		if (goto_download) {
+			/*
+			 * Sleep for sometime so that the response reaches the
+			 * client. The value 5000 empirically as an optimum
+			 * time for the response to reach the client.
+			 */
+			usleep_range(5000, 5100);
+			/* call download API */
+			msm_set_restart_mode(RESTART_DLOAD);
+			pr_alert("diag: download mode set, Rebooting SoC..\n");
+			kernel_restart(NULL);
+		}
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	return DIAG_DCI_TABLE_ERR;
+}
+
+/*
+ * diag_process_dci_pkt_rsp - process one DCI request from a client and
+ * route it to the Apps handler, a remote processor, or the registered
+ * peripheral.
+ *
+ * @buf: request as written by the client: dci_pkt_req_t header (uid,
+ *       client id) followed by the actual diag command
+ * @len: total length of @buf; must exceed the request header size and
+ *       fit in DCI_REQ_BUF_SIZE
+ *
+ * Validates the client and the command, waits (bounded) for the shared
+ * apps DCI buffer to free up, registers the transaction for response
+ * matching, then dispatches. Returns DIAG_DCI_NO_ERROR on success or a
+ * negative/DIAG_DCI_* error.
+ */
+static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
+{
+	int ret = DIAG_DCI_TABLE_ERR;
+	int common_cmd = 0;
+	struct diag_pkt_header_t *header = NULL;
+	unsigned char *temp = buf;
+	unsigned char *req_buf = NULL;
+	uint8_t retry_count = 0, max_retries = 3;
+	uint32_t read_len = 0, req_len = len;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	struct diag_dci_client_tbl *dci_entry = NULL;
+	struct dci_pkt_req_t req_hdr;
+	struct diag_cmd_reg_t *reg_item;
+	struct diag_cmd_reg_entry_t reg_entry;
+	struct diag_cmd_reg_entry_t *temp_entry;
+
+	if (!buf)
+		return -EIO;
+
+	if (len <= sizeof(struct dci_pkt_req_t) || len > DCI_REQ_BUF_SIZE) {
+		pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
+		return -EIO;
+	}
+
+	/* Split off the DCI request header from the embedded diag command */
+	req_hdr = *(struct dci_pkt_req_t *)temp;
+	temp += sizeof(struct dci_pkt_req_t);
+	read_len += sizeof(struct dci_pkt_req_t);
+	req_len -= sizeof(struct dci_pkt_req_t);
+	req_buf = temp; /* Start of the Request */
+	header = (struct diag_pkt_header_t *)temp;
+	temp += sizeof(struct diag_pkt_header_t);
+	read_len += sizeof(struct diag_pkt_header_t);
+	if (read_len >= DCI_REQ_BUF_SIZE) {
+		pr_err("diag: dci: In %s, invalid read_len: %d\n", __func__,
+		       read_len);
+		return -EIO;
+	}
+
+	mutex_lock(&driver->dci_mutex);
+	dci_entry = diag_dci_get_client_entry(req_hdr.client_id);
+	if (!dci_entry) {
+		pr_err("diag: Invalid client %d in %s\n",
+		       req_hdr.client_id, __func__);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_REG;
+	}
+
+	/* Check if the command is allowed on DCI */
+	if (diag_dci_filter_commands(header)) {
+		pr_debug("diag: command not supported %d %d %d",
+			 header->cmd_code, header->subsys_id,
+			 header->subsys_cmd_code);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_SEND_DATA_FAIL;
+	}
+
+	common_cmd = diag_check_common_cmd(header);
+	if (common_cmd < 0) {
+		pr_debug("diag: error in checking common command, %d\n",
+			 common_cmd);
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_SEND_DATA_FAIL;
+	}
+
+	/*
+	 * Previous packet is yet to be consumed by the client. Wait
+	 * till the buffer is free.
+	 */
+	while (retry_count < max_retries) {
+		retry_count++;
+		if (driver->in_busy_dcipktdata)
+			usleep_range(10000, 10100);
+		else
+			break;
+	}
+	/* The buffer is still busy */
+	if (driver->in_busy_dcipktdata) {
+		pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
+								__func__);
+		mutex_unlock(&driver->dci_mutex);
+		return -EAGAIN;
+	}
+
+	/* Register this new DCI packet */
+	req_entry = diag_register_dci_transaction(req_hdr.uid,
+						  req_hdr.client_id);
+	if (!req_entry) {
+		pr_alert("diag: registering new DCI transaction failed\n");
+		mutex_unlock(&driver->dci_mutex);
+		return DIAG_DCI_NO_REG;
+	}
+	mutex_unlock(&driver->dci_mutex);
+
+	/*
+	 * If the client has registered for remote data, route the packet to the
+	 * remote processor
+	 */
+	if (dci_entry->client_info.token > 0) {
+		ret = diag_send_dci_pkt_remote(req_buf, req_len, req_entry->tag,
+					       dci_entry->client_info.token);
+		return ret;
+	}
+
+	/* Check if it is a dedicated Apps command */
+	ret = diag_dci_process_apps_pkt(header, req_buf, req_len,
+					req_entry->tag);
+	/* Common commands also go to the peripheral even after apps replies */
+	if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
+		return ret;
+
+	reg_entry.cmd_code = header->cmd_code;
+	reg_entry.subsys_id = header->subsys_id;
+	reg_entry.cmd_code_hi = header->subsys_cmd_code;
+	reg_entry.cmd_code_lo = header->subsys_cmd_code;
+
+	temp_entry = diag_cmd_search(&reg_entry, ALL_PROC);
+	if (temp_entry) {
+		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+								entry);
+		ret = diag_send_dci_pkt(reg_item, req_buf, req_len,
+					req_entry->tag);
+	} else {
+		DIAG_LOG(DIAG_DEBUG_DCI, "Command not found: %02x %02x %02x\n",
+				reg_entry.cmd_code, reg_entry.subsys_id,
+				reg_entry.cmd_code_hi);
+	}
+
+	return ret;
+}
+
+/*
+ * diag_process_dci_transaction - entry point for DCI writes from
+ * userspace: dispatches packet requests, log-mask updates and
+ * event-mask updates.
+ *
+ * @buf: transaction buffer; the first int selects the type (a positive
+ *       value means packet request/response, DCI_LOG_TYPE or
+ *       DCI_EVENT_TYPE mean mask configuration)
+ * @len: length of @buf
+ *
+ * Mask configs have the layout: type (int), client id (int), set/clear
+ * flag (int), count (int), then count 2-byte log codes or 4-byte event
+ * ids. Each code updates the client's own mask, the per-token
+ * cumulative mask, and finally the peripherals are re-synced.
+ *
+ * Returns DIAG_DCI_NO_ERROR on success or a negative error.
+ */
+int diag_process_dci_transaction(unsigned char *buf, int len)
+{
+	unsigned char *temp = buf;
+	uint16_t log_code, item_num;
+	int ret = -1, found = 0, client_id = 0, client_token = 0;
+	int count, set_mask, num_codes, bit_index, event_id, offset = 0;
+	unsigned int byte_index, read_len = 0;
+	uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
+	uint8_t *event_mask_ptr;
+	struct diag_dci_client_tbl *dci_entry = NULL;
+
+	if (!temp) {
+		pr_err("diag: Invalid buffer in %s\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* This is Pkt request/response transaction */
+	if (*(int *)temp > 0) {
+		return diag_process_dci_pkt_rsp(buf, len);
+	} else if (*(int *)temp == DCI_LOG_TYPE) {
+		/* Minimum length of a log mask config is 12 + 2 bytes for
+		 * at least one log code to be set or reset.
+		 */
+		if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
+			pr_err("diag: dci: Invalid length in %s\n", __func__);
+			return -EIO;
+		}
+
+		/* Extract each log code and put in client table */
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		client_id = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		set_mask = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		num_codes = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+
+		/* Find client table entry */
+		mutex_lock(&driver->dci_mutex);
+		dci_entry = diag_dci_get_client_entry(client_id);
+		if (!dci_entry) {
+			pr_err("diag: In %s, invalid client\n", __func__);
+			mutex_unlock(&driver->dci_mutex);
+			return ret;
+		}
+		client_token = dci_entry->client_info.token;
+
+		/* Each log code is 2 bytes; bound the count by buffer size */
+		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+			pr_err("diag: dci: Invalid number of log codes %d\n",
+								num_codes);
+			mutex_unlock(&driver->dci_mutex);
+			return -EIO;
+		}
+
+		head_log_mask_ptr = dci_entry->dci_log_mask;
+		if (!head_log_mask_ptr) {
+			pr_err("diag: dci: Invalid Log mask pointer in %s\n",
+								__func__);
+			mutex_unlock(&driver->dci_mutex);
+			return -ENOMEM;
+		}
+		pr_debug("diag: head of dci log mask %pK\n", head_log_mask_ptr);
+		count = 0; /* iterator for extracting log codes */
+
+		while (count < num_codes) {
+			if (read_len >= USER_SPACE_DATA) {
+				pr_err("diag: dci: Invalid length for log type in %s",
+								__func__);
+				mutex_unlock(&driver->dci_mutex);
+				return -EIO;
+			}
+			log_code = *(uint16_t *)temp;
+			equip_id = LOG_GET_EQUIP_ID(log_code);
+			item_num = LOG_GET_ITEM_NUM(log_code);
+			/* +2 skips the equip id and dirty bytes of a range */
+			byte_index = item_num/8 + 2;
+			if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
+				pr_err("diag: dci: Log type, invalid byte index\n");
+				mutex_unlock(&driver->dci_mutex);
+				return ret;
+			}
+			byte_mask = 0x01 << (item_num % 8);
+			/*
+			 * Parse through log mask table and find
+			 * relevant range
+			 */
+			log_mask_ptr = head_log_mask_ptr;
+			found = 0;
+			offset = 0;
+			while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
+				if (*log_mask_ptr == equip_id) {
+					found = 1;
+					pr_debug("diag: find equip id = %x at %pK\n",
+						 equip_id, log_mask_ptr);
+					break;
+				}
+				pr_debug("diag: did not find equip id = %x at %d\n",
+					 equip_id, *log_mask_ptr);
+				log_mask_ptr += 514;
+				offset += 514;
+			}
+			if (!found) {
+				pr_err("diag: dci equip id not found\n");
+				mutex_unlock(&driver->dci_mutex);
+				return ret;
+			}
+			*(log_mask_ptr+1) = 1; /* set the dirty byte */
+			log_mask_ptr = log_mask_ptr + byte_index;
+			if (set_mask)
+				*log_mask_ptr |= byte_mask;
+			else
+				*log_mask_ptr &= ~byte_mask;
+			/* add to cumulative mask */
+			update_dci_cumulative_log_mask(
+				offset, byte_index,
+				byte_mask, client_token);
+			temp += 2;
+			read_len += 2;
+			count++;
+			ret = DIAG_DCI_NO_ERROR;
+		}
+		/* send updated mask to userspace clients */
+		if (client_token == DCI_LOCAL_PROC)
+			diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+		/* send updated mask to peripherals */
+		ret = dci_ops_tbl[client_token].send_log_mask(client_token);
+		mutex_unlock(&driver->dci_mutex);
+	} else if (*(int *)temp == DCI_EVENT_TYPE) {
+		/* Minimum length of a event mask config is 12 + 4 bytes for
+		 * at least one event id to be set or reset.
+		 */
+		if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
+			pr_err("diag: dci: Invalid length in %s\n", __func__);
+			return -EIO;
+		}
+
+		/* Extract each event id and put in client table */
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		client_id = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		set_mask = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+		num_codes = *(int *)temp;
+		temp += sizeof(int);
+		read_len += sizeof(int);
+
+		/* find client table entry */
+		mutex_lock(&driver->dci_mutex);
+		dci_entry = diag_dci_get_client_entry(client_id);
+		if (!dci_entry) {
+			pr_err("diag: In %s, invalid client\n", __func__);
+			mutex_unlock(&driver->dci_mutex);
+			return ret;
+		}
+		client_token = dci_entry->client_info.token;
+
+		/* Check for positive number of event ids. Also, the number of
+		 * event ids should fit in the buffer along with set_mask and
+		 * num_codes which are 4 bytes each.
+		 */
+		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
+			pr_err("diag: dci: Invalid number of event ids %d\n",
+								num_codes);
+			mutex_unlock(&driver->dci_mutex);
+			return -EIO;
+		}
+
+		event_mask_ptr = dci_entry->dci_event_mask;
+		if (!event_mask_ptr) {
+			pr_err("diag: dci: Invalid event mask pointer in %s\n",
+								__func__);
+			mutex_unlock(&driver->dci_mutex);
+			return -ENOMEM;
+		}
+		pr_debug("diag: head of dci event mask %pK\n", event_mask_ptr);
+		count = 0; /* iterator for extracting event ids */
+		while (count < num_codes) {
+			if (read_len >= USER_SPACE_DATA) {
+				pr_err("diag: dci: Invalid length for event type in %s",
+								__func__);
+				mutex_unlock(&driver->dci_mutex);
+				return -EIO;
+			}
+			event_id = *(int *)temp;
+			byte_index = event_id/8;
+			if (byte_index >= DCI_EVENT_MASK_SIZE) {
+				pr_err("diag: dci: Event type, invalid byte index\n");
+				mutex_unlock(&driver->dci_mutex);
+				return ret;
+			}
+			bit_index = event_id % 8;
+			byte_mask = 0x1 << bit_index;
+			/*
+			 * Parse through event mask table and set
+			 * relevant byte & bit combination
+			 */
+			if (set_mask)
+				*(event_mask_ptr + byte_index) |= byte_mask;
+			else
+				*(event_mask_ptr + byte_index) &= ~byte_mask;
+			/* add to cumulative mask */
+			update_dci_cumulative_event_mask(byte_index, byte_mask,
+							 client_token);
+			temp += sizeof(int);
+			read_len += sizeof(int);
+			count++;
+			ret = DIAG_DCI_NO_ERROR;
+		}
+		/* send updated mask to userspace clients */
+		if (dci_entry->client_info.token == DCI_LOCAL_PROC)
+			diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+		/* send updated mask to peripherals */
+		ret = dci_ops_tbl[client_token].send_event_mask(client_token);
+		mutex_unlock(&driver->dci_mutex);
+	} else {
+		pr_alert("diag: Incorrect DCI transaction\n");
+	}
+	return ret;
+}
+
+
+/* Look up a registered DCI client by its client id; NULL if absent. */
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id)
+{
+	struct diag_dci_client_tbl *client = NULL;
+	struct list_head *node, *next;
+
+	list_for_each_safe(node, next, &driver->dci_client_list) {
+		client = list_entry(node, struct diag_dci_client_tbl, track);
+		if (client->client_info.client_id != client_id)
+			continue;
+		return client;
+	}
+	return NULL;
+}
+
+/*
+ * Look up a registered DCI client by the thread group id of the
+ * process that registered it.
+ *
+ * Compare against the tgid cached at registration time (entry->tgid,
+ * set in diag_dci_register_client) instead of dereferencing
+ * entry->client->tgid: the task_struct pointer may be stale if the
+ * client process has already exited.
+ */
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid)
+{
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->tgid == tgid)
+			return entry;
+	}
+	return NULL;
+}
+
+/*
+ * Recompute one byte of the composite (across all clients) event mask
+ * for the processor identified by @token, after a client changed the
+ * @byte_mask bits at byte @offset of its own mask.
+ */
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token)
+{
+	uint8_t *event_mask_ptr, *update_ptr = NULL;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	bool is_set = false;
+
+	mutex_lock(&dci_event_mask_mutex);
+	update_ptr = dci_ops_tbl[token].event_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return;
+	}
+	update_ptr += offset;
+	/* Scan clients of this processor for any that still want the bits */
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		event_mask_ptr = entry->dci_event_mask;
+		event_mask_ptr += offset;
+		if ((*event_mask_ptr & byte_mask) == byte_mask) {
+			is_set = true;
+			/* break even if one client has the event mask set */
+			break;
+		}
+	}
+	/* Keep the bits set only while at least one client wants them */
+	if (is_set == false)
+		*update_ptr &= ~byte_mask;
+	else
+		*update_ptr |= byte_mask;
+	mutex_unlock(&dci_event_mask_mutex);
+}
+
+/*
+ * Rebuild the composite event mask for @token from scratch: clear it,
+ * then OR in the event mask of every client registered on that
+ * processor.
+ */
+void diag_dci_invalidate_cumulative_event_mask(int token)
+{
+	int i = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	uint8_t *event_mask_ptr, *update_ptr = NULL;
+
+	mutex_lock(&dci_event_mask_mutex);
+	update_ptr = dci_ops_tbl[token].event_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return;
+	}
+
+	/* Reset the composite mask, then accumulate every client's mask */
+	create_dci_event_mask_tbl(update_ptr);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		event_mask_ptr = entry->dci_event_mask;
+		for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
+			*(update_ptr+i) |= *(event_mask_ptr+i);
+	}
+	mutex_unlock(&dci_event_mask_mutex);
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * Send the composite DCI event mask for a remote processor over the
+ * bridge. The packet layout is: DCI header, event-mask control header,
+ * DCI_EVENT_MASK_SIZE mask bytes, then a CONTROL_CHAR terminator.
+ * Returns DIAG_DCI_NO_ERROR on success or a negative error code.
+ */
+int diag_send_dci_event_mask_remote(int token)
+{
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	struct diag_ctrl_event_mask event_mask;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int event_header_size = sizeof(struct diag_ctrl_event_mask);
+	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	unsigned char *event_mask_ptr = NULL;
+	uint32_t write_len = 0;
+
+	mutex_lock(&dci_event_mask_mutex);
+	event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
+	if (!event_mask_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return -EINVAL;
+	}
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		mutex_unlock(&dci_event_mask_mutex);
+		return -EAGAIN;
+	}
+
+	/* Frame the DCI header */
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	dci_header.length = event_header_size + DCI_EVENT_MASK_SIZE + 1;
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+	event_mask.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+	event_mask.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+	event_mask.stream_id = DCI_MASK_STREAM;
+	event_mask.status = DIAG_CTRL_MASK_VALID;
+	event_mask.event_config = 0; /* event config */
+	event_mask.event_mask_size = DCI_EVENT_MASK_SIZE;
+	/* Enable event reporting only if any bit of the mask is set */
+	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+		if (event_mask_ptr[i] != 0) {
+			event_mask.event_config = 1;
+			break;
+		}
+	}
+	memcpy(buf + write_len, &dci_header, dci_header_size);
+	write_len += dci_header_size;
+	memcpy(buf + write_len, &event_mask, event_header_size);
+	write_len += event_header_size;
+	memcpy(buf + write_len, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+	write_len += DCI_EVENT_MASK_SIZE;
+	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+	write_len += sizeof(uint8_t);
+	err = diag_dci_write_bridge(token, buf, write_len);
+	if (err) {
+		pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
+		       token, err);
+		/* Bridge did not take ownership; return buf to the pool */
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		ret = err;
+	} else {
+		ret = DIAG_DCI_NO_ERROR;
+	}
+	mutex_unlock(&dci_event_mask_mutex);
+	return ret;
+}
+#endif
+
+/*
+ * Send the local composite DCI event mask to every peripheral over the
+ * control channel. event_config is set only when at least one bit of
+ * the composite mask is enabled. Returns DIAG_DCI_NO_ERROR, or
+ * DIAG_DCI_SEND_DATA_FAIL if any peripheral write failed.
+ */
+int diag_send_dci_event_mask(int token)
+{
+	void *buf = event_mask.update_buf;
+	struct diag_ctrl_event_mask header;
+	int header_size = sizeof(struct diag_ctrl_event_mask);
+	int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;
+	unsigned char *event_mask_ptr = NULL;
+
+	mutex_lock(&dci_event_mask_mutex);
+	event_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].event_mask_composite;
+	if (!event_mask_ptr) {
+		mutex_unlock(&dci_event_mask_mutex);
+		return -EINVAL;
+	}
+
+	mutex_lock(&event_mask.lock);
+	/* send event mask update */
+	header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+	header.data_len = EVENT_MASK_CTRL_HEADER_LEN + DCI_EVENT_MASK_SIZE;
+	header.stream_id = DCI_MASK_STREAM;
+	header.status = DIAG_CTRL_MASK_VALID;
+	header.event_config = 0; /* event config */
+	header.event_mask_size = DCI_EVENT_MASK_SIZE;
+	/* Enable event reporting only if any bit of the mask is set */
+	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
+		if (event_mask_ptr[i] != 0) {
+			header.event_config = 1;
+			break;
+		}
+	}
+	memcpy(buf, &header, header_size);
+	memcpy(buf+header_size, event_mask_ptr, DCI_EVENT_MASK_SIZE);
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		/*
+		 * Don't send to peripheral if its regular channel
+		 * is down. It may also mean that the peripheral doesn't
+		 * support DCI.
+		 */
+		err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
+					  header_size + DCI_EVENT_MASK_SIZE);
+		if (err != DIAG_DCI_NO_ERROR)
+			ret = DIAG_DCI_SEND_DATA_FAIL;
+	}
+
+	mutex_unlock(&event_mask.lock);
+	mutex_unlock(&dci_event_mask_mutex);
+
+	return ret;
+}
+
+/*
+ * Recompute one byte of the composite log mask for @token after a
+ * client changed the @byte_mask bits at @byte_index within the
+ * 514-byte equip-id block starting at @offset, and mark that block
+ * dirty so it gets re-sent to the peripherals.
+ */
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+						uint8_t byte_mask, int token)
+{
+	uint8_t *log_mask_ptr, *update_ptr = NULL;
+	bool is_set = false;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	mutex_lock(&dci_log_mask_mutex);
+	update_ptr = dci_ops_tbl[token].log_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return;
+	}
+
+	update_ptr += offset;
+	/* update the dirty bit (byte 1 of the equip-id block) */
+	*(update_ptr+1) = 1;
+	update_ptr = update_ptr + byte_index;
+	/* Scan clients of this processor for any that still want the bits */
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		log_mask_ptr = entry->dci_log_mask;
+		log_mask_ptr = log_mask_ptr + offset + byte_index;
+		if ((*log_mask_ptr & byte_mask) == byte_mask) {
+			is_set = true;
+			/* break even if one client has the log mask set */
+			break;
+		}
+	}
+
+	/* Keep the bits set only while at least one client wants them */
+	if (is_set == false)
+		*update_ptr &= ~byte_mask;
+	else
+		*update_ptr |= byte_mask;
+	mutex_unlock(&dci_log_mask_mutex);
+}
+
+/*
+ * Rebuild the composite log mask for @token from scratch: reset it
+ * (marking every equip-id block dirty so it will be re-sent), then OR
+ * in the log mask of every client registered on that processor.
+ */
+void diag_dci_invalidate_cumulative_log_mask(int token)
+{
+	int i = 0;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+	uint8_t *log_mask_ptr, *update_ptr = NULL;
+
+	/* Clear the composite mask and redo all the masks */
+	mutex_lock(&dci_log_mask_mutex);
+	update_ptr = dci_ops_tbl[token].log_mask_composite;
+	if (!update_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return;
+	}
+
+	create_dci_log_mask_tbl(update_ptr, DCI_LOG_MASK_DIRTY);
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->client_info.token != token)
+			continue;
+		log_mask_ptr = entry->dci_log_mask;
+		for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
+			*(update_ptr+i) |= *(log_mask_ptr+i);
+	}
+	mutex_unlock(&dci_log_mask_mutex);
+}
+
+/*
+ * Build one log-mask control packet for a single equipment id.
+ *
+ * @src_ptr points at one 514-byte per-equip-id block of the composite
+ * log mask: byte 0 is the equip id, byte 1 is the dirty flag, and the
+ * remaining DCI_MAX_ITEMS_PER_LOG_CODE bytes are the mask itself.
+ * Returns the number of bytes written to @dest_ptr.
+ */
+static int dci_fill_log_mask(unsigned char *dest_ptr, unsigned char *src_ptr)
+{
+	struct diag_ctrl_log_mask header;
+	int header_len = sizeof(struct diag_ctrl_log_mask);
+
+	header.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+	header.num_items = DCI_MAX_ITEMS_PER_LOG_CODE;
+	/* NOTE(review): 11 appears to be the control-header payload size
+	 * counted in data_len — confirm against the control protocol spec.
+	 */
+	header.data_len = 11 + DCI_MAX_ITEMS_PER_LOG_CODE;
+	header.stream_id = DCI_MASK_STREAM;
+	header.status = 3;
+	header.equip_id = *src_ptr;
+	header.log_mask_size = DCI_MAX_ITEMS_PER_LOG_CODE;
+	memcpy(dest_ptr, &header, header_len);
+	/* Skip the equip id and dirty bytes; copy only the mask payload */
+	memcpy(dest_ptr + header_len, src_ptr + 2, DCI_MAX_ITEMS_PER_LOG_CODE);
+
+	return header_len + DCI_MAX_ITEMS_PER_LOG_CODE;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * Send the composite DCI log mask for a remote processor over the
+ * bridge, one packet per equip id. Only blocks whose dirty byte is set
+ * are sent; the dirty byte is cleared after a successful write, so a
+ * failed block is retried on the next call.
+ */
+int diag_send_dci_log_mask_remote(int token)
+{
+
+	unsigned char *buf = NULL;
+	struct diag_dci_header_t dci_header;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int log_header_size = sizeof(struct diag_ctrl_log_mask);
+	uint8_t *log_mask_ptr = NULL;
+	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	int updated;
+	uint32_t write_len = 0;
+
+	mutex_lock(&dci_log_mask_mutex);
+	log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
+	if (!log_mask_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return -EINVAL;
+	}
+
+	/* DCI header is common to all equipment IDs */
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	dci_header.length = log_header_size + DCI_MAX_ITEMS_PER_LOG_CODE + 1;
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+	for (i = 0; i < DCI_MAX_LOG_CODES; i++) {
+		updated = 1;
+		write_len = 0;
+		/* Skip equip ids whose dirty byte (byte 1) is not set */
+		if (!*(log_mask_ptr + 1)) {
+			log_mask_ptr += 514;
+			continue;
+		}
+
+		buf = dci_get_buffer_from_bridge(token);
+		if (!buf) {
+			pr_err("diag: In %s, unable to get dci buffers to write data\n",
+				__func__);
+			mutex_unlock(&dci_log_mask_mutex);
+			return -EAGAIN;
+		}
+
+		memcpy(buf + write_len, &dci_header, dci_header_size);
+		write_len += dci_header_size;
+		write_len += dci_fill_log_mask(buf + write_len, log_mask_ptr);
+		*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+		write_len += sizeof(uint8_t);
+		err = diag_dci_write_bridge(token, buf, write_len);
+		if (err) {
+			pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
+			       i, token, err);
+			diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+			updated = 0;
+		}
+		if (updated)
+			*(log_mask_ptr + 1) = 0; /* clear dirty byte */
+		/* advance to the next 514-byte equip-id block */
+		log_mask_ptr += 514;
+	}
+	mutex_unlock(&dci_log_mask_mutex);
+	return ret;
+}
+#endif
+
+/*
+ * Send the local composite DCI log mask to all peripherals, one
+ * control packet per equip id. Only blocks whose dirty byte is set are
+ * sent; the dirty byte is cleared once the write to every peripheral
+ * succeeds, so failed updates are retried on the next call.
+ */
+int diag_send_dci_log_mask(int token)
+{
+	void *buf = log_mask.update_buf;
+	int write_len = 0;
+	uint8_t *log_mask_ptr = NULL;
+	int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
+	int updated;
+
+
+	mutex_lock(&dci_log_mask_mutex);
+	log_mask_ptr = dci_ops_tbl[DCI_LOCAL_PROC].log_mask_composite;
+	if (!log_mask_ptr) {
+		mutex_unlock(&dci_log_mask_mutex);
+		return -EINVAL;
+	}
+
+	mutex_lock(&log_mask.lock);
+	/* 16 equip-id blocks (DCI_MAX_LOG_CODES), 514 bytes each */
+	for (i = 0; i < 16; i++) {
+		updated = 1;
+		/* Skip equip ids whose dirty byte (byte 1) is not set */
+		if (!(*(log_mask_ptr + 1))) {
+			log_mask_ptr += 514;
+			continue;
+		}
+		write_len = dci_fill_log_mask(buf, log_mask_ptr);
+		for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
+			err = diag_dci_write_proc(j, DIAG_CNTL_TYPE, buf,
+						  write_len);
+			if (err != DIAG_DCI_NO_ERROR) {
+				updated = 0;
+				ret = DIAG_DCI_SEND_DATA_FAIL;
+			}
+		}
+		if (updated)
+			*(log_mask_ptr+1) = 0; /* clear dirty byte */
+		log_mask_ptr += 514;
+	}
+	mutex_unlock(&log_mask.lock);
+	mutex_unlock(&dci_log_mask_mutex);
+	return ret;
+}
+
+/* Initialize composite masks and peripheral status for the local (APSS)
+ * processor. Always succeeds.
+ */
+static int diag_dci_init_local(void)
+{
+	struct dci_ops_tbl_t *ops = &dci_ops_tbl[DCI_LOCAL_PROC];
+
+	create_dci_log_mask_tbl(ops->log_mask_composite, DCI_LOG_MASK_CLEAN);
+	create_dci_event_mask_tbl(ops->event_mask_composite);
+	ops->peripheral_status |= DIAG_CON_APSS;
+
+	return 0;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * Arm the handshake timer and work item for every remote DCI
+ * processor's channel status tracker.
+ */
+static void diag_dci_init_handshake_remote(void)
+{
+	int i;
+	struct dci_channel_status_t *temp = NULL;
+
+	for (i = DCI_REMOTE_BASE; i < NUM_DCI_PROC; i++) {
+		temp = &dci_channel_status[i];
+		temp->id = i;
+		setup_timer(&temp->wait_time, dci_chk_handshake, i);
+		INIT_WORK(&temp->handshake_work, dci_handshake_work_fn);
+	}
+}
+
+/*
+ * Initialize DCI state for remote processors: the bridge write
+ * mempool, per-processor composite masks, the partial-packet
+ * reassembly buffer and the handshake timers/work items.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int diag_dci_init_remote(void)
+{
+	int i;
+	struct dci_ops_tbl_t *temp = NULL;
+
+	diagmem_init(driver, POOL_TYPE_MDM_DCI_WRITE);
+
+	for (i = DCI_REMOTE_BASE; i < DCI_REMOTE_LAST; i++) {
+		temp = &dci_ops_tbl[i];
+		create_dci_log_mask_tbl(temp->log_mask_composite,
+					DCI_LOG_MASK_CLEAN);
+		create_dci_event_mask_tbl(temp->event_mask_composite);
+	}
+
+	/* Buffer for reassembling packets that span bridge reads */
+	partial_pkt.data = kzalloc(MAX_DCI_PACKET_SZ, GFP_KERNEL);
+	if (!partial_pkt.data)
+		return -ENOMEM;
+
+	partial_pkt.total_len = 0;
+	partial_pkt.read_len = 0;
+	partial_pkt.remaining = 0;
+	partial_pkt.processing = 0;
+
+	diag_dci_init_handshake_remote();
+
+	return 0;
+}
+#else
+/* Stub when bridge support is not compiled in: nothing to set up. */
+static int diag_dci_init_remote(void)
+{
+	return 0;
+}
+#endif
+
+/*
+ * Set up per-processor DCI state (composite masks and, when bridge
+ * support is built in, remote-processor resources).
+ * Returns 0 on success or a negative error code.
+ */
+static int diag_dci_init_ops_tbl(void)
+{
+	int err = 0;
+
+	err = diag_dci_init_local();
+	if (err)
+		goto err;
+	err = diag_dci_init_remote();
+	if (err)
+		goto err;
+
+	return 0;
+
+err:
+	/* Propagate the real failure instead of collapsing to -ENOMEM */
+	return err;
+}
+
+/*
+ * One-time initialization of the DCI subsystem: locks, composite mask
+ * tables, the apps-side DCI buffer, client/request lists, the DCI
+ * workqueue and the data-drain timer. Returns DIAG_DCI_NO_ERROR on
+ * success, DIAG_DCI_NO_REG on failure (all partial state is released).
+ */
+int diag_dci_init(void)
+{
+	int ret = 0;
+
+	driver->dci_tag = 0;
+	driver->dci_client_id = 0;
+	driver->num_dci_client = 0;
+	mutex_init(&driver->dci_mutex);
+	mutex_init(&dci_log_mask_mutex);
+	mutex_init(&dci_event_mask_mutex);
+	spin_lock_init(&ws_lock);
+
+	ret = diag_dci_init_ops_tbl();
+	if (ret)
+		goto err;
+
+	if (driver->apps_dci_buf == NULL) {
+		driver->apps_dci_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
+		if (driver->apps_dci_buf == NULL)
+			goto err;
+	}
+	INIT_LIST_HEAD(&driver->dci_client_list);
+	INIT_LIST_HEAD(&driver->dci_req_list);
+
+	driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
+	if (!driver->diag_dci_wq)
+		goto err;
+
+	INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);
+
+	setup_timer(&dci_drain_timer, dci_drain_data, 0);
+	return DIAG_DCI_NO_ERROR;
+err:
+	pr_err("diag: Could not initialize diag DCI buffers");
+	/* kfree(NULL) is a no-op, so never-allocated buffers are safe */
+	kfree(driver->apps_dci_buf);
+
+	if (driver->diag_dci_wq)
+		destroy_workqueue(driver->diag_dci_wq);
+	kfree(partial_pkt.data);
+	mutex_destroy(&driver->dci_mutex);
+	mutex_destroy(&dci_log_mask_mutex);
+	mutex_destroy(&dci_event_mask_mutex);
+	return DIAG_DCI_NO_REG;
+}
+
+/* Open the DCI data and DCI command channels for every peripheral. */
+void diag_dci_channel_init(void)
+{
+	uint8_t p;
+
+	for (p = 0; p < NUM_PERIPHERALS; p++) {
+		diagfwd_open(p, TYPE_DCI);
+		diagfwd_open(p, TYPE_DCI_CMD);
+	}
+}
+
+/*
+ * Tear down the DCI subsystem: free buffers, destroy the locks and
+ * the DCI workqueue. Counterpart to diag_dci_init().
+ */
+void diag_dci_exit(void)
+{
+	kfree(partial_pkt.data);
+	kfree(driver->apps_dci_buf);
+	mutex_destroy(&driver->dci_mutex);
+	mutex_destroy(&dci_log_mask_mutex);
+	mutex_destroy(&dci_event_mask_mutex);
+	destroy_workqueue(driver->diag_dci_wq);
+}
+
+/*
+ * Reset the log mask of the given client, rebuild the composite log
+ * mask for its processor and push the update to userspace clients
+ * (local processor only) and to the peripherals.
+ * Returns the result of the peripheral mask send, or
+ * DIAG_DCI_TABLE_ERR if the client id is unknown.
+ */
+int diag_dci_clear_log_mask(int client_id)
+{
+	int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	entry = diag_dci_get_client_entry(client_id);
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return DIAG_DCI_TABLE_ERR;
+	}
+	token = entry->client_info.token;
+
+	/* Removed unused local that shadowed the composite mask pointer */
+	create_dci_log_mask_tbl(entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+	diag_dci_invalidate_cumulative_log_mask(token);
+
+	/*
+	 * Send updated mask to userspace clients only if the client
+	 * is registered on the local processor
+	 */
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	/* Send updated mask to peripherals */
+	err = dci_ops_tbl[token].send_log_mask(token);
+	return err;
+}
+
+/*
+ * Reset the event mask of the given client, rebuild the composite
+ * event mask for its processor and push the update to userspace
+ * clients (local processor only) and to the peripherals.
+ * Returns the result of the peripheral mask send, or
+ * DIAG_DCI_TABLE_ERR if the client id is unknown.
+ */
+int diag_dci_clear_event_mask(int client_id)
+{
+	int err = DIAG_DCI_NO_ERROR, token = DCI_LOCAL_PROC;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	entry = diag_dci_get_client_entry(client_id);
+	if (!entry) {
+		pr_err("diag: In %s, invalid client entry\n", __func__);
+		return DIAG_DCI_TABLE_ERR;
+	}
+	token = entry->client_info.token;
+
+	/* Removed unused local that shadowed the composite mask pointer */
+	create_dci_event_mask_tbl(entry->dci_event_mask);
+	diag_dci_invalidate_cumulative_event_mask(token);
+
+	/*
+	 * Send updated mask to userspace clients only if the client is
+	 * registered on the local processor
+	 */
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	/* Send updated mask to peripherals */
+	err = dci_ops_tbl[token].send_event_mask(token);
+	return err;
+}
+
+/*
+ * Return MODE_REALTIME if any client registered on @token is in
+ * real-time mode, MODE_NONREALTIME otherwise.
+ */
+uint8_t diag_dci_get_cumulative_real_time(int token)
+{
+	uint8_t real_time = MODE_NONREALTIME;
+	struct list_head *start, *temp;
+	struct diag_dci_client_tbl *entry = NULL;
+
+	list_for_each_safe(start, temp, &driver->dci_client_list) {
+		entry = list_entry(start, struct diag_dci_client_tbl, track);
+		if (entry->real_time == MODE_REALTIME &&
+					entry->client_info.token == token) {
+			/* Named constant instead of a bare 1 */
+			real_time = MODE_REALTIME;
+			break;
+		}
+	}
+	return real_time;
+}
+
+/*
+ * Set a client's real-time mode flag.
+ * Returns 1 on success, 0 if the entry is invalid.
+ */
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry, uint8_t real_time)
+{
+	if (entry) {
+		entry->real_time = real_time;
+		return 1;
+	}
+
+	pr_err("diag: In %s, invalid client entry\n", __func__);
+	return 0;
+}
+
+/*
+ * Register a new DCI client for the requesting process.
+ *
+ * Allocates the client's log/event masks and per-peripheral buffers,
+ * adds the client to the driver's list and votes the DCI processor up
+ * when it is the first client. Returns the new client id on success,
+ * DIAG_DCI_NO_REG on any failure.
+ */
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
+{
+	int i, err = 0;
+	struct diag_dci_client_tbl *new_entry = NULL;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+
+	if (!reg_entry)
+		return DIAG_DCI_NO_REG;
+	if (!VALID_DCI_TOKEN(reg_entry->token)) {
+		pr_alert("diag: Invalid DCI client token, %d\n",
+						reg_entry->token);
+		return DIAG_DCI_NO_REG;
+	}
+
+	if (driver->dci_state == DIAG_DCI_NO_REG)
+		return DIAG_DCI_NO_REG;
+
+	if (driver->num_dci_client >= MAX_DCI_CLIENTS)
+		return DIAG_DCI_NO_REG;
+
+	new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
+	if (!new_entry)
+		return DIAG_DCI_NO_REG;
+
+	mutex_lock(&driver->dci_mutex);
+
+	/* Cache the registering task and its tgid for later lookups */
+	new_entry->client = current;
+	new_entry->tgid = current->tgid;
+	new_entry->client_info.notification_list =
+				reg_entry->notification_list;
+	new_entry->client_info.signal_type =
+				reg_entry->signal_type;
+	new_entry->client_info.token = reg_entry->token;
+	switch (reg_entry->token) {
+	case DCI_LOCAL_PROC:
+		new_entry->num_buffers = NUM_DCI_PERIPHERALS;
+		break;
+	case DCI_MDM_PROC:
+		new_entry->num_buffers = 1;
+		break;
+	}
+	new_entry->real_time = MODE_REALTIME;
+	new_entry->in_service = 0;
+	INIT_LIST_HEAD(&new_entry->list_write_buf);
+	mutex_init(&new_entry->write_buf_mutex);
+	new_entry->dci_log_mask = kzalloc(DCI_LOG_MASK_SIZE, GFP_KERNEL);
+	if (!new_entry->dci_log_mask) {
+		pr_err("diag: Unable to create log mask for client, %d",
+							driver->dci_client_id);
+		goto fail_alloc;
+	}
+	create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN);
+
+	new_entry->dci_event_mask = kzalloc(DCI_EVENT_MASK_SIZE, GFP_KERNEL);
+	if (!new_entry->dci_event_mask)
+		goto fail_alloc;
+	create_dci_event_mask_tbl(new_entry->dci_event_mask);
+
+	new_entry->buffers = kzalloc(new_entry->num_buffers *
+				     sizeof(struct diag_dci_buf_peripheral_t),
+				     GFP_KERNEL);
+	if (!new_entry->buffers) {
+		pr_err("diag: Unable to allocate buffers for peripherals in %s\n",
+								__func__);
+		goto fail_alloc;
+	}
+
+	for (i = 0; i < new_entry->num_buffers; i++) {
+		proc_buf = &new_entry->buffers[i];
+		mutex_init(&proc_buf->health_mutex);
+		mutex_init(&proc_buf->buf_mutex);
+		proc_buf->health.dropped_events = 0;
+		proc_buf->health.dropped_logs = 0;
+		proc_buf->health.received_events = 0;
+		proc_buf->health.received_logs = 0;
+		proc_buf->buf_primary = kzalloc(
+					sizeof(struct diag_dci_buffer_t),
+					GFP_KERNEL);
+		if (!proc_buf->buf_primary)
+			goto fail_alloc;
+		proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
+					    GFP_KERNEL);
+		if (!proc_buf->buf_cmd)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_primary,
+					   DCI_BUF_PRIMARY);
+		if (err)
+			goto fail_alloc;
+		err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
+		if (err)
+			goto fail_alloc;
+		proc_buf->buf_curr = proc_buf->buf_primary;
+	}
+
+	list_add_tail(&new_entry->track, &driver->dci_client_list);
+	driver->dci_client_id++;
+	new_entry->client_info.client_id = driver->dci_client_id;
+	reg_entry->client_id = driver->dci_client_id;
+	driver->num_dci_client++;
+	/* First client on any processor votes the DCI channel up */
+	if (driver->num_dci_client == 1)
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP, reg_entry->token);
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	mutex_unlock(&driver->dci_mutex);
+
+	return driver->dci_client_id;
+
+fail_alloc:
+	/*
+	 * Only walk the per-peripheral buffers when the array itself was
+	 * allocated: computing &new_entry->buffers[i] on a NULL array
+	 * yields bogus non-NULL pointers for i > 0, which the cleanup
+	 * below would then dereference.
+	 */
+	if (new_entry->buffers) {
+		for (i = 0; i < new_entry->num_buffers; i++) {
+			proc_buf = &new_entry->buffers[i];
+			mutex_destroy(&proc_buf->health_mutex);
+			if (proc_buf->buf_primary) {
+				kfree(proc_buf->buf_primary->data);
+				mutex_destroy(
+				   &proc_buf->buf_primary->data_mutex);
+			}
+			kfree(proc_buf->buf_primary);
+			if (proc_buf->buf_cmd) {
+				kfree(proc_buf->buf_cmd->data);
+				mutex_destroy(
+				   &proc_buf->buf_cmd->data_mutex);
+			}
+			kfree(proc_buf->buf_cmd);
+		}
+	}
+	kfree(new_entry->dci_event_mask);
+	kfree(new_entry->dci_log_mask);
+	kfree(new_entry->buffers);
+	kfree(new_entry);
+	mutex_unlock(&driver->dci_mutex);
+	return DIAG_DCI_NO_REG;
+}
+
+/*
+ * Tear down a DCI client: remove it from the client list, clear and
+ * re-propagate the cumulative masks, drop its pending requests and
+ * write buffers, free its per-peripheral buffers and revote the DCI
+ * processor state.
+ */
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry)
+{
+	int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
+	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
+	struct diag_dci_buffer_t *buf_entry, *temp;
+	struct list_head *start, *req_temp;
+	struct dci_pkt_req_entry_t *req_entry = NULL;
+	int token = DCI_LOCAL_PROC;
+
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	token = entry->client_info.token;
+	/*
+	 * Remove the entry from the list before freeing the buffers
+	 * to ensure that we don't have any invalid access.
+	 */
+	if (!list_empty(&entry->track))
+		list_del(&entry->track);
+	driver->num_dci_client--;
+	/*
+	 * Clear the client's log and event masks, update the cumulative
+	 * masks and send the masks to peripherals
+	 */
+	kfree(entry->dci_log_mask);
+	diag_dci_invalidate_cumulative_log_mask(token);
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
+	ret = dci_ops_tbl[token].send_log_mask(token);
+	if (ret != DIAG_DCI_NO_ERROR)
+		return ret;
+	kfree(entry->dci_event_mask);
+	diag_dci_invalidate_cumulative_event_mask(token);
+	if (token == DCI_LOCAL_PROC)
+		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
+	ret = dci_ops_tbl[token].send_event_mask(token);
+	if (ret != DIAG_DCI_NO_ERROR)
+		return ret;
+
+	/* Drop any outstanding packet requests made by this client */
+	list_for_each_safe(start, req_temp, &driver->dci_req_list) {
+		req_entry = list_entry(start, struct dci_pkt_req_entry_t,
+				       track);
+		if (req_entry->client_id == entry->client_info.client_id) {
+			if (!list_empty(&req_entry->track))
+				list_del(&req_entry->track);
+			kfree(req_entry);
+		}
+	}
+
+	/* Clean up any buffer that is pending write */
+	mutex_lock(&entry->write_buf_mutex);
+	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+							buf_track) {
+		if (!list_empty(&buf_entry->buf_track))
+			list_del(&buf_entry->buf_track);
+		if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			kfree(buf_entry);
+		} else if (buf_entry->buf_type == DCI_BUF_CMD) {
+			peripheral = buf_entry->data_source;
+			if (peripheral == APPS_DATA)
+				continue;
+		}
+		/*
+		 * These are buffers that can't be written to the client which
+		 * means that the copy cannot be completed. Make sure that we
+		 * remove those references in DCI wakeup source.
+		 */
+		diag_ws_on_copy_fail(DIAG_WS_DCI);
+	}
+	mutex_unlock(&entry->write_buf_mutex);
+
+	for (i = 0; i < entry->num_buffers; i++) {
+		proc_buf = &entry->buffers[i];
+		buf_entry = proc_buf->buf_curr;
+		mutex_lock(&proc_buf->buf_mutex);
+		/* Clean up secondary buffer from mempool that is active */
+		if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
+			mutex_lock(&buf_entry->data_mutex);
+			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
+			buf_entry->data = NULL;
+			mutex_unlock(&buf_entry->data_mutex);
+			mutex_destroy(&buf_entry->data_mutex);
+			kfree(buf_entry);
+		}
+
+		mutex_lock(&proc_buf->buf_primary->data_mutex);
+		kfree(proc_buf->buf_primary->data);
+		mutex_unlock(&proc_buf->buf_primary->data_mutex);
+
+		mutex_lock(&proc_buf->buf_cmd->data_mutex);
+		kfree(proc_buf->buf_cmd->data);
+		mutex_unlock(&proc_buf->buf_cmd->data_mutex);
+
+		mutex_destroy(&proc_buf->health_mutex);
+		mutex_destroy(&proc_buf->buf_primary->data_mutex);
+		mutex_destroy(&proc_buf->buf_cmd->data_mutex);
+
+		kfree(proc_buf->buf_primary);
+		kfree(proc_buf->buf_cmd);
+		mutex_unlock(&proc_buf->buf_mutex);
+	}
+	mutex_destroy(&entry->write_buf_mutex);
+
+	kfree(entry->buffers);
+	kfree(entry);
+
+	/* Last client gone: vote DCI down; otherwise recompute real time */
+	if (driver->num_dci_client == 0) {
+		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN, token);
+	} else {
+		real_time = diag_dci_get_cumulative_real_time(token);
+		diag_update_real_time_vote(DIAG_PROC_DCI, real_time, token);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+/*
+ * Write a DCI packet to a peripheral. DIAG_DATA_TYPE goes out on the
+ * DCI command channel, DIAG_CNTL_TYPE on the control channel.
+ * A missing channel (-ENODEV from diagfwd_write) is treated as
+ * success, so peripherals without DCI support are silently skipped.
+ */
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len)
+{
+	uint8_t dest_channel = TYPE_DATA;
+	int err = 0;
+
+	/*
+	 * NOTE(review): the feature mask checked below is always the
+	 * modem's, regardless of @peripheral — confirm this gating is
+	 * intended for all peripherals.
+	 */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < 0 ||
+	    !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+		DIAG_LOG(DIAG_DEBUG_DCI,
+			"buf: 0x%pK, p: %d, len: %d, f_mask: %d\n",
+			buf, peripheral, len,
+			driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask);
+		return -EINVAL;
+	}
+
+	if (pkt_type == DIAG_DATA_TYPE) {
+		dest_channel = TYPE_DCI_CMD;
+	} else if (pkt_type == DIAG_CNTL_TYPE) {
+		dest_channel = TYPE_CNTL;
+	} else {
+		pr_err("diag: Invalid DCI pkt type in %s", __func__);
+		return -EINVAL;
+	}
+
+	err = diagfwd_write(peripheral, dest_channel, buf, len);
+	if (err && err != -ENODEV) {
+		pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, dest_channel, len, err);
+	} else {
+		err = DIAG_DCI_NO_ERROR;
+	}
+
+	return err;
+}
+
+/*
+ * Copy (and optionally reset) a client's DCI health statistics.
+ *
+ * For proc == ALL_PROC the counters are summed over all of the
+ * client's buffers; otherwise only the requested processor's buffer is
+ * reported. When stats->reset_status is set, the counters are cleared
+ * under the per-buffer health mutex after being copied.
+ */
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc)
+{
+	struct diag_dci_client_tbl *entry = NULL;
+	struct diag_dci_health_t *health = NULL;
+	struct diag_dci_health_stats *stats = NULL;
+	int i, proc;
+
+	if (!stats_proc)
+		return -EINVAL;
+
+	stats = &stats_proc->health;
+	proc = stats_proc->proc;
+	if (proc < ALL_PROC || proc > APPS_DATA)
+		return -EINVAL;
+
+	entry = diag_dci_get_client_entry(stats_proc->client_id);
+	if (!entry)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	/*
+	 * If the client has registered for remote processor, the
+	 * proc field doesn't have any effect as they have only one buffer.
+	 */
+	if (entry->client_info.token)
+		proc = 0;
+
+	stats->stats.dropped_logs = 0;
+	stats->stats.dropped_events = 0;
+	stats->stats.received_logs = 0;
+	stats->stats.received_events = 0;
+
+	/* Single-processor query: copy that buffer's counters directly */
+	if (proc != ALL_PROC) {
+		health = &entry->buffers[proc].health;
+		stats->stats.dropped_logs = health->dropped_logs;
+		stats->stats.dropped_events = health->dropped_events;
+		stats->stats.received_logs = health->received_logs;
+		stats->stats.received_events = health->received_events;
+		if (stats->reset_status) {
+			mutex_lock(&entry->buffers[proc].health_mutex);
+			health->dropped_logs = 0;
+			health->dropped_events = 0;
+			health->received_logs = 0;
+			health->received_events = 0;
+			mutex_unlock(&entry->buffers[proc].health_mutex);
+		}
+		return DIAG_DCI_NO_ERROR;
+	}
+
+	/* ALL_PROC: aggregate the counters across every buffer */
+	for (i = 0; i < entry->num_buffers; i++) {
+		health = &entry->buffers[i].health;
+		stats->stats.dropped_logs += health->dropped_logs;
+		stats->stats.dropped_events += health->dropped_events;
+		stats->stats.received_logs += health->received_logs;
+		stats->stats.received_events += health->received_events;
+		if (stats->reset_status) {
+			mutex_lock(&entry->buffers[i].health_mutex);
+			health->dropped_logs = 0;
+			health->dropped_events = 0;
+			health->received_logs = 0;
+			health->received_events = 0;
+			mutex_unlock(&entry->buffers[i].health_mutex);
+		}
+	}
+	return DIAG_DCI_NO_ERROR;
+}
+
+/*
+ * Report the bitmask of peripherals supporting DCI for the processor
+ * requested in support_list->proc. Returns DIAG_DCI_NO_ERROR on
+ * success, -ENOMEM for a NULL argument, -EIO for an invalid token.
+ */
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list)
+{
+	if (!support_list)
+		return -ENOMEM;
+	if (!VALID_DCI_TOKEN(support_list->proc))
+		return -EIO;
+
+	support_list->list = dci_ops_tbl[support_list->proc].peripheral_status;
+
+	return DIAG_DCI_NO_ERROR;
+}
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
new file mode 100644
index 0000000..61eb3f5
--- /dev/null
+++ b/drivers/char/diag/diag_dci.h
@@ -0,0 +1,328 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_DCI_H
+#define DIAG_DCI_H
+
+#define MAX_DCI_CLIENTS		10
+#define DCI_PKT_RSP_CODE	0x93
+#define DCI_DELAYED_RSP_CODE	0x94
+#define DCI_CONTROL_PKT_CODE	0x9A
+#define EXT_HDR_CMD_CODE	0x98
+#define LOG_CMD_CODE		0x10
+#define EVENT_CMD_CODE		0x60
+#define DCI_PKT_RSP_TYPE	0
+#define DCI_LOG_TYPE		-1
+#define DCI_EVENT_TYPE		-2
+#define DCI_EXT_HDR_TYPE	-3
+#define SET_LOG_MASK		1
+#define DISABLE_LOG_MASK	0
+#define MAX_EVENT_SIZE		512
+#define DCI_CLIENT_INDEX_INVALID -1
+#define DCI_LOG_CON_MIN_LEN		14
+#define DCI_EVENT_CON_MIN_LEN		16
+
+#define EXT_HDR_LEN		8
+#define EXT_HDR_VERSION		1
+
+#define DCI_BUF_PRIMARY		1
+#define DCI_BUF_SECONDARY	2
+#define DCI_BUF_CMD		3
+
+#ifdef CONFIG_DEBUG_FS
+#define DIAG_DCI_DEBUG_CNT	100
+#define DIAG_DCI_DEBUG_LEN	100
+#endif
+
+/* 16 log code categories, each has:
+ * 1 bytes equip id + 1 dirty byte + 512 byte max log mask
+ */
+#define DCI_LOG_MASK_SIZE		(16*514)
+#define DCI_EVENT_MASK_SIZE		512
+#define DCI_MASK_STREAM			2
+#define DCI_MAX_LOG_CODES		16
+#define DCI_MAX_ITEMS_PER_LOG_CODE	512
+
+#define DCI_LOG_MASK_CLEAN		0
+#define DCI_LOG_MASK_DIRTY		1
+
+#define MIN_DELAYED_RSP_LEN		12
+/*
+ * Maximum data size that peripherals send = 8.5K log +
+ * DCI header + footer (6 bytes)
+ */
+#define MAX_DCI_PACKET_SZ		8710
+
+extern unsigned int dci_max_reg;
+extern unsigned int dci_max_clients;
+
+#define DCI_LOCAL_PROC		0
+#define DCI_REMOTE_BASE		1
+#define DCI_MDM_PROC		DCI_REMOTE_BASE
+#define DCI_REMOTE_LAST		(DCI_REMOTE_BASE + 1)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DCI_PROC		1
+#else
+#define NUM_DCI_PROC		DCI_REMOTE_LAST
+#endif
+
+#define DCI_REMOTE_DATA	0
+
+/* Parenthesize macro arguments so complex expressions (e.g. a ?: or an
+ * arithmetic expression passed as x) evaluate correctly.
+ */
+#define VALID_DCI_TOKEN(x)	(((x) >= 0 && (x) < NUM_DCI_PROC) ? 1 : 0)
+#define BRIDGE_TO_TOKEN(x)	((x) - DIAGFWD_MDM_DCI + DCI_REMOTE_BASE)
+#define TOKEN_TO_BRIDGE(x)	(dci_ops_tbl[(x)].ctx)
+
+#define DCI_MAGIC		(0xAABB1122)
+
+struct dci_pkt_req_t {
+	int uid;
+	int client_id;
+} __packed;
+
+struct dci_stream_req_t {
+	int type;
+	int client_id;
+	int set_flag;
+	int count;
+} __packed;
+
+struct dci_pkt_req_entry_t {
+	int client_id;
+	int uid;
+	int tag;
+	struct list_head track;
+} __packed;
+
+struct diag_dci_reg_tbl_t {
+	int client_id;
+	uint16_t notification_list;
+	int signal_type;
+	int token;
+} __packed;
+
+struct diag_dci_health_t {
+	int dropped_logs;
+	int dropped_events;
+	int received_logs;
+	int received_events;
+};
+
+struct diag_dci_partial_pkt_t {
+	unsigned char *data;
+	uint32_t total_len;
+	uint32_t read_len;
+	uint32_t remaining;
+	uint8_t processing;
+} __packed;
+
+struct diag_dci_buffer_t {
+	unsigned char *data;
+	unsigned int data_len;
+	struct mutex data_mutex;
+	uint8_t in_busy;
+	uint8_t buf_type;
+	int data_source;
+	int capacity;
+	uint8_t in_list;
+	struct list_head buf_track;
+};
+
+struct diag_dci_buf_peripheral_t {
+	struct diag_dci_buffer_t *buf_curr;
+	struct diag_dci_buffer_t *buf_primary;
+	struct diag_dci_buffer_t *buf_cmd;
+	struct diag_dci_health_t health;
+	struct mutex health_mutex;
+	struct mutex buf_mutex;
+};
+
+/* Per-client DCI state: registration parameters, the client's private
+ * log/event masks, and one buffer set per processor (see num_buffers).
+ */
+struct diag_dci_client_tbl {
+	int tgid;			/* thread group id of the owning process */
+	struct diag_dci_reg_tbl_t client_info;	/* registration info (token, signal, ...) */
+	struct task_struct *client;	/* task to notify/signal for this client */
+	unsigned char *dci_log_mask;	/* this client's log mask */
+	unsigned char *dci_event_mask;	/* this client's event mask */
+	uint8_t real_time;		/* client's real-time mode vote */
+	struct list_head track;		/* node in the global DCI client list */
+	struct diag_dci_buf_peripheral_t *buffers;	/* array, one per proc */
+	uint8_t num_buffers;		/* number of entries in buffers[] */
+	uint8_t in_service;		/* presumably set while data is pending delivery -- TODO confirm */
+	struct list_head list_write_buf;	/* buffers queued for writing to the client */
+	struct mutex write_buf_mutex;	/* presumably guards list_write_buf -- TODO confirm */
+};
+
+struct diag_dci_health_stats {
+	struct diag_dci_health_t stats;
+	int reset_status;
+};
+
+struct diag_dci_health_stats_proc {
+	int client_id;
+	struct diag_dci_health_stats health;
+	int proc;
+} __packed;
+
+struct diag_dci_peripherals_t {
+	int proc;
+	uint16_t list;
+} __packed;
+
+/* This is used for querying DCI Log or Event Mask */
+struct diag_log_event_stats {
+	int client_id;
+	uint16_t code;
+	int is_set;
+} __packed;
+
+struct diag_dci_pkt_rsp_header_t {
+	int type;
+	int length;
+	uint8_t delete_flag;
+	int uid;
+} __packed;
+
+struct diag_dci_pkt_header_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t len;
+	uint8_t pkt_code;
+	int tag;
+} __packed;
+
+struct diag_dci_header_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t length;
+	uint8_t cmd_code;
+} __packed;
+
+/* Per-DCI-processor operations table: composite (all-clients) masks plus
+ * the callbacks used to push them to the peripherals.
+ */
+struct dci_ops_tbl_t {
+	int ctx;			/* bridge context id (see TOKEN_TO_BRIDGE) */
+	int mempool;			/* mempool id used for this proc's traffic */
+	unsigned char log_mask_composite[DCI_LOG_MASK_SIZE];
+	unsigned char event_mask_composite[DCI_EVENT_MASK_SIZE];
+	int (*send_log_mask)(int token);	/* push composite log mask */
+	int (*send_event_mask)(int token);	/* push composite event mask */
+	uint16_t peripheral_status;	/* bitmask reported via diag_dci_get_support_list */
+} __packed;
+
+/* Handshake/retry bookkeeping for a remote DCI channel.
+ * NOTE(review): __packed on a struct embedding timer_list/work_struct
+ * looks unnecessary and can force unaligned member access -- confirm.
+ */
+struct dci_channel_status_t {
+	int id;
+	int open;
+	int retry_count;
+	struct timer_list wait_time;
+	struct work_struct handshake_work;
+} __packed;
+
+extern struct dci_ops_tbl_t dci_ops_tbl[NUM_DCI_PROC];
+
+enum {
+	DIAG_DCI_NO_ERROR = 1001,	/* No error */
+	DIAG_DCI_NO_REG,		/* Could not register */
+	DIAG_DCI_NO_MEM,		/* Failed memory allocation */
+	DIAG_DCI_NOT_SUPPORTED,	/* This particular client is not supported */
+	DIAG_DCI_HUGE_PACKET,	/* Request/Response Packet too huge */
+	DIAG_DCI_SEND_DATA_FAIL,/* writing to kernel or peripheral fails */
+	DIAG_DCI_TABLE_ERR	/* Error dealing with registration tables */
+};
+
+#define DCI_HDR_SIZE					\
+	((sizeof(struct diag_dci_pkt_header_t) >	\
+	  sizeof(struct diag_dci_header_t)) ?		\
+	(sizeof(struct diag_dci_pkt_header_t) + 1) :	\
+	(sizeof(struct diag_dci_header_t) + 1))		\
+
+#define DCI_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_HDR_SIZE)
+
+#define DCI_REQ_HDR_SIZE				\
+	((sizeof(struct dci_pkt_req_t) >		\
+	  sizeof(struct dci_stream_req_t)) ?		\
+	(sizeof(struct dci_pkt_req_t)) :		\
+	(sizeof(struct dci_stream_req_t)))		\
+
+#define DCI_REQ_BUF_SIZE (uint32_t)(DIAG_MAX_REQ_SIZE + DCI_REQ_HDR_SIZE)
+
+#ifdef CONFIG_DEBUG_FS
+/* To collect debug information during each socket read */
+struct diag_dci_data_info {
+	unsigned long iteration;
+	int data_size;
+	char time_stamp[DIAG_TS_SIZE];
+	uint8_t peripheral;
+	uint8_t ch_type;
+	uint8_t proc;
+};
+
+extern struct diag_dci_data_info *dci_traffic;
+extern struct mutex dci_stat_mutex;
+#endif
+
+int diag_dci_init(void);
+void diag_dci_channel_init(void);
+void diag_dci_exit(void);
+int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry);
+int diag_dci_deinit_client(struct diag_dci_client_tbl *entry);
+void diag_dci_channel_open_work(struct work_struct *work);
+void diag_dci_notify_client(int peripheral_mask, int data, int proc);
+void diag_dci_wakeup_clients(void);
+void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes);
+void diag_dci_process_peripheral_data(struct diagfwd_info *p_info, void *buf,
+				      int recd_bytes);
+int diag_process_dci_transaction(unsigned char *buf, int len);
+void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
+			 int token);
+void extract_dci_ctrl_pkt(unsigned char *buf, int len, int token);
+struct diag_dci_client_tbl *diag_dci_get_client_entry(int client_id);
+struct diag_dci_client_tbl *dci_lookup_client_entry_pid(int tgid);
+void diag_process_remote_dci_read_data(int index, void *buf, int recd_bytes);
+int diag_dci_get_support_list(struct diag_dci_peripherals_t *support_list);
+/* DCI Log streaming functions */
+void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
+						uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_log_mask(int token);
+int diag_send_dci_log_mask(int token);
+void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
+	void *ext_hdr);
+int diag_dci_clear_log_mask(int client_id);
+int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
+			    uint16_t log_code);
+/* DCI event streaming functions */
+void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask, int token);
+void diag_dci_invalidate_cumulative_event_mask(int token);
+int diag_send_dci_event_mask(int token);
+void extract_dci_events(unsigned char *buf, int len, int data_source,
+			int token, void *ext_hdr);
+/* DCI extended header handling functions */
+void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
+		int token);
+int diag_dci_clear_event_mask(int client_id);
+int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
+			      uint16_t event_id);
+void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
+			     uint8_t peripheral, uint8_t proc);
+uint8_t diag_dci_get_cumulative_real_time(int token);
+int diag_dci_set_real_time(struct diag_dci_client_tbl *entry,
+			   uint8_t real_time);
+int diag_dci_copy_health_stats(struct diag_dci_health_stats_proc *stats_proc);
+int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len);
+void dci_drain_data(unsigned long data);
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+int diag_send_dci_log_mask_remote(int token);
+int diag_send_dci_event_mask_remote(int token);
+unsigned char *dci_get_buffer_from_bridge(int token);
+int diag_dci_write_bridge(int token, unsigned char *buf, int len);
+int diag_dci_write_done_bridge(int index, unsigned char *buf, int len);
+int diag_dci_send_handshake_pkt(int index);
+#endif
+
+#endif
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
new file mode 100644
index 0000000..89fba64
--- /dev/null
+++ b/drivers/char/diag/diag_debugfs.c
@@ -0,0 +1,1074 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#include "diagfwd_bridge.h"
+#endif
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+#include "diagfwd_hsic.h"
+#include "diagfwd_smux.h"
+#endif
+#ifdef CONFIG_MSM_MHI
+#include "diagfwd_mhi.h"
+#endif
+#include "diagmem.h"
+#include "diag_dci.h"
+#include "diag_usb.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_socket.h"
+#include "diagfwd_glink.h"
+#include "diag_debugfs.h"
+#include "diag_ipc_logging.h"
+
+#define DEBUG_BUF_SIZE	4096
+static struct dentry *diag_dbgfs_dent;
+static int diag_dbgfs_table_index;
+static int diag_dbgfs_mempool_index;
+static int diag_dbgfs_usbinfo_index;
+static int diag_dbgfs_socketinfo_index;
+static int diag_dbgfs_glinkinfo_index;
+static int diag_dbgfs_hsicinfo_index;
+static int diag_dbgfs_mhiinfo_index;
+static int diag_dbgfs_bridgeinfo_index;
+static int diag_dbgfs_finished;
+static int diag_dbgfs_dci_data_index;
+static int diag_dbgfs_dci_finished;
+
+/*
+ * debugfs read handler: dumps global diag driver state, the per-peripheral
+ * feature masks, USB connection state and per-proc real-time mode into a
+ * temporary buffer and copies it to userspace.
+ */
+static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	char *buf;
+	int ret, i;
+	unsigned int buf_size;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	/* kzalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	ret = scnprintf(buf, buf_size,
+		"CPU Tools ID: %d\n"
+		"Check Polling Response: %d\n"
+		"Polling Registered: %d\n"
+		"Uses Device Tree: %d\n"
+		"Apps Supports Separate CMDRSP: %d\n"
+		"Apps Supports HDLC Encoding: %d\n"
+		"Apps Supports Sockets: %d\n"
+		"Logging Mode: %d\n"
+		"RSP Buffer is Busy: %d\n"
+		"HDLC Disabled: %d\n"
+		"Time Sync Enabled: %d\n"
+		"MD session mode: %d\n"
+		"MD session mask: %d\n"
+		"Uses Time API: %d\n",
+		chk_config_get_id(),
+		chk_polling_response(),
+		driver->polling_reg_flag,
+		driver->use_device_tree,
+		driver->supports_separate_cmdrsp,
+		driver->supports_apps_hdlc_encoding,
+		driver->supports_sockets,
+		driver->logging_mode,
+		driver->rsp_buf_busy,
+		driver->hdlc_disabled,
+		driver->time_sync_enabled,
+		driver->md_session_mode,
+		driver->md_session_mask,
+		driver->uses_time_api);
+
+	/* Feature flag legend: uppercase letter = flag set, lowercase = clear */
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		ret += scnprintf(buf+ret, buf_size-ret,
+			"p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c|\n",
+			PERIPHERAL_STRING(i),
+			driver->feature[i].feature_mask[0],
+			driver->feature[i].feature_mask[1],
+			driver->feature[i].rcvd_feature_mask ? 'F':'f',
+			driver->feature[i].separate_cmd_rsp ? 'C':'c',
+			driver->feature[i].encode_hdlc ? 'H':'h',
+			driver->feature[i].peripheral_buffering ? 'B':'b',
+			driver->feature[i].mask_centralization ? 'M':'m',
+			driver->feature[i].stm_support ? 'Q':'q',
+			driver->feature[i].sockets_enabled ? 'S':'s',
+			driver->feature[i].sent_feature_mask ? 'T':'t');
+	}
+
+#ifdef CONFIG_DIAG_OVER_USB
+	ret += scnprintf(buf+ret, buf_size-ret,
+		"USB Connected: %d\n",
+		driver->usb_connected);
+#endif
+
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		ret += scnprintf(buf+ret, buf_size-ret,
+				 "Real Time Mode: %d: %d\n", i,
+				 driver->real_time_mode[i]);
+	}
+
+	/* simple_read_from_buffer advances *ppos for us */
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read handler for DCI statistics: dumps client counts, vote
+ * state, wakeup-source counters and the dci_traffic[] debug log.
+ * A dump spans two reads: the first returns data, the second returns 0
+ * (EOF) via diag_dbgfs_dci_finished and re-arms for the next dump.
+ */
+static ssize_t diag_dbgfs_read_dcistats(struct file *file,
+				char __user *ubuf, size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	unsigned int bytes_remaining, bytes_written = 0;
+	unsigned int bytes_in_buf = 0, i = 0;
+	struct diag_dci_data_info *temp_data = dci_traffic;
+	unsigned int buf_size;
+
+	buf_size = (count > DEBUG_BUF_SIZE) ? DEBUG_BUF_SIZE : count;
+
+	/* Previous call produced data; signal EOF now and reset the flag */
+	if (diag_dbgfs_dci_finished) {
+		diag_dbgfs_dci_finished = 0;
+		return 0;
+	}
+
+	buf = kcalloc(buf_size, sizeof(char), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf))
+		return -ENOMEM;
+
+	/* kcalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+
+	/* The summary header is emitted only at the start of a dump */
+	if (diag_dbgfs_dci_data_index == 0) {
+		bytes_written =
+			scnprintf(buf, buf_size,
+			"number of clients: %d\n"
+			"dci proc active: %d\n"
+			"dci real time vote: %d\n",
+			driver->num_dci_client,
+			(driver->proc_active_mask & DIAG_PROC_DCI) ? 1 : 0,
+			(driver->proc_rt_vote_mask[DIAG_LOCAL_PROC] &
+							DIAG_PROC_DCI) ? 1 : 0);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+#ifdef CONFIG_DIAG_OVER_USB
+		bytes_written = scnprintf(buf+bytes_in_buf, bytes_remaining,
+			"usb_connected: %d\n",
+			driver->usb_connected);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+#endif
+		bytes_written = scnprintf(buf+bytes_in_buf,
+					  bytes_remaining,
+					  "dci power: active, relax: %lu, %lu\n",
+					  driver->diag_dev->power.wakeup->
+						active_count,
+					  driver->diag_dev->
+						power.wakeup->relax_count);
+		bytes_in_buf += bytes_written;
+		bytes_remaining -= bytes_written;
+
+	}
+	/* Resume the traffic log dump from where the last read stopped */
+	temp_data += diag_dbgfs_dci_data_index;
+	for (i = diag_dbgfs_dci_data_index; i < DIAG_DCI_DEBUG_CNT; i++) {
+		/* iteration == 0 marks an unused slot in the circular log */
+		if (temp_data->iteration != 0) {
+			bytes_written = scnprintf(
+				buf + bytes_in_buf, bytes_remaining,
+				"i %-5ld\t"
+				"s %-5d\t"
+				"p %-5d\t"
+				"r %-5d\t"
+				"c %-5d\t"
+				"t %-15s\n",
+				temp_data->iteration,
+				temp_data->data_size,
+				temp_data->peripheral,
+				temp_data->proc,
+				temp_data->ch_type,
+				temp_data->time_stamp);
+			bytes_in_buf += bytes_written;
+			bytes_remaining -= bytes_written;
+			/* Check if there is room for another entry */
+			if (bytes_remaining < bytes_written)
+				break;
+		}
+		temp_data++;
+	}
+
+	diag_dbgfs_dci_data_index = (i >= DIAG_DCI_DEBUG_CNT) ? 0 : i + 1;
+	bytes_written = simple_read_from_buffer(ubuf, count, ppos, buf,
+								bytes_in_buf);
+	kfree(buf);
+	/* NOTE(review): set even when the loop broke early for lack of
+	 * space; remaining entries only surface on the next dump cycle.
+	 */
+	diag_dbgfs_dci_finished = 1;
+	return bytes_written;
+}
+
+/*
+ * debugfs read handler: dumps DCI/memory-device wakeup-source reference
+ * and copy counters plus the current logging mode.
+ */
+static ssize_t diag_dbgfs_read_power(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *ppos)
+{
+	char *buf;
+	int ret;
+	unsigned int buf_size;
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* kzalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	ret = scnprintf(buf, buf_size,
+		"DCI reference count: %d\n"
+		"DCI copy count: %d\n"
+		"DCI Client Count: %d\n\n"
+		"Memory Device reference count: %d\n"
+		"Memory Device copy count: %d\n"
+		"Logging mode: %d\n\n"
+		"Wakeup source active count: %lu\n"
+		"Wakeup source relax count: %lu\n\n",
+		driver->dci_ws.ref_count,
+		driver->dci_ws.copy_count,
+		driver->num_dci_client,
+		driver->md_ws.ref_count,
+		driver->md_ws.copy_count,
+		driver->logging_mode,
+		driver->diag_dev->power.wakeup->active_count,
+		driver->diag_dev->power.wakeup->relax_count);
+
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read handler for the command registration table. The table may
+ * not fit in one buffer, so diag_dbgfs_table_index remembers how many
+ * entries have been emitted; subsequent reads continue from there and a
+ * final read returns 0 and resets the index.
+ */
+static ssize_t diag_dbgfs_read_table(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *ppos)
+{
+	char *buf;
+	int ret = 0;
+	int i = 0;
+	int is_polling = 0;
+	unsigned int bytes_remaining;
+	unsigned int bytes_in_buffer = 0;
+	unsigned int bytes_written;
+	unsigned int buf_size;
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	/* Whole table already emitted: report EOF and re-arm */
+	if (diag_dbgfs_table_index == driver->cmd_reg_count) {
+		diag_dbgfs_table_index = 0;
+		return 0;
+	}
+
+	buf_size = (count > DEBUG_BUF_SIZE) ? DEBUG_BUF_SIZE : count;
+
+	buf = kcalloc(buf_size, sizeof(char), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+	/* kcalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+
+	/* Legend line only on the first chunk */
+	if (diag_dbgfs_table_index == 0) {
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+					  "Client ids: Modem: %d, LPASS: %d, WCNSS: %d, SLPI: %d, APPS: %d\n",
+					  PERIPHERAL_MODEM, PERIPHERAL_LPASS,
+					  PERIPHERAL_WCNSS, PERIPHERAL_SENSORS,
+					  APPS_DATA);
+		bytes_in_buffer += bytes_written;
+		bytes_remaining -= bytes_written;
+	}
+
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		/* Skip entries already emitted on a previous read */
+		if (i < diag_dbgfs_table_index) {
+			i++;
+			continue;
+		}
+
+		is_polling = diag_cmd_chk_polling(&item->entry);
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+					  "i: %3d, cmd_code: %4x, subsys_id: %4x, cmd_code_lo: %4x, cmd_code_hi: %4x, proc: %d, process_id: %5d %s\n",
+					  i++,
+					  item->entry.cmd_code,
+					  item->entry.subsys_id,
+					  item->entry.cmd_code_lo,
+					  item->entry.cmd_code_hi,
+					  item->proc,
+					  item->pid,
+					  (is_polling == DIAG_CMD_POLLING) ?
+					  "<-- Polling Cmd" : "");
+
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	/* Remember where to resume on the next read */
+	diag_dbgfs_table_index = i;
+
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read handler: dumps a table of all diag memory pools (name,
+ * handle, item count/size). Paginated via diag_dbgfs_mempool_index.
+ */
+static ssize_t diag_dbgfs_read_mempool(struct file *file, char __user *ubuf,
+						size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (diag_dbgfs_mempool_index >= NUM_MEMORY_POOLS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_mempool_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* kzalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	/* Column headers (re-emitted on every chunk) */
+	bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"%-24s\t"
+			"%-10s\t"
+			"%-5s\t"
+			"%-5s\t"
+			"%-5s\n",
+			"POOL", "HANDLE", "COUNT", "SIZE", "ITEMSIZE");
+	bytes_in_buffer += bytes_written;
+	bytes_remaining = buf_size - bytes_in_buffer;
+
+	for (i = diag_dbgfs_mempool_index; i < NUM_MEMORY_POOLS; i++) {
+		mempool = &diag_mempools[i];
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"%-24s\t"
+			"%-10p\t"
+			"%-5d\t"
+			"%-5d\t"
+			"%-5d\n",
+			mempool->name,
+			mempool->pool,
+			mempool->count,
+			mempool->poolsize,
+			mempool->itemsize);
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	/* Remember where to resume on the next read */
+	diag_dbgfs_mempool_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read handler: dumps per-USB-channel state (connection, diag
+ * state, counters, pending work). Paginated via diag_dbgfs_usbinfo_index;
+ * disabled channels are skipped.
+ */
+static ssize_t diag_dbgfs_read_usbinfo(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_usb_info *usb_info = NULL;
+
+	if (diag_dbgfs_usbinfo_index >= NUM_DIAG_USB_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_usbinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* kzalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_usbinfo_index; i < NUM_DIAG_USB_DEV; i++) {
+		usb_info = &diag_usb[i];
+		if (!usb_info->enabled)
+			continue;
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"hdl: %pK\n"
+			"connected: %d\n"
+			"diag state: %d\n"
+			"enabled: %d\n"
+			"mempool: %s\n"
+			"read pending: %d\n"
+			"read count: %lu\n"
+			"write count: %lu\n"
+			"read work pending: %d\n"
+			"read done work pending: %d\n"
+			"connect work pending: %d\n"
+			"disconnect work pending: %d\n"
+			"max size supported: %d\n\n",
+			usb_info->id,
+			usb_info->name,
+			usb_info->hdl,
+			atomic_read(&usb_info->connected),
+			atomic_read(&usb_info->diag_state),
+			usb_info->enabled,
+			DIAG_MEMPOOL_GET_NAME(usb_info->mempool),
+			atomic_read(&usb_info->read_pending),
+			usb_info->read_cnt,
+			usb_info->write_cnt,
+			work_pending(&usb_info->read_work),
+			work_pending(&usb_info->read_done_work),
+			work_pending(&usb_info->connect_work),
+			work_pending(&usb_info->disconnect_work),
+			usb_info->max_size);
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	/* Remember where to resume on the next read */
+	diag_dbgfs_usbinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read handler: dumps channel and forwarding-context state for
+ * every (type, peripheral) diag socket.
+ *
+ * Fix: the default switch case used to return -EINVAL without freeing
+ * the allocated buffer, leaking DEBUG_BUF_SIZE bytes per bad call.
+ */
+static ssize_t diag_dbgfs_read_socketinfo(struct file *file, char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	int j = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_socket_info *info = NULL;
+	struct diagfwd_info *fwd_ctxt = NULL;
+
+	if (diag_dbgfs_socketinfo_index >= NUM_PERIPHERALS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_socketinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* kzalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = 0; i < NUM_TYPES; i++) {
+		for (j = 0; j < NUM_PERIPHERALS; j++) {
+			switch (i) {
+			case TYPE_DATA:
+				info = &socket_data[j];
+				break;
+			case TYPE_CNTL:
+				info = &socket_cntl[j];
+				break;
+			case TYPE_DCI:
+				info = &socket_dci[j];
+				break;
+			case TYPE_CMD:
+				info = &socket_cmd[j];
+				break;
+			case TYPE_DCI_CMD:
+				info = &socket_dci_cmd[j];
+				break;
+			default:
+				/* Free the buffer before bailing out */
+				kfree(buf);
+				return -EINVAL;
+			}
+
+			fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+			bytes_written = scnprintf(buf+bytes_in_buffer,
+				bytes_remaining,
+				"name\t\t:\t%s\n"
+				"hdl\t\t:\t%pK\n"
+				"inited\t\t:\t%d\n"
+				"opened\t\t:\t%d\n"
+				"diag_state\t:\t%d\n"
+				"buf_1 busy\t:\t%d\n"
+				"buf_2 busy\t:\t%d\n"
+				"flow ctrl count\t:\t%d\n"
+				"data_ready\t:\t%d\n"
+				"init pending\t:\t%d\n"
+				"read pending\t:\t%d\n"
+				"bytes read\t:\t%lu\n"
+				"bytes written\t:\t%lu\n"
+				"fwd inited\t:\t%d\n"
+				"fwd opened\t:\t%d\n"
+				"fwd ch_open\t:\t%d\n\n",
+				info->name,
+				info->hdl,
+				info->inited,
+				atomic_read(&info->opened),
+				atomic_read(&info->diag_state),
+				(fwd_ctxt && fwd_ctxt->buf_1) ?
+				atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+				(fwd_ctxt && fwd_ctxt->buf_2) ?
+				atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+				atomic_read(&info->flow_cnt),
+				info->data_ready,
+				work_pending(&info->init_work),
+				work_pending(&info->read_work),
+				(fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->inited : -1,
+				(fwd_ctxt) ?
+				atomic_read(&fwd_ctxt->opened) : -1,
+				(fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+			bytes_in_buffer += bytes_written;
+
+			/* Check if there is room to add another table entry */
+			bytes_remaining = buf_size - bytes_in_buffer;
+
+			if (bytes_remaining < bytes_written)
+				break;
+		}
+	}
+	diag_dbgfs_socketinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs read handler: dumps channel and forwarding-context state for
+ * every (type, peripheral) glink channel.
+ *
+ * Fixes: (1) the reset path used to clear diag_dbgfs_socketinfo_index
+ * instead of diag_dbgfs_glinkinfo_index (copy-paste from the socket
+ * handler), so the glink index never reset; (2) the default switch case
+ * returned -EINVAL without freeing the allocated buffer.
+ */
+static ssize_t diag_dbgfs_read_glinkinfo(struct file *file, char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	int j = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_glink_info *info = NULL;
+	struct diagfwd_info *fwd_ctxt = NULL;
+
+	if (diag_dbgfs_glinkinfo_index >= NUM_PERIPHERALS) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_glinkinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* kzalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = 0; i < NUM_TYPES; i++) {
+		for (j = 0; j < NUM_PERIPHERALS; j++) {
+			switch (i) {
+			case TYPE_DATA:
+				info = &glink_data[j];
+				break;
+			case TYPE_CNTL:
+				info = &glink_cntl[j];
+				break;
+			case TYPE_DCI:
+				info = &glink_dci[j];
+				break;
+			case TYPE_CMD:
+				info = &glink_cmd[j];
+				break;
+			case TYPE_DCI_CMD:
+				info = &glink_dci_cmd[j];
+				break;
+			default:
+				/* Free the buffer before bailing out */
+				kfree(buf);
+				return -EINVAL;
+			}
+
+			fwd_ctxt = (struct diagfwd_info *)(info->fwd_ctxt);
+
+			bytes_written = scnprintf(buf+bytes_in_buffer,
+				bytes_remaining,
+				"name\t\t:\t%s\n"
+				"hdl\t\t:\t%pK\n"
+				"inited\t\t:\t%d\n"
+				"opened\t\t:\t%d\n"
+				"diag_state\t:\t%d\n"
+				"buf_1 busy\t:\t%d\n"
+				"buf_2 busy\t:\t%d\n"
+				"tx_intent_ready\t:\t%d\n"
+				"open pending\t:\t%d\n"
+				"close pending\t:\t%d\n"
+				"read pending\t:\t%d\n"
+				"bytes read\t:\t%lu\n"
+				"bytes written\t:\t%lu\n"
+				"fwd inited\t:\t%d\n"
+				"fwd opened\t:\t%d\n"
+				"fwd ch_open\t:\t%d\n\n",
+				info->name,
+				info->hdl,
+				info->inited,
+				atomic_read(&info->opened),
+				atomic_read(&info->diag_state),
+				(fwd_ctxt && fwd_ctxt->buf_1) ?
+				atomic_read(&fwd_ctxt->buf_1->in_busy) : -1,
+				(fwd_ctxt && fwd_ctxt->buf_2) ?
+				atomic_read(&fwd_ctxt->buf_2->in_busy) : -1,
+				atomic_read(&info->tx_intent_ready),
+				work_pending(&info->open_work),
+				work_pending(&info->close_work),
+				work_pending(&info->read_work),
+				(fwd_ctxt) ? fwd_ctxt->read_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->write_bytes : 0,
+				(fwd_ctxt) ? fwd_ctxt->inited : -1,
+				(fwd_ctxt) ?
+				atomic_read(&fwd_ctxt->opened) : -1,
+				(fwd_ctxt) ? fwd_ctxt->ch_open : -1);
+			bytes_in_buffer += bytes_written;
+
+			/* Check if there is room to add another table entry */
+			bytes_remaining = buf_size - bytes_in_buffer;
+
+			if (bytes_remaining < bytes_written)
+				break;
+		}
+	}
+	diag_dbgfs_glinkinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * debugfs write handler: parses a decimal number from userspace (with an
+ * optional trailing newline) and stores it in diag_debug_mask.
+ */
+static ssize_t diag_dbgfs_write_debug(struct file *fp, const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	const int size = 10;
+	unsigned char cmd[size];
+	long value = 0;
+	int len = 0;
+
+	if (count < 1)
+		return -EINVAL;
+
+	/* Clamp to buffer size, leaving room for the NUL terminator */
+	len = (count < (size - 1)) ? count : size - 1;
+	if (copy_from_user(cmd, buf, len))
+		return -EFAULT;
+
+	cmd[len] = 0;
+	/* Strip a trailing newline from echo-style writes */
+	if (cmd[len-1] == '\n') {
+		cmd[len-1] = 0;
+		len--;
+	}
+
+	/* NOTE(review): kstrtol takes const char *; cmd is unsigned char[]
+	 * -- confirm no -Wpointer-sign warning is intended here.
+	 */
+	if (kstrtol(cmd, 10, &value))
+		return -EINVAL;
+
+	if (value < 0)
+		return -EINVAL;
+
+	diag_debug_mask = (uint16_t)value;
+	return count;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+/*
+ * debugfs read handler: dumps per-HSIC-channel state. Paginated via
+ * diag_dbgfs_hsicinfo_index; disabled channels are skipped.
+ *
+ * Fix: the reset guard compared the index against NUM_DIAG_USB_DEV
+ * (copy-paste from the usbinfo handler) while the loop below iterates
+ * up to NUM_HSIC_DEV; the guard must use the same bound as the loop.
+ */
+static ssize_t diag_dbgfs_read_hsicinfo(struct file *file, char __user *ubuf,
+					size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_hsic_info *hsic_info = NULL;
+
+	if (diag_dbgfs_hsicinfo_index >= NUM_HSIC_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_hsicinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* kzalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_hsicinfo_index; i < NUM_HSIC_DEV; i++) {
+		hsic_info = &diag_hsic[i];
+		if (!hsic_info->enabled)
+			continue;
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"bridge index: %s\n"
+			"opened: %d\n"
+			"enabled: %d\n"
+			"suspended: %d\n"
+			"mempool: %s\n"
+			"read work pending: %d\n"
+			"open work pending: %d\n"
+			"close work pending: %d\n\n",
+			hsic_info->id,
+			hsic_info->name,
+			DIAG_BRIDGE_GET_NAME(hsic_info->dev_id),
+			hsic_info->opened,
+			hsic_info->enabled,
+			hsic_info->suspended,
+			DIAG_MEMPOOL_GET_NAME(hsic_info->mempool),
+			work_pending(&hsic_info->read_work),
+			work_pending(&hsic_info->open_work),
+			work_pending(&hsic_info->close_work));
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	/* Remember where to resume on the next read */
+	diag_dbgfs_hsicinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/* Read-only debugfs file operations for the HSIC info entry */
+const struct file_operations diag_dbgfs_hsicinfo_ops = {
+	.read = diag_dbgfs_read_hsicinfo,
+};
+#endif
+#ifdef CONFIG_MSM_MHI
+/*
+ * debugfs read handler: dumps per-MHI-channel state (read/write channel
+ * handles and pending work). Paginated via diag_dbgfs_mhiinfo_index.
+ */
+static ssize_t diag_dbgfs_read_mhiinfo(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diag_mhi_info *mhi_info = NULL;
+
+	if (diag_dbgfs_mhiinfo_index >= NUM_MHI_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_mhiinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* kzalloc may round the allocation up; use the real usable size */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_mhiinfo_index; i < NUM_MHI_DEV; i++) {
+		mhi_info = &diag_mhi[i];
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"bridge index: %s\n"
+			"mempool: %s\n"
+			"read ch opened: %d\n"
+			"read ch hdl: %pK\n"
+			"write ch opened: %d\n"
+			"write ch hdl: %pK\n"
+			"read work pending: %d\n"
+			"read done work pending: %d\n"
+			"open work pending: %d\n"
+			"close work pending: %d\n\n",
+			mhi_info->id,
+			mhi_info->name,
+			DIAG_BRIDGE_GET_NAME(mhi_info->dev_id),
+			DIAG_MEMPOOL_GET_NAME(mhi_info->mempool),
+			atomic_read(&mhi_info->read_ch.opened),
+			mhi_info->read_ch.hdl,
+			atomic_read(&mhi_info->write_ch.opened),
+			mhi_info->write_ch.hdl,
+			work_pending(&mhi_info->read_work),
+			work_pending(&mhi_info->read_done_work),
+			work_pending(&mhi_info->open_work),
+			work_pending(&mhi_info->close_work));
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	/* Remember where to resume on the next read */
+	diag_dbgfs_mhiinfo_index = i+1;
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+
+/* debugfs "mhiinfo" file: read-only dump of diag_mhi[] state */
+const struct file_operations diag_dbgfs_mhiinfo_ops = {
+	.read = diag_dbgfs_read_mhiinfo,
+};
+
+#endif
+/*
+ * diag_dbgfs_read_bridge - debugfs read handler dumping every initialized
+ * entry of bridge_info[] (id, type, DCI read state, ops pointer).
+ *
+ * Resumes across reads via diag_dbgfs_bridgeinfo_index, like the other
+ * dump handlers in this file.  Both the reset check and the loop below
+ * must use the bound of bridge_info[], NUM_REMOTE_DEV; the original code
+ * checked NUM_DIAG_USB_DEV here, which breaks the reset/resume logic
+ * whenever the two constants differ.
+ */
+static ssize_t diag_dbgfs_read_bridge(struct file *file, char __user *ubuf,
+				      size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	int ret = 0;
+	int i = 0;
+	unsigned int buf_size;
+	unsigned int bytes_remaining = 0;
+	unsigned int bytes_written = 0;
+	unsigned int bytes_in_buffer = 0;
+	struct diagfwd_bridge_info *info = NULL;
+
+	if (diag_dbgfs_bridgeinfo_index >= NUM_REMOTE_DEV) {
+		/* Done. Reset to prepare for future requests */
+		diag_dbgfs_bridgeinfo_index = 0;
+		return 0;
+	}
+
+	buf = kzalloc(sizeof(char) * DEBUG_BUF_SIZE, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(buf)) {
+		pr_err("diag: %s, Error allocating memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* ksize() may exceed DEBUG_BUF_SIZE; use the full usable allocation */
+	buf_size = ksize(buf);
+	bytes_remaining = buf_size;
+	for (i = diag_dbgfs_bridgeinfo_index; i < NUM_REMOTE_DEV; i++) {
+		info = &bridge_info[i];
+		if (!info->inited)
+			continue;
+		bytes_written = scnprintf(buf+bytes_in_buffer, bytes_remaining,
+			"id: %d\n"
+			"name: %s\n"
+			"type: %d\n"
+			"inited: %d\n"
+			"ctxt: %d\n"
+			"dev_ops: %pK\n"
+			"dci_read_buf: %pK\n"
+			"dci_read_ptr: %pK\n"
+			"dci_read_len: %d\n\n",
+			info->id,
+			info->name,
+			info->type,
+			info->inited,
+			info->ctxt,
+			info->dev_ops,
+			info->dci_read_buf,
+			info->dci_read_ptr,
+			info->dci_read_len);
+		bytes_in_buffer += bytes_written;
+
+		/* Check if there is room to add another table entry */
+		bytes_remaining = buf_size - bytes_in_buffer;
+
+		if (bytes_remaining < bytes_written)
+			break;
+	}
+	diag_dbgfs_bridgeinfo_index = i+1;
+	/* Always copy from offset 0: this handler does its own chunking */
+	*ppos = 0;
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, bytes_in_buffer);
+
+	kfree(buf);
+	return ret;
+}
+
+/* debugfs "bridge" file: read-only dump of bridge_info[] state */
+const struct file_operations diag_dbgfs_bridge_ops = {
+	.read = diag_dbgfs_read_bridge,
+};
+
+#endif
+
+/*
+ * Read-only debugfs file hooks for the diag driver; each file exposes one
+ * of the dump routines defined above.  "debug" is the only writable
+ * control (it sets the debug mask via diag_dbgfs_write_debug).
+ */
+const struct file_operations diag_dbgfs_status_ops = {
+	.read = diag_dbgfs_read_status,
+};
+
+const struct file_operations diag_dbgfs_socketinfo_ops = {
+	.read = diag_dbgfs_read_socketinfo,
+};
+
+const struct file_operations diag_dbgfs_glinkinfo_ops = {
+	.read = diag_dbgfs_read_glinkinfo,
+};
+
+const struct file_operations diag_dbgfs_table_ops = {
+	.read = diag_dbgfs_read_table,
+};
+
+const struct file_operations diag_dbgfs_mempool_ops = {
+	.read = diag_dbgfs_read_mempool,
+};
+
+const struct file_operations diag_dbgfs_usbinfo_ops = {
+	.read = diag_dbgfs_read_usbinfo,
+};
+
+const struct file_operations diag_dbgfs_dcistats_ops = {
+	.read = diag_dbgfs_read_dcistats,
+};
+
+const struct file_operations diag_dbgfs_power_ops = {
+	.read = diag_dbgfs_read_power,
+};
+
+/* write-only control: adjusts the diag debug mask */
+const struct file_operations diag_dbgfs_debug_ops = {
+	.write = diag_dbgfs_write_debug
+};
+
+/*
+ * diag_debugfs_init - create the "diag" debugfs directory, populate its
+ * files and allocate the DCI traffic debug buffer.
+ *
+ * Returns 0 on success, -ENOMEM on failure (the partially created tree is
+ * removed).  Note: debugfs_create_dir() returns NULL on in-kernel failure
+ * and an ERR_PTR when debugfs is compiled out, so both cases must be
+ * checked with IS_ERR_OR_NULL(); a bare IS_ERR() misses the NULL case and
+ * would later create all files at the debugfs root.
+ */
+int diag_debugfs_init(void)
+{
+	struct dentry *entry = NULL;
+
+	diag_dbgfs_dent = debugfs_create_dir("diag", 0);
+	if (IS_ERR_OR_NULL(diag_dbgfs_dent))
+		return -ENOMEM;
+
+	entry = debugfs_create_file("status", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_status_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("socketinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_socketinfo_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("glinkinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_glinkinfo_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("table", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_table_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("mempool", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_mempool_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("usbinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_usbinfo_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("dci_stats", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_dcistats_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("power", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_power_ops);
+	if (!entry)
+		goto err;
+
+	entry = debugfs_create_file("debug", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_debug_ops);
+	if (!entry)
+		goto err;
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	entry = debugfs_create_file("bridge", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_bridge_ops);
+	if (!entry)
+		goto err;
+#ifdef CONFIG_USB_QCOM_DIAG_BRIDGE
+	entry = debugfs_create_file("hsicinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_hsicinfo_ops);
+	if (!entry)
+		goto err;
+#endif
+#ifdef CONFIG_MSM_MHI
+	entry = debugfs_create_file("mhiinfo", 0444, diag_dbgfs_dent, 0,
+				    &diag_dbgfs_mhiinfo_ops);
+	if (!entry)
+		goto err;
+#endif
+#endif
+	/* Reset the per-file resume cursors used by the dump handlers */
+	diag_dbgfs_table_index = 0;
+	diag_dbgfs_mempool_index = 0;
+	diag_dbgfs_usbinfo_index = 0;
+	diag_dbgfs_socketinfo_index = 0;
+	diag_dbgfs_hsicinfo_index = 0;
+	diag_dbgfs_bridgeinfo_index = 0;
+	diag_dbgfs_mhiinfo_index = 0;
+	diag_dbgfs_finished = 0;
+	diag_dbgfs_dci_data_index = 0;
+	diag_dbgfs_dci_finished = 0;
+
+	/* DCI related structures */
+	dci_traffic = kzalloc(sizeof(struct diag_dci_data_info) *
+				DIAG_DCI_DEBUG_CNT, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(dci_traffic))
+		pr_warn("diag: could not allocate memory for dci debug info\n");
+
+	mutex_init(&dci_stat_mutex);
+	return 0;
+err:
+	kfree(dci_traffic);
+	debugfs_remove_recursive(diag_dbgfs_dent);
+	return -ENOMEM;
+}
+
+/*
+ * diag_debugfs_cleanup - remove the debugfs tree and free DCI debug state.
+ *
+ * Freed pointers are cleared so a repeated cleanup cannot double-free and
+ * a later re-init starts from a clean slate (the original left dci_traffic
+ * dangling after kfree).
+ */
+void diag_debugfs_cleanup(void)
+{
+	debugfs_remove_recursive(diag_dbgfs_dent);
+	diag_dbgfs_dent = NULL;
+	kfree(dci_traffic);
+	dci_traffic = NULL;
+	mutex_destroy(&dci_stat_mutex);
+}
+#else
+int diag_debugfs_init(void) { return 0; }
+void diag_debugfs_cleanup(void) { }
+#endif
diff --git a/drivers/char/diag/diag_debugfs.h b/drivers/char/diag/diag_debugfs.h
new file mode 100644
index 0000000..e8db56e
--- /dev/null
+++ b/drivers/char/diag/diag_debugfs.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_DEBUGFS_H
+#define DIAG_DEBUGFS_H
+
+/* Create the "diag" debugfs directory and its files.
+ * Returns 0 on success, -ENOMEM on failure; stubbed to a no-op when
+ * debugfs support is compiled out.
+ */
+int diag_debugfs_init(void);
+/* Remove the debugfs tree and release associated debug state. */
+void diag_debugfs_cleanup(void);
+
+#endif
diff --git a/drivers/char/diag/diag_ipc_logging.h b/drivers/char/diag/diag_ipc_logging.h
new file mode 100644
index 0000000..b9958a4
--- /dev/null
+++ b/drivers/char/diag/diag_ipc_logging.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGIPCLOG_H
+#define DIAGIPCLOG_H
+
+#include <linux/ipc_logging.h>
+
+/* Number of IPC logging pages allocated for the diag log context */
+#define DIAG_IPC_LOG_PAGES	50
+
+/*
+ * Per-subsystem debug bits; a DIAG_LOG() call is emitted only when its
+ * log_lvl bit is set in the runtime diag_debug_mask.
+ */
+#define DIAG_DEBUG_USERSPACE	0x0001
+#define DIAG_DEBUG_MUX		0x0002
+#define DIAG_DEBUG_DCI		0x0004
+#define DIAG_DEBUG_PERIPHERALS	0x0008
+#define DIAG_DEBUG_MASKS	0x0010
+#define DIAG_DEBUG_POWER	0x0020
+#define DIAG_DEBUG_BRIDGE	0x0040
+
+/* Unconditionally defined here, so the logging path always compiles in */
+#define DIAG_DEBUG
+
+#ifdef DIAG_DEBUG
+extern uint16_t diag_debug_mask;
+extern void *diag_ipc_log;
+
+/* Log to the diag IPC context when the log context exists and the
+ * subsystem bit is enabled; prefixes the message with the caller's name.
+ */
+#define DIAG_LOG(log_lvl, msg, ...)					\
+	do {								\
+		if (diag_ipc_log && (log_lvl & diag_debug_mask)) {	\
+			ipc_log_string(diag_ipc_log,			\
+				"[%s] " msg, __func__, ##__VA_ARGS__);	\
+		}							\
+	} while (0)
+#else
+#define DIAG_LOG(log_lvl, msg, ...)
+#endif
+
+#endif
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
new file mode 100644
index 0000000..b831d9e
--- /dev/null
+++ b/drivers/char/diag/diag_masks.c
@@ -0,0 +1,2013 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
+
+/* Sentinels meaning "apply to every equipment ID / every SSID range" */
+#define ALL_EQUIP_ID		100
+#define ALL_SSID		-1
+
+/* Sets feature bit x in the caller's local feature_bytes[] array */
+#define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
+
+/*
+ * True when an update applies to peripheral x: either no memory-device
+ * session is given (info == NULL) or the session's peripheral mask covers
+ * x.  Relies on a local variable named 'info' in scope at the call site.
+ */
+#define diag_check_update(x)	\
+	(!info || (info && (info->peripheral_mask & MD_PERIPHERAL_MASK(x)))) \
+
+/* Global (non-session) mask state shared by all peripherals */
+struct diag_mask_info msg_mask;
+struct diag_mask_info msg_bt_mask;
+struct diag_mask_info log_mask;
+struct diag_mask_info event_mask;
+
+static const struct diag_ssid_range_t msg_mask_tbl[] = {
+	{ .ssid_first = MSG_SSID_0, .ssid_last = MSG_SSID_0_LAST },
+	{ .ssid_first = MSG_SSID_1, .ssid_last = MSG_SSID_1_LAST },
+	{ .ssid_first = MSG_SSID_2, .ssid_last = MSG_SSID_2_LAST },
+	{ .ssid_first = MSG_SSID_3, .ssid_last = MSG_SSID_3_LAST },
+	{ .ssid_first = MSG_SSID_4, .ssid_last = MSG_SSID_4_LAST },
+	{ .ssid_first = MSG_SSID_5, .ssid_last = MSG_SSID_5_LAST },
+	{ .ssid_first = MSG_SSID_6, .ssid_last = MSG_SSID_6_LAST },
+	{ .ssid_first = MSG_SSID_7, .ssid_last = MSG_SSID_7_LAST },
+	{ .ssid_first = MSG_SSID_8, .ssid_last = MSG_SSID_8_LAST },
+	{ .ssid_first = MSG_SSID_9, .ssid_last = MSG_SSID_9_LAST },
+	{ .ssid_first = MSG_SSID_10, .ssid_last = MSG_SSID_10_LAST },
+	{ .ssid_first = MSG_SSID_11, .ssid_last = MSG_SSID_11_LAST },
+	{ .ssid_first = MSG_SSID_12, .ssid_last = MSG_SSID_12_LAST },
+	{ .ssid_first = MSG_SSID_13, .ssid_last = MSG_SSID_13_LAST },
+	{ .ssid_first = MSG_SSID_14, .ssid_last = MSG_SSID_14_LAST },
+	{ .ssid_first = MSG_SSID_15, .ssid_last = MSG_SSID_15_LAST },
+	{ .ssid_first = MSG_SSID_16, .ssid_last = MSG_SSID_16_LAST },
+	{ .ssid_first = MSG_SSID_17, .ssid_last = MSG_SSID_17_LAST },
+	{ .ssid_first = MSG_SSID_18, .ssid_last = MSG_SSID_18_LAST },
+	{ .ssid_first = MSG_SSID_19, .ssid_last = MSG_SSID_19_LAST },
+	{ .ssid_first = MSG_SSID_20, .ssid_last = MSG_SSID_20_LAST },
+	{ .ssid_first = MSG_SSID_21, .ssid_last = MSG_SSID_21_LAST },
+	{ .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST },
+	{ .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST },
+	{ .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST }
+};
+
+/*
+ * diag_apps_responds - decide whether the apps processor should answer
+ * mask commands itself.
+ *
+ * Apps responds only on apps-only targets.  Once the Modem control channel
+ * is open and its feature mask has been received, the answer depends on
+ * whether the Modem advertises mask centralization; until then apps always
+ * responds.
+ */
+static int diag_apps_responds(void)
+{
+	int modem_ready;
+
+	if (!chk_apps_only())
+		return 0;
+
+	modem_ready = driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
+		      driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open &&
+		      driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask;
+	if (!modem_ready)
+		return 1;
+
+	return driver->feature[PERIPHERAL_MODEM].mask_centralization ? 1 : 0;
+}
+
+/*
+ * diag_send_log_mask_update - send the current log mask(s) for one
+ * equipment ID (or all, with ALL_EQUIP_ID) to a peripheral over the
+ * control channel.
+ *
+ * Picks the session mask when the peripheral belongs to a memory-device
+ * session, otherwise the global log_mask.  For ALL_DISABLED/ALL_ENABLED
+ * states a single zero-sized control packet is sent.
+ *
+ * Fix: after a successful krealloc() the old update buffer is freed, so
+ * the local 'buf' must be refreshed before it is used for the memcpy and
+ * write below (the original kept using the stale pointer: use-after-free).
+ */
+static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
+{
+	int i;
+	int err = 0;
+	int send_once = 0;
+	int header_len = sizeof(struct diag_ctrl_log_mask);
+	uint8_t *buf = NULL;
+	uint8_t *temp = NULL;
+	uint32_t mask_size = 0;
+	struct diag_ctrl_log_mask ctrl_pkt;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_log_mask_t *mask = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	if (driver->md_session_mask != 0 &&
+	    driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral))
+		mask_info = driver->md_session_map[peripheral]->log_mask;
+	else
+		mask_info = &log_mask;
+
+	if (!mask_info)
+		return;
+
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+	buf = mask_info->update_buf;
+
+	switch (mask_info->status) {
+	case DIAG_CTRL_MASK_ALL_DISABLED:
+		ctrl_pkt.equip_id = 0;
+		ctrl_pkt.num_items = 0;
+		ctrl_pkt.log_mask_size = 0;
+		send_once = 1;
+		break;
+	case DIAG_CTRL_MASK_ALL_ENABLED:
+		ctrl_pkt.equip_id = 0;
+		ctrl_pkt.num_items = 0;
+		ctrl_pkt.log_mask_size = 0;
+		send_once = 1;
+		break;
+	case DIAG_CTRL_MASK_VALID:
+		send_once = 0;
+		break;
+	default:
+		pr_debug("diag: In %s, invalid log_mask status\n", __func__);
+		return;
+	}
+
+	mutex_lock(&mask_info->lock);
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		if (equip_id != i && equip_id != ALL_EQUIP_ID)
+			continue;
+
+		mutex_lock(&mask->lock);
+		ctrl_pkt.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
+		ctrl_pkt.stream_id = 1;
+		ctrl_pkt.status = mask_info->status;
+		if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+			mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+			ctrl_pkt.equip_id = i;
+			ctrl_pkt.num_items = mask->num_items_tools;
+			ctrl_pkt.log_mask_size = mask_size;
+		}
+		ctrl_pkt.data_len = LOG_MASK_CTRL_HEADER_LEN + mask_size;
+
+		if (header_len + mask_size > mask_info->update_buf_len) {
+			temp = krealloc(buf, header_len + mask_size,
+					GFP_KERNEL);
+			if (!temp) {
+				pr_err_ratelimited("diag: Unable to realloc log update buffer, new size: %d, equip_id: %d\n",
+				       header_len + mask_size, equip_id);
+				mutex_unlock(&mask->lock);
+				break;
+			}
+			mask_info->update_buf = temp;
+			mask_info->update_buf_len = header_len + mask_size;
+			/* krealloc may have freed the old buffer */
+			buf = temp;
+		}
+
+		memcpy(buf, &ctrl_pkt, header_len);
+		if (mask_size > 0)
+			memcpy(buf + header_len, mask->ptr, mask_size);
+		mutex_unlock(&mask->lock);
+
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			 "sending ctrl pkt to %d, e %d num_items %d size %d\n",
+			 peripheral, i, ctrl_pkt.num_items,
+			 ctrl_pkt.log_mask_size);
+
+		err = diagfwd_write(peripheral, TYPE_CNTL,
+				    buf, header_len + mask_size);
+		if (err && err != -ENODEV)
+			pr_err_ratelimited("diag: Unable to send log masks to peripheral %d, equip_id: %d, err: %d\n",
+			       peripheral, i, err);
+		if (send_once || equip_id != ALL_EQUIP_ID)
+			break;
+
+	}
+	mutex_unlock(&mask_info->lock);
+}
+
+/*
+ * diag_send_event_mask_update - send the current event mask to one
+ * peripheral over the control channel.
+ *
+ * Uses the session event mask when the peripheral belongs to a
+ * memory-device session, otherwise the global event_mask.  For
+ * ALL_DISABLED/ALL_ENABLED only the header is sent.
+ *
+ * Fix: after a successful krealloc() the old update buffer is freed, so
+ * the local 'buf' must be refreshed before the memcpy below (the original
+ * kept copying into the stale pointer: use-after-free).
+ */
+static void diag_send_event_mask_update(uint8_t peripheral)
+{
+	uint8_t *buf = NULL;
+	uint8_t *temp = NULL;
+	struct diag_ctrl_event_mask header;
+	struct diag_mask_info *mask_info = NULL;
+	int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+	int write_len = 0;
+	int err = 0;
+	int temp_len = 0;
+
+	if (num_bytes <= 0 || num_bytes > driver->event_mask_size) {
+		pr_debug("diag: In %s, invalid event mask length %d\n",
+			 __func__, num_bytes);
+		return;
+	}
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	if (driver->md_session_mask != 0 &&
+	    (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)))
+		mask_info = driver->md_session_map[peripheral]->event_mask;
+	else
+		mask_info = &event_mask;
+
+	if (!mask_info)
+		return;
+
+	buf = mask_info->update_buf;
+	mutex_lock(&mask_info->lock);
+	header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
+	header.stream_id = 1;
+	header.status = mask_info->status;
+
+	switch (mask_info->status) {
+	case DIAG_CTRL_MASK_ALL_DISABLED:
+		header.event_config = 0;
+		header.event_mask_size = 0;
+		break;
+	case DIAG_CTRL_MASK_ALL_ENABLED:
+		header.event_config = 1;
+		header.event_mask_size = 0;
+		break;
+	case DIAG_CTRL_MASK_VALID:
+		header.event_config = 1;
+		header.event_mask_size = num_bytes;
+		if (num_bytes + sizeof(header) > mask_info->update_buf_len) {
+			temp_len = num_bytes + sizeof(header);
+			temp = krealloc(buf, temp_len, GFP_KERNEL);
+			if (!temp) {
+				pr_err("diag: Unable to realloc event mask update buffer\n");
+				goto err;
+			} else {
+				mask_info->update_buf = temp;
+				mask_info->update_buf_len = temp_len;
+				/* krealloc may have freed the old buffer */
+				buf = temp;
+			}
+		}
+		memcpy(buf + sizeof(header), mask_info->ptr, num_bytes);
+		write_len += num_bytes;
+		break;
+	default:
+		pr_debug("diag: In %s, invalid status %d\n", __func__,
+			 mask_info->status);
+		goto err;
+	}
+	header.data_len = EVENT_MASK_CTRL_HEADER_LEN + header.event_mask_size;
+	memcpy(buf, &header, sizeof(header));
+	write_len += sizeof(header);
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, buf, write_len);
+	if (err && err != -ENODEV)
+		pr_err_ratelimited("diag: Unable to send event masks to peripheral %d\n",
+		       peripheral);
+err:
+	mutex_unlock(&mask_info->lock);
+}
+
+/*
+ * diag_send_msg_mask_update - send the F3 (msg) mask for one SSID range
+ * (or all ranges, with first == ALL_SSID) to a peripheral over the
+ * control channel.
+ *
+ * Uses the session msg mask when the peripheral belongs to a
+ * memory-device session, otherwise the global msg_mask.
+ *
+ * Fix: after a successful krealloc() the old update buffer is freed, so
+ * the local 'buf' must be refreshed before the memcpy/write at 'proceed'
+ * (the original kept using the stale pointer: use-after-free).
+ */
+static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
+{
+	int i;
+	int err = 0;
+	int header_len = sizeof(struct diag_ctrl_msg_mask);
+	int temp_len = 0;
+	uint8_t *buf = NULL;
+	uint8_t *temp = NULL;
+	uint32_t mask_size = 0;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_ctrl_msg_mask header;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	if (driver->md_session_mask != 0 &&
+	    (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)))
+		mask_info = driver->md_session_map[peripheral]->msg_mask;
+	else
+		mask_info = &msg_mask;
+
+	if (!mask_info)
+		return;
+
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	buf = mask_info->update_buf;
+	mutex_lock(&mask_info->lock);
+	switch (mask_info->status) {
+	case DIAG_CTRL_MASK_ALL_DISABLED:
+		mask_size = 0;
+		break;
+	case DIAG_CTRL_MASK_ALL_ENABLED:
+		mask_size = 1;
+		break;
+	case DIAG_CTRL_MASK_VALID:
+		break;
+	default:
+		pr_debug("diag: In %s, invalid status: %d\n", __func__,
+			 mask_info->status);
+		goto err;
+	}
+
+	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+		if (((first < mask->ssid_first) ||
+		     (last > mask->ssid_last_tools)) && first != ALL_SSID) {
+			continue;
+		}
+
+		mutex_lock(&mask->lock);
+		if (mask_info->status == DIAG_CTRL_MASK_VALID) {
+			mask_size =
+				mask->ssid_last_tools - mask->ssid_first + 1;
+			temp_len = mask_size * sizeof(uint32_t);
+			if (temp_len + header_len <= mask_info->update_buf_len)
+				goto proceed;
+			temp = krealloc(mask_info->update_buf, temp_len,
+					GFP_KERNEL);
+			if (!temp) {
+				pr_err("diag: In %s, unable to realloc msg_mask update buffer\n",
+				       __func__);
+				mask_size = (mask_info->update_buf_len -
+					    header_len) / sizeof(uint32_t);
+			} else {
+				mask_info->update_buf = temp;
+				mask_info->update_buf_len = temp_len;
+				/* krealloc may have freed the old buffer */
+				buf = temp;
+				pr_debug("diag: In %s, successfully reallocated msg_mask update buffer to len: %d\n",
+					 __func__, mask_info->update_buf_len);
+			}
+		} else if (mask_info->status == DIAG_CTRL_MASK_ALL_ENABLED) {
+			mask_size = 1;
+		}
+proceed:
+		header.cmd_type = DIAG_CTRL_MSG_F3_MASK;
+		header.status = mask_info->status;
+		header.stream_id = 1;
+		header.msg_mode = 0;
+		header.ssid_first = mask->ssid_first;
+		header.ssid_last = mask->ssid_last_tools;
+		header.msg_mask_size = mask_size;
+		mask_size *= sizeof(uint32_t);
+		header.data_len = MSG_MASK_CTRL_HEADER_LEN + mask_size;
+		memcpy(buf, &header, header_len);
+		if (mask_size > 0)
+			memcpy(buf + header_len, mask->ptr, mask_size);
+		mutex_unlock(&mask->lock);
+
+		err = diagfwd_write(peripheral, TYPE_CNTL, buf,
+				    header_len + mask_size);
+		if (err && err != -ENODEV)
+			pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d\n",
+			       peripheral);
+
+		if (first != ALL_SSID)
+			break;
+	}
+err:
+	mutex_unlock(&mask_info->lock);
+}
+
+/*
+ * diag_send_time_sync_update - inform one peripheral which time API the
+ * driver uses, via a DIAG_CTRL_MSG_TIME_SYNC_PKT control packet.
+ *
+ * Requires the peripheral's control channel to be open; errors are logged
+ * but not propagated.  Serialized by driver->diag_cntl_mutex.
+ */
+static void diag_send_time_sync_update(uint8_t peripheral)
+{
+	struct diag_ctrl_msg_time_sync time_sync_msg;
+	int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, Invalid peripheral, %d\n",
+				__func__, peripheral);
+		return;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+		!driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+			__func__, peripheral, driver->diagfwd_cntl[peripheral]);
+		return;
+	}
+
+	mutex_lock(&driver->diag_cntl_mutex);
+	/* data_len covers version (1 byte) + time_api (4 bytes) */
+	time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+	time_sync_msg.ctrl_pkt_data_len = 5;
+	time_sync_msg.version = 1;
+	time_sync_msg.time_api = driver->uses_time_api;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg, msg_size);
+	if (err)
+		pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+				__func__, peripheral, TYPE_CNTL,
+				msg_size, err);
+	mutex_unlock(&driver->diag_cntl_mutex);
+}
+
+/*
+ * diag_send_feature_mask_update - advertise the apps-side diag feature set
+ * to one peripheral via a DIAG_CTRL_MSG_FEATURE control packet.
+ *
+ * Builds the FEATURE_MASK_LEN-byte bitmap in the local feature_bytes[]
+ * (written by the DIAG_SET_FEATURE_MASK macro), appends it to the header
+ * in driver->buf_feature_mask_update and writes the whole packet.  On
+ * success, records that the mask was sent for this peripheral.
+ * Serialized by driver->diag_cntl_mutex.
+ */
+static void diag_send_feature_mask_update(uint8_t peripheral)
+{
+	void *buf = driver->buf_feature_mask_update;
+	int header_size = sizeof(struct diag_ctrl_feature_mask);
+	uint8_t feature_bytes[FEATURE_MASK_LEN] = {0, 0};
+	struct diag_ctrl_feature_mask feature_mask;
+	int total_len = 0;
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, Invalid peripheral, %d\n",
+			__func__, peripheral);
+		return;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
+		       __func__, peripheral, driver->diagfwd_cntl[peripheral]);
+		return;
+	}
+
+	mutex_lock(&driver->diag_cntl_mutex);
+	/* send feature mask update */
+	feature_mask.ctrl_pkt_id = DIAG_CTRL_MSG_FEATURE;
+	feature_mask.ctrl_pkt_data_len = sizeof(uint32_t) + FEATURE_MASK_LEN;
+	feature_mask.feature_mask_len = FEATURE_MASK_LEN;
+	memcpy(buf, &feature_mask, header_size);
+	/* Unconditional features first, then the configuration-dependent ones */
+	DIAG_SET_FEATURE_MASK(F_DIAG_FEATURE_MASK_SUPPORT);
+	DIAG_SET_FEATURE_MASK(F_DIAG_LOG_ON_DEMAND_APPS);
+	DIAG_SET_FEATURE_MASK(F_DIAG_STM);
+	DIAG_SET_FEATURE_MASK(F_DIAG_DCI_EXTENDED_HEADER_SUPPORT);
+	if (driver->supports_separate_cmdrsp)
+		DIAG_SET_FEATURE_MASK(F_DIAG_REQ_RSP_SUPPORT);
+	if (driver->supports_apps_hdlc_encoding)
+		DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
+	DIAG_SET_FEATURE_MASK(F_DIAG_MASK_CENTRALIZATION);
+	if (driver->supports_sockets)
+		DIAG_SET_FEATURE_MASK(F_DIAG_SOCKETS_ENABLED);
+
+	memcpy(buf + header_size, &feature_bytes, FEATURE_MASK_LEN);
+	total_len = header_size + FEATURE_MASK_LEN;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, buf, total_len);
+	if (err) {
+		pr_err_ratelimited("diag: In %s, unable to write feature mask to peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, TYPE_CNTL,
+		       total_len, err);
+		mutex_unlock(&driver->diag_cntl_mutex);
+		return;
+	}
+	driver->feature[peripheral].sent_feature_mask = 1;
+	mutex_unlock(&driver->diag_cntl_mutex);
+}
+
+/*
+ * diag_cmd_get_ssid_range - handle the "get SSID range" msg-config command.
+ *
+ * Writes the response header followed by one (ssid_first, ssid_last) pair
+ * per entry of the msg mask table into dest_buf, truncating when dest_len
+ * is reached.  Uses the session msg mask when 'info' is given, otherwise
+ * the global msg_mask.
+ *
+ * Returns the number of bytes written, 0 when apps should not respond
+ * (diag_apps_responds()), or -EINVAL on bad arguments.
+ */
+static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
+				   unsigned char *dest_buf, int dest_len,
+				   struct diag_md_session_t *info)
+{
+	int i;
+	int write_len = 0;
+	struct diag_msg_mask_t *mask_ptr = NULL;
+	struct diag_msg_ssid_query_t rsp;
+	struct diag_ssid_range_t ssid_range;
+	struct diag_mask_info *mask_info = NULL;
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds())
+		return 0;
+
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
+	rsp.status = MSG_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.count = driver->msg_mask_tbl_count;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+
+	mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr;
+	for (i = 0; i <  driver->msg_mask_tbl_count; i++, mask_ptr++) {
+		if (write_len + sizeof(ssid_range) > dest_len) {
+			pr_err("diag: In %s, Truncating response due to size limitations of rsp buffer\n",
+			       __func__);
+			break;
+		}
+		ssid_range.ssid_first = mask_ptr->ssid_first;
+		ssid_range.ssid_last = mask_ptr->ssid_last_tools;
+		memcpy(dest_buf + write_len, &ssid_range, sizeof(ssid_range));
+		write_len += sizeof(ssid_range);
+	}
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_get_build_mask - handle the "get build-time msg mask" command.
+ *
+ * Looks up the build-time mask entry whose ssid_first matches the request
+ * and copies its mask words (truncated to dest_len) after the response
+ * header.  rsp.status reports success only when a matching entry exists.
+ *
+ * Returns bytes written, 0 when apps should not respond, -EINVAL on bad
+ * arguments.
+ *
+ * Fix: when clamping an over-long request, ssid_last must be
+ * ssid_first + range - 1 (range counts both endpoints: see the
+ * last - first + 1 computations elsewhere in this file); the original
+ * '+ range' overstated the range by one entry.
+ */
+static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
+				   unsigned char *dest_buf, int dest_len,
+				   struct diag_md_session_t *info)
+{
+	int i = 0;
+	int write_len = 0;
+	int num_entries = 0;
+	int copy_len = 0;
+	struct diag_msg_mask_t *build_mask = NULL;
+	struct diag_build_mask_req_t *req = NULL;
+	struct diag_msg_build_mask_t rsp;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds())
+		return 0;
+
+	req = (struct diag_build_mask_req_t *)src_buf;
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_BUILD_MASK;
+	rsp.ssid_first = req->ssid_first;
+	rsp.ssid_last = req->ssid_last;
+	rsp.status = MSG_STATUS_FAIL;
+	rsp.padding = 0;
+
+	build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+	for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+		if (build_mask->ssid_first != req->ssid_first)
+			continue;
+		num_entries = req->ssid_last - req->ssid_first + 1;
+		if (num_entries > build_mask->range) {
+			pr_warn("diag: In %s, truncating ssid range for ssid_first: %d ssid_last %d\n",
+				__func__, req->ssid_first, req->ssid_last);
+			num_entries = build_mask->range;
+			req->ssid_last = req->ssid_first +
+					 build_mask->range - 1;
+		}
+		copy_len = num_entries * sizeof(uint32_t);
+		if (copy_len + sizeof(rsp) > dest_len)
+			copy_len = dest_len - sizeof(rsp);
+		memcpy(dest_buf + sizeof(rsp), build_mask->ptr, copy_len);
+		write_len += copy_len;
+		rsp.ssid_last = build_mask->ssid_last;
+		rsp.status = MSG_STATUS_SUCCESS;
+		break;
+	}
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_get_msg_mask - handle the "get current msg mask" command.
+ *
+ * Finds the mask entry whose tools SSID range contains the requested
+ * ssid_first and copies its mask words (truncated to dest_len) after the
+ * response header.  Uses the session msg mask when 'info' is given,
+ * otherwise the global msg_mask.
+ *
+ * Returns bytes written, 0 when apps should not respond, -EINVAL on bad
+ * arguments.
+ */
+static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
+				 unsigned char *dest_buf, int dest_len,
+				 struct diag_md_session_t *info)
+{
+	int i;
+	int write_len = 0;
+	uint32_t mask_size = 0;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_build_mask_req_t *req = NULL;
+	struct diag_msg_build_mask_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds())
+		return 0;
+
+	req = (struct diag_build_mask_req_t *)src_buf;
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK;
+	rsp.ssid_first = req->ssid_first;
+	rsp.ssid_last = req->ssid_last;
+	rsp.status = MSG_STATUS_FAIL;
+	rsp.padding = 0;
+
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+		if ((req->ssid_first < mask->ssid_first) ||
+		    (req->ssid_first > mask->ssid_last_tools)) {
+			continue;
+		}
+		mask_size = mask->range * sizeof(uint32_t);
+		/* Copy msg mask only till the end of the rsp buffer */
+		if (mask_size + sizeof(rsp) > dest_len)
+			mask_size = dest_len - sizeof(rsp);
+		memcpy(dest_buf + sizeof(rsp), mask->ptr, mask_size);
+		write_len += mask_size;
+		rsp.status = MSG_STATUS_SUCCESS;
+		break;
+	}
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_set_msg_mask - handle the "set msg mask" command for one SSID
+ * range: update the matching mask entry (growing it via krealloc when the
+ * requested range extends past the stored tools range), notify userspace
+ * clients, frame the echo response and push the update to peripherals.
+ *
+ * Returns bytes written to dest_buf, -EINVAL on bad arguments, -ENOMEM
+ * when growing the mask fails.
+ *
+ * Fix: the -ENOMEM path returned while still holding mask_info->lock
+ * (only mask->lock was released), deadlocking every later mask operation;
+ * both locks are now released before returning.
+ */
+static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
+				 unsigned char *dest_buf, int dest_len,
+				 struct diag_md_session_t *info)
+{
+	int i;
+	int write_len = 0;
+	int header_len = sizeof(struct diag_msg_build_mask_t);
+	int found = 0;
+	uint32_t mask_size = 0;
+	uint32_t offset = 0;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_msg_build_mask_t *req = NULL;
+	struct diag_msg_build_mask_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_msg_mask_t *mask_next = NULL;
+	uint32_t *temp = NULL;
+
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	req = (struct diag_msg_build_mask_t *)src_buf;
+
+	mutex_lock(&mask_info->lock);
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+		if (i < (driver->msg_mask_tbl_count - 1)) {
+			mask_next = mask;
+			mask_next++;
+		} else
+			mask_next = NULL;
+
+		if ((req->ssid_first < mask->ssid_first) ||
+		    (req->ssid_first > mask->ssid_first + MAX_SSID_PER_RANGE) ||
+		    (mask_next && (req->ssid_first >= mask_next->ssid_first))) {
+			continue;
+		}
+		mask_next = NULL;
+		found = 1;
+		mutex_lock(&mask->lock);
+		mask_size = req->ssid_last - req->ssid_first + 1;
+		if (mask_size > MAX_SSID_PER_RANGE) {
+			pr_warn("diag: In %s, truncating ssid range, %d-%d to max allowed: %d\n",
+				__func__, mask->ssid_first, mask->ssid_last,
+				MAX_SSID_PER_RANGE);
+			mask_size = MAX_SSID_PER_RANGE;
+			mask->range_tools = MAX_SSID_PER_RANGE;
+			mask->ssid_last_tools =
+				mask->ssid_first + mask->range_tools;
+		}
+		if (req->ssid_last > mask->ssid_last_tools) {
+			pr_debug("diag: Msg SSID range mismatch\n");
+			if (mask_size != MAX_SSID_PER_RANGE)
+				mask->ssid_last_tools = req->ssid_last;
+			mask->range_tools =
+				mask->ssid_last_tools - mask->ssid_first + 1;
+			temp = krealloc(mask->ptr,
+					mask->range_tools * sizeof(uint32_t),
+					GFP_KERNEL);
+			if (!temp) {
+				pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
+						   __func__, mask_size);
+				mutex_unlock(&mask->lock);
+				/* release the table lock too (was leaked) */
+				mutex_unlock(&mask_info->lock);
+				return -ENOMEM;
+			}
+			mask->ptr = temp;
+		}
+
+		offset = req->ssid_first - mask->ssid_first;
+		if (offset + mask_size > mask->range_tools) {
+			pr_err("diag: In %s, Not in msg mask range, mask_size: %d, offset: %d\n",
+			       __func__, mask_size, offset);
+			mutex_unlock(&mask->lock);
+			break;
+		}
+		mask_size = mask_size * sizeof(uint32_t);
+		memcpy(mask->ptr + offset, src_buf + header_len, mask_size);
+		mutex_unlock(&mask->lock);
+		mask_info->status = DIAG_CTRL_MASK_VALID;
+		break;
+	}
+	mutex_unlock(&mask_info->lock);
+
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_SET_MSG_MASK;
+	rsp.ssid_first = req->ssid_first;
+	rsp.ssid_last = req->ssid_last;
+	rsp.status = found;
+	rsp.padding = 0;
+	memcpy(dest_buf, &rsp, header_len);
+	write_len += header_len;
+	if (!found)
+		goto end;
+	if (mask_size + write_len > dest_len)
+		mask_size = dest_len - write_len;
+	memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
+	write_len += mask_size;
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+	}
+end:
+	return write_len;
+}
+
+/*
+ * Handle DIAG_CMD_OP_SET_ALL_MSG_MASK: set every SSID range's msg mask to
+ * the requested run-time mask value and notify userspace clients and
+ * peripherals. Returns the number of response bytes framed in dest_buf,
+ * or a negative errno on invalid input.
+ */
+static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
+				     unsigned char *dest_buf, int dest_len,
+				     struct diag_md_session_t *info)
+{
+	int i;
+	int write_len = 0;
+	int header_len = sizeof(struct diag_msg_config_rsp_t);
+	struct diag_msg_config_rsp_t rsp;
+	struct diag_msg_config_rsp_t *req = NULL;
+	struct diag_msg_mask_t *mask = NULL;
+	struct diag_mask_info *mask_info = NULL;
+
+	/* Use the session's mask table when present, the global one otherwise */
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	/*
+	 * req is dereferenced below; reject requests too short to contain
+	 * the full header so we never read past the end of src_buf.
+	 */
+	if (src_len < header_len)
+		return -EINVAL;
+
+	req = (struct diag_msg_config_rsp_t *)src_buf;
+
+	mutex_lock(&mask_info->lock);
+	/* Read the table pointer under the lock that guards it */
+	mask = (struct diag_msg_mask_t *)mask_info->ptr;
+	mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
+					   DIAG_CTRL_MASK_ALL_DISABLED;
+	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+		mutex_lock(&mask->lock);
+		/*
+		 * memset fills each byte of the mask words with rt_mask,
+		 * preserving the legacy all-on/all-off semantics.
+		 */
+		memset(mask->ptr, req->rt_mask,
+		       mask->range * sizeof(uint32_t));
+		mutex_unlock(&mask->lock);
+	}
+	mutex_unlock(&mask_info->lock);
+
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(MSG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
+	rsp.sub_cmd = DIAG_CMD_OP_SET_ALL_MSG_MASK;
+	rsp.status = MSG_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.rt_mask = req->rt_mask;
+	memcpy(dest_buf, &rsp, header_len);
+	write_len += header_len;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+	}
+
+	return write_len;
+}
+
+/*
+ * Handle DIAG_CMD_GET_EVENT_MASK: frame the current apps event mask into
+ * dest_buf. Returns the number of bytes written, 0 when a peripheral will
+ * respond instead, or a negative errno.
+ */
+static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len,
+				   unsigned char *dest_buf, int dest_len,
+				   struct diag_md_session_t *info)
+{
+	struct diag_event_mask_config_t rsp;
+	uint32_t num_bytes;
+	int write_len = 0;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	/* Stay silent if a peripheral is expected to answer this command */
+	if (!diag_apps_responds())
+		return 0;
+
+	/* The payload covers every event ID known to the apps processor */
+	num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
+	if (num_bytes + sizeof(rsp) > dest_len) {
+		pr_err("diag: In %s, invalid mask size: %d\n", __func__,
+		       num_bytes);
+		return -ENOMEM;
+	}
+
+	rsp.cmd_code = DIAG_CMD_GET_EVENT_MASK;
+	rsp.status = EVENT_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.num_bits = driver->last_event_id + 1;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+	memcpy(dest_buf + write_len, event_mask.ptr, num_bytes);
+	write_len += num_bytes;
+
+	return write_len;
+}
+
+/*
+ * Handle DIAG_CMD_SET_EVENT_MASK: copy the event mask supplied by the client
+ * into this session's mask, notify clients and peripherals, and echo the
+ * mask back in the response. Returns bytes written to dest_buf or a
+ * negative errno.
+ */
+static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
+				      unsigned char *dest_buf, int dest_len,
+				      struct diag_md_session_t *info)
+{
+	int i;
+	int write_len = 0;
+	int mask_len = 0;
+	int header_len = sizeof(struct diag_event_mask_config_t);
+	struct diag_event_mask_config_t rsp;
+	struct diag_event_mask_config_t *req;
+	struct diag_mask_info *mask_info = NULL;
+
+	mask_info = (!info) ? &event_mask : info->event_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	/* The request header is dereferenced below; it must fit in src_buf */
+	if (src_len < header_len)
+		return -EIO;
+
+	req = (struct diag_event_mask_config_t *)src_buf;
+	mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
+	/* Bound the request against this session's allocated mask buffer */
+	if (mask_len <= 0 || mask_len > mask_info->mask_len) {
+		pr_err("diag: In %s, invalid event mask len: %d\n", __func__,
+		       mask_len);
+		return -EIO;
+	}
+	/* The mask payload itself must also be fully present in the packet */
+	if (src_len < header_len + mask_len)
+		return -EIO;
+
+	mutex_lock(&mask_info->lock);
+	memcpy(mask_info->ptr, src_buf + header_len, mask_len);
+	mask_info->status = DIAG_CTRL_MASK_VALID;
+	mutex_unlock(&mask_info->lock);
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	rsp.cmd_code = DIAG_CMD_SET_EVENT_MASK;
+	rsp.status = EVENT_STATUS_SUCCESS;
+	rsp.padding = 0;
+	rsp.num_bits = driver->last_event_id + 1;
+	memcpy(dest_buf, &rsp, header_len);
+	write_len += header_len;
+	memcpy(dest_buf + write_len, mask_info->ptr, mask_len);
+	write_len += mask_len;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		diag_send_event_mask_update(i);
+	}
+
+	return write_len;
+}
+
+/*
+ * Handle DIAG_CMD_EVENT_TOGGLE: globally enable (non-zero toggle byte) or
+ * disable (zero) event reporting for this session, then notify clients and
+ * peripherals. Returns bytes written to dest_buf or a negative errno.
+ */
+static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
+				  unsigned char *dest_buf, int dest_len,
+				  struct diag_md_session_t *info)
+{
+	int i;
+	int write_len = 0;
+	uint8_t toggle = 0;
+	struct diag_event_report_t header;
+	struct diag_mask_info *mask_info = NULL;
+
+	mask_info = (!info) ? &event_mask : info->event_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	/*
+	 * The toggle byte lives at offset 1; the earlier check only
+	 * guarantees one byte, so require two before reading it.
+	 */
+	if (src_len < 2)
+		return -EINVAL;
+
+	toggle = *(src_buf + 1);
+	mutex_lock(&mask_info->lock);
+	if (toggle) {
+		mask_info->status = DIAG_CTRL_MASK_ALL_ENABLED;
+		memset(mask_info->ptr, 0xFF, mask_info->mask_len);
+	} else {
+		mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+		memset(mask_info->ptr, 0, mask_info->mask_len);
+	}
+	mutex_unlock(&mask_info->lock);
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(EVENT_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	header.cmd_code = DIAG_CMD_EVENT_TOGGLE;
+	header.padding = 0;
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		diag_send_event_mask_update(i);
+	}
+	memcpy(dest_buf, &header, sizeof(header));
+	write_len += sizeof(header);
+
+	return write_len;
+}
+
+/*
+ * Handle DIAG_CMD_OP_GET_LOG_MASK: report the log mask for the equipment ID
+ * named in the request. Responds from the apps only when no peripheral will.
+ * Returns bytes written to dest_buf, 0 when the apps must stay silent, or a
+ * negative errno on invalid input.
+ */
+static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
+				 unsigned char *dest_buf, int dest_len,
+				 struct diag_md_session_t *info)
+{
+	int i;
+	int status = LOG_STATUS_INVALID;
+	int write_len = 0;
+	int read_len = 0;
+	int req_header_len = sizeof(struct diag_log_config_req_t);
+	int rsp_header_len = sizeof(struct diag_log_config_rsp_t);
+	uint32_t mask_size = 0;
+	struct diag_log_mask_t *log_item = NULL;
+	struct diag_log_config_req_t *req;
+	struct diag_log_config_rsp_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	if (!diag_apps_responds())
+		return 0;
+
+	/*
+	 * The request header is dereferenced below; reject packets too short
+	 * to contain it so we never read past the end of src_buf.
+	 */
+	if (src_len < req_header_len)
+		return -EINVAL;
+
+	req = (struct diag_log_config_req_t *)src_buf;
+	read_len += req_header_len;
+
+	rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_MASK;
+	/*
+	 * Don't copy the response header now. Copy at the end after
+	 * calculating the status field value
+	 */
+	write_len += rsp_header_len;
+
+	log_item = (struct diag_log_mask_t *)mask_info->ptr;
+	for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
+		if (log_item->equip_id != req->equip_id)
+			continue;
+		mutex_lock(&log_item->lock);
+		mask_size = LOG_ITEMS_TO_SIZE(log_item->num_items_tools);
+		/*
+		 * Make sure we have space to fill the response in the buffer.
+		 * Destination buffer should atleast be able to hold equip_id
+		 * (uint32_t), num_items(uint32_t), mask (mask_size) and the
+		 * response header.
+		 */
+		if ((mask_size + (2 * sizeof(uint32_t)) + rsp_header_len) >
+								dest_len) {
+			pr_err("diag: In %s, invalid length: %d, max rsp_len: %d\n",
+				__func__, mask_size, dest_len);
+			status = LOG_STATUS_FAIL;
+			mutex_unlock(&log_item->lock);
+			break;
+		}
+		*(uint32_t *)(dest_buf + write_len) = log_item->equip_id;
+		write_len += sizeof(uint32_t);
+		*(uint32_t *)(dest_buf + write_len) = log_item->num_items_tools;
+		write_len += sizeof(uint32_t);
+		if (mask_size > 0) {
+			memcpy(dest_buf + write_len, log_item->ptr, mask_size);
+			write_len += mask_size;
+		}
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			 "sending log e %d num_items %d size %d\n",
+			 log_item->equip_id, log_item->num_items_tools,
+			 log_item->range_tools);
+		mutex_unlock(&log_item->lock);
+		status = LOG_STATUS_SUCCESS;
+		break;
+	}
+
+	rsp.status = status;
+	memcpy(dest_buf, &rsp, rsp_header_len);
+
+	return write_len;
+}
+
+/*
+ * Handle DIAG_CMD_OP_GET_LOG_RANGE: report the number of log items supported
+ * for every equipment ID. Returns bytes written to dest_buf, 0 when a
+ * peripheral will respond instead, or a negative errno.
+ */
+static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len,
+				  unsigned char *dest_buf, int dest_len,
+				  struct diag_md_session_t *info)
+{
+	int i;
+	int write_len = 0;
+	struct diag_log_config_rsp_t rsp;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_log_mask_t *mask = NULL;
+
+	if (!diag_apps_responds())
+		return 0;
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	/*
+	 * Walk the table belonging to this session (not the global table),
+	 * consistent with the other mask command handlers.
+	 */
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+
+	rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_RANGE;
+	rsp.status = LOG_STATUS_SUCCESS;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+
+	for (i = 0; i < MAX_EQUIP_ID && write_len < dest_len; i++, mask++) {
+		*(uint32_t *)(dest_buf + write_len) = mask->num_items_tools;
+		write_len += sizeof(uint32_t);
+	}
+
+	return write_len;
+}
+
+/*
+ * Handle DIAG_CMD_OP_SET_LOG_MASK: install the log mask supplied by the
+ * client for one equipment ID, growing the stored mask buffer if the tools
+ * report more items than currently allocated. Notifies userspace clients
+ * and peripherals, then frames and echoes the response. Returns bytes
+ * written to dest_buf or a negative errno.
+ */
+static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
+				 unsigned char *dest_buf, int dest_len,
+				 struct diag_md_session_t *info)
+{
+	int i;
+	int write_len = 0;
+	int status = LOG_STATUS_SUCCESS;
+	int read_len = 0;
+	int payload_len = 0;
+	int req_header_len = sizeof(struct diag_log_config_req_t);
+	int rsp_header_len = sizeof(struct diag_log_config_set_rsp_t);
+	uint32_t mask_size = 0;
+	struct diag_log_config_req_t *req;
+	struct diag_log_config_set_rsp_t rsp;
+	struct diag_log_mask_t *mask = NULL;
+	unsigned char *temp_buf = NULL;
+	struct diag_mask_info *mask_info = NULL;
+
+	/* Use the session's mask table when present, the global one otherwise */
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	/*
+	 * NOTE(review): src_len is not checked against req_header_len (or
+	 * against read_len + the copied mask size further down) — a short
+	 * packet could be over-read here. Confirm whether callers guarantee
+	 * a minimum packet size.
+	 */
+	req = (struct diag_log_config_req_t *)src_buf;
+	read_len += req_header_len;
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+
+	if (req->equip_id >= MAX_EQUIP_ID) {
+		pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
+		       __func__, req->equip_id);
+		status = LOG_STATUS_INVALID;
+	}
+
+	if (req->num_items == 0) {
+		pr_err("diag: In %s, Invalid number of items in log mask request, equip_id: %d\n",
+		       __func__, req->equip_id);
+		status = LOG_STATUS_INVALID;
+	}
+
+	/* An earlier failed status skips the table scan entirely (!status) */
+	mutex_lock(&mask_info->lock);
+	for (i = 0; i < MAX_EQUIP_ID && !status; i++, mask++) {
+		if (mask->equip_id != req->equip_id)
+			continue;
+		mutex_lock(&mask->lock);
+
+		DIAG_LOG(DIAG_DEBUG_MASKS, "e: %d current: %d %d new: %d %d",
+			 mask->equip_id, mask->num_items_tools,
+			 mask->range_tools, req->num_items,
+			 LOG_ITEMS_TO_SIZE(req->num_items));
+		/*
+		 * If the size of the log mask cannot fit into our
+		 * buffer, trim till we have space left in the buffer.
+		 * num_items should then reflect the items that we have
+		 * in our buffer.
+		 */
+		mask->num_items_tools = (req->num_items > MAX_ITEMS_ALLOWED) ?
+					MAX_ITEMS_ALLOWED : req->num_items;
+		mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
+		memset(mask->ptr, 0, mask->range_tools);
+		if (mask_size > mask->range_tools) {
+			DIAG_LOG(DIAG_DEBUG_MASKS,
+				 "log range mismatch, e: %d old: %d new: %d\n",
+				 req->equip_id, mask->range_tools,
+				 LOG_ITEMS_TO_SIZE(mask->num_items_tools));
+			/* Change in the mask reported by tools */
+			temp_buf = krealloc(mask->ptr, mask_size, GFP_KERNEL);
+			if (!temp_buf) {
+				/* Old mask->ptr is still valid after a
+				 * failed krealloc; only the status changes.
+				 */
+				mask_info->status = DIAG_CTRL_MASK_INVALID;
+				mutex_unlock(&mask->lock);
+				break;
+			}
+			mask->ptr = temp_buf;
+			memset(mask->ptr, 0, mask_size);
+			mask->range_tools = mask_size;
+		}
+		/* Reflect any trimming back into the request for the echo */
+		req->num_items = mask->num_items_tools;
+		if (mask_size > 0)
+			memcpy(mask->ptr, src_buf + read_len, mask_size);
+		DIAG_LOG(DIAG_DEBUG_MASKS,
+			 "copying log mask, e %d num %d range %d size %d\n",
+			 req->equip_id, mask->num_items_tools,
+			 mask->range_tools, mask_size);
+		mutex_unlock(&mask->lock);
+		mask_info->status = DIAG_CTRL_MASK_VALID;
+		break;
+	}
+	mutex_unlock(&mask_info->lock);
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	payload_len = LOG_ITEMS_TO_SIZE(req->num_items);
+	if ((payload_len + rsp_header_len > dest_len) || (payload_len == 0)) {
+		pr_err("diag: In %s, invalid length, payload_len: %d, header_len: %d, dest_len: %d\n",
+		       __func__, payload_len, rsp_header_len, dest_len);
+		status = LOG_STATUS_FAIL;
+	}
+	rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.sub_cmd = DIAG_CMD_OP_SET_LOG_MASK;
+	rsp.status = status;
+	rsp.equip_id = req->equip_id;
+	rsp.num_items = req->num_items;
+	memcpy(dest_buf, &rsp, rsp_header_len);
+	write_len += rsp_header_len;
+	/* On failure only the header goes out; no payload, no peripherals */
+	if (status != LOG_STATUS_SUCCESS)
+		goto end;
+	memcpy(dest_buf + write_len, src_buf + read_len, payload_len);
+	write_len += payload_len;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		diag_send_log_mask_update(i, req->equip_id);
+	}
+end:
+	return write_len;
+}
+
+/*
+ * Handle DIAG_CMD_OP_LOG_DISABLE: zero every equipment ID's log mask for
+ * this session, notify clients and peripherals, and frame the response.
+ * Returns bytes written to dest_buf or a negative errno.
+ */
+static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
+				     unsigned char *dest_buf, int dest_len,
+				     struct diag_md_session_t *info)
+{
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_log_mask_t *mask = NULL;
+	struct diag_log_config_rsp_t header;
+	int write_len = 0;
+	int i;
+
+	/* Use the session's mask table when present, the global one otherwise */
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
+	    !mask_info) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
+		       __func__, src_buf, src_len, dest_buf, dest_len,
+		       mask_info);
+		return -EINVAL;
+	}
+
+	mask = (struct diag_log_mask_t *)mask_info->ptr;
+
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		mutex_lock(&mask->lock);
+		memset(mask->ptr, 0, mask->range);
+		mutex_unlock(&mask->lock);
+	}
+	/*
+	 * NOTE(review): status is written without holding mask_info->lock,
+	 * unlike the other handlers in this file — confirm this is safe.
+	 */
+	mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
+	if (diag_check_update(APPS_DATA))
+		diag_update_userspace_clients(LOG_MASKS_TYPE);
+
+	/*
+	 * Apps processor must send the response to this command. Frame the
+	 * response.
+	 */
+	header.cmd_code = DIAG_CMD_LOG_CONFIG;
+	header.padding[0] = 0;
+	header.padding[1] = 0;
+	header.padding[2] = 0;
+	header.sub_cmd = DIAG_CMD_OP_LOG_DISABLE;
+	header.status = LOG_STATUS_SUCCESS;
+	memcpy(dest_buf, &header, sizeof(struct diag_log_config_rsp_t));
+	write_len += sizeof(struct diag_log_config_rsp_t);
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!diag_check_update(i))
+			continue;
+		diag_send_log_mask_update(i, ALL_EQUIP_ID);
+	}
+
+	return write_len;
+}
+
+/*
+ * Initialize one msg mask table entry for the given SSID range and allocate
+ * its zeroed mask words. The range is widened to at least
+ * MAX_SSID_PER_RANGE entries. Returns 0 on success or a negative errno.
+ */
+int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+				     struct diag_ssid_range_t *range)
+{
+	uint32_t num_ssids;
+
+	if (!msg_mask || !range)
+		return -EIO;
+	if (range->ssid_last < range->ssid_first)
+		return -EINVAL;
+
+	num_ssids = range->ssid_last - range->ssid_first + 1;
+	if (num_ssids < MAX_SSID_PER_RANGE)
+		num_ssids = MAX_SSID_PER_RANGE;
+
+	msg_mask->ssid_first = range->ssid_first;
+	msg_mask->ssid_last = range->ssid_last;
+	msg_mask->ssid_last_tools = range->ssid_last;
+	msg_mask->range = num_ssids;
+	msg_mask->range_tools = num_ssids;
+	mutex_init(&msg_mask->lock);
+	if (num_ssids > 0) {
+		msg_mask->ptr = kcalloc(num_ssids, sizeof(uint32_t),
+					GFP_KERNEL);
+		if (!msg_mask->ptr)
+			return -ENOMEM;
+		kmemleak_not_leak(msg_mask->ptr);
+	}
+	return 0;
+}
+
+/*
+ * Build the global msg mask table: one entry per static SSID range in
+ * msg_mask_tbl. Returns 0 on success or the first entry-creation error.
+ */
+static int diag_create_msg_mask_table(void)
+{
+	int err = 0;
+	int idx;
+	struct diag_ssid_range_t ssid_range;
+	struct diag_msg_mask_t *entry = (struct diag_msg_mask_t *)msg_mask.ptr;
+
+	mutex_lock(&msg_mask.lock);
+	driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
+	for (idx = 0; idx < driver->msg_mask_tbl_count; idx++, entry++) {
+		ssid_range.ssid_first = msg_mask_tbl[idx].ssid_first;
+		ssid_range.ssid_last = msg_mask_tbl[idx].ssid_last;
+		err = diag_create_msg_mask_table_entry(entry, &ssid_range);
+		if (err)
+			break;
+	}
+	mutex_unlock(&msg_mask.lock);
+	return err;
+}
+
+/*
+ * Build the Build Time msg mask table: one entry per static SSID range,
+ * each seeded with the matching compile-time default mask array
+ * (msg_bld_masks_N). Returns 0 on success or the first entry-creation
+ * error.
+ */
+static int diag_create_build_time_mask(void)
+{
+	int i;
+	int err = 0;
+	const uint32_t *tbl = NULL;
+	uint32_t tbl_size = 0;
+	struct diag_msg_mask_t *build_mask = NULL;
+	struct diag_ssid_range_t range;
+
+	mutex_lock(&msg_bt_mask.lock);
+	build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
+	for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+		range.ssid_first = msg_mask_tbl[i].ssid_first;
+		range.ssid_last = msg_mask_tbl[i].ssid_last;
+		err = diag_create_msg_mask_table_entry(build_mask, &range);
+		if (err)
+			break;
+		/*
+		 * Reset the table pointer and size on every iteration so an
+		 * SSID range with no matching case below is skipped instead
+		 * of reusing the previous iteration's stale table.
+		 */
+		tbl = NULL;
+		tbl_size = 0;
+		switch (build_mask->ssid_first) {
+		case MSG_SSID_0:
+			tbl = msg_bld_masks_0;
+			tbl_size = sizeof(msg_bld_masks_0);
+			break;
+		case MSG_SSID_1:
+			tbl = msg_bld_masks_1;
+			tbl_size = sizeof(msg_bld_masks_1);
+			break;
+		case MSG_SSID_2:
+			tbl = msg_bld_masks_2;
+			tbl_size = sizeof(msg_bld_masks_2);
+			break;
+		case MSG_SSID_3:
+			tbl = msg_bld_masks_3;
+			tbl_size = sizeof(msg_bld_masks_3);
+			break;
+		case MSG_SSID_4:
+			tbl = msg_bld_masks_4;
+			tbl_size = sizeof(msg_bld_masks_4);
+			break;
+		case MSG_SSID_5:
+			tbl = msg_bld_masks_5;
+			tbl_size = sizeof(msg_bld_masks_5);
+			break;
+		case MSG_SSID_6:
+			tbl = msg_bld_masks_6;
+			tbl_size = sizeof(msg_bld_masks_6);
+			break;
+		case MSG_SSID_7:
+			tbl = msg_bld_masks_7;
+			tbl_size = sizeof(msg_bld_masks_7);
+			break;
+		case MSG_SSID_8:
+			tbl = msg_bld_masks_8;
+			tbl_size = sizeof(msg_bld_masks_8);
+			break;
+		case MSG_SSID_9:
+			tbl = msg_bld_masks_9;
+			tbl_size = sizeof(msg_bld_masks_9);
+			break;
+		case MSG_SSID_10:
+			tbl = msg_bld_masks_10;
+			tbl_size = sizeof(msg_bld_masks_10);
+			break;
+		case MSG_SSID_11:
+			tbl = msg_bld_masks_11;
+			tbl_size = sizeof(msg_bld_masks_11);
+			break;
+		case MSG_SSID_12:
+			tbl = msg_bld_masks_12;
+			tbl_size = sizeof(msg_bld_masks_12);
+			break;
+		case MSG_SSID_13:
+			tbl = msg_bld_masks_13;
+			tbl_size = sizeof(msg_bld_masks_13);
+			break;
+		case MSG_SSID_14:
+			tbl = msg_bld_masks_14;
+			tbl_size = sizeof(msg_bld_masks_14);
+			break;
+		case MSG_SSID_15:
+			tbl = msg_bld_masks_15;
+			tbl_size = sizeof(msg_bld_masks_15);
+			break;
+		case MSG_SSID_16:
+			tbl = msg_bld_masks_16;
+			tbl_size = sizeof(msg_bld_masks_16);
+			break;
+		case MSG_SSID_17:
+			tbl = msg_bld_masks_17;
+			tbl_size = sizeof(msg_bld_masks_17);
+			break;
+		case MSG_SSID_18:
+			tbl = msg_bld_masks_18;
+			tbl_size = sizeof(msg_bld_masks_18);
+			break;
+		case MSG_SSID_19:
+			tbl = msg_bld_masks_19;
+			tbl_size = sizeof(msg_bld_masks_19);
+			break;
+		case MSG_SSID_20:
+			tbl = msg_bld_masks_20;
+			tbl_size = sizeof(msg_bld_masks_20);
+			break;
+		case MSG_SSID_21:
+			tbl = msg_bld_masks_21;
+			tbl_size = sizeof(msg_bld_masks_21);
+			break;
+		case MSG_SSID_22:
+			tbl = msg_bld_masks_22;
+			tbl_size = sizeof(msg_bld_masks_22);
+			break;
+		}
+		if (!tbl)
+			continue;
+		/* Clamp the static table to the entry's allocated range */
+		if (tbl_size > build_mask->range * sizeof(uint32_t)) {
+			pr_warn("diag: In %s, table %d has more ssid than max, ssid_first: %d, ssid_last: %d\n",
+				__func__, i, build_mask->ssid_first,
+				build_mask->ssid_last);
+			tbl_size = build_mask->range * sizeof(uint32_t);
+		}
+		memcpy(build_mask->ptr, tbl, tbl_size);
+	}
+	mutex_unlock(&msg_bt_mask.lock);
+
+	return err;
+}
+
+/*
+ * Build the global log mask table: one zeroed entry per equipment ID, sized
+ * from the static log_code_last_tbl. Returns 0 on success or -ENOMEM on the
+ * first allocation failure (earlier entries remain allocated).
+ */
+static int diag_create_log_mask_table(void)
+{
+	struct diag_log_mask_t *mask = NULL;
+	uint8_t i;
+	int err = 0;
+
+	mutex_lock(&log_mask.lock);
+	mask = (struct diag_log_mask_t *)(log_mask.ptr);
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		mask->equip_id = i;
+		mask->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]);
+		mask->num_items_tools = mask->num_items;
+		mutex_init(&mask->lock);
+		/* Allocate at least MAX_ITEMS_PER_EQUIP_ID bytes per entry */
+		if (LOG_ITEMS_TO_SIZE(mask->num_items) > MAX_ITEMS_PER_EQUIP_ID)
+			mask->range = LOG_ITEMS_TO_SIZE(mask->num_items);
+		else
+			mask->range = MAX_ITEMS_PER_EQUIP_ID;
+		mask->range_tools = mask->range;
+		mask->ptr = kzalloc(mask->range, GFP_KERNEL);
+		if (!mask->ptr) {
+			err = -ENOMEM;
+			break;
+		}
+		kmemleak_not_leak(mask->ptr);
+	}
+	mutex_unlock(&log_mask.lock);
+	return err;
+}
+
+/*
+ * Common initializer for a diag_mask_info: allocate the mask buffer
+ * (mask_len bytes) and the update buffer (update_buf_len bytes), both
+ * zeroed, and initialize the lock. Returns 0 on success or a negative
+ * errno; on failure no allocation is leaked or left dangling.
+ */
+static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
+			    int update_buf_len)
+{
+	if (!mask_info || mask_len < 0 || update_buf_len < 0)
+		return -EINVAL;
+
+	mask_info->status = DIAG_CTRL_MASK_INVALID;
+	mask_info->mask_len = mask_len;
+	mask_info->update_buf_len = update_buf_len;
+	if (mask_len > 0) {
+		mask_info->ptr = kzalloc(mask_len, GFP_KERNEL);
+		if (!mask_info->ptr)
+			return -ENOMEM;
+		kmemleak_not_leak(mask_info->ptr);
+	}
+	if (update_buf_len > 0) {
+		mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
+		if (!mask_info->update_buf) {
+			/*
+			 * Clear the freed pointer so a later
+			 * __diag_mask_exit()/mask-free cannot double-free it.
+			 */
+			kfree(mask_info->ptr);
+			mask_info->ptr = NULL;
+			return -ENOMEM;
+		}
+		kmemleak_not_leak(mask_info->update_buf);
+	}
+	mutex_init(&mask_info->lock);
+	return 0;
+}
+
+/*
+ * Common teardown for a diag_mask_info: free both buffers under the lock
+ * and clear the pointers so repeated teardown is harmless.
+ */
+static void __diag_mask_exit(struct diag_mask_info *mask_info)
+{
+	if (!mask_info)
+		return;
+
+	mutex_lock(&mask_info->lock);
+	kfree(mask_info->ptr);
+	kfree(mask_info->update_buf);
+	mask_info->ptr = NULL;
+	mask_info->update_buf = NULL;
+	mutex_unlock(&mask_info->lock);
+}
+
+/*
+ * Deep-copy a log mask table from src into a freshly initialized dest
+ * (used when creating a session-private mask). Returns 0 on success or a
+ * negative errno; on a mid-loop allocation failure, entries copied so far
+ * remain allocated and err is returned.
+ */
+int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+	int i;
+	int err = 0;
+	struct diag_log_mask_t *src_mask = NULL;
+	struct diag_log_mask_t *dest_mask = NULL;
+
+	/* Check both ends up front, matching diag_msg_mask_copy() */
+	if (!src || !dest)
+		return -EINVAL;
+
+	err = __diag_mask_init(dest, LOG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+
+	mutex_lock(&dest->lock);
+	src_mask = (struct diag_log_mask_t *)(src->ptr);
+	dest_mask = (struct diag_log_mask_t *)(dest->ptr);
+
+	dest->mask_len = src->mask_len;
+	dest->status = src->status;
+
+	for (i = 0; i < MAX_EQUIP_ID; i++, src_mask++, dest_mask++) {
+		dest_mask->equip_id = src_mask->equip_id;
+		dest_mask->num_items = src_mask->num_items;
+		dest_mask->num_items_tools = src_mask->num_items_tools;
+		mutex_init(&dest_mask->lock);
+		dest_mask->range = src_mask->range;
+		dest_mask->range_tools = src_mask->range_tools;
+		dest_mask->ptr = kzalloc(dest_mask->range_tools, GFP_KERNEL);
+		if (!dest_mask->ptr) {
+			err = -ENOMEM;
+			break;
+		}
+		kmemleak_not_leak(dest_mask->ptr);
+		memcpy(dest_mask->ptr, src_mask->ptr, dest_mask->range_tools);
+	}
+	mutex_unlock(&dest->lock);
+
+	return err;
+}
+
+/*
+ * Free a (typically session-private) log mask table: each equipment ID's
+ * mask buffer first, then the diag_mask_info buffers themselves.
+ */
+void diag_log_mask_free(struct diag_mask_info *mask_info)
+{
+	struct diag_log_mask_t *entry;
+	int idx;
+
+	if (!mask_info)
+		return;
+
+	mutex_lock(&mask_info->lock);
+	entry = (struct diag_log_mask_t *)mask_info->ptr;
+	for (idx = 0; idx < MAX_EQUIP_ID; idx++, entry++) {
+		kfree(entry->ptr);
+		entry->ptr = NULL;
+	}
+	mutex_unlock(&mask_info->lock);
+
+	__diag_mask_exit(mask_info);
+}
+
+/*
+ * Allocate the global msg mask info, build its SSID-range table, and reset
+ * the per-peripheral SSID counts. Returns 0 on success or a negative errno.
+ */
+static int diag_msg_mask_init(void)
+{
+	int peripheral;
+	int err;
+
+	err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+	err = diag_create_msg_mask_table();
+	if (err) {
+		pr_err("diag: Unable to create msg masks, err: %d\n", err);
+		return err;
+	}
+	driver->msg_mask = &msg_mask;
+
+	/* No peripheral has reported its SSID ranges yet */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
+		driver->max_ssid_count[peripheral] = 0;
+
+	return 0;
+}
+
+/*
+ * Deep-copy a msg mask table from src into a freshly initialized dest
+ * (used when creating a session-private mask). Assumes src and dest share
+ * the same SSID-range layout (both come from msg_mask_tbl). Returns 0 on
+ * success or the first entry-creation error.
+ */
+int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
+{
+	int i;
+	int err = 0;
+	struct diag_msg_mask_t *src_mask = NULL;
+	struct diag_msg_mask_t *dest_mask = NULL;
+	struct diag_ssid_range_t range;
+
+	if (!src || !dest)
+		return -EINVAL;
+
+	err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+
+	mutex_lock(&dest->lock);
+	src_mask = (struct diag_msg_mask_t *)src->ptr;
+	dest_mask = (struct diag_msg_mask_t *)dest->ptr;
+
+	dest->mask_len = src->mask_len;
+	dest->status = src->status;
+	for (i = 0; i < driver->msg_mask_tbl_count; i++) {
+		range.ssid_first = src_mask->ssid_first;
+		range.ssid_last = src_mask->ssid_last;
+		/* Allocates dest_mask->ptr sized for the same SSID range */
+		err = diag_create_msg_mask_table_entry(dest_mask, &range);
+		if (err)
+			break;
+		memcpy(dest_mask->ptr, src_mask->ptr,
+		       dest_mask->range * sizeof(uint32_t));
+		src_mask++;
+		dest_mask++;
+	}
+	mutex_unlock(&dest->lock);
+
+	return err;
+}
+
+/*
+ * Free a (typically session-private) msg mask table: each SSID range's
+ * mask words first, then the diag_mask_info buffers themselves.
+ */
+void diag_msg_mask_free(struct diag_mask_info *mask_info)
+{
+	struct diag_msg_mask_t *entry;
+	int idx;
+
+	if (!mask_info)
+		return;
+
+	mutex_lock(&mask_info->lock);
+	entry = (struct diag_msg_mask_t *)mask_info->ptr;
+	for (idx = 0; idx < driver->msg_mask_tbl_count; idx++, entry++) {
+		kfree(entry->ptr);
+		entry->ptr = NULL;
+	}
+	mutex_unlock(&mask_info->lock);
+
+	__diag_mask_exit(mask_info);
+}
+
+/*
+ * Driver-exit teardown for the global msg mask: free each SSID range's
+ * mask words, the table itself, and the update buffer.
+ */
+static void diag_msg_mask_exit(void)
+{
+	int i;
+	struct diag_msg_mask_t *mask = NULL;
+
+	mask = (struct diag_msg_mask_t *)(msg_mask.ptr);
+	if (mask) {
+		for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
+			kfree(mask->ptr);
+		kfree(msg_mask.ptr);
+	}
+
+	kfree(msg_mask.update_buf);
+}
+
+/*
+ * Allocate and populate the Build Time msg mask table from the
+ * compile-time default mask arrays. Returns 0 on success or a negative
+ * errno.
+ */
+static int diag_build_time_mask_init(void)
+{
+	int err = 0;
+
+	/* There is no need for update buffer for Build Time masks */
+	err = __diag_mask_init(&msg_bt_mask, MSG_MASK_SIZE, 0);
+	if (err)
+		return err;
+	err = diag_create_build_time_mask();
+	if (err) {
+		pr_err("diag: Unable to create msg build time masks, err: %d\n",
+		       err);
+		return err;
+	}
+	driver->build_time_mask = &msg_bt_mask;
+	return 0;
+}
+
+/*
+ * Driver-exit teardown for the Build Time msg mask: free each SSID range's
+ * mask words and then the table itself.
+ */
+static void diag_build_time_mask_exit(void)
+{
+	int i;
+	struct diag_msg_mask_t *mask = NULL;
+
+	mask = (struct diag_msg_mask_t *)(msg_bt_mask.ptr);
+	if (mask) {
+		for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
+			kfree(mask->ptr);
+		/*
+		 * Free this table's own buffer. The previous code freed
+		 * msg_mask.ptr here, leaking msg_bt_mask.ptr and
+		 * double-freeing the regular msg mask table once
+		 * diag_msg_mask_exit() runs.
+		 */
+		kfree(msg_bt_mask.ptr);
+	}
+}
+
+/*
+ * Allocate the global log mask info, build its per-equipment-ID table, and
+ * reset the per-peripheral equipment ID counts. Returns 0 on success or a
+ * negative errno.
+ */
+static int diag_log_mask_init(void)
+{
+	int peripheral;
+	int err;
+
+	err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+	err = diag_create_log_mask_table();
+	if (err)
+		return err;
+	driver->log_mask = &log_mask;
+
+	/* No peripheral has reported its equipment IDs yet */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
+		driver->num_equip_id[peripheral] = 0;
+
+	return 0;
+}
+
+/*
+ * Driver-exit teardown for the global log mask: free each equipment ID's
+ * mask buffer, the table itself, and the update buffer.
+ */
+static void diag_log_mask_exit(void)
+{
+	int i;
+	struct diag_log_mask_t *mask = NULL;
+
+	mask = (struct diag_log_mask_t *)(log_mask.ptr);
+	if (mask) {
+		for (i = 0; i < MAX_EQUIP_ID; i++, mask++)
+			kfree(mask->ptr);
+		kfree(log_mask.ptr);
+	}
+
+	kfree(log_mask.update_buf);
+}
+
+/*
+ * Allocate the global event mask (a flat bit array sized for the apps
+ * event range) and reset the per-peripheral event ID counts. Returns 0 on
+ * success or a negative errno.
+ */
+static int diag_event_mask_init(void)
+{
+	int peripheral;
+	int err;
+
+	err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+	driver->event_mask_size = EVENT_MASK_SIZE;
+	driver->last_event_id = APPS_EVENT_LAST_ID;
+	driver->event_mask = &event_mask;
+
+	/* No peripheral has reported its event ID count yet */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
+		driver->num_event_id[peripheral] = 0;
+
+	return 0;
+}
+
+/*
+ * Deep-copy an event mask from src into a freshly initialized dest (used
+ * when creating a session-private mask). Returns 0 on success or a
+ * negative errno.
+ */
+int diag_event_mask_copy(struct diag_mask_info *dest,
+			 struct diag_mask_info *src)
+{
+	int err;
+
+	if (!src || !dest)
+		return -EINVAL;
+
+	/* Allocate dest's buffers before mirroring src's state into them */
+	err = __diag_mask_init(dest, EVENT_MASK_SIZE, APPS_BUF_SIZE);
+	if (err)
+		return err;
+
+	mutex_lock(&dest->lock);
+	dest->mask_len = src->mask_len;
+	dest->status = src->status;
+	memcpy(dest->ptr, src->ptr, dest->mask_len);
+	mutex_unlock(&dest->lock);
+
+	return 0;
+}
+
+/*
+ * Free a (typically session-private) event mask. Event masks have no
+ * per-entry buffers, so the common teardown suffices.
+ */
+void diag_event_mask_free(struct diag_mask_info *mask_info)
+{
+	if (mask_info)
+		__diag_mask_exit(mask_info);
+}
+
+/*
+ * Driver-exit teardown for the global event mask: free the bit array and
+ * the update buffer.
+ */
+static void diag_event_mask_exit(void)
+{
+	kfree(event_mask.ptr);
+	kfree(event_mask.update_buf);
+}
+
+/*
+ * Copy this session's msg mask table to a userspace buffer, one
+ * diag_msg_mask_userspace_t header plus mask words per SSID range. Entries
+ * too large for the update buffer are skipped with an error log. Returns
+ * the total bytes copied, or an error value on failure.
+ */
+int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+			       struct diag_md_session_t *info)
+{
+	int i;
+	int err = 0;
+	int len = 0;
+	int copy_len = 0;
+	int total_len = 0;
+	struct diag_msg_mask_userspace_t header;
+	struct diag_mask_info *mask_info = NULL;
+	struct diag_msg_mask_t *mask = NULL;
+	unsigned char *ptr = NULL;
+
+	if (!buf || count == 0)
+		return -EINVAL;
+
+	/* Use the session's mask table when present, the global one otherwise */
+	mask_info = (!info) ? &msg_mask : info->msg_mask;
+	if (!mask_info)
+		return -EIO;
+
+	/* Bail out while a mask-clear operation is in flight */
+	mutex_lock(&driver->diag_maskclear_mutex);
+	if (driver->mask_clear) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag:%s: count = %zu\n", __func__, count);
+		mutex_unlock(&driver->diag_maskclear_mutex);
+		return -EIO;
+	}
+	mutex_unlock(&driver->diag_maskclear_mutex);
+
+	mutex_lock(&mask_info->lock);
+	mask = (struct diag_msg_mask_t *)(mask_info->ptr);
+	for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
+		/* Stage each entry in the update buffer before copying out */
+		ptr = mask_info->update_buf;
+		len = 0;
+		mutex_lock(&mask->lock);
+		header.ssid_first = mask->ssid_first;
+		header.ssid_last = mask->ssid_last_tools;
+		header.range = mask->range_tools;
+		memcpy(ptr, &header, sizeof(header));
+		len += sizeof(header);
+		copy_len = (sizeof(uint32_t) * mask->range_tools);
+		if ((len + copy_len) > mask_info->update_buf_len) {
+			pr_err("diag: In %s, no space to update msg mask, first: %d, last: %d\n",
+			       __func__, mask->ssid_first,
+			       mask->ssid_last_tools);
+			mutex_unlock(&mask->lock);
+			continue;
+		}
+		memcpy(ptr + len, mask->ptr, copy_len);
+		len += copy_len;
+		mutex_unlock(&mask->lock);
+		/* + sizeof(int) to account for data_type already in buf */
+		if (total_len + sizeof(int) + len > count) {
+			pr_err("diag: In %s, unable to send msg masks to user space, total_len: %d, count: %zu\n",
+			       __func__, total_len, count);
+			err = -ENOMEM;
+			break;
+		}
+		/*
+		 * NOTE(review): copy_to_user() returns the number of bytes
+		 * NOT copied, so on partial failure a positive residue is
+		 * returned to the caller rather than -EFAULT — confirm
+		 * callers treat any non-negative value as a byte count only
+		 * when err == 0.
+		 */
+		err = copy_to_user(buf + total_len, (void *)ptr, len);
+		if (err) {
+			pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
+			       __func__, err);
+			break;
+		}
+		total_len += len;
+	}
+	mutex_unlock(&mask_info->lock);
+
+	return err ? err : total_len;
+}
+
+/*
+ * diag_copy_to_user_log_mask - copy the log masks for every equipment id
+ * to a user-space buffer.
+ *
+ * @buf:   destination user buffer
+ * @count: size of @buf in bytes
+ * @info:  memory-device session; when NULL the global log_mask is used
+ *
+ * Each mask is staged into mask_info->update_buf as a
+ * diag_log_mask_userspace_t header followed by the raw bitmask, then
+ * copied out. Returns the total number of bytes copied, or a negative
+ * errno (-EINVAL on bad arguments, -EIO when no mask table exists,
+ * -ENOMEM when @buf is too small, or the copy_to_user() remainder).
+ */
+int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+			       struct diag_md_session_t *info)
+{
+	int i;
+	int err = 0;
+	int len = 0;
+	int copy_len = 0;
+	int total_len = 0;
+	struct diag_log_mask_userspace_t header;
+	struct diag_log_mask_t *mask = NULL;
+	struct diag_mask_info *mask_info = NULL;
+	unsigned char *ptr = NULL;
+
+	if (!buf || count == 0)
+		return -EINVAL;
+
+	mask_info = (!info) ? &log_mask : info->log_mask;
+	if (!mask_info)
+		return -EIO;
+
+	mutex_lock(&mask_info->lock);
+	mask = (struct diag_log_mask_t *)(mask_info->ptr);
+	for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
+		/* Stage each entry from scratch in the shared update_buf. */
+		ptr = mask_info->update_buf;
+		len = 0;
+		mutex_lock(&mask->lock);
+		/* Userspace is given the tools view of the item count. */
+		header.equip_id = mask->equip_id;
+		header.num_items = mask->num_items_tools;
+		memcpy(ptr, &header, sizeof(header));
+		len += sizeof(header);
+		copy_len = LOG_ITEMS_TO_SIZE(header.num_items);
+		if ((len + copy_len) > mask_info->update_buf_len) {
+			pr_err("diag: In %s, no space to update log mask, equip_id: %d\n",
+			       __func__, mask->equip_id);
+			mutex_unlock(&mask->lock);
+			/* Skip this entry but keep copying the others. */
+			continue;
+		}
+		memcpy(ptr + len, mask->ptr, copy_len);
+		len += copy_len;
+		mutex_unlock(&mask->lock);
+		/* + sizeof(int) to account for data_type already in buf */
+		if (total_len + sizeof(int) + len > count) {
+			pr_err("diag: In %s, unable to send log masks to user space, total_len: %d, count: %zu\n",
+			       __func__, total_len, count);
+			err = -ENOMEM;
+			break;
+		}
+		err = copy_to_user(buf + total_len, (void *)ptr, len);
+		if (err) {
+			pr_err("diag: In %s Unable to send log masks to user space clients, err: %d\n",
+			       __func__, err);
+			break;
+		}
+		total_len += len;
+	}
+	mutex_unlock(&mask_info->lock);
+
+	return err ? err : total_len;
+}
+
+/*
+ * diag_send_updates_peripheral - push the current apps-side configuration
+ * (feature mask, optional time sync, msg/log/event masks, real-time mode
+ * and buffering mode) down to @peripheral.
+ */
+void diag_send_updates_peripheral(uint8_t peripheral)
+{
+	diag_send_feature_mask_update(peripheral);
+	/* Time sync is forwarded only when the feature is enabled. */
+	if (driver->time_sync_enabled)
+		diag_send_time_sync_update(peripheral);
+	diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
+	diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
+	diag_send_event_mask_update(peripheral);
+	diag_send_real_time_update(peripheral,
+				driver->real_time_mode[DIAG_LOCAL_PROC]);
+	diag_send_peripheral_buffering_mode(
+				&driver->buffering_mode[peripheral]);
+}
+
+/*
+ * diag_process_apps_masks - dispatch a mask command packet to its handler.
+ *
+ * @buf:  raw command packet; first byte is the command code
+ * @len:  length of @buf in bytes
+ * @info: memory-device session (may be NULL for the global session)
+ *
+ * Responses are written to driver->apps_rsp_buf by the chosen handler.
+ * Returns the response length (> 0), 0 when there is no handler or no
+ * response, or -EINVAL on bad arguments.
+ *
+ * NOTE(review): @len is not validated against the size implied by the
+ * sub command before the reads below; handlers must bounds-check.
+ */
+int diag_process_apps_masks(unsigned char *buf, int len,
+			    struct diag_md_session_t *info)
+{
+	int size = 0;
+	int sub_cmd = 0;
+	/* All handlers share one signature: consume src, fill dest. */
+	int (*hdlr)(unsigned char *src_buf, int src_len,
+		    unsigned char *dest_buf, int dest_len,
+		    struct diag_md_session_t *info) = NULL;
+
+	if (!buf || len <= 0)
+		return -EINVAL;
+
+	if (*buf == DIAG_CMD_LOG_CONFIG) {
+		/*
+		 * Log config: the 32-bit sub command follows cmd_code plus
+		 * 3 padding bytes (see struct diag_log_config_req_t).
+		 */
+		sub_cmd = *(int *)(buf + sizeof(int));
+		switch (sub_cmd) {
+		case DIAG_CMD_OP_LOG_DISABLE:
+			hdlr = diag_cmd_disable_log_mask;
+			break;
+		case DIAG_CMD_OP_GET_LOG_RANGE:
+			hdlr = diag_cmd_get_log_range;
+			break;
+		case DIAG_CMD_OP_SET_LOG_MASK:
+			hdlr = diag_cmd_set_log_mask;
+			break;
+		case DIAG_CMD_OP_GET_LOG_MASK:
+			hdlr = diag_cmd_get_log_mask;
+			break;
+		}
+	} else if (*buf == DIAG_CMD_MSG_CONFIG) {
+		/* Msg config: sub command is the byte right after cmd_code. */
+		sub_cmd = *(uint8_t *)(buf + sizeof(uint8_t));
+		switch (sub_cmd) {
+		case DIAG_CMD_OP_GET_SSID_RANGE:
+			hdlr = diag_cmd_get_ssid_range;
+			break;
+		case DIAG_CMD_OP_GET_BUILD_MASK:
+			hdlr = diag_cmd_get_build_mask;
+			break;
+		case DIAG_CMD_OP_GET_MSG_MASK:
+			hdlr = diag_cmd_get_msg_mask;
+			break;
+		case DIAG_CMD_OP_SET_MSG_MASK:
+			hdlr = diag_cmd_set_msg_mask;
+			break;
+		case DIAG_CMD_OP_SET_ALL_MSG_MASK:
+			hdlr = diag_cmd_set_all_msg_mask;
+			break;
+		}
+	} else if (*buf == DIAG_CMD_GET_EVENT_MASK) {
+		hdlr = diag_cmd_get_event_mask;
+	} else if (*buf == DIAG_CMD_SET_EVENT_MASK) {
+		hdlr = diag_cmd_update_event_mask;
+	} else if (*buf == DIAG_CMD_EVENT_TOGGLE) {
+		hdlr = diag_cmd_toggle_events;
+	}
+
+	if (hdlr)
+		size = hdlr(buf, len, driver->apps_rsp_buf,
+			    DIAG_MAX_RSP_SIZE, info);
+
+	/* Negative handler results are collapsed to "no response". */
+	return (size > 0) ? size : 0;
+}
+
+/*
+ * diag_masks_init - initialize all diag mask tables and the feature-mask
+ * update buffer.
+ *
+ * Returns 0 on success. On any failure every sub-table is torn down via
+ * diag_masks_exit() and -ENOMEM is returned (the specific sub-init error
+ * code is intentionally not propagated).
+ */
+int diag_masks_init(void)
+{
+	int err = 0;
+
+	err = diag_msg_mask_init();
+	if (err)
+		goto fail;
+
+	err = diag_build_time_mask_init();
+	if (err)
+		goto fail;
+
+	err = diag_log_mask_init();
+	if (err)
+		goto fail;
+
+	err = diag_event_mask_init();
+	if (err)
+		goto fail;
+
+	/* Lazily allocate the feature-mask control packet buffer. */
+	if (driver->buf_feature_mask_update == NULL) {
+		driver->buf_feature_mask_update = kzalloc(sizeof(
+					struct diag_ctrl_feature_mask) +
+					FEATURE_MASK_LEN, GFP_KERNEL);
+		if (driver->buf_feature_mask_update == NULL)
+			goto fail;
+		kmemleak_not_leak(driver->buf_feature_mask_update);
+	}
+
+	return 0;
+fail:
+	pr_err("diag: Could not initialize diag mask buffers\n");
+	diag_masks_exit();
+	return -ENOMEM;
+}
+
+/*
+ * diag_masks_exit - free all mask tables and the feature-mask buffer.
+ * Safe to call on a partially initialized state (used by the
+ * diag_masks_init() failure path).
+ */
+void diag_masks_exit(void)
+{
+	diag_msg_mask_exit();
+	diag_build_time_mask_exit();
+	diag_log_mask_exit();
+	diag_event_mask_exit();
+	kfree(driver->buf_feature_mask_update);
+}
diff --git a/drivers/char/diag/diag_masks.h b/drivers/char/diag/diag_masks.h
new file mode 100644
index 0000000..1a52f94
--- /dev/null
+++ b/drivers/char/diag/diag_masks.h
@@ -0,0 +1,180 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MASKS_H
+#define DIAG_MASKS_H
+
+#include "diagfwd.h"
+
+/*
+ * Per-equipment-id log mask. The *_tools fields hold the sizes reported
+ * to userspace (see diag_copy_to_user_log_mask); ptr is the bitmask
+ * storage and is guarded by lock.
+ */
+struct diag_log_mask_t {
+	uint8_t equip_id;
+	uint32_t num_items;
+	uint32_t num_items_tools;
+	uint32_t range;
+	uint32_t range_tools;
+	struct mutex lock;
+	uint8_t *ptr;
+};
+
+/* Inclusive SSID interval, as used on the wire. */
+struct diag_ssid_range_t {
+	uint16_t ssid_first;
+	uint16_t ssid_last;
+} __packed;
+
+/*
+ * Per-SSID-range message mask; ssid_last_tools/range_tools are the
+ * values reported to userspace (see diag_copy_to_user_msg_mask).
+ */
+struct diag_msg_mask_t {
+	uint32_t ssid_first;
+	uint32_t ssid_last;
+	uint32_t ssid_last_tools;
+	uint32_t range;
+	uint32_t range_tools;
+	struct mutex lock;
+	uint32_t *ptr;
+};
+
+/* Wire formats for the log/msg/event config commands and responses. */
+struct diag_log_config_req_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+struct diag_log_config_rsp_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t status;
+} __packed;
+
+struct diag_log_config_set_rsp_t {
+	uint8_t cmd_code;
+	uint8_t padding[3];
+	uint32_t sub_cmd;
+	uint32_t status;
+	uint32_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+struct diag_log_on_demand_rsp_t {
+	uint8_t cmd_code;
+	uint16_t log_code;
+	uint8_t status;
+} __packed;
+
+struct diag_event_report_t {
+	uint8_t cmd_code;
+	uint16_t padding;
+} __packed;
+
+struct diag_event_mask_config_t {
+	uint8_t cmd_code;
+	uint8_t status;
+	uint16_t padding;
+	uint16_t num_bits;
+} __packed;
+
+struct diag_msg_config_rsp_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint8_t status;
+	uint8_t padding;
+	uint32_t rt_mask;
+} __packed;
+
+struct diag_msg_ssid_query_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint8_t status;
+	uint8_t padding;
+	uint32_t count;
+} __packed;
+
+struct diag_build_mask_req_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint16_t ssid_first;
+	uint16_t ssid_last;
+} __packed;
+
+struct diag_msg_build_mask_t {
+	uint8_t cmd_code;
+	uint8_t sub_cmd;
+	uint16_t ssid_first;
+	uint16_t ssid_last;
+	uint8_t status;
+	uint8_t padding;
+} __packed;
+
+/* Headers prefixed to each mask entry copied to userspace. */
+struct diag_msg_mask_userspace_t {
+	uint32_t ssid_first;
+	uint32_t ssid_last;
+	uint32_t range;
+} __packed;
+
+struct diag_log_mask_userspace_t {
+	uint8_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+#define MAX_EQUIP_ID	16
+#define MSG_MASK_SIZE	(MSG_MASK_TBL_CNT * sizeof(struct diag_msg_mask_t))
+#define LOG_MASK_SIZE	(MAX_EQUIP_ID * sizeof(struct diag_log_mask_t))
+#define EVENT_MASK_SIZE 513
+#define MAX_ITEMS_PER_EQUIP_ID	512
+#define MAX_ITEMS_ALLOWED	0xFFF
+
+#define LOG_MASK_CTRL_HEADER_LEN	11
+#define MSG_MASK_CTRL_HEADER_LEN	11
+#define EVENT_MASK_CTRL_HEADER_LEN	7
+
+/* Status codes carried in the config responses above. */
+#define LOG_STATUS_SUCCESS	0
+#define LOG_STATUS_INVALID	1
+#define LOG_STATUS_FAIL		2
+
+#define MSG_STATUS_FAIL		0
+#define MSG_STATUS_SUCCESS	1
+
+#define EVENT_STATUS_SUCCESS	0
+#define EVENT_STATUS_FAIL	1
+
+#define DIAG_CTRL_MASK_INVALID		0
+#define DIAG_CTRL_MASK_ALL_DISABLED	1
+#define DIAG_CTRL_MASK_ALL_ENABLED	2
+#define DIAG_CTRL_MASK_VALID		3
+
+/* Global (non-session) mask tables defined in diag_masks.c. */
+extern struct diag_mask_info msg_mask;
+extern struct diag_mask_info msg_bt_mask;
+extern struct diag_mask_info log_mask;
+extern struct diag_mask_info event_mask;
+
+int diag_masks_init(void);
+void diag_masks_exit(void);
+int diag_log_mask_copy(struct diag_mask_info *dest,
+		       struct diag_mask_info *src);
+int diag_msg_mask_copy(struct diag_mask_info *dest,
+		       struct diag_mask_info *src);
+int diag_event_mask_copy(struct diag_mask_info *dest,
+			 struct diag_mask_info *src);
+void diag_log_mask_free(struct diag_mask_info *mask_info);
+void diag_msg_mask_free(struct diag_mask_info *mask_info);
+void diag_event_mask_free(struct diag_mask_info *mask_info);
+int diag_process_apps_masks(unsigned char *buf, int len,
+			    struct diag_md_session_t *info);
+void diag_send_updates_peripheral(uint8_t peripheral);
+
+extern int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
+					    struct diag_ssid_range_t *range);
+extern int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
+				      struct diag_md_session_t *info);
+extern int diag_copy_to_user_log_mask(char __user *buf, size_t count,
+				      struct diag_md_session_t *info);
+#endif
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
new file mode 100644
index 0000000..558e362
--- /dev/null
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -0,0 +1,379 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/uaccess.h>
+#include "diagchar.h"
+#include "diag_memorydevice.h"
+#include "diagfwd_bridge.h"
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+
+/*
+ * Static table of memory-device channels. Only the local channel is
+ * always present; the MDM/MDM2/SMUX bridge channels are compiled in
+ * under CONFIG_DIAGFWD_BRIDGE_CODE. tbl/num_tbl_entries are filled in
+ * at runtime by diag_md_init(), ops/ctx by diag_md_register().
+ */
+struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
+	{
+		.id = DIAG_MD_LOCAL,
+		.ctx = 0,
+		.mempool = POOL_TYPE_MUX_APPS,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = DIAG_MD_MDM,
+		.ctx = 0,
+		.mempool = POOL_TYPE_MDM_MUX,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	},
+	{
+		.id = DIAG_MD_MDM2,
+		.ctx = 0,
+		.mempool = POOL_TYPE_MDM2_MUX,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	},
+	{
+		.id = DIAG_MD_SMUX,
+		.ctx = 0,
+		.mempool = POOL_TYPE_QSC_MUX,
+		.num_tbl_entries = 0,
+		.tbl = NULL,
+		.ops = NULL,
+	}
+#endif
+};
+
+/*
+ * diag_md_register - attach mux callbacks and context to channel @id.
+ * Returns 0 on success, -EINVAL for a bad id or NULL @ops.
+ */
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops)
+{
+	if (id < 0 || id >= NUM_DIAG_MD_DEV || !ops)
+		return -EINVAL;
+
+	diag_md[id].ops = ops;
+	diag_md[id].ctx = ctx;
+	return 0;
+}
+
+/*
+ * diag_md_open_all - notify every registered channel that memory-device
+ * mode is being opened.
+ */
+void diag_md_open_all(void)
+{
+	int i;
+	struct diag_md_info *ch = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		if (ch->ops && ch->ops->open)
+			ch->ops->open(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+	}
+}
+
+/*
+ * diag_md_close_all - notify every channel that memory-device mode is
+ * closing and flush all pending table entries back to their owners.
+ */
+void diag_md_close_all(void)
+{
+	int i, j;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+
+		if (ch->ops && ch->ops->close)
+			ch->ops->close(ch->ctx, DIAG_MEMORY_DEVICE_MODE);
+
+		/*
+		 * When we close the Memory device mode, make sure we flush the
+		 * internal buffers in the table so that there are no stale
+		 * entries.
+		 */
+		spin_lock_irqsave(&ch->lock, flags);
+		for (j = 0; j < ch->num_tbl_entries; j++) {
+			entry = &ch->tbl[j];
+			if (entry->len <= 0)
+				continue;
+			/* Hand the buffer back to its producer. */
+			if (ch->ops && ch->ops->write_done)
+				ch->ops->write_done(entry->buf, entry->len,
+						    entry->ctx,
+						    DIAG_MEMORY_DEVICE_MODE);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+		}
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+
+	diag_ws_reset(DIAG_WS_MUX);
+}
+
+/*
+ * diag_md_write - queue @buf on channel @id for a memory-device client.
+ *
+ * @id:  memory-device channel index
+ * @buf: producer-owned buffer; ownership is held until write_done fires
+ * @len: number of valid bytes in @buf
+ * @ctx: producer context; encodes the peripheral (GET_BUF_PERIPHERAL)
+ *
+ * The buffer is placed in the first empty table slot and the md session
+ * client owning that peripheral is woken. Returns 0 on success,
+ * -EINVAL on bad arguments or no matching client, -EIO when no session
+ * owns the peripheral, -ENOMEM when @buf is already queued or the
+ * table is full.
+ */
+int diag_md_write(int id, unsigned char *buf, int len, int ctx)
+{
+	int i;
+	uint8_t found = 0;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	uint8_t peripheral;
+	struct diag_md_session_t *session_info = NULL;
+
+	if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+		return -EINVAL;
+
+	if (!buf || len < 0)
+		return -EINVAL;
+
+	/* == NUM_PERIPHERALS is allowed: it denotes apps-generated data. */
+	peripheral = GET_BUF_PERIPHERAL(ctx);
+	if (peripheral > NUM_PERIPHERALS)
+		return -EINVAL;
+
+	session_info = diag_md_session_get_peripheral(peripheral);
+	if (!session_info)
+		return -EIO;
+
+	ch = &diag_md[id];
+
+	/* Reject a buffer that is already sitting in the table. */
+	spin_lock_irqsave(&ch->lock, flags);
+	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+		if (ch->tbl[i].buf != buf)
+			continue;
+		found = 1;
+		pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, ctxt: %d len: %d at i: %d back to the table, proc: %d, mode: %d\n",
+				   buf, ctx, ch->tbl[i].len,
+				   i, id, driver->logging_mode);
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+
+	if (found)
+		return -ENOMEM;
+
+	/* Claim the first free slot (len == 0 marks a free entry). */
+	spin_lock_irqsave(&ch->lock, flags);
+	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+		if (ch->tbl[i].len == 0) {
+			ch->tbl[i].buf = buf;
+			ch->tbl[i].len = len;
+			ch->tbl[i].ctx = ctx;
+			found = 1;
+			diag_ws_on_read(DIAG_WS_MUX, len);
+		}
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+
+	if (!found) {
+		pr_err_ratelimited("diag: Unable to find an empty space in table, please reduce logging rate, proc: %d\n",
+				   id);
+		return -ENOMEM;
+	}
+
+	/* Wake the user-space client that owns this md session. */
+	found = 0;
+	for (i = 0; i < driver->num_clients && !found; i++) {
+		if ((driver->client_map[i].pid !=
+		     session_info->pid) ||
+		    (driver->client_map[i].pid == 0))
+			continue;
+
+		found = 1;
+		driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+		pr_debug("diag: wake up logging process\n");
+		wake_up_interruptible(&driver->wait_q);
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * diag_md_copy_to_user - drain queued memory-device buffers to userspace.
+ *
+ * @buf:      destination user buffer
+ * @pret:     in/out offset into @buf; advanced as data is copied
+ * @buf_size: total size of @buf
+ * @info:     md session of the calling client (may be NULL)
+ *
+ * For each pending table entry the payload is preceded by its length,
+ * and for non-local (bridge) channels by a remote-processor token. The
+ * number of drained packets is written at offset sizeof(int) of @buf.
+ * Entries that do not belong to @info's peripherals, or that fail to
+ * copy, are dropped and handed back via write_done. Returns the result
+ * of the final copy_to_user (0 on success).
+ */
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+			struct diag_md_session_t *info)
+{
+	int i, j;
+	int err = 0;
+	int ret = *pret;
+	int num_data = 0;
+	int remote_token;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
+	uint8_t drain_again = 0;
+	uint8_t peripheral = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
+		ch = &diag_md[i];
+		for (j = 0; j < ch->num_tbl_entries && !err; j++) {
+			entry = &ch->tbl[j];
+			if (entry->len <= 0)
+				continue;
+			peripheral = GET_BUF_PERIPHERAL(entry->ctx);
+			/* Account for Apps data as well */
+			if (peripheral > NUM_PERIPHERALS)
+				goto drop_data;
+			/* Skip entries owned by a different md session. */
+			session_info =
+			diag_md_session_get_peripheral(peripheral);
+			if (session_info && info &&
+				(session_info->pid != info->pid))
+				continue;
+			if ((info && (info->peripheral_mask &
+			    MD_PERIPHERAL_MASK(peripheral)) == 0))
+				goto drop_data;
+			/*
+			 * If the data is from remote processor, copy the remote
+			 * token first
+			 */
+			if (i > 0) {
+				if ((ret + (3 * sizeof(int)) + entry->len) >=
+							buf_size) {
+					drain_again = 1;
+					break;
+				}
+			} else {
+				if ((ret + (2 * sizeof(int)) + entry->len) >=
+						buf_size) {
+					drain_again = 1;
+					break;
+				}
+			}
+			if (i > 0) {
+				remote_token = diag_get_remote(i);
+				err = copy_to_user(buf + ret, &remote_token,
+						   sizeof(int));
+				if (err)
+					goto drop_data;
+				ret += sizeof(int);
+			}
+
+			/* Copy the length of data being passed */
+			err = copy_to_user(buf + ret, (void *)&(entry->len),
+					   sizeof(int));
+			if (err)
+				goto drop_data;
+			ret += sizeof(int);
+
+			/* Copy the actual data being passed */
+			err = copy_to_user(buf + ret, (void *)entry->buf,
+					   entry->len);
+			if (err)
+				goto drop_data;
+			ret += entry->len;
+
+			/*
+			 * The data is now copied to the user space client,
+			 * Notify that the write is complete and delete its
+			 * entry from the table
+			 */
+			num_data++;
+drop_data:
+			spin_lock_irqsave(&ch->lock, flags);
+			if (ch->ops && ch->ops->write_done)
+				ch->ops->write_done(entry->buf, entry->len,
+						    entry->ctx,
+						    DIAG_MEMORY_DEVICE_MODE);
+			diag_ws_on_copy(DIAG_WS_MUX);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+			spin_unlock_irqrestore(&ch->lock, flags);
+		}
+	}
+
+	*pret = ret;
+	/* Packet count lands after the data_type word already in buf. */
+	err = copy_to_user(buf + sizeof(int), (void *)&num_data, sizeof(int));
+	diag_ws_on_copy_complete(DIAG_WS_MUX);
+	/* More data remained than fit in buf; schedule another drain. */
+	if (drain_again)
+		chk_logging_wakeup();
+
+	return err;
+}
+
+/*
+ * diag_md_close_peripheral - release the first pending table entry that
+ * belongs to @peripheral on channel @id, returning the buffer to its
+ * producer via write_done. Returns 0, or -EINVAL for a bad @id.
+ */
+int diag_md_close_peripheral(int id, uint8_t peripheral)
+{
+	int i;
+	uint8_t found = 0;
+	unsigned long flags;
+	struct diag_md_info *ch = NULL;
+	struct diag_buf_tbl_t *entry = NULL;
+
+	if (id < 0 || id >= NUM_DIAG_MD_DEV || id >= DIAG_NUM_PROC)
+		return -EINVAL;
+
+	ch = &diag_md[id];
+
+	spin_lock_irqsave(&ch->lock, flags);
+	for (i = 0; i < ch->num_tbl_entries && !found; i++) {
+		entry = &ch->tbl[i];
+		if (GET_BUF_PERIPHERAL(entry->ctx) != peripheral)
+			continue;
+		/* Only the first matching entry is flushed per call. */
+		found = 1;
+		if (ch->ops && ch->ops->write_done) {
+			ch->ops->write_done(entry->buf, entry->len,
+					    entry->ctx,
+					    DIAG_MEMORY_DEVICE_MODE);
+			entry->buf = NULL;
+			entry->len = 0;
+			entry->ctx = 0;
+		}
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+	return 0;
+}
+
+/*
+ * diag_md_init - allocate and initialize the buffer table for every
+ * memory-device channel.
+ *
+ * The table size of each channel tracks the poolsize of its backing
+ * mempool. Returns 0 on success, -ENOMEM on allocation failure (any
+ * partially initialized channels are torn down via diag_md_exit()).
+ */
+int diag_md_init(void)
+{
+	int i;
+	struct diag_md_info *ch = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		ch->num_tbl_entries = diag_mempools[ch->mempool].poolsize;
+		/*
+		 * kcalloc checks the count * size multiplication for
+		 * overflow and zero-fills, so every entry starts out as
+		 * { .buf = NULL, .len = 0, .ctx = 0 } (a free slot).
+		 */
+		ch->tbl = kcalloc(ch->num_tbl_entries,
+				  sizeof(struct diag_buf_tbl_t),
+				  GFP_KERNEL);
+		if (!ch->tbl)
+			goto fail;
+
+		/*
+		 * Initialize the channel lock exactly once per channel.
+		 * The previous code re-ran spin_lock_init() on every
+		 * table-entry iteration, which re-initializes a lock
+		 * that may already be in use on re-init paths.
+		 */
+		spin_lock_init(&ch->lock);
+	}
+
+	return 0;
+
+fail:
+	diag_md_exit();
+	return -ENOMEM;
+}
+
+/*
+ * diag_md_exit - free every channel's buffer table and detach its ops.
+ * Safe on partially initialized state (used by diag_md_init() failure
+ * path); kfree(NULL) is a no-op.
+ */
+void diag_md_exit(void)
+{
+	int i;
+	struct diag_md_info *ch = NULL;
+
+	for (i = 0; i < NUM_DIAG_MD_DEV; i++) {
+		ch = &diag_md[i];
+		kfree(ch->tbl);
+		ch->num_tbl_entries = 0;
+		ch->ops = NULL;
+	}
+}
diff --git a/drivers/char/diag/diag_memorydevice.h b/drivers/char/diag/diag_memorydevice.h
new file mode 100644
index 0000000..35a1ee3
--- /dev/null
+++ b/drivers/char/diag/diag_memorydevice.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAG_MEMORYDEVICE_H
+#define DIAG_MEMORYDEVICE_H
+
+/* Channel indices: local apps channel plus optional bridge channels. */
+#define DIAG_MD_LOCAL		0
+#define DIAG_MD_LOCAL_LAST	1
+#define DIAG_MD_BRIDGE_BASE	DIAG_MD_LOCAL_LAST
+#define DIAG_MD_MDM		(DIAG_MD_BRIDGE_BASE)
+#define DIAG_MD_MDM2		(DIAG_MD_BRIDGE_BASE + 1)
+#define DIAG_MD_SMUX		(DIAG_MD_BRIDGE_BASE + 2)
+#define DIAG_MD_BRIDGE_LAST	(DIAG_MD_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_MD_DEV		DIAG_MD_LOCAL_LAST
+#else
+#define NUM_DIAG_MD_DEV		DIAG_MD_BRIDGE_LAST
+#endif
+
+/* One queued producer buffer; len == 0 marks a free slot. */
+struct diag_buf_tbl_t {
+	unsigned char *buf;
+	int len;
+	int ctx;
+};
+
+/*
+ * Per-channel state. tbl/num_tbl_entries are set by diag_md_init(),
+ * ops/ctx by diag_md_register(); lock guards tbl.
+ */
+struct diag_md_info {
+	int id;
+	int ctx;
+	int mempool;
+	int num_tbl_entries;
+	spinlock_t lock;
+	struct diag_buf_tbl_t *tbl;
+	struct diag_mux_ops *ops;
+};
+
+extern struct diag_md_info diag_md[NUM_DIAG_MD_DEV];
+
+int diag_md_init(void);
+void diag_md_exit(void);
+void diag_md_open_all(void);
+void diag_md_close_all(void);
+int diag_md_register(int id, int ctx, struct diag_mux_ops *ops);
+int diag_md_close_peripheral(int id, uint8_t peripheral);
+int diag_md_write(int id, unsigned char *buf, int len, int ctx);
+int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
+			 struct diag_md_session_t *info);
+#endif
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
new file mode 100644
index 0000000..8f5a002
--- /dev/null
+++ b/drivers/char/diag/diag_mux.c
@@ -0,0 +1,243 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ratelimit.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diag_mux.h"
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+
+
+/* Mux state and the two logger backends it can route through. */
+struct diag_mux_state_t *diag_mux;
+static struct diag_logger_t usb_logger;
+static struct diag_logger_t md_logger;
+
+/* USB backend: forwards traffic over the usbdiag channels. */
+static struct diag_logger_ops usb_log_ops = {
+	.open = diag_usb_connect_all,
+	.close = diag_usb_disconnect_all,
+	.queue_read = diag_usb_queue_read,
+	.write = diag_usb_write,
+	.close_peripheral = NULL
+};
+
+/* Memory-device backend: queues traffic for user-space readers. */
+static struct diag_logger_ops md_log_ops = {
+	.open = diag_md_open_all,
+	.close = diag_md_close_all,
+	.queue_read = NULL,
+	.write = diag_md_write,
+	.close_peripheral = diag_md_close_peripheral,
+};
+
+/*
+ * diag_mux_init - allocate the mux state, wire up both logger backends
+ * and initialize the memory-device tables.
+ *
+ * Returns 0 on success or a negative errno; on failure no state is
+ * left allocated. The previous code ignored diag_md_init()'s return
+ * value, which can legitimately fail with -ENOMEM.
+ */
+int diag_mux_init(void)
+{
+	int err;
+
+	diag_mux = kzalloc(sizeof(struct diag_mux_state_t),
+			 GFP_KERNEL);
+	if (!diag_mux)
+		return -ENOMEM;
+	kmemleak_not_leak(diag_mux);
+
+	usb_logger.mode = DIAG_USB_MODE;
+	usb_logger.log_ops = &usb_log_ops;
+
+	md_logger.mode = DIAG_MEMORY_DEVICE_MODE;
+	md_logger.log_ops = &md_log_ops;
+	err = diag_md_init();
+	if (err) {
+		kfree(diag_mux);
+		diag_mux = NULL;
+		return err;
+	}
+
+	/*
+	 * Set USB logging as the default logger. This is the mode
+	 * Diag should be in when it initializes.
+	 */
+	diag_mux->usb_ptr = &usb_logger;
+	diag_mux->md_ptr = &md_logger;
+	diag_mux->logger = &usb_logger;
+	diag_mux->mux_mask = 0;
+	diag_mux->mode = DIAG_USB_MODE;
+	return 0;
+}
+
+/* Free the mux state allocated by diag_mux_init(). */
+void diag_mux_exit(void)
+{
+	kfree(diag_mux);
+}
+
+/*
+ * diag_mux_register - register mux callbacks for @proc with both the
+ * USB and memory-device backends.
+ *
+ * Returns 0 on success or when @proc is out of range (silently
+ * ignored), -EINVAL for NULL @ops, or the backend's error code.
+ */
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops)
+{
+	int err = 0;
+
+	if (!ops)
+		return -EINVAL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return 0;
+
+	/* Register with USB logger */
+	usb_logger.ops[proc] = ops;
+	err = diag_usb_register(proc, ctx, ops);
+	if (err) {
+		pr_err("diag: MUX: unable to register usb operations for proc: %d, err: %d\n",
+		       proc, err);
+		return err;
+	}
+
+	/* Register with the memory-device logger. */
+	md_logger.ops[proc] = ops;
+	err = diag_md_register(proc, ctx, ops);
+	if (err) {
+		pr_err("diag: MUX: unable to register md operations for proc: %d, err: %d\n",
+		       proc, err);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * diag_mux_queue_read - ask the active logger to queue a read on @proc.
+ * In multi mode the USB logger handles reads (the md backend has no
+ * queue_read op). Returns the backend result, 0 when no op exists,
+ * -EINVAL/-EIO on bad proc or uninitialized mux.
+ */
+int diag_mux_queue_read(int proc)
+{
+	struct diag_logger_t *logger = NULL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return -EINVAL;
+	if (!diag_mux)
+		return -EIO;
+
+	if (diag_mux->mode == DIAG_MULTI_MODE)
+		logger = diag_mux->usb_ptr;
+	else
+		logger = diag_mux->logger;
+
+	if (logger && logger->log_ops && logger->log_ops->queue_read)
+		return logger->log_ops->queue_read(proc);
+
+	return 0;
+}
+
+/*
+ * diag_mux_write - route @buf to the logger that owns its peripheral.
+ * Peripherals whose bit is set in mux_mask go to the memory device;
+ * everything else goes to USB. Returns the backend's write result,
+ * 0 when no write op exists, or -EINVAL/-EIO on bad arguments.
+ */
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
+{
+	struct diag_logger_t *logger = NULL;
+	int peripheral;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return -EINVAL;
+	if (!diag_mux)
+		return -EIO;
+
+	/* == NUM_PERIPHERALS is allowed: it denotes apps data. */
+	peripheral = GET_BUF_PERIPHERAL(ctx);
+	if (peripheral > NUM_PERIPHERALS)
+		return -EINVAL;
+
+	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+		logger = diag_mux->md_ptr;
+	else
+		logger = diag_mux->usb_ptr;
+
+	if (logger && logger->log_ops && logger->log_ops->write)
+		return logger->log_ops->write(proc, buf, len, ctx);
+	return 0;
+}
+
+/*
+ * diag_mux_close_peripheral - forward a peripheral close to the logger
+ * that owns it (per mux_mask). Returns the backend result, 0 when the
+ * logger has no close_peripheral op, or -EINVAL/-EIO on bad arguments.
+ */
+int diag_mux_close_peripheral(int proc, uint8_t peripheral)
+{
+	struct diag_logger_t *logger = NULL;
+
+	if (proc < 0 || proc >= NUM_MUX_PROC)
+		return -EINVAL;
+	/* Peripheral should account for Apps data as well */
+	if (peripheral > NUM_PERIPHERALS)
+		return -EINVAL;
+	if (!diag_mux)
+		return -EIO;
+
+	if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+		logger = diag_mux->md_ptr;
+	else
+		logger = diag_mux->logger;
+
+	if (logger && logger->log_ops && logger->log_ops->close_peripheral)
+		return logger->log_ops->close_peripheral(proc, peripheral);
+	return 0;
+}
+
+/*
+ * diag_mux_switch_logging - switch the mux between USB, memory-device
+ * and multi mode for the peripherals in *peripheral_mask.
+ *
+ * @req_mode:        in: requested mode; out: effective mode (upgraded to
+ *                   DIAG_MULTI_MODE when only some peripherals move)
+ * @peripheral_mask: in: peripherals to switch; out: resulting mux_mask
+ *
+ * Returns 0 on success, -EINVAL on bad arguments/mode, -EIO when the
+ * mux is not initialized. The previous code validated @req_mode but
+ * dereferenced @peripheral_mask and diag_mux without any check, unlike
+ * every other function in this file.
+ */
+int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
+{
+	unsigned int new_mask = 0;
+
+	if (!req_mode || !peripheral_mask)
+		return -EINVAL;
+	if (!diag_mux)
+		return -EIO;
+
+	if (*peripheral_mask <= 0 || *peripheral_mask > DIAG_CON_ALL) {
+		pr_err("diag: mask %d in %s\n", *peripheral_mask, __func__);
+		return -EINVAL;
+	}
+
+	switch (*req_mode) {
+	case DIAG_USB_MODE:
+		/* Clear the requested peripherals from the md mask. */
+		new_mask = ~(*peripheral_mask) & diag_mux->mux_mask;
+		if (new_mask != DIAG_CON_NONE)
+			*req_mode = DIAG_MULTI_MODE;
+		break;
+	case DIAG_MEMORY_DEVICE_MODE:
+		/* Add the requested peripherals to the md mask. */
+		new_mask = (*peripheral_mask) | diag_mux->mux_mask;
+		if (new_mask != DIAG_CON_ALL)
+			*req_mode = DIAG_MULTI_MODE;
+		break;
+	default:
+		pr_err("diag: Invalid mode %d in %s\n", *req_mode, __func__);
+		return -EINVAL;
+	}
+
+	/* Open/close the backends according to the mode transition. */
+	switch (diag_mux->mode) {
+	case DIAG_USB_MODE:
+		if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+			diag_mux->usb_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->md_ptr;
+			diag_mux->md_ptr->log_ops->open();
+		} else if (*req_mode == DIAG_MULTI_MODE) {
+			diag_mux->md_ptr->log_ops->open();
+			diag_mux->logger = NULL;
+		}
+		break;
+	case DIAG_MEMORY_DEVICE_MODE:
+		if (*req_mode == DIAG_USB_MODE) {
+			diag_mux->md_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->usb_ptr;
+			diag_mux->usb_ptr->log_ops->open();
+		} else if (*req_mode == DIAG_MULTI_MODE) {
+			diag_mux->usb_ptr->log_ops->open();
+			diag_mux->logger = NULL;
+		}
+		break;
+	case DIAG_MULTI_MODE:
+		if (*req_mode == DIAG_USB_MODE) {
+			diag_mux->md_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->usb_ptr;
+		} else if (*req_mode == DIAG_MEMORY_DEVICE_MODE) {
+			diag_mux->usb_ptr->log_ops->close();
+			diag_mux->logger = diag_mux->md_ptr;
+		}
+		break;
+	}
+	diag_mux->mode = *req_mode;
+	diag_mux->mux_mask = new_mask;
+	*peripheral_mask = new_mask;
+	return 0;
+}
diff --git a/drivers/char/diag/diag_mux.h b/drivers/char/diag/diag_mux.h
new file mode 100644
index 0000000..e1fcebb
--- /dev/null
+++ b/drivers/char/diag/diag_mux.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef DIAG_MUX_H
+#define DIAG_MUX_H
+#include "diagchar.h"
+
+/*
+ * Mux routing state. mux_mask is the bitmask of peripherals currently
+ * routed to the memory-device logger; logger is the active backend
+ * (NULL in multi mode, where routing is per-peripheral).
+ */
+struct diag_mux_state_t {
+	struct diag_logger_t *logger;
+	struct diag_logger_t *usb_ptr;
+	struct diag_logger_t *md_ptr;
+	unsigned int mux_mask;
+	unsigned int mode;
+};
+
+/* Callbacks a data producer registers per mux processor. */
+struct diag_mux_ops {
+	int (*open)(int id, int mode);
+	int (*close)(int id, int mode);
+	int (*read_done)(unsigned char *buf, int len, int id);
+	int (*write_done)(unsigned char *buf, int len, int buf_ctx,
+			      int id);
+};
+
+/* Logging modes. */
+#define DIAG_USB_MODE			0
+#define DIAG_MEMORY_DEVICE_MODE		1
+#define DIAG_NO_LOGGING_MODE		2
+#define DIAG_MULTI_MODE			3
+
+/* Mux processor indices: local plus optional bridge processors. */
+#define DIAG_MUX_LOCAL		0
+#define DIAG_MUX_LOCAL_LAST	1
+#define DIAG_MUX_BRIDGE_BASE	DIAG_MUX_LOCAL_LAST
+#define DIAG_MUX_MDM		(DIAG_MUX_BRIDGE_BASE)
+#define DIAG_MUX_MDM2		(DIAG_MUX_BRIDGE_BASE + 1)
+#define DIAG_MUX_SMUX		(DIAG_MUX_BRIDGE_BASE + 2)
+#define DIAG_MUX_BRIDGE_LAST	(DIAG_MUX_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MUX_PROC		DIAG_MUX_LOCAL_LAST
+#else
+#define NUM_MUX_PROC		DIAG_MUX_BRIDGE_LAST
+#endif
+
+/* Operations a logger backend (USB or memory device) implements. */
+struct diag_logger_ops {
+	void (*open)(void);
+	void (*close)(void);
+	int (*queue_read)(int id);
+	int (*write)(int id, unsigned char *buf, int len, int ctx);
+	int (*close_peripheral)(int id, uint8_t peripheral);
+};
+
+struct diag_logger_t {
+	int mode;
+	struct diag_mux_ops *ops[NUM_MUX_PROC];
+	struct diag_logger_ops *log_ops;
+};
+
+extern struct diag_mux_state_t *diag_mux;
+
+int diag_mux_init(void);
+void diag_mux_exit(void);
+int diag_mux_register(int proc, int ctx, struct diag_mux_ops *ops);
+int diag_mux_queue_read(int proc);
+int diag_mux_write(int proc, unsigned char *buf, int len, int ctx);
+int diag_mux_close_peripheral(int proc, uint8_t peripheral);
+int diag_mux_open_all(struct diag_logger_t *logger);
+int diag_mux_close_all(void);
+int diag_mux_switch_logging(int *new_mode, int *peripheral_mask);
+#endif
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
new file mode 100644
index 0000000..ac8a6d0
--- /dev/null
+++ b/drivers/char/diag/diag_usb.c
@@ -0,0 +1,684 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/kmemleak.h>
+#include <linux/list.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diag_usb.h"
+#include "diag_mux.h"
+#include "diagmem.h"
+#include "diag_ipc_logging.h"
+
+#define DIAG_USB_STRING_SZ	10
+#define DIAG_USB_MAX_SIZE	16384
+
+/*
+ * Static table of the USB channels Diag can log over.  The local
+ * (legacy) channel always exists; the MDM/MDM2/QSC bridge channels are
+ * compiled in only when CONFIG_DIAGFWD_BRIDGE_CODE is set.  Remaining
+ * fields (locks, lists, work items) are set up in diag_usb_register().
+ */
+struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV] = {
+	{
+		.id = DIAG_USB_LOCAL,
+		.name = DIAG_LEGACY,
+		.enabled = 0,
+		.mempool = POOL_TYPE_MUX_APPS,
+		.hdl = NULL,
+		.ops = NULL,
+		.read_buf = NULL,
+		.read_ptr = NULL,
+		.usb_wq = NULL,
+		.read_cnt = 0,
+		.write_cnt = 0,
+		.max_size = DIAG_USB_MAX_SIZE,
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = DIAG_USB_MDM,
+		.name = DIAG_MDM,
+		.enabled = 0,
+		.mempool = POOL_TYPE_MDM_MUX,
+		.hdl = NULL,
+		.ops = NULL,
+		.read_buf = NULL,
+		.read_ptr = NULL,
+		.usb_wq = NULL,
+		.read_cnt = 0,
+		.write_cnt = 0,
+		.max_size = DIAG_USB_MAX_SIZE,
+	},
+	{
+		.id = DIAG_USB_MDM2,
+		.name = DIAG_MDM2,
+		.enabled = 0,
+		.mempool = POOL_TYPE_MDM2_MUX,
+		.hdl = NULL,
+		.ops = NULL,
+		.read_buf = NULL,
+		.read_ptr = NULL,
+		.usb_wq = NULL,
+		.read_cnt = 0,
+		.write_cnt = 0,
+		.max_size = DIAG_USB_MAX_SIZE,
+	},
+	{
+		.id = DIAG_USB_QSC,
+		.name = DIAG_QSC,
+		.enabled = 0,
+		.mempool = POOL_TYPE_QSC_MUX,
+		.hdl = NULL,
+		.ops = NULL,
+		.read_buf = NULL,
+		.read_ptr = NULL,
+		.usb_wq = NULL,
+		.read_cnt = 0,
+		.write_cnt = 0,
+		.max_size = DIAG_USB_MAX_SIZE,
+	}
+#endif
+};
+
+/*
+ * Track an outstanding USB write for @buf.  If the buffer is already in
+ * the table only its reference count is bumped; otherwise a fresh entry
+ * with a single reference is appended.  Returns 0 on success, -ENOMEM
+ * if a new entry cannot be allocated.
+ */
+static int diag_usb_buf_tbl_add(struct diag_usb_info *usb_info,
+				unsigned char *buf, uint32_t len, int ctxt)
+{
+	struct diag_usb_buf_tbl_t *item = NULL;
+	struct list_head *pos, *next;
+
+	list_for_each_safe(pos, next, &usb_info->buf_tbl) {
+		item = list_entry(pos, struct diag_usb_buf_tbl_t, track);
+		if (item->buf != buf)
+			continue;
+		atomic_inc(&item->ref_count);
+		return 0;
+	}
+
+	/* First write for this buffer; create a new tracking entry */
+	item = kzalloc(sizeof(*item), GFP_ATOMIC);
+	if (!item)
+		return -ENOMEM;
+
+	item->buf = buf;
+	item->ctxt = ctxt;
+	item->len = len;
+	atomic_set(&item->ref_count, 1);
+	INIT_LIST_HEAD(&item->track);
+	list_add_tail(&item->track, &usb_info->buf_tbl);
+
+	return 0;
+}
+
+/*
+ * Drop one reference for @buf in the write tracking table.  When the
+ * last reference goes away the entry is unlinked and freed.  The
+ * previous version unlinked the entry but never freed it, leaking one
+ * diag_usb_buf_tbl_t for every failed usb_diag_write().
+ */
+static void diag_usb_buf_tbl_remove(struct diag_usb_info *usb_info,
+				    unsigned char *buf)
+{
+	struct list_head *start, *temp;
+	struct diag_usb_buf_tbl_t *entry = NULL;
+
+	list_for_each_safe(start, temp, &usb_info->buf_tbl) {
+		entry = list_entry(start, struct diag_usb_buf_tbl_t, track);
+		if (entry->buf == buf) {
+			DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+			atomic_dec(&entry->ref_count);
+			/*
+			 * Remove and free the entry once no outstanding
+			 * USB request references this buffer any more.
+			 */
+			if (atomic_read(&entry->ref_count) == 0) {
+				list_del(&entry->track);
+				kfree(entry);
+			}
+			break;
+		}
+	}
+}
+
+/*
+ * Look up the tracking entry for @buf and drop one reference on it.
+ * Returns the entry (still in the list) or NULL if @buf is not tracked.
+ * The caller inspects the remaining ref_count to decide whether the
+ * buffer is fully written out.
+ */
+static struct diag_usb_buf_tbl_t *diag_usb_buf_tbl_get(
+				struct diag_usb_info *usb_info,
+				unsigned char *buf)
+{
+	struct diag_usb_buf_tbl_t *item;
+	struct list_head *pos, *next;
+
+	list_for_each_safe(pos, next, &usb_info->buf_tbl) {
+		item = list_entry(pos, struct diag_usb_buf_tbl_t, track);
+		if (item->buf != buf)
+			continue;
+		DIAG_LOG(DIAG_DEBUG_MUX, "ref_count-- for %pK\n", buf);
+		atomic_dec(&item->ref_count);
+		return item;
+	}
+
+	return NULL;
+}
+
+/*
+ * This function is called asynchronously when USB is connected and
+ * synchronously when Diag wants to connect to USB explicitly.
+ */
+static void usb_connect(struct diag_usb_info *ch)
+{
+	int err = 0;
+	int num_write = 0;
+	int num_read = 1; /* Only one read buffer for any USB channel */
+
+	if (!ch || !atomic_read(&ch->connected))
+		return;
+
+	/* One USB write request per buffer in this channel's mempool */
+	num_write = diag_mempools[ch->mempool].poolsize;
+	err = usb_diag_alloc_req(ch->hdl, num_write, num_read);
+	if (err) {
+		pr_err("diag: Unable to allocate usb requests for %s, write: %d read: %d, err: %d\n",
+		       ch->name, num_write, num_read, err);
+		return;
+	}
+
+	/* Only notify the mux client when logging is actually in USB mode */
+	if (ch->ops && ch->ops->open) {
+		if (atomic_read(&ch->diag_state)) {
+			ch->ops->open(ch->ctxt, DIAG_USB_MODE);
+		} else {
+			/*
+			 * This case indicates that the USB is connected
+			 * but the logging is still happening in MEMORY
+			 * DEVICE MODE. Continue the logging without
+			 * resetting the buffers.
+			 */
+		}
+	}
+	/* As soon as we open the channel, queue a read */
+	queue_work(ch->usb_wq, &(ch->read_work));
+}
+
+/* Worker: complete a USB connect event in process context. */
+static void usb_connect_work_fn(struct work_struct *work)
+{
+	struct diag_usb_info *usb_info;
+
+	usb_info = container_of(work, struct diag_usb_info, connect_work);
+	usb_connect(usb_info);
+}
+
+/*
+ * This function is called asynchronously when USB is disconnected
+ * and synchronously when Diag wants to disconnect from USB
+ * explicitly.
+ */
+static void usb_disconnect(struct diag_usb_info *ch)
+{
+	if (!ch)
+		return;
+
+	/*
+	 * The cable is really gone (not a mode switch) while the driver
+	 * still believes USB is connected: clear the mask settings.
+	 */
+	if (!atomic_read(&ch->connected) && driver->usb_connected)
+		diag_clear_masks(NULL);
+
+	/* ch is known non-NULL here; the old extra check was redundant */
+	if (ch->ops && ch->ops->close)
+		ch->ops->close(ch->ctxt, DIAG_USB_MODE);
+}
+
+/* Worker: complete a USB disconnect event in process context. */
+static void usb_disconnect_work_fn(struct work_struct *work)
+{
+	struct diag_usb_info *usb_info;
+
+	usb_info = container_of(work, struct diag_usb_info, disconnect_work);
+	usb_disconnect(usb_info);
+}
+
+/*
+ * Worker that submits the single outstanding read request for a
+ * channel.  Bails out if the channel is disconnected, disabled, not in
+ * USB logging state, or a read is already pending.  ch->lock guards
+ * access to ch->read_ptr against the USB_DIAG_READ_DONE notifier path.
+ */
+static void usb_read_work_fn(struct work_struct *work)
+{
+	int err = 0;
+	unsigned long flags;
+	struct diag_request *req = NULL;
+	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+						read_work);
+	if (!ch)
+		return;
+
+	if (!atomic_read(&ch->connected) || !ch->enabled ||
+	    atomic_read(&ch->read_pending) || !atomic_read(&ch->diag_state)) {
+		pr_debug_ratelimited("diag: Discarding USB read, ch: %s e: %d, c: %d, p: %d, d: %d\n",
+				     ch->name, ch->enabled,
+				     atomic_read(&ch->connected),
+				     atomic_read(&ch->read_pending),
+				     atomic_read(&ch->diag_state));
+		return;
+	}
+
+	spin_lock_irqsave(&ch->lock, flags);
+	req = ch->read_ptr;
+	if (req) {
+		/* Mark the read in flight before handing it to the USB core */
+		atomic_set(&ch->read_pending, 1);
+		req->buf = ch->read_buf;
+		req->length = USB_MAX_OUT_BUF;
+		err = usb_diag_read(ch->hdl, req);
+		if (err) {
+			pr_debug("diag: In %s, error in reading from USB %s, err: %d\n",
+				 __func__, ch->name, err);
+			/* Submission failed: clear the flag and retry later */
+			atomic_set(&ch->read_pending, 0);
+			queue_work(ch->usb_wq, &(ch->read_work));
+		}
+	} else {
+		pr_err_ratelimited("diag: In %s invalid read req\n", __func__);
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+/*
+ * Worker that forwards a completed USB read to the mux client.  Guards
+ * against a NULL read_ptr: diag_usb_exit() frees and clears it, and the
+ * old code dereferenced req->status without checking.
+ */
+static void usb_read_done_work_fn(struct work_struct *work)
+{
+	struct diag_request *req = NULL;
+	struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
+						read_done_work);
+	if (!ch)
+		return;
+
+	/*
+	 * USB is disconnected/Disabled before the previous read completed.
+	 * Discard the packet and don't do any further processing.
+	 */
+	if (!atomic_read(&ch->connected) || !ch->enabled ||
+	    !atomic_read(&ch->diag_state))
+		return;
+
+	req = ch->read_ptr;
+	if (!req)
+		return;
+	ch->read_cnt++;
+
+	/* Only forward reads that completed without an error status */
+	if (ch->ops && ch->ops->read_done && req->status >= 0)
+		ch->ops->read_done(req->buf, req->actual, ch->ctxt);
+}
+
+/*
+ * Completion handler for one USB write request.  Drops a reference on
+ * the tracked buffer; only when the last chunk of a (possibly split)
+ * buffer completes is the buffer handed back to the mux layer through
+ * the write_done callback.
+ */
+static void diag_usb_write_done(struct diag_usb_info *ch,
+				struct diag_request *req)
+{
+	int ctxt = 0;
+	int len = 0;
+	struct diag_usb_buf_tbl_t *entry = NULL;
+	unsigned char *buf = NULL;
+	unsigned long flags;
+
+	if (!ch || !req)
+		return;
+
+	ch->write_cnt++;
+	entry = diag_usb_buf_tbl_get(ch, req->context);
+	if (!entry) {
+		pr_err_ratelimited("diag: In %s, unable to find entry %pK in the table\n",
+				   __func__, req->context);
+		return;
+	}
+	if (atomic_read(&entry->ref_count) != 0) {
+		/* Other chunks of this buffer are still in flight */
+		DIAG_LOG(DIAG_DEBUG_MUX, "partial write_done ref %d\n",
+			 atomic_read(&entry->ref_count));
+		diag_ws_on_copy_complete(DIAG_WS_MUX);
+		diagmem_free(driver, req, ch->mempool);
+		return;
+	}
+	spin_lock_irqsave(&ch->write_lock, flags);
+	list_del(&entry->track);
+	ctxt = entry->ctxt;
+	buf = entry->buf;
+	len = entry->len;
+	kfree(entry);
+	/*
+	 * Log after the entry has been read; the old code printed ctxt
+	 * before it was assigned and therefore always reported 0.
+	 */
+	DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %d\n",
+		 ctxt);
+	diag_ws_on_copy_complete(DIAG_WS_MUX);
+
+	if (ch->ops && ch->ops->write_done)
+		ch->ops->write_done(buf, len, ctxt, DIAG_USB_MODE);
+	buf = NULL;
+	len = 0;
+	ctxt = 0;
+	spin_unlock_irqrestore(&ch->write_lock, flags);
+	diagmem_free(driver, req, ch->mempool);
+}
+
+/*
+ * Event callback registered with the USB diag core (via usb_diag_open).
+ * @priv carries the channel id.  Connect/disconnect and read completion
+ * are deferred to the channel workqueue; write completion is handled
+ * inline.
+ */
+static void diag_usb_notifier(void *priv, unsigned int event,
+			      struct diag_request *d_req)
+{
+	int id = 0;
+	unsigned long flags;
+	struct diag_usb_info *usb_info = NULL;
+
+	id = (int)(uintptr_t)priv;
+	if (id < 0 || id >= NUM_DIAG_USB_DEV)
+		return;
+	usb_info = &diag_usb[id];
+
+	switch (event) {
+	case USB_DIAG_CONNECT:
+		/* Refresh max_size; the host may negotiate a new limit */
+		usb_info->max_size = usb_diag_request_size(usb_info->hdl);
+		atomic_set(&usb_info->connected, 1);
+		pr_info("diag: USB channel %s connected\n", usb_info->name);
+		queue_work(usb_info->usb_wq,
+			   &usb_info->connect_work);
+		break;
+	case USB_DIAG_DISCONNECT:
+		atomic_set(&usb_info->connected, 0);
+		pr_info("diag: USB channel %s disconnected\n", usb_info->name);
+		queue_work(usb_info->usb_wq,
+			   &usb_info->disconnect_work);
+		break;
+	case USB_DIAG_READ_DONE:
+		/* lock pairs with usb_read_work_fn's access to read_ptr */
+		spin_lock_irqsave(&usb_info->lock, flags);
+		usb_info->read_ptr = d_req;
+		spin_unlock_irqrestore(&usb_info->lock, flags);
+		atomic_set(&usb_info->read_pending, 0);
+		queue_work(usb_info->usb_wq,
+			   &usb_info->read_done_work);
+		break;
+	case USB_DIAG_WRITE_DONE:
+		diag_usb_write_done(usb_info, d_req);
+		break;
+	default:
+		pr_err_ratelimited("diag: Unknown event from USB diag\n");
+		break;
+	}
+}
+
+/*
+ * Queue deferred read work for USB channel @id.
+ * Returns 0 on success or -EINVAL for an out-of-range id.
+ */
+int diag_usb_queue_read(int id)
+{
+	struct diag_usb_info *ch;
+
+	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+		pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
+				   __func__, id);
+		return -EINVAL;
+	}
+
+	ch = &diag_usb[id];
+	queue_work(ch->usb_wq, &ch->read_work);
+	return 0;
+}
+
+/*
+ * Split a buffer larger than the channel's max_size into several USB
+ * write requests.  One tracking-table reference is taken per chunk, so
+ * diag_usb_write_done() reports the buffer done only after all chunks
+ * complete.  The whole loop runs under write_lock.  Returns 0 on
+ * success or a negative errno.
+ */
+static int diag_usb_write_ext(struct diag_usb_info *usb_info,
+			      unsigned char *buf, int len, int ctxt)
+{
+	int err = 0;
+	int write_len = 0;
+	int bytes_remaining = len;
+	int offset = 0;
+	unsigned long flags;
+	struct diag_request *req = NULL;
+
+	if (!usb_info || !buf || len <= 0) {
+		pr_err_ratelimited("diag: In %s, usb_info: %pK buf: %pK, len: %d\n",
+				   __func__, usb_info, buf, len);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&usb_info->write_lock, flags);
+	while (bytes_remaining > 0) {
+		req = diagmem_alloc(driver, sizeof(struct diag_request),
+				    usb_info->mempool);
+		if (!req) {
+			/*
+			 * This should never happen. It either means that we are
+			 * trying to write more buffers than the max supported
+			 * by this particular diag USB channel at any given
+			 * instance, or the previous write ptrs are stuck in
+			 * the USB layer.
+			 */
+			pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
+					   __func__, usb_info->name);
+			spin_unlock_irqrestore(&usb_info->write_lock, flags);
+			return -ENOMEM;
+		}
+
+		write_len = (bytes_remaining > usb_info->max_size) ?
+				usb_info->max_size : (bytes_remaining);
+
+		req->buf = buf + offset;
+		req->length = write_len;
+		/* context identifies the parent buffer on completion */
+		req->context = (void *)buf;
+
+		if (!usb_info->hdl || !atomic_read(&usb_info->connected) ||
+		    !atomic_read(&usb_info->diag_state)) {
+			pr_debug_ratelimited("diag: USB ch %s is not connected\n",
+					     usb_info->name);
+			diagmem_free(driver, req, usb_info->mempool);
+			spin_unlock_irqrestore(&usb_info->write_lock, flags);
+			return -ENODEV;
+		}
+
+		/* One reference per chunk for this buffer */
+		if (diag_usb_buf_tbl_add(usb_info, buf, len, ctxt)) {
+			diagmem_free(driver, req, usb_info->mempool);
+			spin_unlock_irqrestore(&usb_info->write_lock, flags);
+			return -ENOMEM;
+		}
+
+		diag_ws_on_read(DIAG_WS_MUX, len);
+		err = usb_diag_write(usb_info->hdl, req);
+		diag_ws_on_copy(DIAG_WS_MUX);
+		if (err) {
+			pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
+					   __func__, usb_info->name, err);
+			DIAG_LOG(DIAG_DEBUG_MUX,
+				 "ERR! unable to write t usb, err: %d\n", err);
+			diag_ws_on_copy_fail(DIAG_WS_MUX);
+			diag_usb_buf_tbl_remove(usb_info, buf);
+			diagmem_free(driver, req, usb_info->mempool);
+			spin_unlock_irqrestore(&usb_info->write_lock, flags);
+			return err;
+		}
+		offset += write_len;
+		bytes_remaining -= write_len;
+		DIAG_LOG(DIAG_DEBUG_MUX,
+			 "bytes_remaining: %d write_len: %d, len: %d\n",
+			 bytes_remaining, write_len, len);
+	}
+	DIAG_LOG(DIAG_DEBUG_MUX, "done writing!");
+	spin_unlock_irqrestore(&usb_info->write_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Write @len bytes at @buf to USB channel @id.  Buffers larger than the
+ * channel's max_size are delegated to diag_usb_write_ext().  On success
+ * the buffer is tracked until the USB stack signals write completion.
+ * Returns 0 on success or a negative errno.
+ */
+int diag_usb_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	int err = 0;
+	struct diag_request *req = NULL;
+	struct diag_usb_info *usb_info = NULL;
+	unsigned long flags;
+
+	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+		pr_err_ratelimited("diag: In %s, Incorrect id %d\n",
+				   __func__, id);
+		return -EINVAL;
+	}
+
+	usb_info = &diag_usb[id];
+
+	if (len > usb_info->max_size) {
+		DIAG_LOG(DIAG_DEBUG_MUX, "len: %d, max_size: %d\n",
+			 len, usb_info->max_size);
+		return diag_usb_write_ext(usb_info, buf, len, ctxt);
+	}
+
+	req = diagmem_alloc(driver, sizeof(struct diag_request),
+			    usb_info->mempool);
+	if (!req) {
+		/*
+		 * This should never happen. It either means that we are
+		 * trying to write more buffers than the max supported by
+		 * this particular diag USB channel at any given instance,
+		 * or the previous write ptrs are stuck in the USB layer.
+		 */
+		pr_err_ratelimited("diag: In %s, cannot retrieve USB write ptrs for USB channel %s\n",
+				   __func__, usb_info->name);
+		return -ENOMEM;
+	}
+
+	req->buf = buf;
+	req->length = len;
+	/* context identifies the buffer when the completion fires */
+	req->context = (void *)buf;
+
+	if (!usb_info->hdl || !atomic_read(&usb_info->connected) ||
+	    !atomic_read(&usb_info->diag_state)) {
+		pr_debug_ratelimited("diag: USB ch %s is not connected\n",
+				     usb_info->name);
+		diagmem_free(driver, req, usb_info->mempool);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&usb_info->write_lock, flags);
+	if (diag_usb_buf_tbl_add(usb_info, buf, len, ctxt)) {
+		DIAG_LOG(DIAG_DEBUG_MUX,
+					"ERR! unable to add buf %pK to table\n",
+			 buf);
+		diagmem_free(driver, req, usb_info->mempool);
+		spin_unlock_irqrestore(&usb_info->write_lock, flags);
+		return -ENOMEM;
+	}
+
+	diag_ws_on_read(DIAG_WS_MUX, len);
+	err = usb_diag_write(usb_info->hdl, req);
+	diag_ws_on_copy(DIAG_WS_MUX);
+	if (err) {
+		pr_err_ratelimited("diag: In %s, error writing to usb channel %s, err: %d\n",
+				   __func__, usb_info->name, err);
+		diag_ws_on_copy_fail(DIAG_WS_MUX);
+		DIAG_LOG(DIAG_DEBUG_MUX,
+			 "ERR! unable to write t usb, err: %d\n", err);
+		/* Roll back the tracking reference taken above */
+		diag_usb_buf_tbl_remove(usb_info, buf);
+		diagmem_free(driver, req, usb_info->mempool);
+	}
+	spin_unlock_irqrestore(&usb_info->write_lock, flags);
+
+	return err;
+}
+
+/*
+ * This function performs USB connect operations for Diag synchronously. It
+ * doesn't translate to an actual USB connect. This is used when Diag
+ * switches logging to USB mode and wants to mimic a USB connection.
+ */
+void diag_usb_connect_all(void)
+{
+	int dev;
+	struct diag_usb_info *ch;
+
+	for (dev = 0; dev < NUM_DIAG_USB_DEV; dev++) {
+		ch = &diag_usb[dev];
+		if (!ch->enabled)
+			continue;
+		atomic_set(&ch->diag_state, 1);
+		usb_connect(ch);
+	}
+}
+
+/*
+ * This function performs USB disconnect operations for Diag synchronously.
+ * It doesn't translate to an actual USB disconnect. This is used when Diag
+ * switches logging away from USB mode and wants to mimic a USB disconnect.
+ */
+void diag_usb_disconnect_all(void)
+{
+	int dev;
+	struct diag_usb_info *ch;
+
+	for (dev = 0; dev < NUM_DIAG_USB_DEV; dev++) {
+		ch = &diag_usb[dev];
+		if (!ch->enabled)
+			continue;
+		atomic_set(&ch->diag_state, 0);
+		usb_disconnect(ch);
+	}
+}
+
+/*
+ * Register the mux layer with USB channel @id and bring the channel up:
+ * allocate the read buffer/request, create the workqueue and open the
+ * USB diag handle.  Returns 0 on success, -EIO for bad arguments or
+ * -ENOMEM on any setup failure.
+ */
+int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops)
+{
+	struct diag_usb_info *ch = NULL;
+	unsigned char wq_name[DIAG_USB_NAME_SZ + DIAG_USB_STRING_SZ];
+
+	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+		pr_err("diag: Unable to register with USB, id: %d\n", id);
+		return -EIO;
+	}
+
+	if (!ops) {
+		pr_err("diag: Invalid operations for USB\n");
+		return -EIO;
+	}
+
+	ch = &diag_usb[id];
+	ch->ops = ops;
+	ch->ctxt = ctxt;
+	spin_lock_init(&ch->lock);
+	spin_lock_init(&ch->write_lock);
+	ch->read_buf = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL);
+	if (!ch->read_buf)
+		goto err;
+	ch->read_ptr = kzalloc(sizeof(struct diag_request), GFP_KERNEL);
+	if (!ch->read_ptr)
+		goto err;
+	atomic_set(&ch->connected, 0);
+	atomic_set(&ch->read_pending, 0);
+	/*
+	 * This function is called when the mux registers with Diag-USB.
+	 * The registration happens during boot up and Diag always starts
+	 * in USB mode. Set the state to 1.
+	 */
+	atomic_set(&ch->diag_state, 1);
+	INIT_LIST_HEAD(&ch->buf_tbl);
+	diagmem_init(driver, ch->mempool);
+	INIT_WORK(&(ch->read_work), usb_read_work_fn);
+	INIT_WORK(&(ch->read_done_work), usb_read_done_work_fn);
+	INIT_WORK(&(ch->connect_work), usb_connect_work_fn);
+	INIT_WORK(&(ch->disconnect_work), usb_disconnect_work_fn);
+	/*
+	 * Bound both string operations by the full destination size; the
+	 * old code used DIAG_USB_STRING_SZ and sizeof(ch->name), which
+	 * silently truncated the workqueue name against the wrong limits.
+	 */
+	strlcpy(wq_name, "DIAG_USB_", sizeof(wq_name));
+	strlcat(wq_name, ch->name, sizeof(wq_name));
+	ch->usb_wq = create_singlethread_workqueue(wq_name);
+	if (!ch->usb_wq)
+		goto err;
+	ch->hdl = usb_diag_open(ch->name, (void *)(uintptr_t)id,
+				diag_usb_notifier);
+	if (IS_ERR(ch->hdl)) {
+		pr_err("diag: Unable to open USB channel %s\n", ch->name);
+		/* Don't leave an ERR_PTR behind for diag_usb_exit() */
+		ch->hdl = NULL;
+		goto err;
+	}
+	ch->enabled = 1;
+	pr_debug("diag: Successfully registered USB %s\n", ch->name);
+	return 0;
+
+err:
+	/* Clear the pointers so a later diag_usb_exit() can't double free */
+	if (ch->usb_wq)
+		destroy_workqueue(ch->usb_wq);
+	ch->usb_wq = NULL;
+	kfree(ch->read_ptr);
+	ch->read_ptr = NULL;
+	kfree(ch->read_buf);
+	ch->read_buf = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * Tear down USB channel @id: reset its state, drain the mempool, close
+ * the USB handle, destroy the workqueue and free the read buffers.
+ * Counterpart of diag_usb_register().
+ */
+void diag_usb_exit(int id)
+{
+	struct diag_usb_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_DIAG_USB_DEV) {
+		pr_err("diag: In %s, incorrect id %d\n", __func__, id);
+		return;
+	}
+
+	ch = &diag_usb[id];
+	ch->ops = NULL;
+	atomic_set(&ch->connected, 0);
+	atomic_set(&ch->read_pending, 0);
+	atomic_set(&ch->diag_state, 0);
+	ch->enabled = 0;
+	ch->ctxt = 0;
+	ch->read_cnt = 0;
+	ch->write_cnt = 0;
+	diagmem_exit(driver, ch->mempool);
+	ch->mempool = 0;
+	if (ch->hdl) {
+		usb_diag_close(ch->hdl);
+		ch->hdl = NULL;
+	}
+	if (ch->usb_wq)
+		destroy_workqueue(ch->usb_wq);
+	kfree(ch->read_ptr);
+	ch->read_ptr = NULL;
+	kfree(ch->read_buf);
+	ch->read_buf = NULL;
+}
+
diff --git a/drivers/char/diag/diag_usb.h b/drivers/char/diag/diag_usb.h
new file mode 100644
index 0000000..62ed7b3
--- /dev/null
+++ b/drivers/char/diag/diag_usb.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGUSB_H
+#define DIAGUSB_H
+
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include "diagchar.h"
+#include "diag_mux.h"
+
+#define DIAG_USB_LOCAL		0
+#define DIAG_USB_LOCAL_LAST	1
+#define DIAG_USB_BRIDGE_BASE	DIAG_USB_LOCAL_LAST
+#define DIAG_USB_MDM		(DIAG_USB_BRIDGE_BASE)
+#define DIAG_USB_MDM2		(DIAG_USB_BRIDGE_BASE + 1)
+#define DIAG_USB_QSC		(DIAG_USB_BRIDGE_BASE + 2)
+#define DIAG_USB_BRIDGE_LAST	(DIAG_USB_BRIDGE_BASE + 3)
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_DIAG_USB_DEV	DIAG_USB_LOCAL_LAST
+#else
+#define NUM_DIAG_USB_DEV	DIAG_USB_BRIDGE_LAST
+#endif
+
+#define DIAG_USB_NAME_SZ	24
+#define DIAG_USB_GET_NAME(x)	(diag_usb[x].name)
+
+#define DIAG_USB_MODE		0
+
+/*
+ * Tracking entry for a buffer handed to the USB stack for writing.
+ * ref_count counts outstanding USB requests referencing buf (a large
+ * buffer may be split into several requests by diag_usb_write_ext()).
+ */
+struct diag_usb_buf_tbl_t {
+	struct list_head track;		/* node in diag_usb_info.buf_tbl */
+	unsigned char *buf;
+	uint32_t len;
+	atomic_t ref_count;
+	int ctxt;			/* caller context, echoed on write_done */
+};
+
+/* Per-channel state for one Diag-over-USB connection. */
+struct diag_usb_info {
+	int id;				/* DIAG_USB_* channel index */
+	int ctxt;			/* context handed back in mux ops */
+	char name[DIAG_USB_NAME_SZ];
+	atomic_t connected;		/* USB cable/enumeration state */
+	atomic_t diag_state;		/* 1 while logging over USB */
+	atomic_t read_pending;		/* a read request is in flight */
+	int enabled;
+	int mempool;			/* POOL_TYPE_* for write requests */
+	int max_size;			/* max bytes per USB request */
+	struct list_head buf_tbl;	/* outstanding write buffers */
+	unsigned long read_cnt;
+	unsigned long write_cnt;
+	spinlock_t lock;		/* protects read_ptr */
+	spinlock_t write_lock;		/* protects buf_tbl */
+	struct usb_diag_ch *hdl;	/* handle from usb_diag_open() */
+	struct diag_mux_ops *ops;
+	unsigned char *read_buf;
+	struct diag_request *read_ptr;
+	struct work_struct read_work;
+	struct work_struct read_done_work;
+	struct work_struct connect_work;
+	struct work_struct disconnect_work;
+	struct workqueue_struct *usb_wq;
+};
+
+#ifdef CONFIG_DIAG_OVER_USB
+extern struct diag_usb_info diag_usb[NUM_DIAG_USB_DEV];
+int diag_usb_register(int id, int ctxt, struct diag_mux_ops *ops);
+int diag_usb_queue_read(int id);
+int diag_usb_write(int id, unsigned char *buf, int len, int ctxt);
+void diag_usb_connect_all(void);
+void diag_usb_disconnect_all(void);
+void diag_usb_exit(int id);
+#else
+/*
+ * No-op stubs for builds without CONFIG_DIAG_OVER_USB.  These must be
+ * static inline: plain definitions in a header are emitted in every
+ * translation unit that includes it and cause multiple-definition
+ * errors at link time.
+ */
+static inline int diag_usb_register(int id, int ctxt,
+				    struct diag_mux_ops *ops)
+{
+	return 0;
+}
+static inline int diag_usb_queue_read(int id)
+{
+	return 0;
+}
+static inline int diag_usb_write(int id, unsigned char *buf, int len,
+				 int ctxt)
+{
+	return 0;
+}
+static inline void diag_usb_connect_all(void)
+{
+}
+static inline void diag_usb_disconnect_all(void)
+{
+}
+static inline void diag_usb_exit(int id)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
new file mode 100644
index 0000000..768eb62
--- /dev/null
+++ b/drivers/char/diag/diagchar.h
@@ -0,0 +1,638 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_H
+#define DIAGCHAR_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wakelock.h>
+#include <linux/atomic.h>
+#include "diagfwd_bridge.h"
+
+/* Size of the USB buffers used for read and write*/
+#define USB_MAX_OUT_BUF 4096
+#define APPS_BUF_SIZE	4096
+#define IN_BUF_SIZE		16384
+#define MAX_SYNC_OBJ_NAME_SIZE	32
+
+#define DIAG_MAX_REQ_SIZE	(16 * 1024)
+#define DIAG_MAX_RSP_SIZE	(16 * 1024)
+#define APF_DIAG_PADDING	256
+/*
+ * In the worst case, the HDLC buffer can be atmost twice the size of the
+ * original packet. Add 3 bytes for 16 bit CRC (2 bytes) and a delimiter
+ * (1 byte)
+ */
+#define DIAG_MAX_HDLC_BUF_SIZE	((DIAG_MAX_REQ_SIZE * 2) + 3)
+
+/* The header of callback data type has remote processor token (of type int) */
+#define CALLBACK_HDR_SIZE	(sizeof(int))
+#define CALLBACK_BUF_SIZE	(DIAG_MAX_REQ_SIZE + CALLBACK_HDR_SIZE)
+
+#define MAX_SSID_PER_RANGE	200
+
+#define ALL_PROC		-1
+
+#define REMOTE_DATA		4
+
+#define USER_SPACE_DATA		16384
+
+#define DIAG_CTRL_MSG_LOG_MASK	9
+#define DIAG_CTRL_MSG_EVENT_MASK	10
+#define DIAG_CTRL_MSG_F3_MASK	11
+#define CONTROL_CHAR	0x7E
+
+#define DIAG_CON_APSS		(0x0001)	/* Bit mask for APSS */
+#define DIAG_CON_MPSS		(0x0002)	/* Bit mask for MPSS */
+#define DIAG_CON_LPASS		(0x0004)	/* Bit mask for LPASS */
+#define DIAG_CON_WCNSS		(0x0008)	/* Bit mask for WCNSS */
+#define DIAG_CON_SENSORS	(0x0010)	/* Bit mask for Sensors */
+#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
+
+#define DIAG_CON_NONE		(0x0000)	/* Bit mask for No SS*/
+#define DIAG_CON_ALL		(DIAG_CON_APSS | DIAG_CON_MPSS \
+				| DIAG_CON_LPASS | DIAG_CON_WCNSS \
+				| DIAG_CON_SENSORS | DIAG_CON_WDSP)
+
+#define DIAG_STM_MODEM	0x01
+#define DIAG_STM_LPASS	0x02
+#define DIAG_STM_WCNSS	0x04
+#define DIAG_STM_APPS	0x08
+#define DIAG_STM_SENSORS 0x10
+#define DIAG_STM_WDSP 0x20
+
+#define INVALID_PID		-1
+#define DIAG_CMD_FOUND		1
+#define DIAG_CMD_NOT_FOUND	0
+#define DIAG_CMD_POLLING	1
+#define DIAG_CMD_NOT_POLLING	0
+#define DIAG_CMD_ADD		1
+#define DIAG_CMD_REMOVE		0
+
+#define DIAG_CMD_VERSION	0
+#define DIAG_CMD_ERROR		0x13
+#define DIAG_CMD_DOWNLOAD	0x3A
+#define DIAG_CMD_DIAG_SUBSYS	0x4B
+#define DIAG_CMD_LOG_CONFIG	0x73
+#define DIAG_CMD_LOG_ON_DMND	0x78
+#define DIAG_CMD_EXT_BUILD	0x7c
+#define DIAG_CMD_MSG_CONFIG	0x7D
+#define DIAG_CMD_GET_EVENT_MASK	0x81
+#define DIAG_CMD_SET_EVENT_MASK	0x82
+#define DIAG_CMD_EVENT_TOGGLE	0x60
+#define DIAG_CMD_NO_SUBSYS	0xFF
+#define DIAG_CMD_STATUS	0x0C
+#define DIAG_SS_WCDMA	0x04
+#define DIAG_CMD_QUERY_CALL	0x0E
+#define DIAG_SS_GSM	0x08
+#define DIAG_CMD_QUERY_TMC	0x02
+#define DIAG_SS_TDSCDMA	0x57
+#define DIAG_CMD_TDSCDMA_STATUS	0x0E
+#define DIAG_CMD_DIAG_SUBSYS_DELAY 0x80
+
+#define DIAG_SS_DIAG		0x12
+#define DIAG_SS_PARAMS		0x32
+#define DIAG_SS_FILE_READ_MODEM 0x0816
+#define DIAG_SS_FILE_READ_ADSP  0x0E10
+#define DIAG_SS_FILE_READ_WCNSS 0x141F
+#define DIAG_SS_FILE_READ_SLPI 0x01A18
+#define DIAG_SS_FILE_READ_APPS 0x020F
+
+#define DIAG_DIAG_MAX_PKT_SZ	0x55
+#define DIAG_DIAG_STM		0x214
+#define DIAG_DIAG_POLL		0x03
+#define DIAG_DEL_RSP_WRAP	0x04
+#define DIAG_DEL_RSP_WRAP_CNT	0x05
+#define DIAG_EXT_MOBILE_ID	0x06
+#define DIAG_GET_TIME_API	0x21B
+#define DIAG_SET_TIME_API	0x21C
+#define DIAG_SWITCH_COMMAND	0x081B
+#define DIAG_BUFFERING_MODE	0x080C
+
+#define DIAG_CMD_OP_LOG_DISABLE		0
+#define DIAG_CMD_OP_GET_LOG_RANGE	1
+#define DIAG_CMD_OP_SET_LOG_MASK	3
+#define DIAG_CMD_OP_GET_LOG_MASK	4
+
+#define DIAG_CMD_OP_GET_SSID_RANGE	1
+#define DIAG_CMD_OP_GET_BUILD_MASK	2
+#define DIAG_CMD_OP_GET_MSG_MASK	3
+#define DIAG_CMD_OP_SET_MSG_MASK	4
+#define DIAG_CMD_OP_SET_ALL_MSG_MASK	5
+
+#define DIAG_CMD_OP_GET_MSG_ALLOC       0x33
+#define DIAG_CMD_OP_GET_MSG_DROP	0x30
+#define DIAG_CMD_OP_RESET_MSG_STATS	0x2F
+#define DIAG_CMD_OP_GET_LOG_ALLOC	0x31
+#define DIAG_CMD_OP_GET_LOG_DROP	0x2C
+#define DIAG_CMD_OP_RESET_LOG_STATS	0x2B
+#define DIAG_CMD_OP_GET_EVENT_ALLOC	0x32
+#define DIAG_CMD_OP_GET_EVENT_DROP	0x2E
+#define DIAG_CMD_OP_RESET_EVENT_STATS	0x2D
+
+#define DIAG_CMD_OP_HDLC_DISABLE	0x218
+
+#define BAD_PARAM_RESPONSE_MESSAGE 20
+
+#define PERSIST_TIME_SUCCESS 0
+#define PERSIST_TIME_FAILURE 1
+#define PERSIST_TIME_NOT_SUPPORTED 2
+
+#define MODE_CMD	41
+#define RESET_ID	2
+
+#define PKT_DROP	0
+#define PKT_ALLOC	1
+#define PKT_RESET	2
+
+#define FEATURE_MASK_LEN	2
+
+#define DIAG_MD_NONE			0
+#define DIAG_MD_PERIPHERAL		1
+
+/*
+ * The status bit masks when received in a signal handler are to be
+ * used in conjunction with the peripheral list bit mask to determine the
+ * status for a peripheral. For instance, 0x00010002 would denote an open
+ * status on the MPSS
+ */
+#define DIAG_STATUS_OPEN (0x00010000)	/* DCI channel open status mask   */
+#define DIAG_STATUS_CLOSED (0x00020000)	/* DCI channel closed status mask */
+
+#define MODE_NONREALTIME	0
+#define MODE_REALTIME		1
+#define MODE_UNKNOWN		2
+
+#define DIAG_BUFFERING_MODE_STREAMING	0
+#define DIAG_BUFFERING_MODE_THRESHOLD	1
+#define DIAG_BUFFERING_MODE_CIRCULAR	2
+
+#define DIAG_MIN_WM_VAL		0
+#define DIAG_MAX_WM_VAL		100
+
+#define DEFAULT_LOW_WM_VAL	15
+#define DEFAULT_HIGH_WM_VAL	85
+
+#define TYPE_DATA		0
+#define TYPE_CNTL		1
+#define TYPE_DCI		2
+#define TYPE_CMD		3
+#define TYPE_DCI_CMD		4
+#define NUM_TYPES		5
+
+#define PERIPHERAL_MODEM	0
+#define PERIPHERAL_LPASS	1
+#define PERIPHERAL_WCNSS	2
+#define PERIPHERAL_SENSORS	3
+#define PERIPHERAL_WDSP		4
+#define NUM_PERIPHERALS		5
+#define APPS_DATA		(NUM_PERIPHERALS)
+
+/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
+#define NUM_MD_SESSIONS		(NUM_PERIPHERALS + 1)
+
+#define MD_PERIPHERAL_MASK(x)	(1 << x)
+
+/*
+ * Number of stm processors includes all the peripherals and
+ * apps.Added 1 below to indicate apps
+ */
+#define NUM_STM_PROCESSORS	(NUM_PERIPHERALS + 1)
+/*
+ * Indicates number of peripherals that can support DCI and Apps
+ * processor. This doesn't mean that a peripheral has the
+ * feature.
+ */
+#define NUM_DCI_PERIPHERALS	(NUM_PERIPHERALS + 1)
+
+#define DIAG_PROC_DCI			1
+#define DIAG_PROC_MEMORY_DEVICE		2
+
+/* Flags to vote the DCI or Memory device process up or down
+ * when it becomes active or inactive.
+ */
+#define VOTE_DOWN			0
+#define VOTE_UP				1
+
+#define DIAG_TS_SIZE	50
+
+#define DIAG_MDM_BUF_SIZE	2048
+/* The Maximum request size is 2k + DCI header + footer (6 bytes) */
+#define DIAG_MDM_DCI_BUF_SIZE	(2048 + 6)
+
+#define DIAG_LOCAL_PROC	0
+
+#ifndef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Local Processor only */
+#define DIAG_NUM_PROC	1
+#else
+/* Local Processor + Remote Devices */
+#define DIAG_NUM_PROC	(1 + NUM_REMOTE_DEV)
+#endif
+
+#define DIAG_WS_DCI		0
+#define DIAG_WS_MUX		1
+
+#define DIAG_DATA_TYPE		1
+#define DIAG_CNTL_TYPE		2
+#define DIAG_DCI_TYPE		3
+
+/* List of remote processor supported */
+enum remote_procs {
+	MDM = 1,
+	MDM2 = 2,
+	QSC = 5,
+};
+
+/* Common 4-byte diag packet header: cmd code, subsystem, subsystem cmd. */
+struct diag_pkt_header_t {
+	uint8_t cmd_code;
+	uint8_t subsys_id;
+	uint16_t subsys_cmd_code;
+} __packed;
+
+/* Response for the extended mobile ID command (DIAG_EXT_MOBILE_ID). */
+struct diag_cmd_ext_mobile_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t padding[3];
+	uint32_t family;
+	uint32_t chip_id;
+} __packed;
+
+/* Request to query the time API in use (DIAG_GET_TIME_API). */
+struct diag_cmd_time_sync_query_req_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+};
+
+/* Response to the time API query. */
+struct diag_cmd_time_sync_query_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t time_api;
+};
+
+/* Request to switch the time API, optionally persisting the choice. */
+struct diag_cmd_time_sync_switch_req_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t time_api;
+	uint8_t persist_time;
+};
+
+/* Response to a time API switch request. */
+struct diag_cmd_time_sync_switch_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t version;
+	uint8_t time_api;
+	uint8_t time_api_status;
+	uint8_t persist_time_status;	/* PERSIST_TIME_* value */
+};
+
+/* One registered command-code range ([cmd_code_lo, cmd_code_hi]). */
+struct diag_cmd_reg_entry_t {
+	uint16_t cmd_code;
+	uint16_t subsys_id;
+	uint16_t cmd_code_lo;
+	uint16_t cmd_code_hi;
+} __packed;
+
+/* A registered command entry with its owning processor and client pid. */
+struct diag_cmd_reg_t {
+	struct list_head link;
+	struct diag_cmd_reg_entry_t entry;
+	uint8_t proc;
+	int pid;
+};
+
+/*
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of entries in the bind
+ * @entries: the actual packet registrations
+ */
+struct diag_cmd_reg_tbl_t {
+	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+	uint32_t count;
+	struct diag_cmd_reg_entry_t *entries;
+};
+
+struct diag_client_map {
+	char name[20];
+	int pid;
+};
+
+struct real_time_vote_t {
+	int client_id;
+	uint16_t proc;
+	uint8_t real_time_vote;
+} __packed;
+
+struct real_time_query_t {
+	int real_time;
+	int proc;
+} __packed;
+
+struct diag_buffering_mode_t {
+	uint8_t peripheral;
+	uint8_t mode;
+	uint8_t high_wm_val;
+	uint8_t low_wm_val;
+} __packed;
+
+struct diag_callback_reg_t {
+	int proc;
+} __packed;
+
+struct diag_ws_ref_t {
+	int ref_count;
+	int copy_count;
+	spinlock_t lock;
+};
+
+/* This structure is defined in USB header file */
+#ifndef CONFIG_DIAG_OVER_USB
+struct diag_request {
+	char *buf;
+	int length;
+	int actual;
+	int status;
+	void *context;
+};
+#endif
+
+/* Allocation/drop counters for one packet type (msg/log/event). */
+struct diag_pkt_stats_t {
+	uint32_t alloc_count;
+	uint32_t drop_count;
+};
+
+/* Response carrying a single 32-bit stats payload. */
+struct diag_cmd_stats_rsp_t {
+	struct diag_pkt_header_t header;
+	uint32_t payload;
+};
+
+/* Response to an HDLC-disable (non-HDLC framing) request. */
+struct diag_cmd_hdlc_disable_rsp_t {
+	struct diag_pkt_header_t header;
+	uint8_t framing_version;
+	uint8_t result;
+};
+
+/* Non-HDLC frame header: start byte, framing version, payload length. */
+struct diag_pkt_frame_t {
+	uint8_t start;
+	uint8_t version;
+	uint16_t length;
+};
+
+/* Reassembly state for a request packet arriving in multiple chunks. */
+struct diag_partial_pkt_t {
+	uint32_t total_len;
+	uint32_t read_len;
+	uint32_t remaining;
+	uint32_t capacity;
+	uint8_t processing;
+	unsigned char *data;
+} __packed;
+
+/* User-supplied parameters for a logging-mode switch request. */
+struct diag_logging_mode_param_t {
+	uint32_t req_mode;
+	uint32_t peripheral_mask;
+	uint8_t mode_param;
+} __packed;
+
+/*
+ * Per-process memory-device (ODL) session state, including private
+ * mask copies that shadow the global masks for this session.
+ */
+struct diag_md_session_t {
+	int pid;
+	int peripheral_mask;
+	uint8_t hdlc_disabled;
+	struct timer_list hdlc_reset_timer;
+	struct diag_mask_info *msg_mask;
+	struct diag_mask_info *log_mask;
+	struct diag_mask_info *event_mask;
+	struct task_struct *task;
+};
+
+/*
+ * High level structure for storing Diag masks.
+ *
+ * @ptr: Pointer to the buffer that stores the masks
+ * @mask_len: Length of the buffer pointed by ptr
+ * @update_buf: Buffer for performing mask updates to peripherals
+ * @update_buf_len: Length of the buffer pointed by buf
+ * @status: status of the mask - all enable, disabled, valid
+ * @lock: To protect access to the mask variables
+ */
+struct diag_mask_info {
+	uint8_t *ptr;
+	int mask_len;
+	uint8_t *update_buf;
+	int update_buf_len;
+	uint8_t status;
+	struct mutex lock;
+};
+
+/* Tracks the user processes driving each logging transport. */
+struct diag_md_proc_info {
+	int pid;
+	struct task_struct *socket_process;
+	struct task_struct *callback_process;
+	struct task_struct *mdlog_process;
+};
+
+/* Feature set advertised by (and negotiated with) one peripheral. */
+struct diag_feature_t {
+	uint8_t feature_mask[FEATURE_MASK_LEN];
+	uint8_t rcvd_feature_mask;
+	uint8_t log_on_demand;
+	uint8_t separate_cmd_rsp;
+	uint8_t encode_hdlc;
+	uint8_t peripheral_buffering;
+	uint8_t mask_centralization;
+	uint8_t stm_support;
+	uint8_t sockets_enabled;
+	uint8_t sent_feature_mask;
+};
+
+/*
+ * Global driver state for the diag char device: char-device bookkeeping,
+ * the client table, memory-pool sizes, per-peripheral forwarding contexts,
+ * mask pointers, DCI state and logging-mode bookkeeping. A single instance
+ * is exported as the file-scope 'driver' pointer.
+ */
+struct diagchar_dev {
+
+	/* State for the char driver */
+	unsigned int major;
+	unsigned int minor_start;
+	int num;
+	struct cdev *cdev;
+	char *name;
+	struct class *diagchar_class;
+	struct device *diag_dev;
+	int ref_count;
+	int mask_clear;
+	struct mutex diag_maskclear_mutex;
+	struct mutex diagchar_mutex;
+	struct mutex diag_file_mutex;
+	wait_queue_head_t wait_q;
+	struct diag_client_map *client_map;
+	int *data_ready;
+	int num_clients;
+	int polling_reg_flag;
+	int use_device_tree;
+	int supports_separate_cmdrsp;
+	int supports_apps_hdlc_encoding;
+	int supports_sockets;
+	/* The state requested in the STM command */
+	int stm_state_requested[NUM_STM_PROCESSORS];
+	/* The current STM state */
+	int stm_state[NUM_STM_PROCESSORS];
+	uint16_t stm_peripheral;
+	struct work_struct stm_update_work;
+	uint16_t mask_update;
+	struct work_struct mask_update_work;
+	uint16_t close_transport;
+	struct work_struct close_transport_work;
+	struct workqueue_struct *cntl_wq;
+	struct mutex cntl_lock;
+	/* Whether or not the peripheral supports STM */
+	/* NOTE(review): the comment above has no matching field — stale? */
+	/* Delayed response Variables */
+	uint16_t delayed_rsp_id;
+	struct mutex delayed_rsp_mutex;
+	/* DCI related variables */
+	struct list_head dci_req_list;
+	struct list_head dci_client_list;
+	int dci_tag;
+	int dci_client_id;
+	struct mutex dci_mutex;
+	int num_dci_client;
+	unsigned char *apps_dci_buf;
+	int dci_state;
+	struct workqueue_struct *diag_dci_wq;
+	struct list_head cmd_reg_list;
+	struct mutex cmd_reg_mutex;
+	uint32_t cmd_reg_count;
+	struct mutex diagfwd_channel_mutex;
+	/* Sizes that reflect memory pool sizes */
+	unsigned int poolsize;
+	unsigned int poolsize_hdlc;
+	unsigned int poolsize_dci;
+	unsigned int poolsize_user;
+	/* Buffers for masks */
+	struct mutex diag_cntl_mutex;
+	/* Members for Sending response */
+	unsigned char *encoded_rsp_buf;
+	int encoded_rsp_len;
+	uint8_t rsp_buf_busy;
+	spinlock_t rsp_buf_busy_lock;
+	int rsp_buf_ctxt;
+	struct diagfwd_info *diagfwd_data[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_cntl[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_dci[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_cmd[NUM_PERIPHERALS];
+	struct diagfwd_info *diagfwd_dci_cmd[NUM_PERIPHERALS];
+	struct diag_feature_t feature[NUM_PERIPHERALS];
+	struct diag_buffering_mode_t buffering_mode[NUM_PERIPHERALS];
+	uint8_t buffering_flag[NUM_PERIPHERALS];
+	struct mutex mode_lock;
+	unsigned char *user_space_data_buf;
+	uint8_t user_space_data_busy;
+	struct diag_pkt_stats_t msg_stats;
+	struct diag_pkt_stats_t log_stats;
+	struct diag_pkt_stats_t event_stats;
+	/* buffer for updating mask to peripherals */
+	unsigned char *buf_feature_mask_update;
+	uint8_t hdlc_disabled;
+	struct mutex hdlc_disable_mutex;
+	struct timer_list hdlc_reset_timer;
+	struct mutex diag_hdlc_mutex;
+	unsigned char *hdlc_buf;
+	uint32_t hdlc_buf_len;
+	unsigned char *apps_rsp_buf;
+	struct diag_partial_pkt_t incoming_pkt;
+	int in_busy_pktdata;
+	/* Variables for non real time mode */
+	int real_time_mode[DIAG_NUM_PROC];
+	int real_time_update_busy;
+	uint16_t proc_active_mask;
+	uint16_t proc_rt_vote_mask[DIAG_NUM_PROC];
+	struct mutex real_time_mutex;
+	struct work_struct diag_real_time_work;
+	struct workqueue_struct *diag_real_time_wq;
+#ifdef CONFIG_DIAG_OVER_USB
+	int usb_connected;
+#endif
+	struct workqueue_struct *diag_wq;
+	struct work_struct diag_drain_work;
+	struct work_struct update_user_clients;
+	struct work_struct update_md_clients;
+	struct workqueue_struct *diag_cntl_wq;
+	uint8_t log_on_demand_support;
+	uint8_t *apps_req_buf;
+	uint32_t apps_req_buf_len;
+	uint8_t *dci_pkt_buf; /* For Apps DCI packets */
+	uint32_t dci_pkt_length;
+	int in_busy_dcipktdata;
+	int logging_mode;
+	int logging_mask;
+	int mask_check;
+	uint32_t md_session_mask;
+	uint8_t md_session_mode;
+	struct diag_md_session_t *md_session_map[NUM_MD_SESSIONS];
+	struct mutex md_session_lock;
+	/* Power related variables */
+	struct diag_ws_ref_t dci_ws;
+	struct diag_ws_ref_t md_ws;
+	/* Pointers to Diag Masks */
+	struct diag_mask_info *msg_mask;
+	struct diag_mask_info *log_mask;
+	struct diag_mask_info *event_mask;
+	struct diag_mask_info *build_time_mask;
+	uint8_t msg_mask_tbl_count;
+	uint16_t event_mask_size;
+	uint16_t last_event_id;
+	/* Variables for Mask Centralization */
+	uint16_t num_event_id[NUM_PERIPHERALS];
+	uint32_t num_equip_id[NUM_PERIPHERALS];
+	uint32_t max_ssid_count[NUM_PERIPHERALS];
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	/* For sending command requests in callback mode */
+	unsigned char *hdlc_encode_buf;
+	int hdlc_encode_buf_len;
+#endif
+	int time_sync_enabled;
+	uint8_t uses_time_api;
+};
+
+extern struct diagchar_dev *driver;
+
+extern int wrap_enabled;
+extern uint16_t wrap_count;
+
+void diag_get_timestamp(char *time_str);
+void check_drain_timer(void);
+int diag_get_remote(int remote_info);
+
+void diag_ws_init(void);
+void diag_ws_on_notify(void);
+void diag_ws_on_read(int type, int pkt_len);
+void diag_ws_on_copy(int type);
+void diag_ws_on_copy_fail(int type);
+void diag_ws_on_copy_complete(int type);
+void diag_ws_reset(int type);
+void diag_ws_release(void);
+void chk_logging_wakeup(void);
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+		     int pid);
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+			struct diag_cmd_reg_entry_t *entry,
+			int proc);
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc);
+void diag_cmd_remove_reg_by_pid(int pid);
+void diag_cmd_remove_reg_by_proc(int proc);
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry);
+void diag_clear_masks(struct diag_md_session_t *info);
+
+void diag_record_stats(int type, int flag);
+
+struct diag_md_session_t *diag_md_session_get_pid(int pid);
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral);
+
+#endif
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
new file mode 100644
index 0000000..3b9f4e9
--- /dev/null
+++ b/drivers/char/diag/diagchar_core.c
@@ -0,0 +1,3553 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/timer.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <asm/current.h>
+#include "diagchar_hdlc.h"
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diag_dci.h"
+#include "diag_debugfs.h"
+#include "diag_masks.h"
+#include "diagfwd_bridge.h"
+#include "diag_usb.h"
+#include "diag_memorydevice.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+#include "diagfwd_peripheral.h"
+
+#include <linux/coresight-stm.h>
+#include <linux/kernel.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+MODULE_DESCRIPTION("Diag Char Driver");
+MODULE_LICENSE("GPL v2");
+
+#define MIN_SIZ_ALLOW 4
+#define INIT	1
+#define EXIT	-1
+struct diagchar_dev *driver;
+struct diagchar_priv {
+	int pid;
+};
+
+#define USER_SPACE_RAW_DATA	0
+#define USER_SPACE_HDLC_DATA	1
+
+/* Memory pool variables */
+/* Used for copying any incoming packet from user space clients. */
+static unsigned int poolsize = 12;
+module_param(poolsize, uint, 0000);
+
+/*
+ * Used for HDLC encoding packets coming from the user
+ * space.
+ */
+static unsigned int poolsize_hdlc = 10;
+module_param(poolsize_hdlc, uint, 0000);
+
+/*
+ * This is used for incoming DCI requests from the user space clients.
+ * Don't expose itemsize as it is internal.
+ */
+static unsigned int poolsize_user = 8;
+module_param(poolsize_user, uint, 0000);
+
+/*
+ * USB structures allocated for writing Diag data generated on the Apps to USB.
+ * Don't expose itemsize as it is constant.
+ */
+static unsigned int itemsize_usb_apps = sizeof(struct diag_request);
+static unsigned int poolsize_usb_apps = 10;
+module_param(poolsize_usb_apps, uint, 0000);
+
+/* Used for DCI client buffers. Don't expose itemsize as it is constant. */
+static unsigned int poolsize_dci = 10;
+module_param(poolsize_dci, uint, 0000);
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/* Used for reading data from the remote device. */
+static unsigned int itemsize_mdm = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm = 18;
+module_param(itemsize_mdm, uint, 0000);
+module_param(poolsize_mdm, uint, 0000);
+
+/*
+ * Used for reading DCI data from the remote device.
+ * Don't expose poolsize for DCI data. There is only one read buffer
+ */
+static unsigned int itemsize_mdm_dci = DIAG_MDM_BUF_SIZE;
+static unsigned int poolsize_mdm_dci = 1;
+module_param(itemsize_mdm_dci, uint, 0000);
+
+/*
+ * Used for USB structues associated with a remote device.
+ * Don't expose the itemsize since it is constant.
+ */
+static unsigned int itemsize_mdm_usb = sizeof(struct diag_request);
+static unsigned int poolsize_mdm_usb = 18;
+module_param(poolsize_mdm_usb, uint, 0000);
+
+/*
+ * Used for writing read DCI data to remote peripherals. Don't
+ * expose poolsize for DCI data. There is only one read
+ * buffer. Add 6 bytes for DCI header information: Start (1),
+ * Version (1), Length (2), Tag (2)
+ */
+static unsigned int itemsize_mdm_dci_write = DIAG_MDM_DCI_BUF_SIZE;
+static unsigned int poolsize_mdm_dci_write = 1;
+module_param(itemsize_mdm_dci_write, uint, 0000);
+
+/*
+ * Used for USB structures associated with a remote SMUX
+ * device Don't expose the itemsize since it is constant
+ */
+static unsigned int itemsize_qsc_usb = sizeof(struct diag_request);
+static unsigned int poolsize_qsc_usb = 8;
+module_param(poolsize_qsc_usb, uint, 0000);
+#endif
+
+/* This is the max number of user-space clients supported at initialization*/
+static unsigned int max_clients = 15;
+static unsigned int threshold_client_limit = 50;
+module_param(max_clients, uint, 0000);
+
+/* Timer variables */
+static struct timer_list drain_timer;
+static int timer_in_progress;
+
+struct diag_apps_data_t {
+	void *buf;
+	uint32_t len;
+	int ctxt;
+};
+
+static struct diag_apps_data_t hdlc_data;
+static struct diag_apps_data_t non_hdlc_data;
+static struct mutex apps_data_mutex;
+
+#define DIAGPKT_MAX_DELAYED_RSP 0xFFFF
+
+#ifdef DIAG_DEBUG
+uint16_t diag_debug_mask;
+void *diag_ipc_log;
+#endif
+
+static void diag_md_session_close(struct diag_md_session_t *session_info);
+
+/*
+ * Returns the next delayed rsp id. If wrapping is enabled,
+ * wraps the delayed rsp id to DIAGPKT_MAX_DELAYED_RSP.
+ */
+static uint16_t diag_get_next_delayed_rsp_id(void)
+{
+	uint16_t rsp_id = 0;
+
+	mutex_lock(&driver->delayed_rsp_mutex);
+	rsp_id = driver->delayed_rsp_id;
+	if (rsp_id < DIAGPKT_MAX_DELAYED_RSP)
+		rsp_id++;
+	else {
+		if (wrap_enabled) {
+			/* Wrap: restart at 1 (presumably 0 is reserved) */
+			rsp_id = 1;
+			wrap_count++;
+		} else
+			/* No wrapping: saturate at the maximum id */
+			rsp_id = DIAGPKT_MAX_DELAYED_RSP;
+	}
+	driver->delayed_rsp_id = rsp_id;
+	mutex_unlock(&driver->delayed_rsp_mutex);
+
+	return rsp_id;
+}
+
+static int diag_switch_logging(struct diag_logging_mode_param_t *param);
+
+#define COPY_USER_SPACE_OR_ERR(buf, data, length)		\
+do {								\
+	if ((count < ret+length) || (copy_to_user(buf,		\
+			(void *)&data, length))) {		\
+		ret = -EFAULT;					\
+	}							\
+	ret += length;						\
+} while (0)
+
+/* Timer callback: defer the actual apps-data drain to the diag workqueue. */
+static void drain_timer_func(unsigned long data)
+{
+	queue_work(driver->diag_wq, &(driver->diag_drain_work));
+}
+
+/*
+ * Flush one pending apps-data buffer to the mux. On write failure the
+ * buffer is returned to the HDLC pool; on success the mux owns it. The
+ * tracking struct is reset either way.
+ */
+static void diag_drain_apps_data(struct diag_apps_data_t *data)
+{
+	int err = 0;
+
+	if (!data || !data->buf)
+		return;
+
+	err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+			     data->ctxt);
+	if (err)
+		diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+
+	data->buf = NULL;
+	data->len = 0;
+}
+
+/* Work item: notify user-space clients of an HDLC support change. */
+void diag_update_user_client_work_fn(struct work_struct *work)
+{
+	diag_update_userspace_clients(HDLC_SUPPORT_TYPE);
+}
+
+/* Work item: notify memory-device clients of an HDLC support change. */
+static void diag_update_md_client_work_fn(struct work_struct *work)
+{
+	diag_update_md_clients(HDLC_SUPPORT_TYPE);
+}
+
+/*
+ * Drain the apps-data buffer matching the current framing mode (HDLC vs
+ * non-HDLC) for the APPS_DATA session, honoring a per-session override.
+ */
+void diag_drain_work_fn(struct work_struct *work)
+{
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+
+	/* NOTE(review): cleared without a lock; races with check_drain_timer
+	 * would only re-arm the timer early — confirm this is acceptable.
+	 */
+	timer_in_progress = 0;
+	mutex_lock(&apps_data_mutex);
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+
+	if (!hdlc_disabled)
+		diag_drain_apps_data(&hdlc_data);
+	else
+		diag_drain_apps_data(&non_hdlc_data);
+	mutex_unlock(&apps_data_mutex);
+}
+
+/*
+ * Arm the 200 ms drain timer if it is not already pending.
+ * NOTE(review): the timer_in_progress test-and-set is not atomic; two
+ * concurrent callers could both arm the timer — confirm callers serialize.
+ */
+void check_drain_timer(void)
+{
+	int ret = 0;
+
+	if (!timer_in_progress) {
+		timer_in_progress = 1;
+		ret = mod_timer(&drain_timer, jiffies + msecs_to_jiffies(200));
+	}
+}
+
+/*
+ * Record the calling process in client_map slot i and attach its pid to
+ * the file as private data. A failed private-data allocation is tolerated
+ * (file->private_data is then NULL and the slot has no priv backing).
+ */
+void diag_add_client(int i, struct file *file)
+{
+	struct diagchar_priv *diagpriv_data;
+
+	driver->client_map[i].pid = current->tgid;
+	diagpriv_data = kmalloc(sizeof(struct diagchar_priv),
+							GFP_KERNEL);
+	if (diagpriv_data)
+		diagpriv_data->pid = current->tgid;
+	file->private_data = diagpriv_data;
+	/* strlcpy already NUL-terminates; the explicit store is belt-and-braces */
+	strlcpy(driver->client_map[i].name, current->comm, 20);
+	driver->client_map[i].name[19] = '\0';
+}
+
+/*
+ * Size and create the four apps-side memory pools. The copy-pool item
+ * size is padded by the larger of the DCI and callback header sizes so
+ * a header can be prepended to a request in place.
+ */
+static void diag_mempool_init(void)
+{
+	uint32_t itemsize = DIAG_MAX_REQ_SIZE;
+	uint32_t itemsize_hdlc = DIAG_MAX_HDLC_BUF_SIZE + APF_DIAG_PADDING;
+	uint32_t itemsize_dci = IN_BUF_SIZE;
+	uint32_t itemsize_user = DCI_REQ_BUF_SIZE;
+
+	itemsize += ((DCI_HDR_SIZE > CALLBACK_HDR_SIZE) ? DCI_HDR_SIZE :
+		     CALLBACK_HDR_SIZE);
+	diagmem_setsize(POOL_TYPE_COPY, itemsize, poolsize);
+	diagmem_setsize(POOL_TYPE_HDLC, itemsize_hdlc, poolsize_hdlc);
+	diagmem_setsize(POOL_TYPE_DCI, itemsize_dci, poolsize_dci);
+	diagmem_setsize(POOL_TYPE_USER, itemsize_user, poolsize_user);
+
+	diagmem_init(driver, POOL_TYPE_COPY);
+	diagmem_init(driver, POOL_TYPE_HDLC);
+	diagmem_init(driver, POOL_TYPE_USER);
+	diagmem_init(driver, POOL_TYPE_DCI);
+}
+
+/* Tear down the four apps-side memory pools (last client closed). */
+static void diag_mempool_exit(void)
+{
+	diagmem_exit(driver, POOL_TYPE_COPY);
+	diagmem_exit(driver, POOL_TYPE_HDLC);
+	diagmem_exit(driver, POOL_TYPE_USER);
+	diagmem_exit(driver, POOL_TYPE_DCI);
+}
+
+/*
+ * open() handler: claim a free client_map slot, growing the table (and
+ * the data_ready array) up to threshold_client_limit when full. Marks
+ * every mask type as pending delivery to the new client and creates the
+ * memory pools on the first open.
+ */
+static int diagchar_open(struct inode *inode, struct file *file)
+{
+	int i = 0;
+	void *temp;
+
+	if (driver) {
+		mutex_lock(&driver->diagchar_mutex);
+
+		/* First slot whose pid is 0 is free */
+		for (i = 0; i < driver->num_clients; i++)
+			if (driver->client_map[i].pid == 0)
+				break;
+
+		if (i < driver->num_clients) {
+			diag_add_client(i, file);
+		} else {
+			if (i < threshold_client_limit) {
+				driver->num_clients++;
+				temp = krealloc(driver->client_map
+					, (driver->num_clients) * sizeof(struct
+						 diag_client_map), GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				else
+					driver->client_map = temp;
+				temp = krealloc(driver->data_ready
+					, (driver->num_clients) * sizeof(int),
+							GFP_KERNEL);
+				if (!temp)
+					goto fail;
+				else
+					driver->data_ready = temp;
+				diag_add_client(i, file);
+			} else {
+				mutex_unlock(&driver->diagchar_mutex);
+				pr_err_ratelimited("diag: Max client limit for DIAG reached\n");
+				pr_err_ratelimited("diag: Cannot open handle %s %d",
+						current->comm, current->tgid);
+				for (i = 0; i < driver->num_clients; i++)
+					pr_debug("%d) %s PID=%d", i, driver->
+						client_map[i].name,
+						driver->client_map[i].pid);
+				return -ENOMEM;
+			}
+		}
+		/* Deliver all current masks to the newly opened client */
+		driver->data_ready[i] = 0x0;
+		driver->data_ready[i] |= MSG_MASKS_TYPE;
+		driver->data_ready[i] |= EVENT_MASKS_TYPE;
+		driver->data_ready[i] |= LOG_MASKS_TYPE;
+		driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+		driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+
+		if (driver->ref_count == 0)
+			diag_mempool_init();
+		driver->ref_count++;
+		mutex_unlock(&driver->diagchar_mutex);
+		return 0;
+	}
+	return -ENOMEM;
+
+fail:
+	mutex_unlock(&driver->diagchar_mutex);
+	/*
+	 * NOTE(review): num_clients is decremented after the mutex is
+	 * released, and a half-grown client_map may remain paired with an
+	 * old data_ready — confirm this is safe against concurrent opens.
+	 */
+	driver->num_clients--;
+	pr_err_ratelimited("diag: Insufficient memory for new client");
+	return -ENOMEM;
+}
+
+/* Convert an internal MD peripheral bitmask to user-visible DIAG_CON_* bits. */
+static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
+{
+	uint32_t ret = 0;
+
+	if (peripheral_mask & MD_PERIPHERAL_MASK(APPS_DATA))
+		ret |= DIAG_CON_APSS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_MODEM))
+		ret |= DIAG_CON_MPSS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_LPASS))
+		ret |= DIAG_CON_LPASS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WCNSS))
+		ret |= DIAG_CON_WCNSS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_SENSORS))
+		ret |= DIAG_CON_SENSORS;
+	if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_WDSP))
+		ret |= DIAG_CON_WDSP;
+
+	return ret;
+}
+
+/*
+ * Disable all log, message and event masks (scoped to the session when
+ * info is set, global on USB disconnect otherwise) by injecting the
+ * corresponding mask-clear command packets through the apps mask path.
+ */
+void diag_clear_masks(struct diag_md_session_t *info)
+{
+	int ret;
+	char cmd_disable_log_mask[] = { 0x73, 0, 0, 0, 0, 0, 0, 0};
+	char cmd_disable_msg_mask[] = { 0x7D, 0x05, 0, 0, 0, 0, 0, 0};
+	char cmd_disable_event_mask[] = { 0x60, 0};
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"diag: %s: masks clear request upon %s\n", __func__,
+	((info) ? "ODL exit" : "USB Disconnection"));
+
+	/* NOTE(review): each ret overwrites the last and none are checked —
+	 * confirm best-effort clearing is intended here.
+	 */
+	ret = diag_process_apps_masks(cmd_disable_log_mask,
+			sizeof(cmd_disable_log_mask), info);
+	ret = diag_process_apps_masks(cmd_disable_msg_mask,
+			sizeof(cmd_disable_msg_mask), info);
+	ret = diag_process_apps_masks(cmd_disable_event_mask,
+			sizeof(cmd_disable_event_mask), info);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+	"diag:%s: masks cleared successfully\n", __func__);
+}
+
+/*
+ * Tear down the memory-device logging session owned by pid: clear its
+ * masks, close its session, close each peripheral it had claimed on the
+ * mux, then switch those peripherals back to USB mode.
+ */
+static void diag_close_logging_process(const int pid)
+{
+	int i;
+	int session_peripheral_mask;
+	struct diag_md_session_t *session_info = NULL;
+	struct diag_logging_mode_param_t params;
+
+	session_info = diag_md_session_get_pid(pid);
+	if (!session_info)
+		return;
+
+	diag_clear_masks(session_info);
+
+	mutex_lock(&driver->diag_maskclear_mutex);
+	driver->mask_clear = 1;
+	mutex_unlock(&driver->diag_maskclear_mutex);
+
+	/* Capture the mask before the session struct is freed */
+	session_peripheral_mask = session_info->peripheral_mask;
+	diag_md_session_close(session_info);
+	for (i = 0; i < NUM_MD_SESSIONS; i++)
+		if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
+			diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
+
+	params.req_mode = USB_MODE;
+	params.mode_param = 0;
+	params.peripheral_mask =
+		diag_translate_kernel_to_user_mask(session_peripheral_mask);
+	mutex_lock(&driver->diagchar_mutex);
+	diag_switch_logging(&params);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+/*
+ * Remove the *calling* process's client state: DCI registrations, any
+ * ODL session, its command registrations and finally its client_map slot.
+ * Drops the memory pools when the last reference goes away.
+ * NOTE(review): DCI/logging/registration cleanup keys off current->tgid
+ * while the map slot is matched via file->private_data->pid — confirm
+ * these always refer to the same process (e.g. fd passed between tasks).
+ */
+static int diag_remove_client_entry(struct file *file)
+{
+	int i = -1;
+	struct diagchar_priv *diagpriv_data = NULL;
+	struct diag_dci_client_tbl *dci_entry = NULL;
+
+	if (!driver)
+		return -ENOMEM;
+
+	mutex_lock(&driver->diag_file_mutex);
+	if (!file) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid file pointer\n");
+		mutex_unlock(&driver->diag_file_mutex);
+		return -ENOENT;
+	}
+	if (!(file->private_data)) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE, "Invalid private data\n");
+		mutex_unlock(&driver->diag_file_mutex);
+		return -EINVAL;
+	}
+
+	diagpriv_data = file->private_data;
+
+	/*
+	 * clean up any DCI registrations, if this is a DCI client
+	 * This will specially help in case of ungraceful exit of any DCI client
+	 * This call will remove any pending registrations of such client
+	 */
+	mutex_lock(&driver->dci_mutex);
+	dci_entry = dci_lookup_client_entry_pid(current->tgid);
+	if (dci_entry)
+		diag_dci_deinit_client(dci_entry);
+	mutex_unlock(&driver->dci_mutex);
+
+	diag_close_logging_process(current->tgid);
+
+	/* Delete the pkt response table entry for the exiting process */
+	diag_cmd_remove_reg_by_pid(current->tgid);
+
+	mutex_lock(&driver->diagchar_mutex);
+	driver->ref_count--;
+	if (driver->ref_count == 0)
+		diag_mempool_exit();
+
+	for (i = 0; i < driver->num_clients; i++) {
+		if (diagpriv_data && diagpriv_data->pid ==
+						driver->client_map[i].pid) {
+			driver->client_map[i].pid = 0;
+			kfree(diagpriv_data);
+			diagpriv_data = NULL;
+			file->private_data = 0;
+			break;
+		}
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+	mutex_unlock(&driver->diag_file_mutex);
+	return 0;
+}
+/* release() handler: remove client state and drop the mask-clear flag. */
+static int diagchar_close(struct inode *inode, struct file *file)
+{
+	int ret;
+
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
+		current->comm);
+	ret = diag_remove_client_entry(file);
+	mutex_lock(&driver->diag_maskclear_mutex);
+	driver->mask_clear = 0;
+	mutex_unlock(&driver->diag_maskclear_mutex);
+	return ret;
+}
+
+/*
+ * Update per-type packet statistics. flag selects allocation, drop or
+ * reset; responses only warn on drop and delayed responses are ignored.
+ * NOTE(review): the plain uint32_t counters are accessed through
+ * atomic_t casts — confirm layout and semantics match on all targets.
+ */
+void diag_record_stats(int type, int flag)
+{
+	struct diag_pkt_stats_t *pkt_stats = NULL;
+
+	switch (type) {
+	case DATA_TYPE_EVENT:
+		pkt_stats = &driver->event_stats;
+		break;
+	case DATA_TYPE_F3:
+		pkt_stats = &driver->msg_stats;
+		break;
+	case DATA_TYPE_LOG:
+		pkt_stats = &driver->log_stats;
+		break;
+	case DATA_TYPE_RESPONSE:
+		if (flag != PKT_DROP)
+			return;
+		pr_err_ratelimited("diag: In %s, dropping response. This shouldn't happen\n",
+				   __func__);
+		return;
+	case DATA_TYPE_DELAYED_RESPONSE:
+		/* No counters to increase for Delayed responses */
+		return;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	switch (flag) {
+	case PKT_ALLOC:
+		atomic_add(1, (atomic_t *)&pkt_stats->alloc_count);
+		break;
+	case PKT_DROP:
+		atomic_add(1, (atomic_t *)&pkt_stats->drop_count);
+		break;
+	case PKT_RESET:
+		atomic_set((atomic_t *)&pkt_stats->alloc_count, 0);
+		atomic_set((atomic_t *)&pkt_stats->drop_count, 0);
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid flag: %d\n",
+				   __func__, flag);
+		return;
+	}
+}
+
+/* Format the current UTC wall-clock time as "h:m:s:usec" into time_str. */
+void diag_get_timestamp(char *time_str)
+{
+	struct timeval t;
+	struct tm broken_tm;
+
+	do_gettimeofday(&t);
+	/* NOTE(review): the NULL check comes after do_gettimeofday —
+	 * harmless, but inverted relative to the usual guard-first style.
+	 */
+	if (!time_str)
+		return;
+	time_to_tm(t.tv_sec, 0, &broken_tm);
+	scnprintf(time_str, DIAG_TS_SIZE, "%d:%d:%d:%ld", broken_tm.tm_hour,
+				broken_tm.tm_min, broken_tm.tm_sec, t.tv_usec);
+}
+
+/*
+ * Map a remote-processor token (possibly already negated) to its
+ * canonical negated identifier; returns 0 for anything not MDM/MDM2/QSC.
+ */
+int diag_get_remote(int remote_info)
+{
+	/* Compare on the absolute value so both signs are accepted */
+	int val = (remote_info < 0) ? -remote_info : remote_info;
+	int remote_val;
+
+	switch (val) {
+	case MDM:
+	case MDM2:
+	case QSC:
+		remote_val = -remote_info;
+		break;
+	default:
+		remote_val = 0;
+		break;
+	}
+
+	return remote_val;
+}
+
+/*
+ * Classify a registration entry: returns DIAG_CMD_POLLING when its
+ * range covers one of the well-known polling commands (status, WCDMA
+ * call query, GSM TMC query, params poll, TDSCDMA status),
+ * DIAG_CMD_NOT_POLLING otherwise, or -EIO for a NULL entry.
+ */
+int diag_cmd_chk_polling(struct diag_cmd_reg_entry_t *entry)
+{
+	int polling = DIAG_CMD_NOT_POLLING;
+
+	if (!entry)
+		return -EIO;
+
+	/* Only wildcard (no-subsys) cmd codes can denote polling commands */
+	if (entry->cmd_code == DIAG_CMD_NO_SUBSYS) {
+		if (entry->subsys_id == DIAG_CMD_NO_SUBSYS &&
+		    entry->cmd_code_hi >= DIAG_CMD_STATUS &&
+		    entry->cmd_code_lo <= DIAG_CMD_STATUS)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_WCDMA &&
+			 entry->cmd_code_hi >= DIAG_CMD_QUERY_CALL &&
+			 entry->cmd_code_lo <= DIAG_CMD_QUERY_CALL)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_GSM &&
+			 entry->cmd_code_hi >= DIAG_CMD_QUERY_TMC &&
+			 entry->cmd_code_lo <= DIAG_CMD_QUERY_TMC)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_PARAMS &&
+			 entry->cmd_code_hi >= DIAG_DIAG_POLL  &&
+			 entry->cmd_code_lo <= DIAG_DIAG_POLL)
+			polling = DIAG_CMD_POLLING;
+		else if (entry->subsys_id == DIAG_SS_TDSCDMA &&
+			 entry->cmd_code_hi >= DIAG_CMD_TDSCDMA_STATUS &&
+			 entry->cmd_code_lo <= DIAG_CMD_TDSCDMA_STATUS)
+			polling = DIAG_CMD_POLLING;
+	}
+
+	return polling;
+}
+
+/*
+ * Recompute driver->polling_reg_flag after a registration change. On
+ * DIAG_CMD_ADD the scan is skipped when the flag is already set (an add
+ * can only turn it on); on removal the whole list is rescanned.
+ * Called with cmd_reg_mutex held (see diag_cmd_add_reg and removers).
+ */
+static void diag_cmd_invalidate_polling(int change_flag)
+{
+	int polling = DIAG_CMD_NOT_POLLING;
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	if (change_flag == DIAG_CMD_ADD) {
+		if (driver->polling_reg_flag)
+			return;
+	}
+
+	driver->polling_reg_flag = 0;
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		polling = diag_cmd_chk_polling(&item->entry);
+		if (polling == DIAG_CMD_POLLING) {
+			driver->polling_reg_flag = 1;
+			break;
+		}
+	}
+}
+
+/*
+ * Register a command range for processor proc. pid is only meaningful
+ * for APPS_DATA registrations and is forced to INVALID_PID otherwise.
+ * Returns 0 on success or a negative errno.
+ */
+int diag_cmd_add_reg(struct diag_cmd_reg_entry_t *new_entry, uint8_t proc,
+		     int pid)
+{
+	struct diag_cmd_reg_t *new_item = NULL;
+
+	if (!new_entry) {
+		pr_err("diag: In %s, invalid new entry\n", __func__);
+		return -EINVAL;
+	}
+
+	if (proc > APPS_DATA) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__, proc);
+		return -EINVAL;
+	}
+
+	if (proc != APPS_DATA)
+		pid = INVALID_PID;
+
+	new_item = kzalloc(sizeof(struct diag_cmd_reg_t), GFP_KERNEL);
+	if (!new_item)
+		return -ENOMEM;
+	kmemleak_not_leak(new_item);
+
+	new_item->pid = pid;
+	new_item->proc = proc;
+	memcpy(&new_item->entry, new_entry,
+	       sizeof(struct diag_cmd_reg_entry_t));
+	INIT_LIST_HEAD(&new_item->link);
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	list_add_tail(&new_item->link, &driver->cmd_reg_list);
+	driver->cmd_reg_count++;
+	diag_cmd_invalidate_polling(DIAG_CMD_ADD);
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+	return 0;
+}
+
+/*
+ * Find the first registration matching entry for proc (or ALL_PROC).
+ * Matching order: exact cmd/subsys with range containment; then a
+ * DIAG_CMD_NO_SUBSYS registration standing in for a DIAG_CMD_DIAG_SUBSYS
+ * request; then a fully wildcarded registration, where MODE_CMD is
+ * specially steered — RESET_ID only matches apps registrations, every
+ * other mode command only matches non-apps registrations.
+ * Presumably called with cmd_reg_mutex held — confirm for all callers.
+ */
+struct diag_cmd_reg_entry_t *diag_cmd_search(
+			struct diag_cmd_reg_entry_t *entry, int proc)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+	struct diag_cmd_reg_entry_t *temp_entry = NULL;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid entry\n", __func__);
+		return NULL;
+	}
+
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		temp_entry = &item->entry;
+		if (temp_entry->cmd_code == entry->cmd_code &&
+		    temp_entry->subsys_id == entry->subsys_id &&
+		    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+		    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+		    (proc == item->proc || proc == ALL_PROC)) {
+			return &item->entry;
+		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+			   entry->cmd_code == DIAG_CMD_DIAG_SUBSYS) {
+			if (temp_entry->subsys_id == entry->subsys_id &&
+			    temp_entry->cmd_code_hi >= entry->cmd_code_hi &&
+			    temp_entry->cmd_code_lo <= entry->cmd_code_lo &&
+			    (proc == item->proc || proc == ALL_PROC)) {
+				return &item->entry;
+			}
+		} else if (temp_entry->cmd_code == DIAG_CMD_NO_SUBSYS &&
+			   temp_entry->subsys_id == DIAG_CMD_NO_SUBSYS) {
+			if ((temp_entry->cmd_code_hi >= entry->cmd_code) &&
+			    (temp_entry->cmd_code_lo <= entry->cmd_code) &&
+			    (proc == item->proc || proc == ALL_PROC)) {
+				if (entry->cmd_code == MODE_CMD) {
+					/* RESET is handled by apps only */
+					if (entry->subsys_id == RESET_ID &&
+						item->proc != APPS_DATA) {
+						continue;
+					}
+					/* Other mode cmds go to peripherals */
+					if (entry->subsys_id != RESET_ID &&
+						item->proc == APPS_DATA) {
+						continue;
+					}
+				}
+				return &item->entry;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Remove the first registration matching entry for proc (if any) and
+ * recompute the polling flag. Takes cmd_reg_mutex itself.
+ */
+void diag_cmd_remove_reg(struct diag_cmd_reg_entry_t *entry, uint8_t proc)
+{
+	struct diag_cmd_reg_t *item = NULL;
+	struct diag_cmd_reg_entry_t *temp_entry;
+
+	if (!entry) {
+		pr_err("diag: In %s, invalid entry\n", __func__);
+		return;
+	}
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	temp_entry = diag_cmd_search(entry, proc);
+	if (temp_entry) {
+		item = container_of(temp_entry, struct diag_cmd_reg_t, entry);
+		if (!item) {
+			mutex_unlock(&driver->cmd_reg_mutex);
+			return;
+		}
+		list_del(&item->link);
+		kfree(item);
+		driver->cmd_reg_count--;
+	}
+	diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+	mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+/*
+ * Drop every registration owned by pid (apps-client exit path).
+ * NOTE(review): unlike diag_cmd_remove_reg_by_proc this does not re-run
+ * diag_cmd_invalidate_polling — confirm a polling registration removed
+ * by pid should not clear polling_reg_flag.
+ */
+void diag_cmd_remove_reg_by_pid(int pid)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		if (item->pid == pid) {
+			list_del(&item->link);
+			kfree(item);
+			driver->cmd_reg_count--;
+		}
+	}
+	mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+/*
+ * Drop every registration owned by processor proc and recompute the
+ * polling flag (peripheral restart/teardown path).
+ */
+void diag_cmd_remove_reg_by_proc(int proc)
+{
+	struct list_head *start;
+	struct list_head *temp;
+	struct diag_cmd_reg_t *item = NULL;
+
+	mutex_lock(&driver->cmd_reg_mutex);
+	list_for_each_safe(start, temp, &driver->cmd_reg_list) {
+		item = list_entry(start, struct diag_cmd_reg_t, link);
+		if (item->proc == proc) {
+			list_del(&item->link);
+			kfree(item);
+			driver->cmd_reg_count--;
+		}
+	}
+	diag_cmd_invalidate_polling(DIAG_CMD_REMOVE);
+	mutex_unlock(&driver->cmd_reg_mutex);
+}
+
+/*
+ * Copy buffered DCI data for one client into its user buffer. *pret is
+ * the running offset into buf and is advanced past the copied payload;
+ * the accumulated total length is then written at the fixed offset
+ * buf+8 expected by the DCI read layout. Returns 0 on success, 1 on
+ * bad args, -EINVAL on an over-long offset. Sets drain_again (and
+ * re-queues a drain) when not everything fit into count bytes.
+ */
+static int diag_copy_dci(char __user *buf, size_t count,
+			struct diag_dci_client_tbl *entry, int *pret)
+{
+	int total_data_len = 0;
+	int ret = 0;
+	int exit_stat = 1;
+	uint8_t drain_again = 0;
+	struct diag_dci_buffer_t *buf_entry, *temp;
+
+	if (!buf || !entry || !pret)
+		return exit_stat;
+
+	ret = *pret;
+
+	/* Reserve room for the total-length field written below */
+	ret += sizeof(int);
+	if (ret >= count) {
+		pr_err("diag: In %s, invalid value for ret: %d, count: %zu\n",
+		       __func__, ret, count);
+		return -EINVAL;
+	}
+
+	mutex_lock(&entry->write_buf_mutex);
+	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
+								buf_track) {
+
+		/* Leave the rest for a follow-up drain if it won't fit */
+		if ((ret + buf_entry->data_len) > count) {
+			drain_again = 1;
+			break;
+		}
+
+		list_del(&buf_entry->buf_track);
+		mutex_lock(&buf_entry->data_mutex);
+		if ((buf_entry->data_len > 0) &&
+		    (buf_entry->in_busy) &&
+		    (buf_entry->data)) {
+			/* On copy failure the buffer is silently dropped */
+			if (copy_to_user(buf+ret, (void *)buf_entry->data,
+					 buf_entry->data_len))
+				goto drop;
+			ret += buf_entry->data_len;
+			total_data_len += buf_entry->data_len;
+			diag_ws_on_copy(DIAG_WS_DCI);
+drop:
+			buf_entry->in_busy = 0;
+			buf_entry->data_len = 0;
+			buf_entry->in_list = 0;
+			if (buf_entry->buf_type == DCI_BUF_CMD) {
+				mutex_unlock(&buf_entry->data_mutex);
+				continue;
+			} else if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
+				diagmem_free(driver, buf_entry->data,
+					     POOL_TYPE_DCI);
+				buf_entry->data = NULL;
+				mutex_unlock(&buf_entry->data_mutex);
+				kfree(buf_entry);
+				continue;
+			}
+
+		}
+		mutex_unlock(&buf_entry->data_mutex);
+	}
+
+	if (total_data_len > 0) {
+		/* Copy the total data length */
+		COPY_USER_SPACE_OR_ERR(buf+8, total_data_len, 4);
+		if (ret == -EFAULT)
+			goto exit;
+		/* The macro advanced ret by 4; the length field is not payload */
+		ret -= 4;
+	} else {
+		pr_debug("diag: In %s, Trying to copy ZERO bytes, total_data_len: %d\n",
+			__func__, total_data_len);
+	}
+
+	exit_stat = 0;
+exit:
+	entry->in_service = 0;
+	mutex_unlock(&entry->write_buf_mutex);
+	*pret = ret;
+	if (drain_again)
+		dci_drain_data(0);
+
+	return exit_stat;
+}
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * diag_remote_init - set up resources for remote (bridged) diag traffic
+ *
+ * Sizes the MDM/QSC memory pools and allocates the shared HDLC encode
+ * buffer used when forwarding packets to remote processors.
+ * Returns 0 on success, -ENOMEM if the encode buffer allocation fails.
+ */
+static int diag_remote_init(void)
+{
+	diagmem_setsize(POOL_TYPE_MDM, itemsize_mdm, poolsize_mdm);
+	diagmem_setsize(POOL_TYPE_MDM2, itemsize_mdm, poolsize_mdm);
+	diagmem_setsize(POOL_TYPE_MDM_DCI, itemsize_mdm_dci, poolsize_mdm_dci);
+	diagmem_setsize(POOL_TYPE_MDM2_DCI, itemsize_mdm_dci,
+			poolsize_mdm_dci);
+	diagmem_setsize(POOL_TYPE_MDM_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+	diagmem_setsize(POOL_TYPE_MDM2_MUX, itemsize_mdm_usb, poolsize_mdm_usb);
+	diagmem_setsize(POOL_TYPE_MDM_DCI_WRITE, itemsize_mdm_dci_write,
+			poolsize_mdm_dci_write);
+	diagmem_setsize(POOL_TYPE_MDM2_DCI_WRITE, itemsize_mdm_dci_write,
+			poolsize_mdm_dci_write);
+	diagmem_setsize(POOL_TYPE_QSC_MUX, itemsize_qsc_usb,
+			poolsize_qsc_usb);
+	driver->hdlc_encode_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+	if (!driver->hdlc_encode_buf)
+		return -ENOMEM;
+	driver->hdlc_encode_buf_len = 0;
+	return 0;
+}
+
+/* Free resources allocated by diag_remote_init(). */
+static void diag_remote_exit(void)
+{
+	kfree(driver->hdlc_encode_buf);
+}
+
+/*
+ * diag_send_raw_data_remote - forward a raw diag packet to a bridge device
+ * @proc:      remote processor id (1-based; bridge index is proc - 1)
+ * @buf:       packet to send
+ * @len:       packet length in bytes
+ * @hdlc_flag: non-zero when @buf is already HDLC encoded
+ *
+ * Encodes the packet into driver->hdlc_encode_buf (unless HDLC is
+ * disabled for the session, or the packet is pre-encoded) and writes it
+ * to the bridge.  Returns 0 on success or a negative errno.
+ */
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+				    uint8_t hdlc_flag)
+{
+	int err = 0;
+	int max_len = 0;
+	uint8_t retry_count = 0;
+	uint8_t max_retries = 3;
+	uint16_t payload = 0;
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	int bridge_index = proc - 1;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+
+	if (!buf)
+		return -EINVAL;
+
+	if (len <= 0) {
+		pr_err("diag: In %s, invalid len: %d", __func__, len);
+		return -EBADMSG;
+	}
+
+	/*
+	 * Valid bridge indices are 0..NUM_REMOTE_DEV - 1.  The previous
+	 * check used '>' and allowed the out-of-bounds index NUM_REMOTE_DEV.
+	 */
+	if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+		pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+			bridge_index);
+		return -EINVAL;
+	}
+
+	/* Wait (bounded) for a previous write to drain the encode buffer */
+	do {
+		if (driver->hdlc_encode_buf_len == 0)
+			break;
+		usleep_range(10000, 10100);
+		retry_count++;
+	} while (retry_count < max_retries);
+
+	if (driver->hdlc_encode_buf_len != 0)
+		return -EAGAIN;
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	if (hdlc_disabled) {
+		/* Non-HDLC header: start (1), version (1), payload len (2) */
+		if (len < 4) {
+			pr_err("diag: In %s, invalid len: %d of non-hdlc pkt\n",
+			       __func__, len);
+			return -EBADMSG;
+		}
+		payload = *(uint16_t *)(buf + 2);
+		/*
+		 * Bound the client-supplied payload length before copying
+		 * into the DIAG_MAX_HDLC_BUF_SIZE encode buffer (previously
+		 * unchecked, allowing a heap overflow).
+		 */
+		if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
+			pr_err("diag: Dropping packet, payload size: %d crosses buffer limit\n",
+			       payload);
+			return -EBADMSG;
+		}
+		driver->hdlc_encode_buf_len = payload;
+		/*
+		 * Adding 4 bytes for start (1 byte), version (1 byte) and
+		 * payload (2 bytes)
+		 */
+		memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+		goto send_data;
+	}
+
+	if (hdlc_flag) {
+		if (len > DIAG_MAX_HDLC_BUF_SIZE) {
+			pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+			       len);
+			return -EBADMSG;
+		}
+		driver->hdlc_encode_buf_len = len;
+		memcpy(driver->hdlc_encode_buf, buf, len);
+		goto send_data;
+	}
+
+	/*
+	 * The worst case length will be twice as the incoming packet length.
+	 * Add 3 bytes for CRC bytes (2 bytes) and delimiter (1 byte)
+	 */
+	max_len = (2 * len) + 3;
+	if (max_len > DIAG_MAX_HDLC_BUF_SIZE) {
+		pr_err("diag: Dropping packet, HDLC encoded packet payload size crosses buffer limit. Current payload size %d\n",
+		       max_len);
+		return -EBADMSG;
+	}
+
+	/* Perform HDLC encoding on incoming data */
+	send.state = DIAG_STATE_START;
+	send.pkt = (void *)(buf);
+	send.last = (void *)(buf + len - 1);
+	send.terminate = 1;
+
+	enc.dest = driver->hdlc_encode_buf;
+	enc.dest_last = (void *)(driver->hdlc_encode_buf + max_len - 1);
+	diag_hdlc_encode(&send, &enc);
+	driver->hdlc_encode_buf_len = (int)(enc.dest -
+					(void *)driver->hdlc_encode_buf);
+
+send_data:
+	err = diagfwd_bridge_write(bridge_index, driver->hdlc_encode_buf,
+				   driver->hdlc_encode_buf_len);
+	if (err) {
+		pr_err_ratelimited("diag: Error writing Callback packet to proc: %d, err: %d\n",
+				   proc, err);
+		driver->hdlc_encode_buf_len = 0;
+	}
+
+	return err;
+}
+
+/*
+ * diag_process_userspace_remote - forward a userspace buffer to a bridge
+ * @proc: remote processor id (1-based; bridge index is proc - 1)
+ * @buf:  data to forward
+ * @len:  length of @buf in bytes
+ *
+ * Marks the shared userspace data buffer busy and hands the payload to
+ * the bridge.  Returns the bridge write result or -EINVAL on bad input.
+ */
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+	int bridge_index = proc - 1;
+
+	if (!buf || len < 0) {
+		pr_err("diag: Invalid input in %s, buf: %pK, len: %d\n",
+		       __func__, buf, len);
+		return -EINVAL;
+	}
+
+	/*
+	 * Valid bridge indices are 0..NUM_REMOTE_DEV - 1.  The previous
+	 * check used '>' and allowed the out-of-bounds index NUM_REMOTE_DEV.
+	 */
+	if (bridge_index < 0 || bridge_index >= NUM_REMOTE_DEV) {
+		pr_err("diag: In %s, invalid bridge index: %d\n", __func__,
+		       bridge_index);
+		return -EINVAL;
+	}
+
+	driver->user_space_data_busy = 1;
+	return diagfwd_bridge_write(bridge_index, buf, len);
+}
+#else
+/*
+ * CONFIG_DIAGFWD_BRIDGE_CODE is disabled: no remote (bridge) devices
+ * exist.  These stubs keep the core buildable; raw sends to a remote
+ * proc fail with -EINVAL, everything else is a no-op.
+ */
+static int diag_remote_init(void)
+{
+	return 0;
+}
+
+static void diag_remote_exit(void)
+{
+}
+
+int diagfwd_bridge_init(void)
+{
+	return 0;
+}
+
+void diagfwd_bridge_exit(void)
+{
+}
+
+/* No remote devices, so the remote-device bitmask is empty. */
+uint16_t diag_get_remote_device_mask(void)
+{
+	return 0;
+}
+
+static int diag_send_raw_data_remote(int proc, void *buf, int len,
+				    uint8_t hdlc_flag)
+{
+	return -EINVAL;
+}
+
+/*
+ * NOTE(review): reports success although nothing is forwarded, while the
+ * raw-data stub above returns -EINVAL - confirm this asymmetry is
+ * intentional for the userspace path.
+ */
+static int diag_process_userspace_remote(int proc, void *buf, int len)
+{
+	return 0;
+}
+#endif
+
+/*
+ * mask_request_validate - whitelist check for diag command packets
+ * @mask_buf: raw command packet from userspace
+ *
+ * Returns 1 when the packet id (and, for subsystem packets, the subsystem
+ * id / subsystem command) is on the list of commands userspace may issue,
+ * 0 otherwise.
+ */
+static int mask_request_validate(unsigned char mask_buf[])
+{
+	uint8_t packet_id;
+	uint8_t subsys_id;
+	uint16_t ss_cmd;
+
+	packet_id = mask_buf[0];
+
+	if (packet_id == DIAG_CMD_DIAG_SUBSYS_DELAY) {
+		subsys_id = mask_buf[1];
+		/* NOTE(review): little-endian u16 read at offset 2 - confirm alignment */
+		ss_cmd = *(uint16_t *)(mask_buf + 2);
+		switch (subsys_id) {
+		case DIAG_SS_DIAG:
+			if ((ss_cmd == DIAG_SS_FILE_READ_MODEM) ||
+				(ss_cmd == DIAG_SS_FILE_READ_ADSP) ||
+				(ss_cmd == DIAG_SS_FILE_READ_WCNSS) ||
+				(ss_cmd == DIAG_SS_FILE_READ_SLPI) ||
+				(ss_cmd == DIAG_SS_FILE_READ_APPS))
+				return 1;
+			break;
+		default:
+			return 0;
+		}
+	} else if (packet_id == 0x4B) {
+		/* 0x4B: subsystem dispatch packet */
+		subsys_id = mask_buf[1];
+		ss_cmd = *(uint16_t *)(mask_buf + 2);
+		/* Packets with SSID which are allowed */
+		switch (subsys_id) {
+		case 0x04: /* DIAG_SUBSYS_WCDMA */
+			if ((ss_cmd == 0) || (ss_cmd == 0xF))
+				return 1;
+			break;
+		case 0x08: /* DIAG_SUBSYS_GSM */
+			if ((ss_cmd == 0) || (ss_cmd == 0x1))
+				return 1;
+			break;
+		case 0x09: /* DIAG_SUBSYS_UMTS */
+		case 0x0F: /* DIAG_SUBSYS_CM */
+			if (ss_cmd == 0)
+				return 1;
+			break;
+		case 0x0C: /* DIAG_SUBSYS_OS */
+			if ((ss_cmd == 2) || (ss_cmd == 0x100))
+				return 1; /* MPU and APU */
+			break;
+		case 0x12: /* DIAG_SUBSYS_DIAG_SERV */
+			if ((ss_cmd == 0) || (ss_cmd == 0x6) || (ss_cmd == 0x7))
+				return 1;
+			else if (ss_cmd == 0x218) /* HDLC Disabled Command*/
+				return 0;
+			else if (ss_cmd == DIAG_GET_TIME_API)
+				return 1;
+			else if (ss_cmd == DIAG_SET_TIME_API)
+				return 1;
+			else if (ss_cmd == DIAG_SWITCH_COMMAND)
+				return 1;
+			else if (ss_cmd == DIAG_BUFFERING_MODE)
+				return 1;
+			break;
+		case 0x13: /* DIAG_SUBSYS_FS */
+			if ((ss_cmd == 0) || (ss_cmd == 0x1))
+				return 1;
+			break;
+		default:
+			return 0;
+		}
+	} else {
+		switch (packet_id) {
+		case 0x00:    /* Version Number */
+		case 0x0C:    /* CDMA status packet */
+		case 0x1C:    /* Diag Version */
+		case 0x1D:    /* Time Stamp */
+		case 0x60:    /* Event Report Control */
+		case 0x63:    /* Status snapshot */
+		case 0x73:    /* Logging Configuration */
+		case 0x7C:    /* Extended build ID */
+		case 0x7D:    /* Extended Message configuration */
+		case 0x81:    /* Event get mask */
+		case 0x82:    /* Set the event mask */
+			return 1;
+		default:
+			return 0;
+		}
+	}
+	return 0;
+}
+
+/* Initialise memory-device session bookkeeping to the "no session" state. */
+static void diag_md_session_init(void)
+{
+	int i;
+
+	mutex_init(&driver->md_session_lock);
+	driver->md_session_mask = 0;
+	driver->md_session_mode = DIAG_MD_NONE;
+	for (i = 0; i < NUM_MD_SESSIONS; i++)
+		driver->md_session_map[i] = NULL;
+}
+
+/*
+ * Tear down all memory-device sessions: free each session's private log,
+ * msg and event masks and the session itself, then reset global state.
+ * Called on driver teardown; no other session users may be active.
+ */
+static void diag_md_session_exit(void)
+{
+	int i;
+	struct diag_md_session_t *session_info = NULL;
+
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i]) {
+			session_info = driver->md_session_map[i];
+			diag_log_mask_free(session_info->log_mask);
+			kfree(session_info->log_mask);
+			session_info->log_mask = NULL;
+			diag_msg_mask_free(session_info->msg_mask);
+			kfree(session_info->msg_mask);
+			session_info->msg_mask = NULL;
+			diag_event_mask_free(session_info->event_mask);
+			kfree(session_info->event_mask);
+			session_info->event_mask = NULL;
+			kfree(session_info);
+			session_info = NULL;
+			driver->md_session_map[i] = NULL;
+		}
+	}
+	mutex_destroy(&driver->md_session_lock);
+	driver->md_session_mask = 0;
+	driver->md_session_mode = DIAG_MD_NONE;
+}
+
+/*
+ * diag_md_session_create - create a memory-device session for the caller
+ * @mode:            requested session mode (DIAG_MD_PERIPHERAL)
+ * @peripheral_mask: internal peripheral bits the session wants to own
+ * @proc:            processor id (currently only DIAG_LOCAL_PROC)
+ *
+ * Allocates a session with private copies of the global log/event/msg
+ * masks and claims each requested peripheral in md_session_map.
+ * Returns 0 on success; -EINVAL, -ENOMEM or -EEXIST on failure.  On
+ * failure, any map slots already claimed are rolled back (the original
+ * left dangling pointers to the freed session behind).
+ */
+int diag_md_session_create(int mode, int peripheral_mask, int proc)
+{
+	int i;
+	int err = 0;
+	struct diag_md_session_t *new_session = NULL;
+
+	/*
+	 * If a session is running with a peripheral mask and a new session
+	 * request comes in with same peripheral mask value then return
+	 * invalid param
+	 */
+	if (driver->md_session_mode == DIAG_MD_PERIPHERAL &&
+	    (driver->md_session_mask & peripheral_mask) != 0)
+		return -EINVAL;
+
+	mutex_lock(&driver->md_session_lock);
+	new_session = kzalloc(sizeof(struct diag_md_session_t), GFP_KERNEL);
+	if (!new_session) {
+		mutex_unlock(&driver->md_session_lock);
+		return -ENOMEM;
+	}
+
+	new_session->peripheral_mask = 0;
+	new_session->pid = current->tgid;
+	new_session->task = current;
+
+	new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
+					GFP_KERNEL);
+	if (!new_session->log_mask) {
+		err = -ENOMEM;
+		goto fail_peripheral;
+	}
+	new_session->event_mask = kzalloc(sizeof(struct diag_mask_info),
+					  GFP_KERNEL);
+	if (!new_session->event_mask) {
+		err = -ENOMEM;
+		goto fail_peripheral;
+	}
+	new_session->msg_mask = kzalloc(sizeof(struct diag_mask_info),
+					GFP_KERNEL);
+	if (!new_session->msg_mask) {
+		err = -ENOMEM;
+		goto fail_peripheral;
+	}
+
+	err = diag_log_mask_copy(new_session->log_mask, &log_mask);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "return value of log copy. err %d\n", err);
+		goto fail_peripheral;
+	}
+	err = diag_event_mask_copy(new_session->event_mask, &event_mask);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "return value of event copy. err %d\n", err);
+		goto fail_peripheral;
+	}
+	err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "return value of msg copy. err %d\n", err);
+		goto fail_peripheral;
+	}
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if ((MD_PERIPHERAL_MASK(i) & peripheral_mask) == 0)
+			continue;
+		if (driver->md_session_map[i] != NULL) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				 "another instance present for %d\n", i);
+			err = -EEXIST;
+			goto fail_peripheral;
+		}
+		new_session->peripheral_mask |= MD_PERIPHERAL_MASK(i);
+		driver->md_session_map[i] = new_session;
+		driver->md_session_mask |= MD_PERIPHERAL_MASK(i);
+	}
+	setup_timer(&new_session->hdlc_reset_timer,
+		diag_md_hdlc_reset_timer_func,
+		new_session->pid);
+
+	driver->md_session_mode = DIAG_MD_PERIPHERAL;
+	mutex_unlock(&driver->md_session_lock);
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		 "created session in peripheral mode\n");
+	return 0;
+
+fail_peripheral:
+	/*
+	 * Roll back any map slots claimed before the failure so no dangling
+	 * pointer to the about-to-be-freed session remains reachable.
+	 */
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i] == new_session) {
+			driver->md_session_map[i] = NULL;
+			driver->md_session_mask &= ~MD_PERIPHERAL_MASK(i);
+		}
+	}
+	diag_log_mask_free(new_session->log_mask);
+	kfree(new_session->log_mask);
+	new_session->log_mask = NULL;
+	diag_event_mask_free(new_session->event_mask);
+	kfree(new_session->event_mask);
+	new_session->event_mask = NULL;
+	diag_msg_mask_free(new_session->msg_mask);
+	kfree(new_session->msg_mask);
+	new_session->msg_mask = NULL;
+	kfree(new_session);
+	new_session = NULL;
+	mutex_unlock(&driver->md_session_lock);
+	return err;
+}
+
+/*
+ * diag_md_session_close - detach and free a memory-device session
+ *
+ * Clears every map slot owned by @session_info, frees its private masks,
+ * stops its HDLC reset timer and drops back to DIAG_MD_NONE when no other
+ * session remains.  Takes md_session_lock; the caller must not hold it.
+ */
+static void diag_md_session_close(struct diag_md_session_t *session_info)
+{
+	int i;
+	uint8_t found = 0;
+
+	if (!session_info)
+		return;
+
+	mutex_lock(&driver->md_session_lock);
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i] != session_info)
+			continue;
+		driver->md_session_map[i] = NULL;
+		driver->md_session_mask &= ~session_info->peripheral_mask;
+	}
+	diag_log_mask_free(session_info->log_mask);
+	kfree(session_info->log_mask);
+	session_info->log_mask = NULL;
+	diag_msg_mask_free(session_info->msg_mask);
+	kfree(session_info->msg_mask);
+	session_info->msg_mask = NULL;
+	diag_event_mask_free(session_info->event_mask);
+	kfree(session_info->event_mask);
+	session_info->event_mask = NULL;
+	del_timer(&session_info->hdlc_reset_timer);
+
+	/* Any session left in the map keeps us in peripheral mode */
+	for (i = 0; i < NUM_MD_SESSIONS && !found; i++) {
+		if (driver->md_session_map[i] != NULL)
+			found = 1;
+	}
+
+	driver->md_session_mode = (found) ? DIAG_MD_PERIPHERAL : DIAG_MD_NONE;
+	kfree(session_info);
+	session_info = NULL;
+	mutex_unlock(&driver->md_session_lock);
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "cleared up session\n");
+}
+
+/* Look up the memory-device session owned by process @pid, or NULL. */
+struct diag_md_session_t *diag_md_session_get_pid(int pid)
+{
+	struct diag_md_session_t *session;
+	int idx;
+
+	for (idx = 0; idx < NUM_MD_SESSIONS; idx++) {
+		session = driver->md_session_map[idx];
+		if (session && session->pid == pid)
+			return session;
+	}
+	return NULL;
+}
+
+/* Return the session owning @peripheral, or NULL if out of range/unowned. */
+struct diag_md_session_t *diag_md_session_get_peripheral(uint8_t peripheral)
+{
+	return (peripheral < NUM_MD_SESSIONS) ?
+			driver->md_session_map[peripheral] : NULL;
+}
+
+/*
+ * diag_md_peripheral_switch - move peripherals between USB logging and a
+ * memory-device session
+ * @session_info:    the session releasing or claiming the peripherals
+ * @peripheral_mask: which peripherals to move
+ * @req_mode:        DIAG_USB_MODE (release) or DIAG_MEMORY_DEVICE_MODE (claim)
+ *
+ * Returns 0 on success, -EINVAL on bad arguments or when the map state
+ * shows another session raced us for a peripheral.
+ */
+static int diag_md_peripheral_switch(struct diag_md_session_t *session_info,
+				int peripheral_mask, int req_mode) {
+	int i, bit = 0;
+
+	if (!session_info)
+		return -EINVAL;
+	/*
+	 * Reject any mode other than USB or memory-device.  The original
+	 * used '||' here, which is always true for any req_mode and made
+	 * this function unconditionally fail with -EINVAL.
+	 */
+	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+		return -EINVAL;
+
+	/*
+	 * check that md_session_map for i == session_info,
+	 * if not then race condition occurred and bail
+	 */
+	mutex_lock(&driver->md_session_lock);
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		bit = MD_PERIPHERAL_MASK(i) & peripheral_mask;
+		if (!bit)
+			continue;
+		if (req_mode == DIAG_USB_MODE) {
+			if (driver->md_session_map[i] != session_info) {
+				mutex_unlock(&driver->md_session_lock);
+				return -EINVAL;
+			}
+			driver->md_session_map[i] = NULL;
+			driver->md_session_mask &= ~bit;
+			session_info->peripheral_mask &= ~bit;
+
+		} else {
+			if (driver->md_session_map[i] != NULL) {
+				mutex_unlock(&driver->md_session_lock);
+				return -EINVAL;
+			}
+			driver->md_session_map[i] = session_info;
+			driver->md_session_mask |= bit;
+			session_info->peripheral_mask |= bit;
+
+		}
+	}
+
+	driver->md_session_mode = DIAG_MD_PERIPHERAL;
+	mutex_unlock(&driver->md_session_lock);
+	DIAG_LOG(DIAG_DEBUG_USERSPACE, "Changed Peripherals:0x%x to mode:%d\n",
+		peripheral_mask, req_mode);
+	/* The original fell off the end of this non-void function (UB). */
+	return 0;
+}
+
+/*
+ * diag_md_session_check - decide whether a logging-mode switch is allowed
+ * @curr_mode:   current logging mode (USB/memory-device/multi)
+ * @req_mode:    requested mode (DIAG_USB_MODE or DIAG_MEMORY_DEVICE_MODE)
+ * @param:       carries the requested peripheral mask
+ * @change_mode: out; set to 1 when the caller should perform the switch
+ *
+ * Enforces session ownership: a peripheral claimed by one memory-device
+ * session cannot be switched by another process.  Returns 0 when the
+ * switch may proceed (possibly after adjusting or creating a session),
+ * negative errno otherwise.
+ */
+static int diag_md_session_check(int curr_mode, int req_mode,
+				 const struct diag_logging_mode_param_t *param,
+				 uint8_t *change_mode)
+{
+	int i, bit = 0, err = 0;
+	int change_mask = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	if (!param || !change_mode)
+		return -EIO;
+
+	*change_mode = 0;
+
+	switch (curr_mode) {
+	case DIAG_USB_MODE:
+	case DIAG_MEMORY_DEVICE_MODE:
+	case DIAG_MULTI_MODE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (req_mode != DIAG_USB_MODE && req_mode != DIAG_MEMORY_DEVICE_MODE)
+		return -EINVAL;
+
+	if (req_mode == DIAG_USB_MODE) {
+		if (curr_mode == DIAG_USB_MODE)
+			return 0;
+		if (driver->md_session_mode == DIAG_MD_NONE
+		    && driver->md_session_mask == 0 && driver->logging_mask) {
+			*change_mode = 1;
+			return 0;
+		}
+
+		/*
+		 * curr_mode is either DIAG_MULTI_MODE or DIAG_MD_MODE
+		 * Check if requested peripherals are already in usb mode
+		 */
+		for (i = 0; i < NUM_MD_SESSIONS; i++) {
+			bit = MD_PERIPHERAL_MASK(i) & param->peripheral_mask;
+			if (!bit)
+				continue;
+			if (bit & driver->logging_mask)
+				change_mask |= bit;
+		}
+		if (!change_mask)
+			return 0;
+
+		/*
+		 * Change is needed. Check if this md_session has set all the
+		 * requested peripherals. If another md session set a requested
+		 * peripheral then we cannot switch that peripheral to USB.
+		 * If this session owns all the requested peripherals, then
+		 * call function to switch the modes/masks for the md_session
+		 */
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (!session_info) {
+			*change_mode = 1;
+			return 0;
+		}
+		if ((change_mask & session_info->peripheral_mask)
+							!= change_mask) {
+			DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			    "Another MD Session owns a requested peripheral\n");
+			return -EINVAL;
+		}
+		*change_mode = 1;
+
+		/* If all peripherals are being set to USB Mode, call close */
+		if (~change_mask & session_info->peripheral_mask) {
+			err = diag_md_peripheral_switch(session_info,
+					change_mask, DIAG_USB_MODE);
+		} else
+			diag_md_session_close(session_info);
+
+		return err;
+
+	} else if (req_mode == DIAG_MEMORY_DEVICE_MODE) {
+		/*
+		 * Get bit mask that represents what peripherals already have
+		 * been set. Check that requested peripherals already set are
+		 * owned by this md session
+		 */
+		change_mask = driver->md_session_mask & param->peripheral_mask;
+		session_info = diag_md_session_get_pid(current->tgid);
+
+		if (session_info) {
+			if ((session_info->peripheral_mask & change_mask)
+							!= change_mask) {
+				DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				    "Another MD Session owns a requested peripheral\n");
+				return -EINVAL;
+			}
+			/*
+			 * NOTE(review): passes DIAG_USB_MODE although the
+			 * request is for memory-device mode - confirm this
+			 * release-then-reclaim flow is intended.
+			 */
+			err = diag_md_peripheral_switch(session_info,
+					change_mask, DIAG_USB_MODE);
+		} else {
+			if (change_mask) {
+				DIAG_LOG(DIAG_DEBUG_USERSPACE,
+				    "Another MD Session owns a requested peripheral\n");
+				return -EINVAL;
+			}
+			err = diag_md_session_create(DIAG_MD_PERIPHERAL,
+				param->peripheral_mask, DIAG_LOCAL_PROC);
+		}
+		*change_mode = 1;
+		return err;
+	}
+	return -EINVAL;
+}
+
+/* Translate userspace DIAG_CON_* connection bits into internal
+ * peripheral-index bits.
+ */
+static uint32_t diag_translate_mask(uint32_t peripheral_mask)
+{
+	static const struct {
+		uint32_t con_bit;
+		int peripheral;
+	} bit_map[] = {
+		{ DIAG_CON_APSS, APPS_DATA },
+		{ DIAG_CON_MPSS, PERIPHERAL_MODEM },
+		{ DIAG_CON_LPASS, PERIPHERAL_LPASS },
+		{ DIAG_CON_WCNSS, PERIPHERAL_WCNSS },
+		{ DIAG_CON_SENSORS, PERIPHERAL_SENSORS },
+		{ DIAG_CON_WDSP, PERIPHERAL_WDSP },
+	};
+	uint32_t ret = 0;
+	int i;
+
+	for (i = 0; i < (int)(sizeof(bit_map) / sizeof(bit_map[0])); i++) {
+		if (peripheral_mask & bit_map[i].con_bit)
+			ret |= (1 << bit_map[i].peripheral);
+	}
+
+	return ret;
+}
+
+/*
+ * diag_switch_logging - handle DIAG_IOCTL_SWITCH_LOGGING
+ *
+ * Translates the userspace peripheral mask, maps the requested mode
+ * (callback/UART/socket/memory-device all fold into memory-device; USB
+ * stays USB), consults diag_md_session_check() and, when a switch is
+ * required, retargets the mux and updates real-time votes.
+ * Returns 0 on success or a negative errno.
+ */
+static int diag_switch_logging(struct diag_logging_mode_param_t *param)
+{
+	int new_mode;
+	int curr_mode;
+	int err = 0;
+	uint8_t do_switch = 1;
+	uint32_t peripheral_mask = 0;
+
+	if (!param)
+		return -EINVAL;
+
+	if (!param->peripheral_mask) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			"asking for mode switch with no peripheral mask set\n");
+		return -EINVAL;
+	}
+
+	/* Convert DIAG_CON_* bits to internal peripheral bits in place */
+	peripheral_mask = diag_translate_mask(param->peripheral_mask);
+	param->peripheral_mask = peripheral_mask;
+
+	switch (param->req_mode) {
+	case CALLBACK_MODE:
+	case UART_MODE:
+	case SOCKET_MODE:
+	case MEMORY_DEVICE_MODE:
+		new_mode = DIAG_MEMORY_DEVICE_MODE;
+		break;
+	case USB_MODE:
+		new_mode = DIAG_USB_MODE;
+		break;
+	default:
+		pr_err("diag: In %s, request to switch to invalid mode: %d\n",
+		       __func__, param->req_mode);
+		return -EINVAL;
+	}
+
+	curr_mode = driver->logging_mode;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		"request to switch logging from %d mask:%0x to %d mask:%0x\n",
+		curr_mode, driver->md_session_mask, new_mode, peripheral_mask);
+
+	err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
+	if (err) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "err from diag_md_session_check, err: %d\n", err);
+		return err;
+	}
+
+	if (do_switch == 0) {
+		DIAG_LOG(DIAG_DEBUG_USERSPACE,
+			 "not switching modes c: %d n: %d\n",
+			 curr_mode, new_mode);
+		return 0;
+	}
+
+	diag_ws_reset(DIAG_WS_MUX);
+	err = diag_mux_switch_logging(&new_mode, &peripheral_mask);
+	if (err) {
+		pr_err("diag: In %s, unable to switch mode from %d to %d, err: %d\n",
+		       __func__, curr_mode, new_mode, err);
+		driver->logging_mode = curr_mode;
+		goto fail;
+	}
+	driver->logging_mode = new_mode;
+	driver->logging_mask = peripheral_mask;
+	DIAG_LOG(DIAG_DEBUG_USERSPACE,
+		"Switch logging to %d mask:%0x\n", new_mode, peripheral_mask);
+
+	/* Update to take peripheral_mask */
+	if (new_mode != DIAG_MEMORY_DEVICE_MODE) {
+		diag_update_real_time_vote(DIAG_PROC_MEMORY_DEVICE,
+					   MODE_REALTIME, ALL_PROC);
+	} else {
+		diag_update_proc_vote(DIAG_PROC_MEMORY_DEVICE, VOTE_UP,
+				      ALL_PROC);
+	}
+
+	if (!(new_mode == DIAG_MEMORY_DEVICE_MODE &&
+	      curr_mode == DIAG_USB_MODE)) {
+		queue_work(driver->diag_real_time_wq,
+			   &driver->diag_real_time_work);
+	}
+
+	return 0;
+fail:
+	return err;
+}
+
+/* Register the calling process as a DCI client from the table at @ioarg. */
+static int diag_ioctl_dci_reg(unsigned long ioarg)
+{
+	struct diag_dci_reg_tbl_t dci_reg_params;
+
+	if (copy_from_user(&dci_reg_params, (void __user *)ioarg,
+			   sizeof(dci_reg_params)))
+		return -EFAULT;
+
+	return diag_dci_register_client(&dci_reg_params);
+}
+
+/*
+ * Copy DCI health statistics for the client described at @ioarg back to
+ * userspace.  Returns the DCI status code from the copy operation.
+ */
+static int diag_ioctl_dci_health_stats(unsigned long ioarg)
+{
+	int result = -EINVAL;
+	struct diag_dci_health_stats_proc stats;
+
+	if (copy_from_user(&stats, (void __user *)ioarg,
+				sizeof(struct diag_dci_health_stats_proc)))
+		return -EFAULT;
+
+	result = diag_dci_copy_health_stats(&stats);
+	if (result == DIAG_DCI_NO_ERROR) {
+		if (copy_to_user((void __user *)ioarg, &stats,
+			sizeof(struct diag_dci_health_stats_proc)))
+			return -EFAULT;
+	}
+
+	return result;
+}
+
+/*
+ * Report whether a given DCI log code is enabled for the requesting
+ * client; the answer is written back to userspace in le_stats.is_set.
+ */
+static int diag_ioctl_dci_log_status(unsigned long ioarg)
+{
+	struct diag_log_event_stats le_stats;
+	struct diag_dci_client_tbl *dci_client = NULL;
+
+	if (copy_from_user(&le_stats, (void __user *)ioarg,
+				sizeof(struct diag_log_event_stats)))
+		return -EFAULT;
+
+	dci_client = diag_dci_get_client_entry(le_stats.client_id);
+	if (!dci_client)
+		return DIAG_DCI_NOT_SUPPORTED;
+	le_stats.is_set = diag_dci_query_log_mask(dci_client, le_stats.code);
+	if (copy_to_user((void __user *)ioarg, &le_stats,
+				sizeof(struct diag_log_event_stats)))
+		return -EFAULT;
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+/*
+ * Report whether a given DCI event code is enabled for the requesting
+ * client; the answer is written back to userspace in is_set.
+ */
+static int diag_ioctl_dci_event_status(unsigned long ioarg)
+{
+	struct diag_log_event_stats stats;
+	struct diag_dci_client_tbl *client;
+
+	if (copy_from_user(&stats, (void __user *)ioarg, sizeof(stats)))
+		return -EFAULT;
+
+	client = diag_dci_get_client_entry(stats.client_id);
+	if (!client)
+		return DIAG_DCI_NOT_SUPPORTED;
+
+	stats.is_set = diag_dci_query_event_mask(client, stats.code);
+	if (copy_to_user((void __user *)ioarg, &stats, sizeof(stats)))
+		return -EFAULT;
+
+	return DIAG_DCI_NO_ERROR;
+}
+
+/*
+ * Flag the calling client's data_ready with DEINIT_TYPE and wake readers
+ * so the client thread can unwind.  Returns 1 on success (legacy ioctl
+ * convention used by this driver), -EINVAL if the caller is not a
+ * registered client.
+ * NOTE(review): client_map is scanned without an explicit lock here -
+ * confirm the ioctl path serialises against table updates.
+ */
+static int diag_ioctl_lsm_deinit(void)
+{
+	int i;
+
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == current->tgid)
+			break;
+
+	if (i == driver->num_clients)
+		return -EINVAL;
+
+	driver->data_ready[i] |= DEINIT_TYPE;
+	wake_up_interruptible(&driver->wait_q);
+
+	return 1;
+}
+
+/*
+ * Apply a real-time/buffering vote from userspace, either on behalf of a
+ * DCI client (vote.proc == DIAG_PROC_DCI) or a memory-device process, and
+ * queue the worker that propagates the resulting mode to peripherals.
+ */
+static int diag_ioctl_vote_real_time(unsigned long ioarg)
+{
+	int real_time = 0;
+	int temp_proc = ALL_PROC;
+	struct real_time_vote_t vote;
+	struct diag_dci_client_tbl *dci_client = NULL;
+
+	if (copy_from_user(&vote, (void __user *)ioarg,
+			sizeof(struct real_time_vote_t)))
+		return -EFAULT;
+
+	if (vote.proc > DIAG_PROC_MEMORY_DEVICE ||
+		vote.real_time_vote > MODE_UNKNOWN ||
+		vote.client_id < 0) {
+		pr_err("diag: %s, invalid params, proc: %d, vote: %d, client_id: %d\n",
+			__func__, vote.proc, vote.real_time_vote,
+			vote.client_id);
+		return -EINVAL;
+	}
+
+	/* Block concurrent DIAG_IOCTL_GET_REAL_TIME queries while we update */
+	driver->real_time_update_busy++;
+	if (vote.proc == DIAG_PROC_DCI) {
+		dci_client = diag_dci_get_client_entry(vote.client_id);
+		if (!dci_client) {
+			driver->real_time_update_busy--;
+			return DIAG_DCI_NOT_SUPPORTED;
+		}
+		diag_dci_set_real_time(dci_client, vote.real_time_vote);
+		real_time = diag_dci_get_cumulative_real_time(
+					dci_client->client_info.token);
+		diag_update_real_time_vote(vote.proc, real_time,
+					dci_client->client_info.token);
+	} else {
+		/* For memory-device votes, client_id carries the processor id */
+		real_time = vote.real_time_vote;
+		temp_proc = vote.client_id;
+		diag_update_real_time_vote(vote.proc, real_time,
+					   temp_proc);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	return 0;
+}
+
+/*
+ * Return the current real-time mode for the queried processor.  Waits
+ * briefly (3 retries x ~50ms) for in-flight vote updates to settle and
+ * reports MODE_UNKNOWN when any local peripheral is in a buffering mode.
+ */
+static int diag_ioctl_get_real_time(unsigned long ioarg)
+{
+	int i;
+	int retry_count = 0;
+	int timer = 0;
+	struct real_time_query_t rt_query;
+
+	if (copy_from_user(&rt_query, (void __user *)ioarg,
+					sizeof(struct real_time_query_t)))
+		return -EFAULT;
+	while (retry_count < 3) {
+		if (driver->real_time_update_busy > 0) {
+			retry_count++;
+			/*
+			 * The value 10000 was chosen empirically as an
+			 * optimum value in order to give the work in
+			 * diag_real_time_wq to complete processing.
+			 */
+			for (timer = 0; timer < 5; timer++)
+				usleep_range(10000, 10100);
+		} else {
+			break;
+		}
+	}
+
+	if (driver->real_time_update_busy > 0)
+		return -EAGAIN;
+
+	if (rt_query.proc < 0 || rt_query.proc >= DIAG_NUM_PROC) {
+		pr_err("diag: Invalid proc %d in %s\n", rt_query.proc,
+		       __func__);
+		return -EINVAL;
+	}
+	rt_query.real_time = driver->real_time_mode[rt_query.proc];
+	/*
+	 * For the local processor, if any of the peripherals is in buffering
+	 * mode, overwrite the value of real time with UNKNOWN_MODE
+	 */
+	if (rt_query.proc == DIAG_LOCAL_PROC) {
+		for (i = 0; i < NUM_PERIPHERALS; i++) {
+			if (!driver->feature[i].peripheral_buffering)
+				continue;
+			switch (driver->buffering_mode[i].mode) {
+			case DIAG_BUFFERING_MODE_CIRCULAR:
+			case DIAG_BUFFERING_MODE_THRESHOLD:
+				rt_query.real_time = MODE_UNKNOWN;
+				break;
+			}
+		}
+	}
+
+	if (copy_to_user((void __user *)ioarg, &rt_query,
+			 sizeof(struct real_time_query_t)))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Configure the buffering mode for one peripheral and flag it so the
+ * mode-change logic knows buffering is in use on that peripheral.
+ */
+static int diag_ioctl_set_buffering_mode(unsigned long ioarg)
+{
+	struct diag_buffering_mode_t params;
+
+	if (copy_from_user(&params, (void __user *)ioarg, sizeof(params)))
+		return -EFAULT;
+
+	if (params.peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	mutex_lock(&driver->mode_lock);
+	driver->buffering_flag[params.peripheral] = 1;
+	mutex_unlock(&driver->mode_lock);
+
+	return diag_send_peripheral_buffering_mode(&params);
+}
+
+/*
+ * Ask the peripheral identified by the uint8_t at @ioarg to flush its
+ * buffered diag data immediately.  Fails with -EIO if the peripheral
+ * does not support buffering.
+ */
+static int diag_ioctl_peripheral_drain_immediate(unsigned long ioarg)
+{
+	uint8_t periph;
+
+	if (copy_from_user(&periph, (void __user *)ioarg, sizeof(periph)))
+		return -EFAULT;
+
+	if (periph >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       periph);
+		return -EINVAL;
+	}
+
+	if (!driver->feature[periph].peripheral_buffering) {
+		pr_err("diag: In %s, peripheral %d doesn't support buffering\n",
+		       __func__, periph);
+		return -EIO;
+	}
+
+	return diag_send_peripheral_drain_immediate(periph);
+}
+
+/* Report the set of peripherals that support DCI back to userspace. */
+static int diag_ioctl_dci_support(unsigned long ioarg)
+{
+	struct diag_dci_peripherals_t dci_support;
+	int result = -EINVAL;
+
+	if (copy_from_user(&dci_support, (void __user *)ioarg,
+				sizeof(struct diag_dci_peripherals_t)))
+		return -EFAULT;
+
+	result = diag_dci_get_support_list(&dci_support);
+	if (result == DIAG_DCI_NO_ERROR)
+		if (copy_to_user((void __user *)ioarg, &dci_support,
+				sizeof(struct diag_dci_peripherals_t)))
+			return -EFAULT;
+
+	return result;
+}
+
+/*
+ * Enable/disable HDLC encoding for the caller.  The setting is stored on
+ * the caller's memory-device session when one exists, otherwise in the
+ * global driver state, and registered md clients are notified.
+ */
+static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
+{
+	uint8_t hdlc_support;
+	struct diag_md_session_t *session_info = NULL;
+
+	session_info = diag_md_session_get_pid(current->tgid);
+	if (copy_from_user(&hdlc_support, (void __user *)ioarg,
+				sizeof(uint8_t)))
+		return -EFAULT;
+	/* hdlc_disable_mutex taken before md_session_lock - keep this order */
+	mutex_lock(&driver->hdlc_disable_mutex);
+	if (session_info) {
+		mutex_lock(&driver->md_session_lock);
+		session_info->hdlc_disabled = hdlc_support;
+		mutex_unlock(&driver->md_session_lock);
+	} else
+		driver->hdlc_disabled = hdlc_support;
+	mutex_unlock(&driver->hdlc_disable_mutex);
+	diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+	return 0;
+}
+
+/*
+ * Validate a callback registration request: the proc id must be in range
+ * and no memory-device session may be active.  Returns 0 on success.
+ */
+static int diag_ioctl_register_callback(unsigned long ioarg)
+{
+	struct diag_callback_reg_t reg;
+
+	if (copy_from_user(&reg, (void __user *)ioarg, sizeof(reg)))
+		return -EFAULT;
+
+	if (reg.proc < 0 || reg.proc >= DIAG_NUM_PROC) {
+		pr_err("diag: In %s, invalid proc %d for callback registration\n",
+		       __func__, reg.proc);
+		return -EINVAL;
+	}
+
+	if (driver->md_session_mode == DIAG_MD_PERIPHERAL)
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * diag_cmd_register_tbl - register a userspace table of diag commands
+ *
+ * Validates reg_tbl->count against integer overflow, copies the entry
+ * array from userspace and registers each entry against the caller's
+ * pid.  Returns 0 on success or a negative errno.
+ */
+static int diag_cmd_register_tbl(struct diag_cmd_reg_tbl_t *reg_tbl)
+{
+	int i;
+	int err = 0;
+	uint32_t count = 0;
+	struct diag_cmd_reg_entry_t *entries = NULL;
+	const uint16_t entry_len = sizeof(struct diag_cmd_reg_entry_t);
+
+
+	if (!reg_tbl) {
+		pr_err("diag: In %s, invalid registration table\n", __func__);
+		return -EINVAL;
+	}
+
+	count = reg_tbl->count;
+	if ((UINT_MAX / entry_len) < count) {
+		pr_warn("diag: In %s, possbile integer overflow.\n", __func__);
+		return -EFAULT;
+	}
+
+	entries = kzalloc(count * entry_len, GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
+
+
+	/* copy_from_user() returns bytes NOT copied, logged below as err */
+	err = copy_from_user(entries, reg_tbl->entries, count * entry_len);
+	if (err) {
+		pr_err("diag: In %s, error copying data from userspace, err: %d\n",
+		       __func__, err);
+		kfree(entries);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < count; i++) {
+		err = diag_cmd_add_reg(&entries[i], APPS_DATA, current->tgid);
+		if (err) {
+			pr_err("diag: In %s, unable to register command, err: %d\n",
+			       __func__, err);
+			break;
+		}
+	}
+
+	kfree(entries);
+	return err;
+}
+
+/* Copy the registration table header from userspace and register it. */
+static int diag_ioctl_cmd_reg(unsigned long ioarg)
+{
+	struct diag_cmd_reg_tbl_t reg_tbl;
+
+	if (copy_from_user(&reg_tbl, (void __user *)ioarg, sizeof(reg_tbl)))
+		return -EFAULT;
+
+	return diag_cmd_register_tbl(&reg_tbl);
+}
+
+/* Remove every command registration owned by the calling process. */
+static int diag_ioctl_cmd_dereg(void)
+{
+	diag_cmd_remove_reg_by_pid(current->tgid);
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * @sync_obj_name: name of the synchronization object associated with this proc
+ * @count: number of entries in the bind
+ * @params: the actual packet registrations
+ */
+struct diag_cmd_reg_tbl_compat_t {
+	char sync_obj_name[MAX_SYNC_OBJ_NAME_SIZE];
+	uint32_t count;		/* number of entries in the table */
+	compat_uptr_t entries;	/* 32-bit userspace pointer to the entries */
+};
+
+/*
+ * 32-bit compat path for DIAG_IOCTL_COMMAND_REG: convert the compat table
+ * header (compat_uptr_t entries pointer) into the native layout and reuse
+ * diag_cmd_register_tbl().
+ */
+static int diag_ioctl_cmd_reg_compat(unsigned long ioarg)
+{
+	struct diag_cmd_reg_tbl_compat_t reg_tbl_compat;
+	struct diag_cmd_reg_tbl_t reg_tbl;
+
+	if (copy_from_user(&reg_tbl_compat, (void __user *)ioarg,
+			   sizeof(struct diag_cmd_reg_tbl_compat_t))) {
+		return -EFAULT;
+	}
+
+	strlcpy(reg_tbl.sync_obj_name, reg_tbl_compat.sync_obj_name,
+		MAX_SYNC_OBJ_NAME_SIZE);
+	reg_tbl.count = reg_tbl_compat.count;
+	/* Widen the 32-bit user pointer to the native pointer type */
+	reg_tbl.entries = (struct diag_cmd_reg_entry_t *)
+			  (uintptr_t)reg_tbl_compat.entries;
+
+	return diag_cmd_register_tbl(&reg_tbl);
+}
+
+/*
+ * diagchar_compat_ioctl - 32-bit compat ioctl entry point for the diag
+ * char device.
+ *
+ * Largely mirrors diagchar_ioctl() below; the structural difference is
+ * DIAG_IOCTL_COMMAND_REG, which goes through diag_ioctl_cmd_reg_compat()
+ * to translate the 32-bit registration table layout.
+ *
+ * Returns 0 or a command-specific status on success, a negative errno on
+ * failure, and -EINVAL for unrecognized commands.
+ */
+long diagchar_compat_ioctl(struct file *filp,
+			   unsigned int iocmd, unsigned long ioarg)
+{
+	int result = -EINVAL;
+	int client_id = 0;
+	uint16_t delayed_rsp_id = 0;
+	uint16_t remote_dev;
+	struct diag_dci_client_tbl *dci_client = NULL;
+	struct diag_logging_mode_param_t mode_param;
+
+	switch (iocmd) {
+	case DIAG_IOCTL_COMMAND_REG:
+		result = diag_ioctl_cmd_reg_compat(ioarg);
+		break;
+	case DIAG_IOCTL_COMMAND_DEREG:
+		result = diag_ioctl_cmd_dereg();
+		break;
+	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+		delayed_rsp_id = diag_get_next_delayed_rsp_id();
+		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+				 sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 0;
+		break;
+	case DIAG_IOCTL_DCI_REG:
+		result = diag_ioctl_dci_reg(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_DEINIT:
+		/*
+		 * Look up the DCI client named by the userspace client id and
+		 * tear it down; the whole sequence runs under dci_mutex.
+		 */
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		dci_client = diag_dci_get_client_entry(client_id);
+		if (!dci_client) {
+			mutex_unlock(&driver->dci_mutex);
+			return DIAG_DCI_NOT_SUPPORTED;
+		}
+		result = diag_dci_deinit_client(dci_client);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_SUPPORT:
+		result = diag_ioctl_dci_support(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_HEALTH_STATS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_health_stats(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_LOG_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_log_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_EVENT_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_event_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_LOGS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_log_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user(&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_event_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_LSM_DEINIT:
+		result = diag_ioctl_lsm_deinit();
+		break;
+	case DIAG_IOCTL_SWITCH_LOGGING:
+		/* Copy the mode parameters, then switch under diagchar_mutex. */
+		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+				   sizeof(mode_param)))
+			return -EFAULT;
+		mutex_lock(&driver->diagchar_mutex);
+		result = diag_switch_logging(&mode_param);
+		mutex_unlock(&driver->diagchar_mutex);
+		break;
+	case DIAG_IOCTL_REMOTE_DEV:
+		/* NOTE: success for this command is 1, not 0 (legacy contract). */
+		remote_dev = diag_get_remote_device_mask();
+		if (copy_to_user((void __user *)ioarg, &remote_dev,
+			sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 1;
+		break;
+	case DIAG_IOCTL_VOTE_REAL_TIME:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_vote_real_time(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_GET_REAL_TIME:
+		result = diag_ioctl_get_real_time(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+		result = diag_ioctl_set_buffering_mode(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+		result = diag_ioctl_peripheral_drain_immediate(ioarg);
+		break;
+	case DIAG_IOCTL_REGISTER_CALLBACK:
+		result = diag_ioctl_register_callback(ioarg);
+		break;
+	case DIAG_IOCTL_HDLC_TOGGLE:
+		result = diag_ioctl_hdlc_toggle(ioarg);
+		break;
+	}
+	return result;
+}
+#endif
+
+/*
+ * diagchar_ioctl - native ioctl entry point for the diag char device.
+ *
+ * Dispatches the supported DIAG_IOCTL_* commands.  DCI client state is
+ * manipulated under driver->dci_mutex; logging-mode switches run under
+ * driver->diagchar_mutex.
+ *
+ * Fix: DIAG_IOCTL_DCI_EVENT_STATUS now takes dci_mutex, matching the
+ * compat handler above and the DIAG_IOCTL_DCI_LOG_STATUS case here —
+ * previously it was the only DCI status query issued without the lock.
+ *
+ * Returns 0 or a command-specific status on success, a negative errno on
+ * failure, and -EINVAL for unrecognized commands.
+ */
+long diagchar_ioctl(struct file *filp,
+			   unsigned int iocmd, unsigned long ioarg)
+{
+	int result = -EINVAL;
+	int client_id = 0;
+	uint16_t delayed_rsp_id;
+	uint16_t remote_dev;
+	struct diag_dci_client_tbl *dci_client = NULL;
+	struct diag_logging_mode_param_t mode_param;
+
+	switch (iocmd) {
+	case DIAG_IOCTL_COMMAND_REG:
+		result = diag_ioctl_cmd_reg(ioarg);
+		break;
+	case DIAG_IOCTL_COMMAND_DEREG:
+		result = diag_ioctl_cmd_dereg();
+		break;
+	case DIAG_IOCTL_GET_DELAYED_RSP_ID:
+		delayed_rsp_id = diag_get_next_delayed_rsp_id();
+		if (copy_to_user((void __user *)ioarg, &delayed_rsp_id,
+				 sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 0;
+		break;
+	case DIAG_IOCTL_DCI_REG:
+		result = diag_ioctl_dci_reg(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_DEINIT:
+		/*
+		 * Look up the DCI client named by the userspace client id and
+		 * tear it down; the whole sequence runs under dci_mutex.
+		 */
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		dci_client = diag_dci_get_client_entry(client_id);
+		if (!dci_client) {
+			mutex_unlock(&driver->dci_mutex);
+			return DIAG_DCI_NOT_SUPPORTED;
+		}
+		result = diag_dci_deinit_client(dci_client);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_SUPPORT:
+		result = diag_ioctl_dci_support(ioarg);
+		break;
+	case DIAG_IOCTL_DCI_HEALTH_STATS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_health_stats(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_LOG_STATUS:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_log_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_EVENT_STATUS:
+		/* Take dci_mutex like every other DCI status query. */
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_dci_event_status(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_LOGS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user((void *)&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_log_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_DCI_CLEAR_EVENTS:
+		mutex_lock(&driver->dci_mutex);
+		if (copy_from_user(&client_id, (void __user *)ioarg,
+			sizeof(int))) {
+			mutex_unlock(&driver->dci_mutex);
+			return -EFAULT;
+		}
+		result = diag_dci_clear_event_mask(client_id);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_LSM_DEINIT:
+		result = diag_ioctl_lsm_deinit();
+		break;
+	case DIAG_IOCTL_SWITCH_LOGGING:
+		/* Copy the mode parameters, then switch under diagchar_mutex. */
+		if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+				   sizeof(mode_param)))
+			return -EFAULT;
+		mutex_lock(&driver->diagchar_mutex);
+		result = diag_switch_logging(&mode_param);
+		mutex_unlock(&driver->diagchar_mutex);
+		break;
+	case DIAG_IOCTL_REMOTE_DEV:
+		/* NOTE: success for this command is 1, not 0 (legacy contract). */
+		remote_dev = diag_get_remote_device_mask();
+		if (copy_to_user((void __user *)ioarg, &remote_dev,
+			sizeof(uint16_t)))
+			result = -EFAULT;
+		else
+			result = 1;
+		break;
+	case DIAG_IOCTL_VOTE_REAL_TIME:
+		mutex_lock(&driver->dci_mutex);
+		result = diag_ioctl_vote_real_time(ioarg);
+		mutex_unlock(&driver->dci_mutex);
+		break;
+	case DIAG_IOCTL_GET_REAL_TIME:
+		result = diag_ioctl_get_real_time(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_CONFIG:
+		result = diag_ioctl_set_buffering_mode(ioarg);
+		break;
+	case DIAG_IOCTL_PERIPHERAL_BUF_DRAIN:
+		result = diag_ioctl_peripheral_drain_immediate(ioarg);
+		break;
+	case DIAG_IOCTL_REGISTER_CALLBACK:
+		result = diag_ioctl_register_callback(ioarg);
+		break;
+	case DIAG_IOCTL_HDLC_TOGGLE:
+		result = diag_ioctl_hdlc_toggle(ioarg);
+		break;
+	}
+	return result;
+}
+
+/*
+ * diag_process_apps_data_hdlc - HDLC-encode an apps packet and append it
+ * to the shared hdlc_data aggregation buffer.
+ *
+ * The aggregation buffer is flushed to the mux (diag_mux_write) whenever
+ * it cannot fit the worst-case encoded packet, whenever encoding actually
+ * ran past the buffer end, and unconditionally for DATA_TYPE_RESPONSE
+ * packets.  Caller must hold the locks serializing access to hdlc_data
+ * (see diag_user_process_apps_data).
+ *
+ * Returns PKT_ALLOC on success, PKT_DROP when no pool buffer is
+ * available, -EIO on bad input or mux write failure, -EBADMSG when the
+ * encoded size cannot fit any buffer.
+ */
+static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
+				       int pkt_type)
+{
+	int err = 0;
+	int ret = PKT_DROP;
+	struct diag_apps_data_t *data = &hdlc_data;
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	/*
+	 * The maximum encoded size of the buffer can be at most twice the
+	 * length of the packet. Add three bytes for the footer - 16 bit CRC
+	 * (2 bytes) + delimiter (1 byte).
+	 */
+	const uint32_t max_encoded_size = ((2 * len) + 3);
+
+	if (!buf || len <= 0) {
+		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+		       __func__, buf, len);
+		return -EIO;
+	}
+
+	if (max_encoded_size > DIAG_MAX_HDLC_BUF_SIZE) {
+		pr_err_ratelimited("diag: In %s, encoded data is larger %d than the buffer size %d\n",
+		       __func__, max_encoded_size, DIAG_MAX_HDLC_BUF_SIZE);
+		return -EBADMSG;
+	}
+
+	send.state = DIAG_STATE_START;
+	send.pkt = buf;
+	send.last = (void *)(buf + len - 1);
+	send.terminate = 1;
+
+	/* Lazily allocate the aggregation buffer from the HDLC pool. */
+	if (!data->buf)
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+	if (!data->buf) {
+		ret = PKT_DROP;
+		goto fail_ret;
+	}
+
+	/* Not enough room for the worst case: flush and start fresh. */
+	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_encoded_size) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+	}
+
+	enc.dest = data->buf + data->len;
+	enc.dest_last = (void *)(data->buf + data->len + max_encoded_size);
+	diag_hdlc_encode(&send, &enc);
+
+	/*
+	 * This is to check if after HDLC encoding, we are still within
+	 * the limits of aggregation buffer. If not, we write out the
+	 * current buffer and start aggregation in a newly allocated
+	 * buffer.
+	 */
+	if ((uintptr_t)enc.dest >= (uintptr_t)(data->buf +
+					       DIAG_MAX_HDLC_BUF_SIZE)) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					 POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+
+		/* Re-encode the packet into the fresh, empty buffer. */
+		enc.dest = data->buf + data->len;
+		enc.dest_last = (void *)(data->buf + data->len +
+					 max_encoded_size);
+		diag_hdlc_encode(&send, &enc);
+	}
+
+	/* New fill level, clamped to the buffer capacity. */
+	data->len = (((uintptr_t)enc.dest - (uintptr_t)data->buf) <
+			DIAG_MAX_HDLC_BUF_SIZE) ?
+			((uintptr_t)enc.dest - (uintptr_t)data->buf) :
+			DIAG_MAX_HDLC_BUF_SIZE;
+
+	/* Responses are never held back for aggregation. */
+	if (pkt_type == DATA_TYPE_RESPONSE) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+	}
+
+	return PKT_ALLOC;
+
+fail_free_buf:
+	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+	data->buf = NULL;
+	data->len = 0;
+
+fail_ret:
+	return ret;
+}
+
+/*
+ * diag_process_apps_data_non_hdlc - frame an apps packet (start byte +
+ * version + length header, trailing CONTROL_CHAR) and append it to the
+ * shared non_hdlc_data aggregation buffer.
+ *
+ * The buffer is flushed to the mux when it cannot fit the framed packet
+ * and unconditionally for DATA_TYPE_RESPONSE packets.  Caller must hold
+ * the locks serializing access to non_hdlc_data.
+ *
+ * Fix: reject packets whose framed size exceeds DIAG_MAX_HDLC_BUF_SIZE
+ * up front (the HDLC sibling has the analogous check).  Without it, a
+ * large len passes the remaining-space test after a flush and the
+ * memcpy below overruns data->buf.
+ *
+ * Returns PKT_ALLOC on success, PKT_DROP when no pool buffer is
+ * available, -EIO on bad input or mux write failure, -EBADMSG when the
+ * framed packet cannot fit any buffer.
+ */
+static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
+					   int pkt_type)
+{
+	int err = 0;
+	int ret = PKT_DROP;
+	struct diag_pkt_frame_t header;
+	struct diag_apps_data_t *data = &non_hdlc_data;
+	/*
+	 * The maximum packet size, when the data is non hdlc encoded is equal
+	 * to the size of the packet frame header and the length. Add 1 for the
+	 * delimiter 0x7E at the end.
+	 */
+	const uint32_t max_pkt_size = sizeof(header) + len + 1;
+
+	if (!buf || len <= 0) {
+		pr_err("diag: In %s, invalid buf: %pK len: %d\n",
+		       __func__, buf, len);
+		return -EIO;
+	}
+
+	if (max_pkt_size > DIAG_MAX_HDLC_BUF_SIZE) {
+		pr_err_ratelimited("diag: In %s, packet is larger %d than the buffer size %d\n",
+				   __func__, max_pkt_size,
+				   DIAG_MAX_HDLC_BUF_SIZE);
+		return -EBADMSG;
+	}
+
+	/* Lazily allocate the aggregation buffer from the HDLC pool. */
+	if (!data->buf) {
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+	}
+
+	/* Not enough room for this packet: flush and start fresh. */
+	if ((DIAG_MAX_HDLC_BUF_SIZE - data->len) <= max_pkt_size) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+		data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
+					APF_DIAG_PADDING,
+					  POOL_TYPE_HDLC);
+		if (!data->buf) {
+			ret = PKT_DROP;
+			goto fail_ret;
+		}
+	}
+
+	/* Frame header, payload, trailing delimiter. */
+	header.start = CONTROL_CHAR;
+	header.version = 1;
+	header.length = len;
+	memcpy(data->buf + data->len, &header, sizeof(header));
+	data->len += sizeof(header);
+	memcpy(data->buf + data->len, buf, len);
+	data->len += len;
+	*(uint8_t *)(data->buf + data->len) = CONTROL_CHAR;
+	data->len += sizeof(uint8_t);
+	/* Responses are never held back for aggregation. */
+	if (pkt_type == DATA_TYPE_RESPONSE) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, data->buf, data->len,
+				     data->ctxt);
+		if (err) {
+			ret = -EIO;
+			goto fail_free_buf;
+		}
+		data->buf = NULL;
+		data->len = 0;
+	}
+
+	return PKT_ALLOC;
+
+fail_free_buf:
+	diagmem_free(driver, data->buf, POOL_TYPE_HDLC);
+	data->buf = NULL;
+	data->len = 0;
+
+fail_ret:
+	return ret;
+}
+
+/*
+ * diag_user_process_dci_data - copy a DCI transaction from userspace and
+ * hand it to diag_process_dci_transaction().
+ *
+ * Returns the transaction result, DIAG_DCI_SEND_DATA_FAIL when the copy
+ * from userspace fails, -EBADMSG for bad input, -ENOMEM when the
+ * POOL_TYPE_USER pool is exhausted.
+ */
+static int diag_user_process_dci_data(const char __user *buf, int len)
+{
+	const int mempool = POOL_TYPE_USER;
+	unsigned char *kbuf;
+	int ret;
+
+	if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	kbuf = diagmem_alloc(driver, len, mempool);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = copy_from_user(kbuf, buf, len);
+	if (ret) {
+		pr_err_ratelimited("diag: In %s, unable to copy data from userspace, err: %d\n",
+				   __func__, ret);
+		ret = DIAG_DCI_SEND_DATA_FAIL;
+	} else {
+		ret = diag_process_dci_transaction(kbuf, len);
+	}
+
+	/* The pool buffer is only needed for the duration of the call. */
+	diagmem_free(driver, kbuf, mempool);
+	return ret;
+}
+
+/*
+ * diag_user_process_dci_apps_data - copy a DCI apps packet (DCI_PKT_TYPE
+ * or DCI log/event data) from userspace and feed it to
+ * diag_process_apps_dci_read_data().
+ *
+ * Fix: on copy_from_user() failure, return -EFAULT instead of the raw
+ * positive "bytes not copied" count.  The old value propagated up
+ * through diagchar_write(), where a positive return is indistinguishable
+ * from a successful write.
+ *
+ * Returns 0 on success, -EBADMSG for bad input or packet type, -ENOMEM
+ * when the pool is exhausted, -EFAULT when the user copy fails.
+ */
+static int diag_user_process_dci_apps_data(const char __user *buf, int len,
+					   int pkt_type)
+{
+	int err = 0;
+	const int mempool = POOL_TYPE_COPY;
+	unsigned char *user_space_data = NULL;
+
+	if (!buf || len <= 0 || len > diag_mempools[mempool].itemsize) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	/* Only the DCI-related bits of pkt_type are meaningful here. */
+	pkt_type &= (DCI_PKT_TYPE | DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT);
+	if (!pkt_type) {
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, pkt_type);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data)
+		return -ENOMEM;
+
+	err = copy_from_user(user_space_data, buf, len);
+	if (err) {
+		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+			 __func__, err);
+		err = -EFAULT;
+		goto fail;
+	}
+
+	diag_process_apps_dci_read_data(pkt_type, user_space_data, len);
+fail:
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+	return err;
+}
+
+/*
+ * diag_user_process_raw_data - copy a raw command packet from userspace
+ * and route it either to a remote processor or to the local apps packet
+ * handler.
+ *
+ * Fix: the copy_from_user() failure path jumped to fail with ret still
+ * 0, so the caller saw success on a failed copy.  It now returns
+ * -EFAULT.
+ *
+ * Returns the routing result on success, -EBADMSG for bad input or an
+ * underflowing remote payload, -ENOMEM on pool exhaustion, -EFAULT on a
+ * failed copy or invalid mask request.
+ */
+static int diag_user_process_raw_data(const char __user *buf, int len)
+{
+	int err = 0;
+	int ret = 0;
+	int token_offset = 0;
+	int remote_proc = 0;
+	const int mempool = POOL_TYPE_COPY;
+	unsigned char *user_space_data = NULL;
+	struct diag_md_session_t *info = NULL;
+
+	if (!buf || len <= 0 || len > CALLBACK_BUF_SIZE) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data)
+		return -ENOMEM;
+
+	err = copy_from_user(user_space_data, buf, len);
+	if (err) {
+		pr_err("diag: copy failed for user space data\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	/*
+	 * Check for proc_type.
+	 * NOTE(review): this reads sizeof(int) bytes while len is only
+	 * checked for > 0 — confirm callers never pass len < 4.
+	 */
+	remote_proc = diag_get_remote(*(int *)user_space_data);
+	if (remote_proc) {
+		token_offset = sizeof(int);
+		if (len <= MIN_SIZ_ALLOW) {
+			pr_err("diag: In %s, possible integer underflow, payload size: %d\n",
+			       __func__, len);
+			diagmem_free(driver, user_space_data, mempool);
+			user_space_data = NULL;
+			return -EBADMSG;
+		}
+		len -= sizeof(int);
+	}
+	/* Validate mask requests when On-Device logging mask checking is on. */
+	if (driver->mask_check) {
+		if (!mask_request_validate(user_space_data +
+						token_offset)) {
+			pr_alert("diag: mask request Invalid\n");
+			diagmem_free(driver, user_space_data, mempool);
+			user_space_data = NULL;
+			return -EFAULT;
+		}
+	}
+	if (remote_proc) {
+		/* Strip the proc token and forward to the remote device. */
+		ret = diag_send_raw_data_remote(remote_proc,
+				(void *)(user_space_data + token_offset),
+				len, USER_SPACE_RAW_DATA);
+		if (ret) {
+			pr_err("diag: Error sending data to remote proc %d, err: %d\n",
+				remote_proc, ret);
+		}
+	} else {
+		wait_event_interruptible(driver->wait_q,
+					 (driver->in_busy_pktdata == 0));
+		info = diag_md_session_get_pid(current->tgid);
+		ret = diag_process_apps_pkt(user_space_data, len, info);
+		if (ret == 1)
+			diag_send_error_rsp((void *)(user_space_data), len);
+	}
+fail:
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+	return ret;
+}
+
+/*
+ * diag_user_process_userspace_data - copy a userspace (mask) packet into
+ * the shared user_space_data_buf and process it locally or forward it to
+ * a remote processor.
+ *
+ * Fix: removed the dead "if (session_info) ... else ..." fallback — the
+ * preceding !session_info check already returns -EINVAL, so the else
+ * branch could never run.
+ *
+ * Returns 0 on success, -EAGAIN while the shared buffer is busy,
+ * -EBADMSG/-EIO/-EFAULT/-EINVAL on the respective failures, or the
+ * remote-forwarding result.
+ */
+static int diag_user_process_userspace_data(const char __user *buf, int len)
+{
+	int err = 0;
+	int max_retries = 3;
+	int retry_count = 0;
+	int remote_proc = 0;
+	int token_offset = 0;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled;
+
+	if (!buf || len <= 0 || len > USER_SPACE_DATA) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	/* Poll briefly for the shared buffer; give up with -EAGAIN. */
+	do {
+		if (!driver->user_space_data_busy)
+			break;
+		retry_count++;
+		usleep_range(10000, 10100);
+	} while (retry_count < max_retries);
+
+	if (driver->user_space_data_busy)
+		return -EAGAIN;
+
+	err = copy_from_user(driver->user_space_data_buf, buf, len);
+	if (err) {
+		pr_err("diag: In %s, failed to copy data from userspace, err: %d\n",
+		       __func__, err);
+		return -EIO;
+	}
+
+	/*
+	 * Check for proc_type.
+	 * NOTE(review): reads sizeof(int) bytes while len is only checked
+	 * for > 0 — confirm callers never pass len < 4.
+	 */
+	remote_proc = diag_get_remote(*(int *)driver->user_space_data_buf);
+	if (remote_proc) {
+		if (len <= MIN_SIZ_ALLOW) {
+			pr_err("diag: Integer underflow in %s, payload size: %d",
+			       __func__, len);
+			return -EBADMSG;
+		}
+		token_offset = sizeof(int);
+		len -= sizeof(int);
+	}
+
+	/* Check masks for On-Device logging */
+	if (driver->mask_check) {
+		if (!mask_request_validate(driver->user_space_data_buf +
+					   token_offset)) {
+			pr_alert("diag: mask request Invalid\n");
+			return -EFAULT;
+		}
+	}
+
+	/* send masks to local processor now */
+	if (!remote_proc) {
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (!session_info) {
+			pr_err("diag:In %s request came from invalid md session pid:%d",
+				__func__, current->tgid);
+			return -EINVAL;
+		}
+		/* session_info is non-NULL past the check above. */
+		hdlc_disabled = session_info->hdlc_disabled;
+		if (!hdlc_disabled)
+			diag_process_hdlc_pkt((void *)
+				(driver->user_space_data_buf),
+				len, session_info);
+		else
+			diag_process_non_hdlc_pkt((char *)
+						(driver->user_space_data_buf),
+						len, session_info);
+		return 0;
+	}
+
+	err = diag_process_userspace_remote(remote_proc,
+					    driver->user_space_data_buf +
+					    token_offset, len);
+	if (err) {
+		driver->user_space_data_busy = 0;
+		pr_err("diag: Error sending mask to remote proc %d, err: %d\n",
+		       remote_proc, err);
+	}
+
+	return err;
+}
+
+/*
+ * diag_user_process_apps_data - copy an apps log/event/F3/response packet
+ * from userspace and route it to the HDLC or non-HDLC aggregation path
+ * (diag_process_apps_data_hdlc / _non_hdlc), recording per-type
+ * alloc/drop statistics.
+ *
+ * When STM is enabled for APPS_DATA and the type is EVENT/F3/LOG, the
+ * packet is diverted to stm_log_inv_ts() instead and the function
+ * returns 0.
+ *
+ * Returns 0 on success, -EBADMSG for bad input/type or a failed copy,
+ * -ENOMEM on pool exhaustion, or the aggregation path's error code.
+ */
+static int diag_user_process_apps_data(const char __user *buf, int len,
+				       int pkt_type)
+{
+	int ret = 0;
+	int stm_size = 0;
+	const int mempool = POOL_TYPE_COPY;
+	unsigned char *user_space_data = NULL;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled;
+
+	if (!buf || len <= 0 || len > DIAG_MAX_RSP_SIZE) {
+		pr_err_ratelimited("diag: In %s, invalid buf %pK len: %d\n",
+				   __func__, buf, len);
+		return -EBADMSG;
+	}
+
+	/* Only these apps data types are accepted on this path. */
+	switch (pkt_type) {
+	case DATA_TYPE_EVENT:
+	case DATA_TYPE_F3:
+	case DATA_TYPE_LOG:
+	case DATA_TYPE_RESPONSE:
+	case DATA_TYPE_DELAYED_RESPONSE:
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, pkt_type);
+		return -EBADMSG;
+	}
+
+	user_space_data = diagmem_alloc(driver, len, mempool);
+	if (!user_space_data) {
+		diag_record_stats(pkt_type, PKT_DROP);
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(user_space_data, buf, len);
+	if (ret) {
+		pr_alert("diag: In %s, unable to copy data from userspace, err: %d\n",
+			 __func__, ret);
+		diagmem_free(driver, user_space_data, mempool);
+		user_space_data = NULL;
+		diag_record_stats(pkt_type, PKT_DROP);
+		return -EBADMSG;
+	}
+
+	/* STM diversion: consume the packet here and skip aggregation. */
+	if (driver->stm_state[APPS_DATA] &&
+	    (pkt_type >= DATA_TYPE_EVENT) && (pkt_type <= DATA_TYPE_LOG)) {
+		stm_size = stm_log_inv_ts(OST_ENTITY_DIAG, 0, user_space_data,
+					  len);
+		if (stm_size == 0) {
+			pr_debug("diag: In %s, stm_log_inv_ts returned size of 0\n",
+				 __func__);
+		}
+		diagmem_free(driver, user_space_data, mempool);
+		user_space_data = NULL;
+
+		return 0;
+	}
+
+	/* Aggregation buffers are shared; serialize with both mutexes. */
+	mutex_lock(&apps_data_mutex);
+	mutex_lock(&driver->hdlc_disable_mutex);
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+	if (hdlc_disabled)
+		ret = diag_process_apps_data_non_hdlc(user_space_data, len,
+						      pkt_type);
+	else
+		ret = diag_process_apps_data_hdlc(user_space_data, len,
+						  pkt_type);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+	mutex_unlock(&apps_data_mutex);
+
+	diagmem_free(driver, user_space_data, mempool);
+	user_space_data = NULL;
+
+	check_drain_timer();
+
+	if (ret == PKT_DROP)
+		diag_record_stats(pkt_type, PKT_DROP);
+	else if (ret == PKT_ALLOC)
+		diag_record_stats(pkt_type, PKT_ALLOC);
+	else
+		return ret;
+
+	return 0;
+}
+
+/*
+ * diagchar_read - read() handler for the diag char device.
+ *
+ * Sleeps until data_ready[] for the calling client has a bit set, then
+ * services exactly one ready data type per call (memory-device data,
+ * HDLC support state, deinit, masks, packets, DCI masks), clearing its
+ * bit.  DCI stream data is handled last, outside diagchar_mutex but
+ * under dci_mutex.  The first sizeof(int) bytes copied to userspace are
+ * always the data type.
+ *
+ * Returns the number of bytes written to buf, or a negative errno.
+ */
+static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
+			  loff_t *ppos)
+{
+	struct diag_dci_client_tbl *entry;
+	struct list_head *start, *temp;
+	int index = -1, i = 0, ret = 0;
+	int data_type;
+	int copy_dci_data = 0;
+	int exit_stat = 0;
+	int write_len = 0;
+	struct diag_md_session_t *session_info = NULL;
+
+	/* Map the calling process to its slot in the client table. */
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == current->tgid)
+			index = i;
+
+	if (index == -1) {
+		pr_err("diag: Client PID not found in table");
+		return -EINVAL;
+	}
+	if (!buf) {
+		pr_err("diag: bad address from user side\n");
+		return -EFAULT;
+	}
+	wait_event_interruptible(driver->wait_q, driver->data_ready[index]);
+
+	mutex_lock(&driver->diagchar_mutex);
+
+	if ((driver->data_ready[index] & USER_SPACE_DATA_TYPE) &&
+	    (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+	     driver->logging_mode == DIAG_MULTI_MODE)) {
+		pr_debug("diag: process woken up\n");
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
+		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+		if (ret == -EFAULT)
+			goto exit;
+		/* place holder for number of data field */
+		ret += sizeof(int);
+		session_info = diag_md_session_get_pid(current->tgid);
+		exit_stat = diag_md_copy_to_user(buf, &ret, count,
+						 session_info);
+		goto exit;
+	} else if (driver->data_ready[index] & USER_SPACE_DATA_TYPE) {
+		/* In case, the thread wakes up and the logging mode is not
+		 * memory device any more, the condition needs to be cleared.
+		 */
+		driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+	}
+
+	if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
+		data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
+		driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+		if (ret == -EFAULT)
+			goto exit;
+
+		/* Follow the type with the session's hdlc_disabled flag. */
+		session_info = diag_md_session_get_pid(current->tgid);
+		if (session_info) {
+			COPY_USER_SPACE_OR_ERR(buf+4,
+					session_info->hdlc_disabled,
+					sizeof(uint8_t));
+			if (ret == -EFAULT)
+				goto exit;
+		}
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DEINIT_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & DEINIT_TYPE;
+		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+		if (ret == -EFAULT)
+			goto exit;
+		driver->data_ready[index] ^= DEINIT_TYPE;
+		/* Drop the mutex before removing this client's entry. */
+		mutex_unlock(&driver->diagchar_mutex);
+		diag_remove_client_entry(file);
+		return ret;
+	}
+
+	if (driver->data_ready[index] & MSG_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & MSG_MASKS_TYPE;
+		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+		if (ret == -EFAULT)
+			goto exit;
+		write_len = diag_copy_to_user_msg_mask(buf + ret, count,
+						       session_info);
+		if (write_len > 0)
+			ret += write_len;
+		driver->data_ready[index] ^= MSG_MASKS_TYPE;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & EVENT_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & EVENT_MASKS_TYPE;
+		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+		if (ret == -EFAULT)
+			goto exit;
+
+		/* Session-specific event mask if one exists, else global. */
+		if (session_info && session_info->event_mask &&
+		    session_info->event_mask->ptr) {
+			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+					*(session_info->event_mask->ptr),
+					session_info->event_mask->mask_len);
+			if (ret == -EFAULT)
+				goto exit;
+		} else {
+			COPY_USER_SPACE_OR_ERR(buf + sizeof(int),
+						*(event_mask.ptr),
+						event_mask.mask_len);
+			if (ret == -EFAULT)
+				goto exit;
+		}
+		driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & LOG_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & LOG_MASKS_TYPE;
+		session_info = diag_md_session_get_peripheral(APPS_DATA);
+		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(int));
+		if (ret == -EFAULT)
+			goto exit;
+
+		write_len = diag_copy_to_user_log_mask(buf + ret, count,
+						       session_info);
+		if (write_len > 0)
+			ret += write_len;
+		driver->data_ready[index] ^= LOG_MASKS_TYPE;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & PKT_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & PKT_TYPE;
+		COPY_USER_SPACE_OR_ERR(buf, data_type, sizeof(data_type));
+		if (ret == -EFAULT)
+			goto exit;
+
+		COPY_USER_SPACE_OR_ERR(buf + sizeof(data_type),
+					*(driver->apps_req_buf),
+					driver->apps_req_buf_len);
+		if (ret == -EFAULT)
+			goto exit;
+		driver->data_ready[index] ^= PKT_TYPE;
+		driver->in_busy_pktdata = 0;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_PKT_TYPE) {
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & DCI_PKT_TYPE;
+		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+		if (ret == -EFAULT)
+			goto exit;
+
+		COPY_USER_SPACE_OR_ERR(buf+4, *(driver->dci_pkt_buf),
+					driver->dci_pkt_length);
+		if (ret == -EFAULT)
+			goto exit;
+
+		driver->data_ready[index] ^= DCI_PKT_TYPE;
+		driver->in_busy_dcipktdata = 0;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_EVENT_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & DCI_EVENT_MASKS_TYPE;
+		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+		if (ret == -EFAULT)
+			goto exit;
+
+		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
+		if (ret == -EFAULT)
+			goto exit;
+
+		COPY_USER_SPACE_OR_ERR(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
+				event_mask_composite), DCI_EVENT_MASK_SIZE);
+		if (ret == -EFAULT)
+			goto exit;
+
+		driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+		goto exit;
+	}
+
+	if (driver->data_ready[index] & DCI_LOG_MASKS_TYPE) {
+		/*Copy the type of data being passed*/
+		data_type = driver->data_ready[index] & DCI_LOG_MASKS_TYPE;
+		COPY_USER_SPACE_OR_ERR(buf, data_type, 4);
+		if (ret == -EFAULT)
+			goto exit;
+
+		COPY_USER_SPACE_OR_ERR(buf+4, driver->num_dci_client, 4);
+		if (ret == -EFAULT)
+			goto exit;
+
+		COPY_USER_SPACE_OR_ERR(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
+				log_mask_composite), DCI_LOG_MASK_SIZE);
+		if (ret == -EFAULT)
+			goto exit;
+		driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+		goto exit;
+	}
+
+exit:
+	mutex_unlock(&driver->diagchar_mutex);
+	/* DCI stream data is copied under dci_mutex, not diagchar_mutex. */
+	if (driver->data_ready[index] & DCI_DATA_TYPE) {
+		mutex_lock(&driver->dci_mutex);
+		/* Copy the type of data being passed */
+		data_type = driver->data_ready[index] & DCI_DATA_TYPE;
+		list_for_each_safe(start, temp, &driver->dci_client_list) {
+			entry = list_entry(start, struct diag_dci_client_tbl,
+									track);
+			if (entry->client->tgid != current->tgid)
+				continue;
+			if (!entry->in_service)
+				continue;
+			if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
+				mutex_unlock(&driver->dci_mutex);
+				goto end;
+			}
+			ret += sizeof(int);
+			if (copy_to_user(buf + ret, &entry->client_info.token,
+				sizeof(int))) {
+				mutex_unlock(&driver->dci_mutex);
+				goto end;
+			}
+			ret += sizeof(int);
+			copy_dci_data = 1;
+			exit_stat = diag_copy_dci(buf, count, entry, &ret);
+			mutex_lock(&driver->diagchar_mutex);
+			driver->data_ready[index] ^= DCI_DATA_TYPE;
+			mutex_unlock(&driver->diagchar_mutex);
+			if (exit_stat == 1) {
+				mutex_unlock(&driver->dci_mutex);
+				goto end;
+			}
+		}
+		mutex_unlock(&driver->dci_mutex);
+		goto end;
+	}
+end:
+	/*
+	 * Flush any read that is currently pending on DCI data and
+	 * command channels. This will ensure that the next read is not
+	 * missed.
+	 */
+	if (copy_dci_data) {
+		diag_ws_on_copy_complete(DIAG_WS_DCI);
+		flush_workqueue(driver->diag_dci_wq);
+	}
+	return ret;
+}
+
+/*
+ * diagchar_write - write() handler for the diag char device.
+ *
+ * The first sizeof(int) bytes of buf carry the packet type; the rest is
+ * the payload.  Dispatches to the DCI, raw, userspace-mask, or apps data
+ * processors by type.  When USB logging is selected but USB is
+ * disconnected, only DCI traffic is accepted.
+ *
+ * Returns the processor's result (0/negative errno style), -EBADMSG for
+ * short writes, -EIO for dropped packets, -EINVAL for unknown types.
+ */
+static ssize_t diagchar_write(struct file *file, const char __user *buf,
+			      size_t count, loff_t *ppos)
+{
+	int err = 0;
+	int pkt_type = 0;
+	int payload_len = 0;
+	const char __user *payload_buf = NULL;
+
+	/*
+	 * The data coming from the user space should at least have the
+	 * packet type header.
+	 */
+	if (count < sizeof(int)) {
+		pr_err("diag: In %s, client is sending short data, len: %d\n",
+		       __func__, (int)count);
+		return -EBADMSG;
+	}
+
+	err = copy_from_user((&pkt_type), buf, sizeof(int));
+	if (err) {
+		pr_err_ratelimited("diag: In %s, unable to copy pkt_type from userspace, err: %d\n",
+				   __func__, err);
+		return -EIO;
+	}
+
+	/* USB mode without a cable: only DCI traffic may pass. */
+	if (driver->logging_mode == DIAG_USB_MODE && !driver->usb_connected) {
+		if (!((pkt_type == DCI_DATA_TYPE) ||
+		    (pkt_type == DCI_PKT_TYPE) ||
+		    (pkt_type & DATA_TYPE_DCI_LOG) ||
+		    (pkt_type & DATA_TYPE_DCI_EVENT))) {
+			pr_debug("diag: In %s, Dropping non DCI packet type\n",
+				 __func__);
+			return -EIO;
+		}
+	}
+
+	payload_buf = buf + sizeof(int);
+	payload_len = count - sizeof(int);
+
+	if (pkt_type == DCI_PKT_TYPE)
+		return diag_user_process_dci_apps_data(payload_buf,
+						       payload_len,
+						       pkt_type);
+	else if (pkt_type == DCI_DATA_TYPE)
+		return diag_user_process_dci_data(payload_buf, payload_len);
+	else if (pkt_type == USER_SPACE_RAW_DATA_TYPE)
+		return diag_user_process_raw_data(payload_buf,
+							    payload_len);
+	else if (pkt_type == USER_SPACE_DATA_TYPE)
+		return diag_user_process_userspace_data(payload_buf,
+							payload_len);
+	/* DCI log/event bits may be combined with regular stream bits. */
+	if (pkt_type & (DATA_TYPE_DCI_LOG | DATA_TYPE_DCI_EVENT)) {
+		err = diag_user_process_dci_apps_data(payload_buf, payload_len,
+						      pkt_type);
+		if (pkt_type & DATA_TYPE_DCI_LOG)
+			pkt_type ^= DATA_TYPE_DCI_LOG;
+		if (pkt_type & DATA_TYPE_DCI_EVENT)
+			pkt_type ^= DATA_TYPE_DCI_EVENT;
+		/*
+		 * Check if the log or event is selected even on the regular
+		 * stream. If USB is not connected and we are not in memory
+		 * device mode, we should not process these logs/events.
+		 */
+		if (pkt_type && driver->logging_mode == DIAG_USB_MODE &&
+		    !driver->usb_connected)
+			return err;
+	}
+
+	switch (pkt_type) {
+	case DATA_TYPE_EVENT:
+	case DATA_TYPE_F3:
+	case DATA_TYPE_LOG:
+	case DATA_TYPE_DELAYED_RESPONSE:
+	case DATA_TYPE_RESPONSE:
+		return diag_user_process_apps_data(payload_buf, payload_len,
+						   pkt_type);
+	default:
+		pr_err_ratelimited("diag: In %s, invalid pkt_type: %d\n",
+				   __func__, pkt_type);
+		return -EINVAL;
+	}
+
+	return err;
+}
+
+/*
+ * diag_ws_init - zero the DCI and memory-device wakeup-source
+ * bookkeeping and initialize their spinlocks.
+ */
+void diag_ws_init(void)
+{
+	struct diag_ws_ref_t *ws;
+
+	ws = &driver->dci_ws;
+	ws->ref_count = 0;
+	ws->copy_count = 0;
+	spin_lock_init(&ws->lock);
+
+	ws = &driver->md_ws;
+	ws->ref_count = 0;
+	ws->copy_count = 0;
+	spin_lock_init(&ws->lock);
+}
+
+/*
+ * diag_stats_init - reset the per-type (msg/log/event) alloc and drop
+ * counters.  No-op if the driver context is not yet allocated.
+ */
+static void diag_stats_init(void)
+{
+	if (!driver)
+		return;
+
+	driver->msg_stats.alloc_count = driver->msg_stats.drop_count = 0;
+	driver->log_stats.alloc_count = driver->log_stats.drop_count = 0;
+	driver->event_stats.alloc_count = driver->event_stats.drop_count = 0;
+}
+
+/*
+ * diag_ws_on_notify - keep the diag device awake when incoming data is
+ * signalled.  The matching pm_relax() happens in diag_ws_release().
+ */
+void diag_ws_on_notify(void)
+{
+	/*
+	 * Do not deal with reference count here as there can be spurious
+	 * interrupts.
+	 */
+	pm_stay_awake(driver->diag_dev);
+}
+
+/*
+ * diag_ws_on_read - account for a read on the given wakeup source.  A
+ * positive pkt_len takes a reference; otherwise the counters are
+ * normalized and a release is attempted.
+ */
+void diag_ws_on_read(int type, int pkt_len)
+{
+	struct diag_ws_ref_t *ws_ref;
+	unsigned long flags;
+
+	if (type == DIAG_WS_DCI) {
+		ws_ref = &driver->dci_ws;
+	} else if (type == DIAG_WS_MUX) {
+		ws_ref = &driver->md_ws;
+	} else {
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	if (pkt_len > 0) {
+		ws_ref->ref_count++;
+	} else {
+		if (ws_ref->ref_count < 1) {
+			ws_ref->ref_count = 0;
+			ws_ref->copy_count = 0;
+		}
+		diag_ws_release();
+	}
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+
+/*
+ * diag_ws_on_copy - note that one buffer of the given wakeup source has
+ * been copied to userspace; balanced by diag_ws_on_copy_complete().
+ */
+void diag_ws_on_copy(int type)
+{
+	struct diag_ws_ref_t *ws_ref;
+	unsigned long flags;
+
+	if (type == DIAG_WS_DCI) {
+		ws_ref = &driver->dci_ws;
+	} else if (type == DIAG_WS_MUX) {
+		ws_ref = &driver->md_ws;
+	} else {
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->copy_count++;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+}
+
+/*
+ * diag_ws_on_copy_fail - drop one reference on the given wakeup source
+ * after a failed copy to userspace, then try to release the device.
+ */
+void diag_ws_on_copy_fail(int type)
+{
+	struct diag_ws_ref_t *ws_ref;
+	unsigned long flags;
+
+	if (type == DIAG_WS_DCI) {
+		ws_ref = &driver->dci_ws;
+	} else if (type == DIAG_WS_MUX) {
+		ws_ref = &driver->md_ws;
+	} else {
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->ref_count--;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+	diag_ws_release();
+}
+
+/*
+ * Called when a client copy for wakeup source @type completes. Drops all
+ * references accumulated by diag_ws_on_copy() (clamping at zero) and
+ * attempts to release the wakeup source.
+ */
+void diag_ws_on_copy_complete(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	/*
+	 * Fix misleading indentation from the original: the statements
+	 * below are plain sequential code, not a nested block.
+	 */
+	ws_ref->ref_count -= ws_ref->copy_count;
+	if (ws_ref->ref_count < 1)
+		ws_ref->ref_count = 0;
+	ws_ref->copy_count = 0;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+	diag_ws_release();
+}
+
+/*
+ * Forcibly zero the reference and copy counters for wakeup source @type
+ * and attempt to release the wakeup source. Used on error/cleanup paths.
+ */
+void diag_ws_reset(int type)
+{
+	unsigned long flags;
+	struct diag_ws_ref_t *ws_ref = NULL;
+
+	switch (type) {
+	case DIAG_WS_DCI:
+		ws_ref = &driver->dci_ws;
+		break;
+	case DIAG_WS_MUX:
+		ws_ref = &driver->md_ws;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ws_ref->lock, flags);
+	ws_ref->ref_count = 0;
+	ws_ref->copy_count = 0;
+	spin_unlock_irqrestore(&ws_ref->lock, flags);
+
+	diag_ws_release();
+}
+
+/*
+ * Allow the diag device to sleep once neither the DCI nor the memory
+ * device wakeup source holds a reference.
+ * NOTE(review): both ref_counts are read without taking ws locks here —
+ * presumably tolerated as a benign race; confirm against callers.
+ */
+void diag_ws_release(void)
+{
+	if (driver->dci_ws.ref_count == 0 && driver->md_ws.ref_count == 0)
+		pm_relax(driver->diag_dev);
+}
+
+#ifdef DIAG_DEBUG
+/*
+ * Create the diag IPC logging context and enable the default debug mask
+ * (peripherals, DCI and bridge). Compiled in only when DIAG_DEBUG is set.
+ */
+static void diag_debug_init(void)
+{
+	diag_ipc_log = ipc_log_context_create(DIAG_IPC_LOG_PAGES, "diag", 0);
+	if (!diag_ipc_log)
+		pr_err("diag: Failed to create IPC logging context\n");
+	/*
+	 * Set the bit mask here as per diag_ipc_logging.h to enable debug logs
+	 * to be logged to IPC
+	 */
+	diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
+				DIAG_DEBUG_BRIDGE;
+}
+#else
+/* Stub: IPC debug logging disabled in this build. */
+static void diag_debug_init(void)
+{
+
+}
+#endif
+
+/*
+ * Initialize real-time mode state for every processor, create the
+ * real-time workqueue and its work item. Returns 0 on success, -EIO if
+ * the driver is not allocated, -ENOMEM if the workqueue cannot be made.
+ */
+static int diag_real_time_info_init(void)
+{
+	int i;
+
+	if (!driver)
+		return -EIO;
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		driver->real_time_mode[i] = 1;
+		driver->proc_rt_vote_mask[i] |= DIAG_PROC_DCI;
+		driver->proc_rt_vote_mask[i] |= DIAG_PROC_MEMORY_DEVICE;
+	}
+	driver->real_time_update_busy = 0;
+	driver->proc_active_mask = 0;
+	driver->diag_real_time_wq = create_singlethread_workqueue(
+							"diag_real_time_wq");
+	if (!driver->diag_real_time_wq)
+		return -ENOMEM;
+	INIT_WORK(&(driver->diag_real_time_work), diag_real_time_work_fn);
+	mutex_init(&driver->real_time_mutex);
+	return 0;
+}
+
+/* File operations for the /dev/diag character device. */
+static const struct file_operations diagcharfops = {
+	.owner = THIS_MODULE,
+	.read = diagchar_read,
+	.write = diagchar_write,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = diagchar_compat_ioctl,
+#endif
+	.unlocked_ioctl = diagchar_ioctl,
+	.open = diagchar_open,
+	.release = diagchar_close
+};
+
+/*
+ * Register the diag char device: add the cdev, create the "diag" class
+ * and device node, and attach the DIAG_WS wakeup source.
+ * Returns 0 on success or a negative errno on failure.
+ */
+static int diagchar_setup_cdev(dev_t devno)
+{
+
+	int err;
+
+	cdev_init(driver->cdev, &diagcharfops);
+
+	driver->cdev->owner = THIS_MODULE;
+	driver->cdev->ops = &diagcharfops;
+
+	err = cdev_add(driver->cdev, devno, 1);
+
+	if (err) {
+		pr_info("diagchar cdev registration failed !\n");
+		return err;
+	}
+
+	driver->diagchar_class = class_create(THIS_MODULE, "diag");
+
+	if (IS_ERR(driver->diagchar_class)) {
+		pr_err("Error creating diagchar class.\n");
+		return PTR_ERR(driver->diagchar_class);
+	}
+
+	driver->diag_dev = device_create(driver->diagchar_class, NULL, devno,
+					 (void *)driver, "diag");
+
+	/*
+	 * device_create() returns ERR_PTR() on failure, never NULL, so the
+	 * result must be checked with IS_ERR(); the old NULL check could
+	 * let an error pointer be dereferenced below.
+	 */
+	if (IS_ERR(driver->diag_dev))
+		return PTR_ERR(driver->diag_dev);
+
+	driver->diag_dev->power.wakeup = wakeup_source_register("DIAG_WS");
+	return 0;
+
+}
+
+/*
+ * Tear down the char device, class and driver structure. Safe to call
+ * with partially-initialized state (checks each member before use).
+ */
+static int diagchar_cleanup(void)
+{
+	if (driver) {
+		if (driver->cdev) {
+			/* TODO - Check if device exists before deleting */
+			device_destroy(driver->diagchar_class,
+				       MKDEV(driver->major,
+					     driver->minor_start));
+			cdev_del(driver->cdev);
+		}
+		if (!IS_ERR(driver->diagchar_class))
+			class_destroy(driver->diagchar_class);
+		kfree(driver);
+	}
+	return 0;
+}
+
+/*
+ * Module entry point: allocate and initialize the driver state, bring up
+ * all diag sub-modules (mux, fwd, cntl, DCI, masks, debugfs, bridge) and
+ * register the /dev/diag char device. On any failure, tears everything
+ * down and returns a negative errno.
+ */
+static int __init diagchar_init(void)
+{
+	dev_t dev;
+	int ret;
+
+	pr_debug("diagfwd initializing ..\n");
+	ret = 0;
+	/* Extra 5 bytes hold the device name "diag" + NUL (see below) */
+	driver = kzalloc(sizeof(struct diagchar_dev) + 5, GFP_KERNEL);
+	if (!driver)
+		return -ENOMEM;
+	kmemleak_not_leak(driver);
+
+	timer_in_progress = 0;
+	driver->delayed_rsp_id = 0;
+	driver->hdlc_disabled = 0;
+	driver->dci_state = DIAG_DCI_NO_ERROR;
+	setup_timer(&drain_timer, drain_timer_func, 1234);
+	driver->supports_sockets = 1;
+	driver->time_sync_enabled = 0;
+	driver->uses_time_api = 0;
+	driver->poolsize = poolsize;
+	driver->poolsize_hdlc = poolsize_hdlc;
+	driver->poolsize_dci = poolsize_dci;
+	driver->poolsize_user = poolsize_user;
+	/*
+	 * POOL_TYPE_MUX_APPS is for the buffers in the Diag MUX layer.
+	 * The number of buffers encompasses Diag data generated on
+	 * the Apss processor + 1 for the responses generated exclusively on
+	 * the Apps processor + data from data channels (4 channels per
+	 * peripheral) + data from command channels (2)
+	 */
+	diagmem_setsize(POOL_TYPE_MUX_APPS, itemsize_usb_apps,
+			poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
+	driver->num_clients = max_clients;
+	driver->logging_mode = DIAG_USB_MODE;
+	driver->mask_check = 0;
+	driver->in_busy_pktdata = 0;
+	driver->in_busy_dcipktdata = 0;
+	driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1);
+	hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+	hdlc_data.len = 0;
+	non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+	non_hdlc_data.len = 0;
+	mutex_init(&driver->hdlc_disable_mutex);
+	mutex_init(&driver->diagchar_mutex);
+	mutex_init(&driver->diag_maskclear_mutex);
+	mutex_init(&driver->diag_file_mutex);
+	mutex_init(&driver->delayed_rsp_mutex);
+	mutex_init(&apps_data_mutex);
+	mutex_init(&driver->diagfwd_channel_mutex);
+	init_waitqueue_head(&driver->wait_q);
+	INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
+	INIT_WORK(&(driver->update_user_clients),
+			diag_update_user_client_work_fn);
+	INIT_WORK(&(driver->update_md_clients),
+			diag_update_md_client_work_fn);
+	diag_ws_init();
+	diag_stats_init();
+	diag_debug_init();
+	diag_md_session_init();
+
+	driver->incoming_pkt.capacity = DIAG_MAX_REQ_SIZE;
+	driver->incoming_pkt.data = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+	if (!driver->incoming_pkt.data) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	kmemleak_not_leak(driver->incoming_pkt.data);
+	driver->incoming_pkt.processing = 0;
+	driver->incoming_pkt.read_len = 0;
+	driver->incoming_pkt.remaining = 0;
+	driver->incoming_pkt.total_len = 0;
+
+	ret = diag_real_time_info_init();
+	if (ret)
+		goto fail;
+	ret = diag_debugfs_init();
+	if (ret)
+		goto fail;
+	ret = diag_masks_init();
+	if (ret)
+		goto fail;
+	ret = diag_remote_init();
+	if (ret)
+		goto fail;
+	ret = diag_mux_init();
+	if (ret)
+		goto fail;
+	ret = diagfwd_init();
+	if (ret)
+		goto fail;
+	ret = diagfwd_cntl_init();
+	if (ret)
+		goto fail;
+	driver->dci_state = diag_dci_init();
+	ret = diagfwd_peripheral_init();
+	if (ret)
+		goto fail;
+	diagfwd_cntl_channel_init();
+	if (driver->dci_state == DIAG_DCI_NO_ERROR)
+		diag_dci_channel_init();
+	pr_debug("diagchar initializing ..\n");
+	driver->num = 1;
+	driver->name = ((void *)driver) + sizeof(struct diagchar_dev);
+	/*
+	 * The name buffer is 5 bytes (kzalloc above); size 4 would have
+	 * truncated the name to "dia".
+	 */
+	strlcpy(driver->name, "diag", 5);
+	/* Get major number from kernel and initialize */
+	ret = alloc_chrdev_region(&dev, driver->minor_start,
+				    driver->num, driver->name);
+	if (!ret) {
+		driver->major = MAJOR(dev);
+		driver->minor_start = MINOR(dev);
+	} else {
+		pr_err("diag: Major number not allocated\n");
+		goto fail;
+	}
+	driver->cdev = cdev_alloc();
+	/* cdev_alloc() can fail; the old code dereferenced it unchecked */
+	if (!driver->cdev) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	ret = diagchar_setup_cdev(dev);
+	if (ret)
+		goto fail;
+
+	pr_debug("diagchar initialized now");
+	ret = diagfwd_bridge_init();
+	if (ret)
+		diagfwd_bridge_exit();
+	return 0;
+
+fail:
+	pr_err("diagchar is not initialized, ret: %d\n", ret);
+	diag_debugfs_cleanup();
+	diagchar_cleanup();
+	diag_mux_exit();
+	diagfwd_peripheral_exit();
+	diagfwd_bridge_exit();
+	diagfwd_exit();
+	diagfwd_cntl_exit();
+	diag_dci_exit();
+	diag_masks_exit();
+	diag_remote_exit();
+	return ret;
+
+}
+
+/*
+ * Module exit: tear down all diag sub-modules in reverse of init order,
+ * then release the char device and driver structure.
+ */
+static void diagchar_exit(void)
+{
+	pr_info("diagchar exiting...\n");
+	diag_mempool_exit();
+	diag_mux_exit();
+	diagfwd_peripheral_exit();
+	diagfwd_exit();
+	diagfwd_cntl_exit();
+	diag_dci_exit();
+	diag_masks_exit();
+	diag_md_session_exit();
+	diag_remote_exit();
+	diag_debugfs_cleanup();
+	diagchar_cleanup();
+	pr_info("done diagchar exit\n");
+}
+
+module_init(diagchar_init);
+module_exit(diagchar_exit);
diff --git a/drivers/char/diag/diagchar_hdlc.c b/drivers/char/diag/diagchar_hdlc.c
new file mode 100644
index 0000000..6dd571f
--- /dev/null
+++ b/drivers/char/diag/diagchar_hdlc.c
@@ -0,0 +1,251 @@
+/* Copyright (c) 2008-2009, 2012-2014, 2016 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
+#include <linux/crc-ccitt.h>
+#include "diagchar_hdlc.h"
+#include "diagchar.h"
+
+
+MODULE_LICENSE("GPL v2");
+
+#define CRC_16_L_SEED           0xFFFF
+
+#define CRC_16_L_STEP(xx_crc, xx_c) \
+	crc_ccitt_byte(xx_crc, xx_c)
+
+/*
+ * HDLC-encode a (possibly partial) packet from @src_desc into @enc.
+ * Escapes CONTROL_CHAR/ESC_CHAR bytes, accumulates the CRC-CCITT across
+ * calls, and appends ~CRC plus the 0x7E terminator on the final
+ * fragment. State and cursors are written back so the caller can resume
+ * when either buffer runs out; check src_desc->state for completion.
+ */
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+		      struct diag_hdlc_dest_type *enc)
+{
+	uint8_t *dest;
+	uint8_t *dest_last;
+	const uint8_t *src;
+	const uint8_t *src_last;
+	uint16_t crc;
+	unsigned char src_byte = 0;
+	enum diag_send_state_enum_type state;
+	unsigned int used = 0;
+
+	if (!src_desc || !enc)
+		return;
+
+	/* Copy parts to local variables. */
+	src = src_desc->pkt;
+	src_last = src_desc->last;
+	state = src_desc->state;
+	dest = enc->dest;
+	dest_last = enc->dest_last;
+
+	if (state == DIAG_STATE_START) {
+		crc = CRC_16_L_SEED;
+		state++;
+	} else {
+		/* Get a local copy of the CRC */
+		crc = enc->crc;
+	}
+
+	/* dest or dest_last may be NULL to trigger a
+	 * state transition only.
+	 */
+	if (dest && dest_last) {
+		/* This condition needs to include the possibility
+		 * of 2 dest bytes for an escaped byte
+		 */
+		while (src <= src_last && dest <= dest_last) {
+
+			src_byte = *src++;
+			if ((src_byte == CONTROL_CHAR) ||
+			    (src_byte == ESC_CHAR)) {
+				/* If the escape character is not the
+				 * last byte
+				 */
+				if (dest != dest_last) {
+					crc = CRC_16_L_STEP(crc, src_byte);
+					*dest++ = ESC_CHAR;
+					used++;
+					*dest++ = src_byte ^ ESC_MASK;
+					used++;
+				} else {
+					/* No room for both escape bytes:
+					 * push the byte back and stop.
+					 */
+					src--;
+					break;
+				}
+			} else {
+				crc = CRC_16_L_STEP(crc, src_byte);
+				*dest++ = src_byte;
+				used++;
+			}
+		}
+
+		if (src > src_last) {
+			if (state == DIAG_STATE_BUSY) {
+				if (src_desc->terminate) {
+					crc = ~crc;
+					state++;
+				} else {
+					/* Done with fragment */
+					state = DIAG_STATE_COMPLETE;
+				}
+			}
+
+			/* Emit the two CRC bytes (low byte first),
+			 * escaping them if needed.
+			 */
+			while (dest <= dest_last && state >= DIAG_STATE_CRC1
+					&& state < DIAG_STATE_TERM) {
+				/* Encode a byte of the CRC next */
+				src_byte = crc & 0xFF;
+
+				if ((src_byte == CONTROL_CHAR)
+				    || (src_byte == ESC_CHAR)) {
+
+					if (dest != dest_last) {
+						*dest++ = ESC_CHAR;
+						used++;
+						*dest++ = src_byte ^ ESC_MASK;
+						used++;
+						crc >>= 8;
+					} else
+						break;
+				} else {
+
+					crc >>= 8;
+					*dest++ = src_byte;
+					used++;
+				}
+				state++;
+			}
+
+			if (state == DIAG_STATE_TERM) {
+				if (dest_last >= dest) {
+					*dest++ = CONTROL_CHAR;
+					used++;
+					state++;	/* Complete */
+				}
+			}
+		}
+	}
+
+	/* Copy local variables back into the encode structure. */
+	enc->dest = dest;
+	enc->dest_last = dest_last;
+	enc->crc = crc;
+	src_desc->pkt = src;
+	src_desc->last = src_last;
+	src_desc->state = state;
+}
+
+
+/*
+ * HDLC-decode bytes from hdlc->src_ptr into hdlc->dest_ptr, resuming at
+ * the stored src/dest indices. Unescapes 0x7D-prefixed bytes (carrying
+ * the escape flag across calls) and stops at the 0x7E terminator.
+ * Returns HDLC_COMPLETE when a full packet boundary was seen, otherwise
+ * HDLC_INCOMPLETE.
+ */
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc)
+{
+	uint8_t *src_ptr = NULL, *dest_ptr = NULL;
+	unsigned int src_length = 0, dest_length = 0;
+
+	unsigned int len = 0;
+	unsigned int i;
+	uint8_t src_byte;
+
+	int pkt_bnd = HDLC_INCOMPLETE;
+	int msg_start;
+
+	if (hdlc && hdlc->src_ptr && hdlc->dest_ptr &&
+	    (hdlc->src_size > hdlc->src_idx) &&
+	    (hdlc->dest_size > hdlc->dest_idx)) {
+
+		/* src_idx == 0 means we are at the start of a message */
+		msg_start = (hdlc->src_idx == 0) ? 1 : 0;
+
+		src_ptr = hdlc->src_ptr;
+		src_ptr = &src_ptr[hdlc->src_idx];
+		src_length = hdlc->src_size - hdlc->src_idx;
+
+		dest_ptr = hdlc->dest_ptr;
+		dest_ptr = &dest_ptr[hdlc->dest_idx];
+		dest_length = hdlc->dest_size - hdlc->dest_idx;
+
+		for (i = 0; i < src_length; i++) {
+
+			src_byte = src_ptr[i];
+
+			if (hdlc->escaping) {
+				/* Previous chunk ended on an escape byte */
+				dest_ptr[len++] = src_byte ^ ESC_MASK;
+				hdlc->escaping = 0;
+			} else if (src_byte == ESC_CHAR) {
+				if (i == (src_length - 1)) {
+					/* Escape is last byte: remember it
+					 * for the next chunk.
+					 */
+					hdlc->escaping = 1;
+					i++;
+					break;
+				}
+				dest_ptr[len++] = src_ptr[++i] ^ ESC_MASK;
+			} else if (src_byte == CONTROL_CHAR) {
+				/* Leading 0x7E is a frame delimiter, skip */
+				if (msg_start && i == 0 && src_length > 1)
+					continue;
+				/* Byte 0x7E will be considered as end of
+				 * packet
+				 */
+				dest_ptr[len++] = src_byte;
+				i++;
+				pkt_bnd = HDLC_COMPLETE;
+				break;
+			} else {
+				dest_ptr[len++] = src_byte;
+			}
+
+			if (len >= dest_length) {
+				i++;
+				break;
+			}
+		}
+
+		hdlc->src_idx += i;
+		hdlc->dest_idx += len;
+	}
+
+	return pkt_bnd;
+}
+
+/*
+ * Verify the CRC-CCITT of an incoming HDLC packet. The last 3 bytes of
+ * @buf are expected to be CRC low, CRC high, terminator. Returns 0 on
+ * match, -EIO on invalid input or CRC mismatch.
+ */
+int crc_check(uint8_t *buf, uint16_t len)
+{
+	uint16_t crc = CRC_16_L_SEED;
+	uint8_t sent_crc[2] = {0, 0};
+
+	/*
+	 * The minimum length of a valid incoming packet is 4. 1 byte
+	 * of data and 3 bytes for CRC
+	 */
+	if (!buf || len < 4) {
+		pr_err_ratelimited("diag: In %s, invalid packet or length, buf: 0x%p, len: %d",
+				   __func__, buf, len);
+		return -EIO;
+	}
+
+	/*
+	 * Run CRC check for the original input. Skip the last 3 CRC
+	 * bytes
+	 */
+	crc = crc_ccitt(crc, buf, len-3);
+	crc ^= CRC_16_L_SEED;
+
+	/* Check the computed CRC against the original CRC bytes. */
+	sent_crc[0] = buf[len-3];
+	sent_crc[1] = buf[len-2];
+	/*
+	 * NOTE(review): reinterpreting sent_crc as uint16_t assumes a
+	 * little-endian CPU (true on the targeted MSM/ARM platforms).
+	 */
+	if (crc != *((uint16_t *)sent_crc)) {
+		pr_debug("diag: In %s, crc mismatch. expected: %x, sent %x.\n",
+				__func__, crc, *((uint16_t *)sent_crc));
+		return -EIO;
+	}
+
+	return 0;
+}
diff --git a/drivers/char/diag/diagchar_hdlc.h b/drivers/char/diag/diagchar_hdlc.h
new file mode 100644
index 0000000..357651eb
--- /dev/null
+++ b/drivers/char/diag/diagchar_hdlc.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2008-2009, 2012-2014, 2016 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_HDLC
+#define DIAGCHAR_HDLC
+
+/* Encoder state machine: START -> BUSY -> CRC1 -> CRC2 -> TERM -> COMPLETE */
+enum diag_send_state_enum_type {
+	DIAG_STATE_START,
+	DIAG_STATE_BUSY,
+	DIAG_STATE_CRC1,
+	DIAG_STATE_CRC2,
+	DIAG_STATE_TERM,
+	DIAG_STATE_COMPLETE
+};
+
+/* Describes one source fragment being HDLC-encoded. */
+struct diag_send_desc_type {
+	const void *pkt;
+	const void *last;	/* Address of last byte to send. */
+	enum diag_send_state_enum_type state;
+	/* True if this fragment terminates the packet */
+	unsigned char terminate;
+};
+
+/* Destination cursor for diag_hdlc_encode(). */
+struct diag_hdlc_dest_type {
+	void *dest;
+	void *dest_last;
+	/* Below: internal use only */
+	uint16_t crc;
+};
+
+/* Resumable decode context for diag_hdlc_decode(). */
+struct diag_hdlc_decode_type {
+	uint8_t *src_ptr;
+	unsigned int src_idx;
+	unsigned int src_size;
+	uint8_t *dest_ptr;
+	unsigned int dest_idx;
+	unsigned int dest_size;
+	int escaping;		/* carries a trailing 0x7D across chunks */
+
+};
+
+void diag_hdlc_encode(struct diag_send_desc_type *src_desc,
+		      struct diag_hdlc_dest_type *enc);
+
+int diag_hdlc_decode(struct diag_hdlc_decode_type *hdlc);
+
+int crc_check(uint8_t *buf, uint16_t len);
+
+#define ESC_CHAR     0x7D
+#define ESC_MASK     0x20
+
+#define HDLC_INCOMPLETE		0
+#define HDLC_COMPLETE		1
+
+#define HDLC_FOOTER_LEN		3
+#endif
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
new file mode 100644
index 0000000..e132f36
--- /dev/null
+++ b/drivers/char/diag/diagfwd.c
@@ -0,0 +1,1686 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/diagchar.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/restart.h>
+#include "diagmem.h"
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diagchar_hdlc.h"
+#include "diag_dci.h"
+#include "diag_masks.h"
+#include "diag_usb.h"
+#include "diag_mux.h"
+
+#define STM_CMD_VERSION_OFFSET	4
+#define STM_CMD_MASK_OFFSET	5
+#define STM_CMD_DATA_OFFSET	6
+#define STM_CMD_NUM_BYTES	7
+
+#define STM_RSP_SUPPORTED_INDEX		7
+#define STM_RSP_STATUS_INDEX		8
+#define STM_RSP_NUM_BYTES		9
+
+static int timestamp_switch;
+module_param(timestamp_switch, int, 0644);
+
+int wrap_enabled;
+uint16_t wrap_count;
+static struct diag_hdlc_decode_type *hdlc_decode;
+
+#define DIAG_NUM_COMMON_CMD	1
+static uint8_t common_cmds[DIAG_NUM_COMMON_CMD] = {
+	DIAG_CMD_LOG_ON_DMND
+};
+
+static uint8_t hdlc_timer_in_progress;
+
+/* Determine if this device uses a device tree */
+#ifdef CONFIG_OF
+/* Returns 1 when a device-tree root node exists, 0 otherwise. */
+static int has_device_tree(void)
+{
+	struct device_node *node;
+
+	node = of_find_node_by_path("/");
+	if (node) {
+		of_node_put(node);
+		return 1;
+	}
+	return 0;
+}
+#else
+/* No CONFIG_OF: never a device tree. */
+static int has_device_tree(void)
+{
+	return 0;
+}
+#endif
+
+/*
+ * Map the SoC (from socinfo) to its diag tools ID, falling back to
+ * machine checks on device-tree targets. Returns 0 when unknown.
+ */
+int chk_config_get_id(void)
+{
+	switch (socinfo_get_msm_cpu()) {
+	case MSM_CPU_8X60:
+		return APQ8060_TOOLS_ID;
+	case MSM_CPU_8960:
+	case MSM_CPU_8960AB:
+		return AO8960_TOOLS_ID;
+	case MSM_CPU_8064:
+	case MSM_CPU_8064AB:
+	case MSM_CPU_8064AA:
+		return APQ8064_TOOLS_ID;
+	case MSM_CPU_8930:
+	case MSM_CPU_8930AA:
+	case MSM_CPU_8930AB:
+		return MSM8930_TOOLS_ID;
+	case MSM_CPU_8974:
+		return MSM8974_TOOLS_ID;
+	case MSM_CPU_8625:
+		return MSM8625_TOOLS_ID;
+	case MSM_CPU_8084:
+		return APQ8084_TOOLS_ID;
+	case MSM_CPU_8916:
+		return MSM8916_TOOLS_ID;
+	case MSM_CPU_8939:
+		return MSM8939_TOOLS_ID;
+	case MSM_CPU_8994:
+		return MSM8994_TOOLS_ID;
+	case MSM_CPU_8226:
+		return APQ8026_TOOLS_ID;
+	case MSM_CPU_8909:
+		return MSM8909_TOOLS_ID;
+	case MSM_CPU_8992:
+		return MSM8992_TOOLS_ID;
+	case MSM_CPU_8996:
+		return MSM_8996_TOOLS_ID;
+	default:
+		if (driver->use_device_tree) {
+			if (machine_is_msm8974())
+				return MSM8974_TOOLS_ID;
+			else if (machine_is_apq8074())
+				return APQ8074_TOOLS_ID;
+			else
+				return 0;
+		} else {
+			return 0;
+		}
+	}
+}
+
+/*
+ * This will return TRUE for targets which support apps only mode and hence SSR.
+ * This applies to 8960 and newer targets.
+ */
+int chk_apps_only(void)
+{
+	/* All device-tree targets are 8960 or newer */
+	if (driver->use_device_tree)
+		return 1;
+
+	switch (socinfo_get_msm_cpu()) {
+	case MSM_CPU_8960:
+	case MSM_CPU_8960AB:
+	case MSM_CPU_8064:
+	case MSM_CPU_8064AB:
+	case MSM_CPU_8064AA:
+	case MSM_CPU_8930:
+	case MSM_CPU_8930AA:
+	case MSM_CPU_8930AB:
+	case MSM_CPU_8627:
+	case MSM_CPU_9615:
+	case MSM_CPU_8974:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Returns 1 when the apps processor acts as the diag master, i.e. SW
+ * DLOAD and Mode Reset are handled on apps. True for all device-tree
+ * based (8960 and newer) targets, 0 otherwise.
+ */
+int chk_apps_master(void)
+{
+	return driver->use_device_tree ? 1 : 0;
+}
+
+/*
+ * Decide whether the apps processor should answer polling commands.
+ * Returns 1 when apps must respond, 0 when a peripheral will.
+ */
+int chk_polling_response(void)
+{
+	if (!(driver->polling_reg_flag) && chk_apps_master())
+		/*
+		 * If the apps processor is master and no other processor
+		 * has registered to respond for polling
+		 */
+		return 1;
+	else if (!(driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
+		   driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+		 (driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask))
+		/*
+		 * If the apps processor is not the master and the modem
+		 * is not up or we did not receive the feature masks from Modem
+		 */
+		return 1;
+	else
+		return 0;
+}
+
+/*
+ * This function should be called if you feel that the logging process may
+ * need to be woken up. For instance, if the logging mode is MEMORY_DEVICE MODE
+ * and while trying to read data from data channel there are no buffers
+ * available to read the data into, then this function should be called to
+ * determine if the logging process needs to be woken up.
+ */
+/*
+ * Wake any memory-device logging client whose data_ready flag was lost
+ * in a race, so buffered data is drained. See the block comment above.
+ */
+void chk_logging_wakeup(void)
+{
+	int i;
+	int j;
+	int pid = 0;
+
+	for (j = 0; j < NUM_MD_SESSIONS; j++) {
+		if (!driver->md_session_map[j])
+			continue;
+		pid = driver->md_session_map[j]->pid;
+
+		/* Find the index of the logging process */
+		for (i = 0; i < driver->num_clients; i++) {
+			if (driver->client_map[i].pid != pid)
+				continue;
+			if (driver->data_ready[i] & USER_SPACE_DATA_TYPE)
+				continue;
+			/*
+			 * At very high logging rates a race condition can
+			 * occur where the buffers containing the data from
+			 * a channel are all in use, but the data_ready flag
+			 * is cleared. In this case, the buffers never have
+			 * their data read/logged. Detect and remedy this
+			 * situation.
+			 */
+			driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+			pr_debug("diag: Force wakeup of logging process\n");
+			wake_up_interruptible(&driver->wait_q);
+			break;
+		}
+		/*
+		 * Diag Memory Device is in normal. Check only for the first
+		 * index as all the indices point to the same session
+		 * structure.
+		 */
+		if ((driver->md_session_mask == DIAG_CON_ALL) && (j == 0))
+			break;
+	}
+}
+
+/*
+ * Frame an apps response in the non-HDLC (length-prefixed) format and
+ * write it to the mux. Busy-waits for the single response buffer to be
+ * returned by the previous write before reusing it.
+ */
+static void pack_rsp_and_send(unsigned char *buf, int len)
+{
+	int err;
+	/* unsigned: comparing a signed int against UINT_MAX would rely on
+	 * signed overflow to terminate the loop
+	 */
+	unsigned int retry_count = 0;
+	uint32_t write_len = 0;
+	unsigned long flags;
+	unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+	struct diag_pkt_frame_t header;
+
+	if (!rsp_ptr || !buf)
+		return;
+
+	if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+		pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+		       __func__, len, DIAG_MAX_RSP_SIZE);
+		return;
+	}
+
+	/*
+	 * Keep trying till we get the buffer back. It should probably
+	 * take one or two iterations. When this loops till UINT_MAX, it
+	 * means we did not get a write complete for the previous
+	 * response.
+	 */
+	while (retry_count < UINT_MAX) {
+		if (!driver->rsp_buf_busy)
+			break;
+		/*
+		 * Wait for sometime and try again. The value 10000 was chosen
+		 * empirically as an optimum value for USB to complete a write
+		 */
+		usleep_range(10000, 10100);
+		retry_count++;
+
+		/*
+		 * There can be a race conditon that clears the data ready flag
+		 * for responses. Make sure we don't miss previous wakeups for
+		 * draining responses when we are in Memory Device Mode.
+		 */
+		if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+				driver->logging_mode == DIAG_MULTI_MODE)
+			chk_logging_wakeup();
+	}
+	if (driver->rsp_buf_busy) {
+		pr_err("diag: unable to get hold of response buffer\n");
+		return;
+	}
+
+	/*
+	 * Take rsp_buf_busy_lock when claiming the buffer, matching
+	 * encode_rsp_and_send() and the clear on the error path below.
+	 */
+	spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+	driver->rsp_buf_busy = 1;
+	spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	header.start = CONTROL_CHAR;
+	header.version = 1;
+	header.length = len;
+	memcpy(rsp_ptr, &header, sizeof(header));
+	write_len += sizeof(header);
+	memcpy(rsp_ptr + write_len, buf, len);
+	write_len += len;
+	*(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
+	write_len += sizeof(uint8_t);
+
+	err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len,
+			     driver->rsp_buf_ctxt);
+	if (err) {
+		pr_err("diag: In %s, unable to write to mux, err: %d\n",
+		       __func__, err);
+		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+		driver->rsp_buf_busy = 0;
+		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	}
+}
+
+/*
+ * HDLC-encode an apps response and write it to the mux. Busy-waits for
+ * the single response buffer like pack_rsp_and_send(); clears the input
+ * buffer on return.
+ */
+static void encode_rsp_and_send(unsigned char *buf, int len)
+{
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	unsigned char *rsp_ptr = driver->encoded_rsp_buf;
+	int err, retry_count = 0;
+	unsigned long flags;
+
+	if (!rsp_ptr || !buf)
+		return;
+
+	if (len > DIAG_MAX_RSP_SIZE || len < 0) {
+		pr_err("diag: In %s, invalid len %d, permissible len %d\n",
+		       __func__, len, DIAG_MAX_RSP_SIZE);
+		return;
+	}
+
+	/*
+	 * Keep trying till we get the buffer back. It should probably
+	 * take one or two iterations. When this loops till UINT_MAX, it
+	 * means we did not get a write complete for the previous
+	 * response.
+	 */
+	while (retry_count < UINT_MAX) {
+		if (!driver->rsp_buf_busy)
+			break;
+		/*
+		 * Wait for sometime and try again. The value 10000 was chosen
+		 * empirically as an optimum value for USB to complete a write
+		 */
+		usleep_range(10000, 10100);
+		retry_count++;
+
+		/*
+		 * There can be a race conditon that clears the data ready flag
+		 * for responses. Make sure we don't miss previous wakeups for
+		 * draining responses when we are in Memory Device Mode.
+		 */
+		if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE ||
+				driver->logging_mode == DIAG_MULTI_MODE)
+			chk_logging_wakeup();
+	}
+
+	if (driver->rsp_buf_busy) {
+		pr_err("diag: unable to get hold of response buffer\n");
+		return;
+	}
+
+	spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+	driver->rsp_buf_busy = 1;
+	spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	send.state = DIAG_STATE_START;
+	send.pkt = buf;
+	send.last = (void *)(buf + len - 1);
+	send.terminate = 1;
+	enc.dest = rsp_ptr;
+	enc.dest_last = (void *)(rsp_ptr + DIAG_MAX_HDLC_BUF_SIZE - 1);
+	diag_hdlc_encode(&send, &enc);
+	driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
+	err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
+			     driver->rsp_buf_ctxt);
+	if (err) {
+		pr_err("diag: In %s, Unable to write to device, err: %d\n",
+			__func__, err);
+		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+		driver->rsp_buf_busy = 0;
+		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	}
+	memset(buf, '\0', DIAG_MAX_RSP_SIZE);
+}
+
+/*
+ * Send an apps response, choosing non-HDLC or HDLC framing based on the
+ * APPS_DATA session's hdlc_disabled setting (falling back to the global).
+ */
+void diag_send_rsp(unsigned char *buf, int len)
+{
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled;
+
+	session_info = diag_md_session_get_peripheral(APPS_DATA);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+
+	if (hdlc_disabled)
+		pack_rsp_and_send(buf, len);
+	else
+		encode_rsp_and_send(buf, len);
+}
+
+/*
+ * Copy an incoming packet into the apps request buffer (PKT_TYPE) or the
+ * DCI packet buffer (DCI_PKT_TYPE) and mark it busy for the reader.
+ */
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
+{
+	unsigned char *ptr = NULL;
+	unsigned char *temp = buf;
+	int *in_busy = NULL;
+	uint32_t *length = NULL;
+	uint32_t max_len = 0;
+
+	if (!buf || len == 0) {
+		pr_err("diag: In %s, Invalid ptr %pK and length %d\n",
+		       __func__, buf, len);
+		return;
+	}
+
+	switch (type) {
+	case PKT_TYPE:
+		ptr = driver->apps_req_buf;
+		length = &driver->apps_req_buf_len;
+		max_len = DIAG_MAX_REQ_SIZE;
+		in_busy = &driver->in_busy_pktdata;
+		break;
+	case DCI_PKT_TYPE:
+		ptr = driver->dci_pkt_buf;
+		length = &driver->dci_pkt_length;
+		max_len = DCI_BUF_SIZE;
+		in_busy = &driver->in_busy_dcipktdata;
+		break;
+	default:
+		pr_err("diag: Invalid type %d in %s\n", type, __func__);
+		return;
+	}
+
+	mutex_lock(&driver->diagchar_mutex);
+	/* Bounds check before copying into the destination buffer */
+	if (CHK_OVERFLOW(ptr, ptr, ptr + max_len, len)) {
+		memcpy(ptr, temp, len);
+		*length = len;
+		*in_busy = 1;
+	} else {
+		pr_alert("diag: In %s, no space for response packet, len: %d, type: %d\n",
+			 __func__, len, type);
+	}
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+/*
+ * Set the data_ready bit @type for every registered client and wake any
+ * waiters on the read queue.
+ */
+void diag_update_userspace_clients(unsigned int type)
+{
+	int i;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid != 0)
+			driver->data_ready[i] |= type;
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+/*
+ * Set the data_ready bit @type only for clients that own a memory-device
+ * session, then wake any waiters on the read queue.
+ */
+void diag_update_md_clients(unsigned int type)
+{
+	int i, j;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < NUM_MD_SESSIONS; i++) {
+		if (driver->md_session_map[i] != NULL)
+			for (j = 0; j < driver->num_clients; j++) {
+				if (driver->client_map[j].pid != 0 &&
+					driver->client_map[j].pid ==
+					driver->md_session_map[i]->pid) {
+					driver->data_ready[j] |= type;
+					break;
+				}
+			}
+	}
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+/*
+ * Set the data_ready bit @data_type for the client with pid @process_id
+ * and wake any waiters on the read queue.
+ */
+void diag_update_sleeping_process(int process_id, int data_type)
+{
+	int i;
+
+	mutex_lock(&driver->diagchar_mutex);
+	for (i = 0; i < driver->num_clients; i++)
+		if (driver->client_map[i].pid == process_id) {
+			driver->data_ready[i] |= data_type;
+			break;
+		}
+	wake_up_interruptible(&driver->wait_q);
+	mutex_unlock(&driver->diagchar_mutex);
+}
+
+/*
+ * Route a command packet to its registered handler: queue it for the
+ * apps client if the entry belongs to APPS_DATA, otherwise forward it to
+ * the owning peripheral's CMD channel. Returns 0 or a negative errno.
+ */
+static int diag_send_data(struct diag_cmd_reg_t *entry, unsigned char *buf,
+			  int len)
+{
+	if (!entry)
+		return -EIO;
+
+	if (entry->proc == APPS_DATA) {
+		diag_update_pkt_buffer(buf, len, PKT_TYPE);
+		diag_update_sleeping_process(entry->pid, PKT_TYPE);
+		return 0;
+	}
+
+	return diagfwd_write(entry->proc, TYPE_CMD, buf, len);
+}
+
+/*
+ * Apply STM command @cmd to processor @data_type. For peripherals that
+ * advertise STM support, sends the state and records it on success; the
+ * requested state is recorded regardless. APPS_DATA is set directly.
+ */
+void diag_process_stm_mask(uint8_t cmd, uint8_t data_mask, int data_type)
+{
+	int status = 0;
+
+	if (data_type >= PERIPHERAL_MODEM && data_type <= PERIPHERAL_SENSORS) {
+		if (driver->feature[data_type].stm_support) {
+			status = diag_send_stm_state(data_type, cmd);
+			if (status == 0)
+				driver->stm_state[data_type] = cmd;
+		}
+		driver->stm_state_requested[data_type] = cmd;
+	} else if (data_type == APPS_DATA) {
+		driver->stm_state[data_type] = cmd;
+		driver->stm_state_requested[data_type] = cmd;
+	}
+}
+
+/*
+ * Handle an STM command packet from @buf, writing the response into
+ * @dest_buf. Validates version/mask, applies the command per selected
+ * processor, then reports supported and current STM state masks.
+ * Returns the response length, or -EIO on NULL pointers.
+ */
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf)
+{
+	uint8_t version, mask, cmd;
+	uint8_t rsp_supported = 0;
+	uint8_t rsp_status = 0;
+	int i;
+
+	if (!buf || !dest_buf) {
+		pr_err("diag: Invalid pointers buf: %pK, dest_buf %pK in %s\n",
+		       buf, dest_buf, __func__);
+		return -EIO;
+	}
+
+	version = *(buf + STM_CMD_VERSION_OFFSET);
+	mask = *(buf + STM_CMD_MASK_OFFSET);
+	cmd = *(buf + STM_CMD_DATA_OFFSET);
+
+	/*
+	 * Check if command is valid. If the command is asking for
+	 * status, then the processor mask field is to be ignored.
+	 */
+	if ((version != 2) || (cmd > STATUS_STM) ||
+		((cmd != STATUS_STM) && ((mask == 0) || (0 != (mask >> 4))))) {
+		/* Command is invalid. Send bad param message response */
+		dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+		for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+			dest_buf[i+1] = *(buf + i);
+		return STM_CMD_NUM_BYTES+1;
+	} else if (cmd != STATUS_STM) {
+		if (mask & DIAG_STM_MODEM)
+			diag_process_stm_mask(cmd, DIAG_STM_MODEM,
+					      PERIPHERAL_MODEM);
+
+		if (mask & DIAG_STM_LPASS)
+			diag_process_stm_mask(cmd, DIAG_STM_LPASS,
+					      PERIPHERAL_LPASS);
+
+		if (mask & DIAG_STM_WCNSS)
+			diag_process_stm_mask(cmd, DIAG_STM_WCNSS,
+					      PERIPHERAL_WCNSS);
+
+		if (mask & DIAG_STM_SENSORS)
+			diag_process_stm_mask(cmd, DIAG_STM_SENSORS,
+						PERIPHERAL_SENSORS);
+		if (mask & DIAG_STM_WDSP)
+			diag_process_stm_mask(cmd, DIAG_STM_WDSP,
+						PERIPHERAL_WDSP);
+
+		if (mask & DIAG_STM_APPS)
+			diag_process_stm_mask(cmd, DIAG_STM_APPS, APPS_DATA);
+	}
+
+	/* Echo the original command bytes in the response */
+	for (i = 0; i < STM_CMD_NUM_BYTES; i++)
+		dest_buf[i] = *(buf + i);
+
+	/* Set mask denoting which peripherals support STM */
+	if (driver->feature[PERIPHERAL_MODEM].stm_support)
+		rsp_supported |= DIAG_STM_MODEM;
+
+	if (driver->feature[PERIPHERAL_LPASS].stm_support)
+		rsp_supported |= DIAG_STM_LPASS;
+
+	if (driver->feature[PERIPHERAL_WCNSS].stm_support)
+		rsp_supported |= DIAG_STM_WCNSS;
+
+	if (driver->feature[PERIPHERAL_SENSORS].stm_support)
+		rsp_supported |= DIAG_STM_SENSORS;
+
+	if (driver->feature[PERIPHERAL_WDSP].stm_support)
+		rsp_supported |= DIAG_STM_WDSP;
+
+	rsp_supported |= DIAG_STM_APPS;
+
+	/* Set mask denoting STM state/status for each peripheral/APSS */
+	if (driver->stm_state[PERIPHERAL_MODEM])
+		rsp_status |= DIAG_STM_MODEM;
+
+	if (driver->stm_state[PERIPHERAL_LPASS])
+		rsp_status |= DIAG_STM_LPASS;
+
+	if (driver->stm_state[PERIPHERAL_WCNSS])
+		rsp_status |= DIAG_STM_WCNSS;
+
+	if (driver->stm_state[PERIPHERAL_SENSORS])
+		rsp_status |= DIAG_STM_SENSORS;
+
+	if (driver->stm_state[PERIPHERAL_WDSP])
+		rsp_status |= DIAG_STM_WDSP;
+
+	if (driver->stm_state[APPS_DATA])
+		rsp_status |= DIAG_STM_APPS;
+
+	dest_buf[STM_RSP_SUPPORTED_INDEX] = rsp_supported;
+	dest_buf[STM_RSP_STATUS_INDEX] = rsp_status;
+
+	return STM_RSP_NUM_BYTES;
+}
+
+/*
+ * diag_process_time_sync_query_cmd - respond to a "which time API is in
+ * use" query.  Echoes the request header/version and reports
+ * driver->uses_time_api.  Returns the response length, or -EINVAL for
+ * bad arguments.
+ *
+ * NOTE(review): dest_len is validated as > 0 but not checked against
+ * sizeof(rsp) before the memcpy — callers are expected to pass a
+ * DIAG_MAX_RSP_SIZE buffer.
+ */
+int diag_process_time_sync_query_cmd(unsigned char *src_buf, int src_len,
+				      unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_cmd_time_sync_query_req_t *req = NULL;
+	struct diag_cmd_time_sync_query_rsp_t rsp;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+			__func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	req = (struct diag_cmd_time_sync_query_req_t *)src_buf;
+	rsp.header.cmd_code = req->header.cmd_code;
+	rsp.header.subsys_id = req->header.subsys_id;
+	rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+	rsp.version = req->version;
+	rsp.time_api = driver->uses_time_api;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len = sizeof(rsp);
+	return write_len;
+}
+
+/*
+ * diag_process_time_sync_switch_cmd - switch the time API used for
+ * timestamping.  Validates the request (only version/time_api values 0
+ * or 1 are accepted, persistence is not supported), broadcasts the new
+ * setting to all peripherals over the control channel, records per-
+ * peripheral failures in the status bitmask, and builds the response.
+ *
+ * Returns the response length, or -EINVAL for bad arguments.
+ */
+int diag_process_time_sync_switch_cmd(unsigned char *src_buf, int src_len,
+				      unsigned char *dest_buf, int dest_len)
+{
+	uint8_t peripheral, status = 0;
+	struct diag_cmd_time_sync_switch_req_t *req = NULL;
+	struct diag_cmd_time_sync_switch_rsp_t rsp;
+	struct diag_ctrl_msg_time_sync time_sync_msg;
+	int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
+	int err = 0, write_len = 0;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+			__func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	req = (struct diag_cmd_time_sync_switch_req_t *)src_buf;
+	rsp.header.cmd_code = req->header.cmd_code;
+	rsp.header.subsys_id = req->header.subsys_id;
+	rsp.header.subsys_cmd_code = req->header.subsys_cmd_code;
+	rsp.version = req->version;
+	rsp.time_api = req->time_api;
+	if ((req->version > 1) || (req->time_api > 1) ||
+					(req->persist_time > 0)) {
+		/* Unsupported parameters: bad-param response, echo request */
+		dest_buf[0] = BAD_PARAM_RESPONSE_MESSAGE;
+		rsp.time_api_status = 0;
+		rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+		memcpy(dest_buf + 1, &rsp, sizeof(rsp));
+		write_len = sizeof(rsp) + 1;
+		timestamp_switch = 0;
+		return write_len;
+	}
+
+	time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
+	time_sync_msg.ctrl_pkt_data_len = 5;
+	time_sync_msg.version = 1;
+	time_sync_msg.time_api = req->time_api;
+
+	/* Broadcast the new time API to every peripheral */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg,
+					msg_size);
+		if (err && err != -ENODEV) {
+			pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
+				__func__, peripheral, TYPE_CNTL,
+				msg_size, err);
+			/* Bit per peripheral that failed the switch */
+			status |= (1 << peripheral);
+		}
+	}
+
+	driver->time_sync_enabled = 1;
+	driver->uses_time_api = req->time_api;
+
+	switch (req->time_api) {
+	case 0:
+		timestamp_switch = 0;
+		break;
+	case 1:
+		timestamp_switch = 1;
+		break;
+	default:
+		timestamp_switch = 0;
+		break;
+	}
+
+	rsp.time_api_status = status;
+	rsp.persist_time_status = PERSIST_TIME_NOT_SUPPORTED;
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len = sizeof(rsp);
+	return write_len;
+}
+
+/*
+ * diag_cmd_log_on_demand - build the immediate response for a
+ * log-on-demand request.  Silently returns 0 (no response) when the
+ * modem control channel is closed or log-on-demand is unsupported.
+ * The log code is taken from the two bytes after the command code;
+ * status 1 is always reported.
+ */
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_log_on_demand_rsp_t header;
+
+	if (!driver->diagfwd_cntl[PERIPHERAL_MODEM] ||
+	    !driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open ||
+	    !driver->log_on_demand_support)
+		return 0;
+
+	if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
+		pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d",
+		       __func__, src_buf, src_len, dest_buf, dest_len);
+		return -EINVAL;
+	}
+
+	header.cmd_code = DIAG_CMD_LOG_ON_DMND;
+	/* Log code follows the 1-byte command code in the request */
+	header.log_code = *(uint16_t *)(src_buf + 1);
+	header.status = 1;
+	memcpy(dest_buf, &header, sizeof(struct diag_log_on_demand_rsp_t));
+	write_len += sizeof(struct diag_log_on_demand_rsp_t);
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_get_mobile_id - respond to the extended mobile ID request
+ * with the SoC id from socinfo.  The request must be exactly a packet
+ * header; returns the response length or -EIO on a malformed request.
+ */
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len)
+{
+	int write_len = 0;
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_ext_mobile_rsp_t rsp;
+
+	if (!src_buf || src_len != sizeof(*header) || !dest_buf ||
+	    dest_len < sizeof(rsp))
+		return -EIO;
+
+	header = (struct diag_pkt_header_t *)src_buf;
+	rsp.header.cmd_code = header->cmd_code;
+	rsp.header.subsys_id = header->subsys_id;
+	rsp.header.subsys_cmd_code = header->subsys_cmd_code;
+	rsp.version = 2;
+	rsp.padding[0] = 0;
+	rsp.padding[1] = 0;
+	rsp.padding[2] = 0;
+	rsp.family = 0;
+	rsp.chip_id = (uint32_t)socinfo_get_id();
+
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+	write_len += sizeof(rsp);
+
+	return write_len;
+}
+
+/*
+ * diag_check_common_cmd - return 1 if the command code is in the
+ * common_cmds table (commands every processor responds to), 0 if not,
+ * -EIO if header is NULL.
+ */
+int diag_check_common_cmd(struct diag_pkt_header_t *header)
+{
+	int i;
+
+	if (!header)
+		return -EIO;
+
+	for (i = 0; i < DIAG_NUM_COMMON_CMD; i++) {
+		if (header->cmd_code == common_cmds[i])
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * diag_cmd_chk_stats - handle the diag-subsystem statistics commands:
+ * report (or reset) the allocation/drop counters for msg, log and
+ * event packets.  The response echoes the request header followed by
+ * the requested counter value (0 for reset operations).
+ *
+ * Returns the response length, or -EINVAL if the request is not a
+ * recognised stats command.
+ */
+static int diag_cmd_chk_stats(unsigned char *src_buf, int src_len,
+			      unsigned char *dest_buf, int dest_len)
+{
+	int payload = 0;
+	int write_len = 0;
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_stats_rsp_t rsp;
+
+	if (!src_buf || src_len < sizeof(struct diag_pkt_header_t) ||
+	    !dest_buf || dest_len < sizeof(rsp))
+		return -EINVAL;
+
+	header = (struct diag_pkt_header_t *)src_buf;
+
+	if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+	    header->subsys_id != DIAG_SS_DIAG)
+		return -EINVAL;
+
+	switch (header->subsys_cmd_code) {
+	case DIAG_CMD_OP_GET_MSG_ALLOC:
+		payload = driver->msg_stats.alloc_count;
+		break;
+	case DIAG_CMD_OP_GET_MSG_DROP:
+		payload = driver->msg_stats.drop_count;
+		break;
+	case DIAG_CMD_OP_RESET_MSG_STATS:
+		diag_record_stats(DATA_TYPE_F3, PKT_RESET);
+		break;
+	case DIAG_CMD_OP_GET_LOG_ALLOC:
+		payload = driver->log_stats.alloc_count;
+		break;
+	case DIAG_CMD_OP_GET_LOG_DROP:
+		payload = driver->log_stats.drop_count;
+		break;
+	case DIAG_CMD_OP_RESET_LOG_STATS:
+		diag_record_stats(DATA_TYPE_LOG, PKT_RESET);
+		break;
+	case DIAG_CMD_OP_GET_EVENT_ALLOC:
+		payload = driver->event_stats.alloc_count;
+		break;
+	case DIAG_CMD_OP_GET_EVENT_DROP:
+		payload = driver->event_stats.drop_count;
+		break;
+	case DIAG_CMD_OP_RESET_EVENT_STATS:
+		diag_record_stats(DATA_TYPE_EVENT, PKT_RESET);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+	rsp.payload = payload;
+	write_len = sizeof(rsp);
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+
+	return write_len;
+}
+
+/*
+ * diag_cmd_disable_hdlc - build the response for the "disable HDLC
+ * framing" request.  Only validates and formats the response; the
+ * actual state change (hdlc_disabled) is done by the caller after the
+ * response is sent, since the tool expects this response HDLC-encoded.
+ *
+ * Returns the response length, -EIO for bad buffers, or -EINVAL if the
+ * request is not the HDLC-disable command.
+ */
+static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
+				 unsigned char *dest_buf, int dest_len)
+{
+	struct diag_pkt_header_t *header = NULL;
+	struct diag_cmd_hdlc_disable_rsp_t rsp;
+	int write_len = 0;
+
+	if (!src_buf || src_len < sizeof(*header) ||
+	    !dest_buf || dest_len < sizeof(rsp)) {
+		return -EIO;
+	}
+
+	header = (struct diag_pkt_header_t *)src_buf;
+	if (header->cmd_code != DIAG_CMD_DIAG_SUBSYS ||
+	    header->subsys_id != DIAG_SS_DIAG ||
+	    header->subsys_cmd_code != DIAG_CMD_OP_HDLC_DISABLE) {
+		return -EINVAL;
+	}
+
+	memcpy(&rsp.header, header, sizeof(struct diag_pkt_header_t));
+	rsp.framing_version = 1;
+	rsp.result = 0;
+	write_len = sizeof(rsp);
+	memcpy(dest_buf, &rsp, sizeof(rsp));
+
+	return write_len;
+}
+
+/*
+ * diag_send_error_rsp - echo an unhandled request back to the tool
+ * prefixed with the error command code (DIAG_CMD_ERROR, 0x13), so the
+ * tool's recovery logic can proceed.  Drops the response if the echo
+ * would overflow the apps response buffer.
+ */
+void diag_send_error_rsp(unsigned char *buf, int len)
+{
+	/* -1 to accommodate the first byte 0x13 */
+	if (len > (DIAG_MAX_RSP_SIZE - 1)) {
+		pr_err("diag: cannot send err rsp, huge length: %d\n", len);
+		return;
+	}
+
+	*(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
+	memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
+	diag_send_rsp(driver->apps_rsp_buf, len + 1);
+}
+
+/*
+ * diag_process_apps_pkt - central dispatcher for a decoded request packet.
+ *
+ * @buf:  decoded request bytes
+ * @len:  request length
+ * @info: memory-device session (NULL for the USB/global session)
+ *
+ * Order of dispatch: mask commands, log-on-demand, registered command
+ * table (forwarded to the owning peripheral), then a series of
+ * apps-handled commands (max packet length, STM, time sync, download
+ * mode, polling, delayed-response wrap, mobile ID, stats, HDLC
+ * disable).  Anything unhandled gets an error response when apps is
+ * the only processor.
+ */
+int diag_process_apps_pkt(unsigned char *buf, int len,
+			struct diag_md_session_t *info)
+{
+	int i;
+	int mask_ret;
+	int write_len = 0;
+	unsigned char *temp = NULL;
+	struct diag_cmd_reg_entry_t entry;
+	struct diag_cmd_reg_entry_t *temp_entry = NULL;
+	struct diag_cmd_reg_t *reg_item = NULL;
+
+	if (!buf)
+		return -EIO;
+
+	/* Check if the command is a supported mask command */
+	mask_ret = diag_process_apps_masks(buf, len, info);
+	if (mask_ret > 0) {
+		diag_send_rsp(driver->apps_rsp_buf, mask_ret);
+		return 0;
+	}
+
+	/*
+	 * Build a lookup key from the packet: 1-byte cmd code, 1-byte
+	 * subsys id, then a 2-byte subsys cmd code used as both the low
+	 * and high bound of the registered range.
+	 */
+	temp = buf;
+	entry.cmd_code = (uint16_t)(*(uint8_t *)temp);
+	temp += sizeof(uint8_t);
+	entry.subsys_id = (uint16_t)(*(uint8_t *)temp);
+	temp += sizeof(uint8_t);
+	entry.cmd_code_hi = (uint16_t)(*(uint16_t *)temp);
+	entry.cmd_code_lo = (uint16_t)(*(uint16_t *)temp);
+	temp += sizeof(uint16_t);
+
+	pr_debug("diag: In %s, received cmd %02x %02x %02x\n",
+		 __func__, entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
+
+	if (*buf == DIAG_CMD_LOG_ON_DMND && driver->log_on_demand_support &&
+	    driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
+		write_len = diag_cmd_log_on_demand(buf, len,
+						   driver->apps_rsp_buf,
+						   DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len);
+		return 0;
+	}
+
+	/* Forward to whichever processor registered for this command */
+	mutex_lock(&driver->cmd_reg_mutex);
+	temp_entry = diag_cmd_search(&entry, ALL_PROC);
+	if (temp_entry) {
+		reg_item = container_of(temp_entry, struct diag_cmd_reg_t,
+								entry);
+		if (info) {
+			if (MD_PERIPHERAL_MASK(reg_item->proc) &
+				info->peripheral_mask)
+				write_len = diag_send_data(reg_item, buf, len);
+		} else {
+			if (MD_PERIPHERAL_MASK(reg_item->proc) &
+				driver->logging_mask)
+				diag_send_error_rsp(buf, len);
+			else
+				write_len = diag_send_data(reg_item, buf, len);
+		}
+		mutex_unlock(&driver->cmd_reg_mutex);
+		return write_len;
+	}
+	mutex_unlock(&driver->cmd_reg_mutex);
+
+#if defined(CONFIG_DIAG_OVER_USB)
+	/* Check for the command/respond msg for the maximum packet length */
+	if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+		(*(uint16_t *)(buf+2) == 0x0055)) {
+		for (i = 0; i < 4; i++)
+			*(driver->apps_rsp_buf+i) = *(buf+i);
+		*(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
+		diag_send_rsp(driver->apps_rsp_buf, 8);
+		return 0;
+	} else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
+		(*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
+		len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
+		if (len > 0) {
+			diag_send_rsp(driver->apps_rsp_buf, len);
+			return 0;
+		}
+		return len;
+	}
+	/* Check for time sync query command */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+		(*(buf+1) == DIAG_SS_DIAG) &&
+		(*(uint16_t *)(buf+2) == DIAG_GET_TIME_API)) {
+		write_len = diag_process_time_sync_query_cmd(buf, len,
+							driver->apps_rsp_buf,
+							DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len);
+		return 0;
+	}
+	/* Check for time sync switch command */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+		(*(buf+1) == DIAG_SS_DIAG) &&
+		(*(uint16_t *)(buf+2) == DIAG_SET_TIME_API)) {
+		write_len = diag_process_time_sync_switch_cmd(buf, len,
+							driver->apps_rsp_buf,
+							DIAG_MAX_RSP_SIZE);
+		if (write_len > 0)
+			diag_send_rsp(driver->apps_rsp_buf, write_len);
+		return 0;
+	}
+	/* Check for download command */
+	else if ((chk_apps_master()) && (*buf == 0x3A)) {
+		/* send response back */
+		driver->apps_rsp_buf[0] = *buf;
+		diag_send_rsp(driver->apps_rsp_buf, 1);
+		/* Give the response time to drain before rebooting */
+		msleep(5000);
+		/* call download API */
+		msm_set_restart_mode(RESTART_DLOAD);
+		pr_crit("diag: download mode set, Rebooting SoC..\n");
+		kernel_restart(NULL);
+		/* Not required, represents that command isn't sent to modem */
+		return 0;
+	}
+	/* Check for polling for Apps only DIAG */
+	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+		(*(buf+2) == 0x03)) {
+		/* If no one has registered for polling */
+		if (chk_polling_response()) {
+			/* Respond to polling for Apps only DIAG */
+			for (i = 0; i < 3; i++)
+				driver->apps_rsp_buf[i] = *(buf+i);
+			for (i = 0; i < 13; i++)
+				driver->apps_rsp_buf[i+3] = 0;
+
+			diag_send_rsp(driver->apps_rsp_buf, 16);
+			return 0;
+		}
+	}
+	/* Return the Delayed Response Wrap Status */
+	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+		(*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
+		memcpy(driver->apps_rsp_buf, buf, 4);
+		driver->apps_rsp_buf[4] = wrap_enabled;
+		diag_send_rsp(driver->apps_rsp_buf, 5);
+		return 0;
+	}
+	/* Wrap the Delayed Rsp ID */
+	else if ((*buf == 0x4b) && (*(buf+1) == 0x32) &&
+		(*(buf+2) == 0x05) && (*(buf+3) == 0x0)) {
+		wrap_enabled = true;
+		memcpy(driver->apps_rsp_buf, buf, 4);
+		/*
+		 * NOTE(review): 6 bytes are sent but only byte 4 is
+		 * written here; confirm whether byte 5 is meant to carry
+		 * the high byte of wrap_count.
+		 */
+		driver->apps_rsp_buf[4] = wrap_count;
+		diag_send_rsp(driver->apps_rsp_buf, 6);
+		return 0;
+	}
+	/* Mobile ID Rsp */
+	else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+			(*(buf+1) == DIAG_SS_PARAMS) &&
+			(*(buf+2) == DIAG_EXT_MOBILE_ID) && (*(buf+3) == 0x0)) {
+		write_len = diag_cmd_get_mobile_id(buf, len,
+						   driver->apps_rsp_buf,
+						   DIAG_MAX_RSP_SIZE);
+		if (write_len > 0) {
+			diag_send_rsp(driver->apps_rsp_buf, write_len);
+			return 0;
+		}
+	}
+	 /*
+	  * If the apps processor is master and no other
+	  * processor has registered for polling command.
+	  * If modem is not up and we have not received feature
+	  * mask update from modem, in that case APPS should
+	  * respond for 0X7C command
+	  */
+	else if (chk_apps_master() &&
+		 !(driver->polling_reg_flag) &&
+		 /*
+		  * NOTE(review): diagfwd_cntl[PERIPHERAL_MODEM] is
+		  * dereferenced without a NULL check here, unlike
+		  * diag_cmd_log_on_demand above — confirm it is always
+		  * allocated before this path can run.
+		  */
+		 !(driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open) &&
+		 !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) {
+		/* respond to 0x0 command */
+		if (*buf == 0x00) {
+			for (i = 0; i < 55; i++)
+				driver->apps_rsp_buf[i] = 0;
+
+			diag_send_rsp(driver->apps_rsp_buf, 55);
+			return 0;
+		}
+		/* respond to 0x7c command */
+		else if (*buf == 0x7c) {
+			driver->apps_rsp_buf[0] = 0x7c;
+			for (i = 1; i < 8; i++)
+				driver->apps_rsp_buf[i] = 0;
+			/* Tools ID for APQ 8060 */
+			*(int *)(driver->apps_rsp_buf + 8) =
+							 chk_config_get_id();
+			*(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
+			*(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
+			diag_send_rsp(driver->apps_rsp_buf, 14);
+			return 0;
+		}
+	}
+	write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
+				       DIAG_MAX_RSP_SIZE);
+	if (write_len > 0) {
+		diag_send_rsp(driver->apps_rsp_buf, write_len);
+		return 0;
+	}
+	write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
+					  DIAG_MAX_RSP_SIZE);
+	if (write_len > 0) {
+		/*
+		 * This mutex lock is necessary since we need to drain all the
+		 * pending buffers from peripherals which may be HDLC encoded
+		 * before disabling HDLC encoding on Apps processor.
+		 */
+		mutex_lock(&driver->hdlc_disable_mutex);
+		diag_send_rsp(driver->apps_rsp_buf, write_len);
+		/*
+		 * Set the value of hdlc_disabled after sending the response to
+		 * the tools. This is required since the tools is expecting a
+		 * HDLC encoded response for this request.
+		 */
+		pr_debug("diag: In %s, disabling HDLC encoding\n",
+		       __func__);
+		if (info)
+			info->hdlc_disabled = 1;
+		else
+			driver->hdlc_disabled = 1;
+		diag_update_md_clients(HDLC_SUPPORT_TYPE);
+		mutex_unlock(&driver->hdlc_disable_mutex);
+		return 0;
+	}
+#endif
+
+	/* We have now come to the end of the function. */
+	if (chk_apps_only())
+		diag_send_error_rsp(buf, len);
+
+	return 0;
+}
+
+/*
+ * diag_process_hdlc_pkt - accumulate and decode HDLC-framed request data.
+ *
+ * Decodes incoming bytes into driver->hdlc_buf (which may already hold a
+ * partial packet from a previous call).  On a complete frame the CRC is
+ * checked, the footer stripped, and the packet handed to
+ * diag_process_apps_pkt().  Any failure sends an error response so the
+ * tool can start its recovery.  Serialised by diag_hdlc_mutex.
+ */
+void diag_process_hdlc_pkt(void *data, unsigned int len,
+			   struct diag_md_session_t *info)
+{
+	int err = 0;
+	int ret = 0;
+
+	if (len > DIAG_MAX_HDLC_BUF_SIZE) {
+		pr_err("diag: In %s, invalid length: %d\n", __func__, len);
+		return;
+	}
+
+	mutex_lock(&driver->diag_hdlc_mutex);
+	pr_debug("diag: In %s, received packet of length: %d, req_buf_len: %d\n",
+		 __func__, len, driver->hdlc_buf_len);
+
+	if (driver->hdlc_buf_len >= DIAG_MAX_REQ_SIZE) {
+		pr_err("diag: In %s, request length is more than supported len. Dropping packet.\n",
+		       __func__);
+		goto fail;
+	}
+
+	/* Append new bytes after any partial packet already decoded */
+	hdlc_decode->dest_ptr = driver->hdlc_buf + driver->hdlc_buf_len;
+	hdlc_decode->dest_size = DIAG_MAX_HDLC_BUF_SIZE - driver->hdlc_buf_len;
+	hdlc_decode->src_ptr = data;
+	hdlc_decode->src_size = len;
+	hdlc_decode->src_idx = 0;
+	hdlc_decode->dest_idx = 0;
+
+	ret = diag_hdlc_decode(hdlc_decode);
+	/*
+	 * driver->hdlc_buf is of size DIAG_MAX_HDLC_BUF_SIZE. But the decoded
+	 * packet should be within DIAG_MAX_REQ_SIZE.
+	 */
+	if (driver->hdlc_buf_len + hdlc_decode->dest_idx <= DIAG_MAX_REQ_SIZE) {
+		driver->hdlc_buf_len += hdlc_decode->dest_idx;
+	} else {
+		pr_err_ratelimited("diag: In %s, Dropping packet. pkt_size: %d, max: %d\n",
+				   __func__,
+				   driver->hdlc_buf_len + hdlc_decode->dest_idx,
+				   DIAG_MAX_REQ_SIZE);
+		goto fail;
+	}
+
+	if (ret == HDLC_COMPLETE) {
+		err = crc_check(driver->hdlc_buf, driver->hdlc_buf_len);
+		if (err) {
+			/* CRC check failed. */
+			pr_err_ratelimited("diag: In %s, bad CRC. Dropping packet\n",
+					   __func__);
+			goto fail;
+		}
+		driver->hdlc_buf_len -= HDLC_FOOTER_LEN;
+
+		if (driver->hdlc_buf_len < 1) {
+			pr_err_ratelimited("diag: In %s, message is too short, len: %d, dest len: %d\n",
+					   __func__, driver->hdlc_buf_len,
+					   hdlc_decode->dest_idx);
+			goto fail;
+		}
+
+		err = diag_process_apps_pkt(driver->hdlc_buf,
+					    driver->hdlc_buf_len, info);
+		if (err < 0)
+			goto fail;
+	} else {
+		/* Frame incomplete: keep the partial data for the next read */
+		goto end;
+	}
+
+	driver->hdlc_buf_len = 0;
+	mutex_unlock(&driver->diag_hdlc_mutex);
+	return;
+
+fail:
+	/*
+	 * Tools needs to get a response in order to start its
+	 * recovery algorithm. Send an error response if the
+	 * packet is not in expected format.
+	 */
+	diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len);
+	driver->hdlc_buf_len = 0;
+end:
+	mutex_unlock(&driver->diag_hdlc_mutex);
+}
+
+/*
+ * diagfwd_mux_open - mux "channel opened" callback.  Marks USB as
+ * connected (for USB mode), clears a stale response-buffer busy flag
+ * left over from a previous session, opens the data and command
+ * channels of every peripheral and kicks the real-time state worker.
+ */
+static int diagfwd_mux_open(int id, int mode)
+{
+	uint8_t i;
+	unsigned long flags;
+
+	switch (mode) {
+	case DIAG_USB_MODE:
+		driver->usb_connected = 1;
+		break;
+	case DIAG_MEMORY_DEVICE_MODE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (driver->rsp_buf_busy) {
+		/*
+		 * When a client switches from callback mode to USB mode
+		 * explicitly, there can be a situation when the last response
+		 * is not drained to the user space application. Reset the
+		 * in_busy flag in this case.
+		 */
+		spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+		driver->rsp_buf_busy = 0;
+		spin_unlock_irqrestore(&driver->rsp_buf_busy_lock, flags);
+	}
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		diagfwd_open(i, TYPE_DATA);
+		diagfwd_open(i, TYPE_CMD);
+	}
+	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
+	return 0;
+}
+
+/*
+ * diagfwd_mux_close - mux "channel closed" callback.  Marks USB as
+ * disconnected (for USB mode).  If no memory-device client remains,
+ * re-enables HDLC encoding; the channels themselves stay open so stale
+ * peripheral packets can be read and dropped.
+ */
+static int diagfwd_mux_close(int id, int mode)
+{
+	switch (mode) {
+	case DIAG_USB_MODE:
+		driver->usb_connected = 0;
+		break;
+	case DIAG_MEMORY_DEVICE_MODE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((driver->logging_mode == DIAG_MULTI_MODE &&
+		driver->md_session_mode == DIAG_MD_NONE) ||
+		(driver->md_session_mode == DIAG_MD_PERIPHERAL)) {
+		/*
+		 * This case indicates that the USB is removed
+		 * but there is a client running in background
+		 * with Memory Device mode.
+		 */
+	} else {
+		/*
+		 * With clearing of masks on ODL exit and
+		 * USB disconnection, closing of the channel is
+		 * not needed.This enables read and drop of stale packets.
+		 */
+		pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+		       __func__);
+		mutex_lock(&driver->hdlc_disable_mutex);
+		if (driver->md_session_mode == DIAG_MD_NONE)
+			driver->hdlc_disabled = 0;
+		mutex_unlock(&driver->hdlc_disable_mutex);
+		queue_work(driver->diag_wq,
+			&(driver->update_user_clients));
+	}
+	queue_work(driver->diag_real_time_wq,
+		   &driver->diag_real_time_work);
+	return 0;
+}
+
+static uint8_t hdlc_reset;
+
+/*
+ * hdlc_reset_timer_start - arm the 200ms HDLC-recovery timer (per-session
+ * timer when info is given, otherwise the global one).  No-op if a timer
+ * is already in flight.
+ */
+static void hdlc_reset_timer_start(struct diag_md_session_t *info)
+{
+	if (!hdlc_timer_in_progress) {
+		hdlc_timer_in_progress = 1;
+		if (info)
+			mod_timer(&info->hdlc_reset_timer,
+			  jiffies + msecs_to_jiffies(200));
+		else
+			mod_timer(&driver->hdlc_reset_timer,
+			  jiffies + msecs_to_jiffies(200));
+	}
+}
+
+/*
+ * hdlc_reset_timer_func - global HDLC-recovery timer callback: if a
+ * reset was requested, re-enable HDLC encoding and notify user-space
+ * clients, then clear the timer-in-progress flag.
+ */
+static void hdlc_reset_timer_func(unsigned long data)
+{
+	pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+		       __func__);
+	if (hdlc_reset) {
+		driver->hdlc_disabled = 0;
+		queue_work(driver->diag_wq,
+			&(driver->update_user_clients));
+	}
+	hdlc_timer_in_progress = 0;
+}
+
+/*
+ * diag_md_hdlc_reset_timer_func - per-session HDLC-recovery timer
+ * callback; @pid identifies the memory-device session.  Re-enables
+ * HDLC for that session (if it still exists) and notifies md clients.
+ */
+void diag_md_hdlc_reset_timer_func(unsigned long pid)
+{
+	struct diag_md_session_t *session_info = NULL;
+
+	pr_debug("diag: In %s, re-enabling HDLC encoding\n",
+		       __func__);
+	if (hdlc_reset) {
+		session_info = diag_md_session_get_pid(pid);
+		if (session_info)
+			session_info->hdlc_disabled = 0;
+		queue_work(driver->diag_wq,
+			&(driver->update_md_clients));
+	}
+	hdlc_timer_in_progress = 0;
+}
+
+/*
+ * diag_hdlc_start_recovery - resynchronise the non-HDLC stream after a
+ * framing error.  Arms the recovery timer, then scans the buffer for
+ * the next well-formed frame start (CONTROL_CHAR, version 1, plausible
+ * length, trailing CONTROL_CHAR) and reprocesses from there.  If too
+ * many bad bytes accumulate, gives up and re-enables HDLC encoding.
+ */
+static void diag_hdlc_start_recovery(unsigned char *buf, int len,
+				     struct diag_md_session_t *info)
+{
+	int i;
+	static uint32_t bad_byte_counter;
+	unsigned char *start_ptr = NULL;
+	struct diag_pkt_frame_t *actual_pkt = NULL;
+
+	hdlc_reset = 1;
+	hdlc_reset_timer_start(info);
+
+	/*
+	 * NOTE(review): the frame-header test inside the loop reads
+	 * actual_pkt/buf at fixed offsets rather than offsets from i —
+	 * confirm whether the scan is intended to re-test the same
+	 * candidate while only advancing the bad-byte counter.
+	 */
+	actual_pkt = (struct diag_pkt_frame_t *)buf;
+	for (i = 0; i < len; i++) {
+		if (actual_pkt->start == CONTROL_CHAR &&
+				actual_pkt->version == 1 &&
+				actual_pkt->length < len &&
+				(*(uint8_t *)(buf +
+				sizeof(struct diag_pkt_frame_t) +
+				actual_pkt->length) == CONTROL_CHAR)) {
+			start_ptr = &buf[i];
+			break;
+		}
+		bad_byte_counter++;
+		if (bad_byte_counter > (DIAG_MAX_REQ_SIZE +
+				sizeof(struct diag_pkt_frame_t) + 1)) {
+			bad_byte_counter = 0;
+			pr_err("diag: In %s, re-enabling HDLC encoding\n",
+					__func__);
+			mutex_lock(&driver->hdlc_disable_mutex);
+			if (info)
+				info->hdlc_disabled = 0;
+			else
+				driver->hdlc_disabled = 0;
+			mutex_unlock(&driver->hdlc_disable_mutex);
+			diag_update_md_clients(HDLC_SUPPORT_TYPE);
+
+			return;
+		}
+	}
+
+	if (start_ptr) {
+		/* Discard any partial packet reads */
+		driver->incoming_pkt.processing = 0;
+		diag_process_non_hdlc_pkt(start_ptr, len - i, info);
+	}
+}
+
+/*
+ * diag_process_non_hdlc_pkt - parse length-prefixed (non-HDLC) frames.
+ *
+ * Each frame is a diag_pkt_frame_t header (start byte, version, length)
+ * followed by the payload and a trailing CONTROL_CHAR.  Frames that
+ * span reads are buffered in driver->incoming_pkt until the remainder
+ * arrives.  Framing errors hand the buffer to
+ * diag_hdlc_start_recovery().
+ */
+void diag_process_non_hdlc_pkt(unsigned char *buf, int len,
+			       struct diag_md_session_t *info)
+{
+	int err = 0;
+	uint16_t pkt_len = 0;
+	uint32_t read_bytes = 0;
+	const uint32_t header_len = sizeof(struct diag_pkt_frame_t);
+	struct diag_pkt_frame_t *actual_pkt = NULL;
+	unsigned char *data_ptr = NULL;
+	struct diag_partial_pkt_t *partial_pkt = &driver->incoming_pkt;
+
+	if (!buf || len <= 0)
+		return;
+
+	if (!partial_pkt->processing)
+		goto start;
+
+	/* Continue filling a frame left incomplete by the previous read */
+	if (partial_pkt->remaining > len) {
+		if ((partial_pkt->read_len + len) > partial_pkt->capacity) {
+			pr_err("diag: Invalid length %d, %d received in %s\n",
+			       partial_pkt->read_len, len, __func__);
+			goto end;
+		}
+		memcpy(partial_pkt->data + partial_pkt->read_len, buf, len);
+		read_bytes += len;
+		buf += read_bytes;
+		partial_pkt->read_len += len;
+		partial_pkt->remaining -= len;
+	} else {
+		if ((partial_pkt->read_len + partial_pkt->remaining) >
+						partial_pkt->capacity) {
+			pr_err("diag: Invalid length during partial read %d, %d received in %s\n",
+			       partial_pkt->read_len,
+			       partial_pkt->remaining, __func__);
+			goto end;
+		}
+		memcpy(partial_pkt->data + partial_pkt->read_len, buf,
+						partial_pkt->remaining);
+		read_bytes += partial_pkt->remaining;
+		buf += read_bytes;
+		partial_pkt->read_len += partial_pkt->remaining;
+		partial_pkt->remaining = 0;
+	}
+
+	if (partial_pkt->remaining == 0) {
+		/* Frame now complete: verify trailer and process it */
+		actual_pkt = (struct diag_pkt_frame_t *)(partial_pkt->data);
+		data_ptr = partial_pkt->data + header_len;
+		if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+			diag_hdlc_start_recovery(buf, len, info);
+		err = diag_process_apps_pkt(data_ptr,
+					    actual_pkt->length, info);
+		if (err) {
+			pr_err("diag: In %s, unable to process incoming data packet, err: %d\n",
+			       __func__, err);
+			goto end;
+		}
+		partial_pkt->read_len = 0;
+		partial_pkt->total_len = 0;
+		partial_pkt->processing = 0;
+		goto start;
+	}
+	goto end;
+
+start:
+	/* Walk the remaining bytes frame by frame */
+	while (read_bytes < len) {
+		actual_pkt = (struct diag_pkt_frame_t *)buf;
+		pkt_len = actual_pkt->length;
+
+		if (actual_pkt->start != CONTROL_CHAR) {
+			diag_hdlc_start_recovery(buf, len, info);
+			diag_send_error_rsp(buf, len);
+			goto end;
+		}
+
+		if (pkt_len + header_len > partial_pkt->capacity) {
+			pr_err("diag: In %s, incoming data is too large for the request buffer %d\n",
+			       __func__, pkt_len);
+			diag_hdlc_start_recovery(buf, len, info);
+			break;
+		}
+
+		if ((pkt_len + header_len) > (len - read_bytes)) {
+			/* Frame spans this read: stash the prefix */
+			partial_pkt->read_len = len - read_bytes;
+			partial_pkt->total_len = pkt_len + header_len;
+			partial_pkt->remaining = partial_pkt->total_len -
+						 partial_pkt->read_len;
+			partial_pkt->processing = 1;
+			memcpy(partial_pkt->data, buf, partial_pkt->read_len);
+			break;
+		}
+		data_ptr = buf + header_len;
+		if (*(uint8_t *)(data_ptr + actual_pkt->length) != CONTROL_CHAR)
+			diag_hdlc_start_recovery(buf, len, info);
+		else
+			hdlc_reset = 0;
+		err = diag_process_apps_pkt(data_ptr,
+					    actual_pkt->length, info);
+		if (err)
+			break;
+		/* +1 skips the trailing CONTROL_CHAR of the frame */
+		read_bytes += header_len + pkt_len + 1;
+		buf += header_len + pkt_len + 1; /* advance to next pkt */
+	}
+end:
+	return;
+}
+
+/*
+ * diagfwd_mux_read_done - mux read-complete callback: route the data to
+ * the HDLC or non-HDLC parser based on the current framing mode, then
+ * queue the next read.
+ */
+static int diagfwd_mux_read_done(unsigned char *buf, int len, int ctxt)
+{
+	if (!buf || len <= 0)
+		return -EINVAL;
+
+	if (!driver->hdlc_disabled)
+		diag_process_hdlc_pkt(buf, len, NULL);
+	else
+		diag_process_non_hdlc_pkt(buf, len, NULL);
+
+	diag_mux_queue_read(ctxt);
+	return 0;
+}
+
+/*
+ * diagfwd_mux_write_done - mux write-complete callback.  Decodes the
+ * buffer context (peripheral/type/num) and releases the buffer: marks
+ * the peripheral buffer writable again, frees the apps HDLC pool
+ * buffer, or clears the apps response-busy flag, depending on origin.
+ */
+static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
+				  int ctxt)
+{
+	unsigned long flags;
+	int peripheral = -1;
+	int type = -1;
+	int num = -1;
+
+	if (!buf || len < 0)
+		return -EINVAL;
+
+	peripheral = GET_BUF_PERIPHERAL(buf_ctxt);
+	type = GET_BUF_TYPE(buf_ctxt);
+	num = GET_BUF_NUM(buf_ctxt);
+
+	switch (type) {
+	case TYPE_DATA:
+		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+			diagfwd_write_done(peripheral, type, num);
+			diag_ws_on_copy(DIAG_WS_MUX);
+		} else if (peripheral == APPS_DATA) {
+			diagmem_free(driver, (unsigned char *)buf,
+				     POOL_TYPE_HDLC);
+			buf = NULL;
+		} else {
+			pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
+					   peripheral, __func__, type);
+		}
+		break;
+	case TYPE_CMD:
+		if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+			diagfwd_write_done(peripheral, type, num);
+		} else if (peripheral == APPS_DATA) {
+			/* Apps response drained: allow the next response */
+			spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
+			driver->rsp_buf_busy = 0;
+			driver->encoded_rsp_len = 0;
+			spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
+					       flags);
+		} else {
+			pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
+					   peripheral, __func__, type);
+		}
+		break;
+	default:
+		pr_err_ratelimited("diag: Incorrect data type %d, buf_ctxt: %d in %s\n",
+				   type, buf_ctxt, __func__);
+		break;
+	}
+
+	return 0;
+}
+
+/* Callbacks registered with the diag mux layer for the local processor */
+static struct diag_mux_ops diagfwd_mux_ops = {
+	.open = diagfwd_mux_open,
+	.close = diagfwd_mux_close,
+	.read_done = diagfwd_mux_read_done,
+	.write_done = diagfwd_mux_write_done
+};
+
+/*
+ * diagfwd_init - allocate all forwarding buffers and state, initialise
+ * locks/timers, set per-peripheral feature and buffering defaults, and
+ * register with the mux layer.  Returns 0 on success or -ENOMEM after
+ * freeing everything allocated so far (kfree(NULL) is a safe no-op for
+ * the unallocated ones).
+ */
+int diagfwd_init(void)
+{
+	int ret;
+	int i;
+
+	wrap_enabled = 0;
+	wrap_count = 0;
+	driver->use_device_tree = has_device_tree();
+	for (i = 0; i < DIAG_NUM_PROC; i++)
+		driver->real_time_mode[i] = 1;
+	driver->supports_separate_cmdrsp = 1;
+	driver->supports_apps_hdlc_encoding = 1;
+	mutex_init(&driver->diag_hdlc_mutex);
+	mutex_init(&driver->diag_cntl_mutex);
+	mutex_init(&driver->mode_lock);
+	driver->encoded_rsp_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE +
+				APF_DIAG_PADDING, GFP_KERNEL);
+	if (!driver->encoded_rsp_buf)
+		goto err;
+	kmemleak_not_leak(driver->encoded_rsp_buf);
+	hdlc_decode = kzalloc(sizeof(struct diag_hdlc_decode_type),
+			      GFP_KERNEL);
+	if (!hdlc_decode)
+		goto err;
+	setup_timer(&driver->hdlc_reset_timer, hdlc_reset_timer_func, 0);
+	kmemleak_not_leak(hdlc_decode);
+	driver->encoded_rsp_len = 0;
+	driver->rsp_buf_busy = 0;
+	spin_lock_init(&driver->rsp_buf_busy_lock);
+	driver->user_space_data_busy = 0;
+	driver->hdlc_buf_len = 0;
+	INIT_LIST_HEAD(&driver->cmd_reg_list);
+	driver->cmd_reg_count = 0;
+	mutex_init(&driver->cmd_reg_mutex);
+
+	/* Default per-peripheral feature flags until feature masks arrive */
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		driver->feature[i].separate_cmd_rsp = 0;
+		driver->feature[i].stm_support = DISABLE_STM;
+		driver->feature[i].rcvd_feature_mask = 0;
+		driver->feature[i].peripheral_buffering = 0;
+		driver->feature[i].encode_hdlc = 0;
+		driver->feature[i].mask_centralization = 0;
+		driver->feature[i].log_on_demand = 0;
+		driver->feature[i].sent_feature_mask = 0;
+		driver->buffering_mode[i].peripheral = i;
+		driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
+		driver->buffering_mode[i].high_wm_val = DEFAULT_HIGH_WM_VAL;
+		driver->buffering_mode[i].low_wm_val = DEFAULT_LOW_WM_VAL;
+	}
+
+	for (i = 0; i < NUM_STM_PROCESSORS; i++) {
+		driver->stm_state_requested[i] = DISABLE_STM;
+		driver->stm_state[i] = DISABLE_STM;
+	}
+
+	if (driver->hdlc_buf == NULL) {
+		driver->hdlc_buf = kzalloc(DIAG_MAX_HDLC_BUF_SIZE, GFP_KERNEL);
+		if (!driver->hdlc_buf)
+			goto err;
+		kmemleak_not_leak(driver->hdlc_buf);
+	}
+	if (driver->user_space_data_buf == NULL)
+		driver->user_space_data_buf = kzalloc(USER_SPACE_DATA,
+							GFP_KERNEL);
+	if (driver->user_space_data_buf == NULL)
+		goto err;
+	kmemleak_not_leak(driver->user_space_data_buf);
+
+	if (!driver->client_map) {
+		driver->client_map = kcalloc(driver->num_clients,
+				sizeof(struct diag_client_map), GFP_KERNEL);
+		if (!driver->client_map)
+			goto err;
+	}
+	kmemleak_not_leak(driver->client_map);
+
+	if (!driver->data_ready) {
+		driver->data_ready = kcalloc(driver->num_clients,
+				sizeof(int), GFP_KERNEL);
+		if (!driver->data_ready)
+			goto err;
+	}
+	kmemleak_not_leak(driver->data_ready);
+
+	if (driver->apps_req_buf == NULL) {
+		driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
+		if (!driver->apps_req_buf)
+			goto err;
+		kmemleak_not_leak(driver->apps_req_buf);
+	}
+	if (driver->dci_pkt_buf == NULL) {
+		driver->dci_pkt_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL);
+		if (!driver->dci_pkt_buf)
+			goto err;
+		kmemleak_not_leak(driver->dci_pkt_buf);
+	}
+	if (driver->apps_rsp_buf == NULL) {
+		driver->apps_rsp_buf = kzalloc(DIAG_MAX_RSP_SIZE, GFP_KERNEL);
+		if (driver->apps_rsp_buf == NULL)
+			goto err;
+		kmemleak_not_leak(driver->apps_rsp_buf);
+	}
+	driver->diag_wq = create_singlethread_workqueue("diag_wq");
+	if (!driver->diag_wq)
+		goto err;
+	ret = diag_mux_register(DIAG_LOCAL_PROC, DIAG_LOCAL_PROC,
+				&diagfwd_mux_ops);
+	if (ret) {
+		pr_err("diag: Unable to register with USB, err: %d\n", ret);
+		goto err;
+	}
+
+	return 0;
+err:
+	pr_err("diag: In %s, couldn't initialize diag\n", __func__);
+
+	diag_usb_exit(DIAG_USB_LOCAL);
+	kfree(driver->encoded_rsp_buf);
+	kfree(driver->hdlc_buf);
+	kfree(driver->client_map);
+	kfree(driver->data_ready);
+	kfree(driver->apps_req_buf);
+	kfree(driver->dci_pkt_buf);
+	kfree(driver->apps_rsp_buf);
+	kfree(hdlc_decode);
+	kfree(driver->user_space_data_buf);
+	if (driver->diag_wq)
+		destroy_workqueue(driver->diag_wq);
+	return -ENOMEM;
+}
+
+/*
+ * diagfwd_exit - release everything allocated by diagfwd_init().
+ *
+ * NOTE(review): freed pointers are not NULLed here — confirm this is
+ * only called once at module teardown so no dangling use is possible.
+ */
+void diagfwd_exit(void)
+{
+	kfree(driver->encoded_rsp_buf);
+	kfree(driver->hdlc_buf);
+	kfree(hdlc_decode);
+	kfree(driver->client_map);
+	kfree(driver->data_ready);
+	kfree(driver->apps_req_buf);
+	kfree(driver->dci_pkt_buf);
+	kfree(driver->apps_rsp_buf);
+	kfree(driver->user_space_data_buf);
+	destroy_workqueue(driver->diag_wq);
+}
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
new file mode 100644
index 0000000..47c8555
--- /dev/null
+++ b/drivers/char/diag/diagfwd.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_H
+#define DIAGFWD_H
+
+/*
+ * The context applies to Diag SMD data buffers. It is used to identify the
+ * buffer once these buffers are written to USB.
+ */
+#define SET_BUF_CTXT(p, d, n) \
+	(((p & 0xFF) << 16) | ((d & 0xFF) << 8) | (n & 0xFF))
+/* Accessors for the peripheral / type / buffer-number fields packed above. */
+#define GET_BUF_PERIPHERAL(p)	((p & 0xFF0000) >> 16)
+#define GET_BUF_TYPE(d)		((d & 0x00FF00) >> 8)
+#define GET_BUF_NUM(n)		((n & 0x0000FF))
+
+/* 1 if [start, start + length) fits within the buffer that begins at
+ * bufStart and ends at end; 0 otherwise. */
+#define CHK_OVERFLOW(bufStart, start, end, length) \
+	((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0)
+
+int diagfwd_init(void);
+void diagfwd_exit(void);
+void diag_process_hdlc_pkt(void *data, unsigned int len,
+			   struct diag_md_session_t *info);
+void diag_process_non_hdlc_pkt(unsigned char *data, int len,
+			       struct diag_md_session_t *info);
+int chk_config_get_id(void);
+int chk_apps_only(void);
+int chk_apps_master(void);
+int chk_polling_response(void);
+int diag_cmd_log_on_demand(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len);
+int diag_cmd_get_mobile_id(unsigned char *src_buf, int src_len,
+			   unsigned char *dest_buf, int dest_len);
+int diag_check_common_cmd(struct diag_pkt_header_t *header);
+void diag_update_userspace_clients(unsigned int type);
+void diag_update_sleeping_process(int process_id, int data_type);
+int diag_process_apps_pkt(unsigned char *buf, int len,
+			  struct diag_md_session_t *info);
+void diag_send_error_rsp(unsigned char *buf, int len);
+void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
+int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
+void diag_md_hdlc_reset_timer_func(unsigned long pid);
+void diag_update_md_clients(unsigned int type);
+#endif
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
new file mode 100644
index 0000000..3684b8d
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -0,0 +1,317 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/ratelimit.h>
+#include <linux/platform_device.h>
+#ifdef USB_QCOM_DIAG_BRIDGE
+#include <linux/smux.h>
+#endif
+#include "diag_mux.h"
+#include "diagfwd_bridge.h"
+#ifdef USB_QCOM_DIAG_BRIDGE
+#include "diagfwd_hsic.h"
+#include "diagfwd_smux.h"
+#endif
+#include "diagfwd_mhi.h"
+#include "diag_dci.h"
+
+#ifdef CONFIG_MSM_MHI
+#define diag_mdm_init		diag_mhi_init
+#else
+#define diag_mdm_init		diag_hsic_init
+#endif
+
+#define BRIDGE_TO_MUX(x)	(x + DIAG_MUX_BRIDGE_BASE)
+
+/*
+ * Static table of remote Diag devices. Indices match the DIAGFWD_* ids
+ * from diagfwd_bridge.h; data channels precede DCI channels. ctxt and
+ * dev_ops are filled in later by diagfwd_bridge_register().
+ */
+struct diagfwd_bridge_info bridge_info[NUM_REMOTE_DEV] = {
+	{
+		.id = DIAGFWD_MDM,
+		.type = DIAG_DATA_TYPE,
+		.name = "MDM",
+		.inited = 0,
+		.ctxt = 0,
+		.dev_ops = NULL,
+		.dci_read_ptr = NULL,
+		.dci_read_buf = NULL,
+		.dci_read_len = 0,
+		.dci_wq = NULL,
+	},
+	{
+		.id = DIAGFWD_SMUX,
+		.type = DIAG_DATA_TYPE,
+		.name = "SMUX",
+		.inited = 0,
+		.ctxt = 0,
+		.dci_read_ptr = NULL,
+		.dev_ops = NULL,
+		.dci_read_buf = NULL,
+		.dci_read_len = 0,
+		.dci_wq = NULL,
+	},
+	{
+		.id = DIAGFWD_MDM_DCI,
+		.type = DIAG_DCI_TYPE,
+		.name = "MDM_DCI",
+		.inited = 0,
+		.ctxt = 0,
+		.dci_read_ptr = NULL,
+		.dev_ops = NULL,
+		.dci_read_buf = NULL,
+		.dci_read_len = 0,
+		.dci_wq = NULL,
+	},
+};
+
+/* Mux "open" callback: forward the connect to the device's open op. */
+static int diagfwd_bridge_mux_connect(int id, int mode)
+{
+	struct diagfwd_bridge_info *ch;
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+		return -EINVAL;
+	ch = &bridge_info[id];
+	if (ch->dev_ops && ch->dev_ops->open)
+		ch->dev_ops->open(ch->ctxt);
+	return 0;
+}
+
+/* Mux "close" callback: forward the disconnect to the device's close op. */
+static int diagfwd_bridge_mux_disconnect(int id, int mode)
+{
+	struct diagfwd_bridge_info *ch;
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+		return -EINVAL;
+	ch = &bridge_info[id];
+	if (ch->dev_ops && ch->dev_ops->close)
+		ch->dev_ops->close(ch->ctxt);
+	return 0;
+}
+
+/* Mux read-completion: data read from the mux is pushed to the device. */
+static int diagfwd_bridge_mux_read_done(unsigned char *buf, int len, int id)
+{
+	int err = diagfwd_bridge_write(id, buf, len);
+
+	return err;
+}
+
+/*
+ * Mux write-completion callback: ack the buffer back to the remote
+ * device identified by @buf_ctx so it can recycle the read buffer.
+ */
+static int diagfwd_bridge_mux_write_done(unsigned char *buf, int len,
+					 int buf_ctx, int id)
+{
+	struct diagfwd_bridge_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+		return -EINVAL;
+	/*
+	 * bridge_info is indexed with buf_ctx below, not id; validate it
+	 * too so a corrupted context cannot index out of bounds.
+	 */
+	if (buf_ctx < 0 || buf_ctx >= NUM_REMOTE_DEV)
+		return -EINVAL;
+	ch = &bridge_info[buf_ctx];
+	if (ch->dev_ops && ch->dev_ops->fwd_complete)
+		ch->dev_ops->fwd_complete(ch->ctxt, buf, len, 0);
+	return 0;
+}
+
+/* Mux callbacks used when the bridge data channels register with the mux. */
+static struct diag_mux_ops diagfwd_bridge_mux_ops = {
+	.open = diagfwd_bridge_mux_connect,
+	.close = diagfwd_bridge_mux_disconnect,
+	.read_done = diagfwd_bridge_mux_read_done,
+	.write_done = diagfwd_bridge_mux_write_done
+};
+
+/* Worker: hand the copied DCI packet to the DCI core, then ack the
+ * device's original buffer via fwd_complete so it can queue a new read. */
+static void bridge_dci_read_work_fn(struct work_struct *work)
+{
+	struct diagfwd_bridge_info *ch = container_of(work,
+					struct diagfwd_bridge_info,
+					dci_read_work);
+	/* NOTE(review): container_of() on a queued work item cannot yield
+	 * NULL here, so this check is effectively dead code. */
+	if (!ch)
+		return;
+	diag_process_remote_dci_read_data(ch->id, ch->dci_read_buf,
+					  ch->dci_read_len);
+	if (ch->dev_ops && ch->dev_ops->fwd_complete) {
+		ch->dev_ops->fwd_complete(ch->ctxt, ch->dci_read_ptr,
+					  ch->dci_read_len, 0);
+	}
+}
+
+/*
+ * diagfwd_bridge_register() - register a remote device's callbacks with
+ * the bridge layer.
+ * @id:   bridge index (DIAGFWD_MDM, DIAGFWD_SMUX or DIAGFWD_MDM_DCI)
+ * @ctxt: opaque device context passed back through @ops
+ * @ops:  device operations; must be non-NULL
+ *
+ * Data channels are registered with the mux; DCI channels additionally
+ * get a copy buffer and a dedicated workqueue. Returns 0 on success or
+ * a negative errno.
+ */
+int diagfwd_bridge_register(int id, int ctxt, struct diag_remote_dev_ops *ops)
+{
+	int err = 0;
+	struct diagfwd_bridge_info *ch = NULL;
+	char wq_name[DIAG_BRIDGE_NAME_SZ + 10];
+
+	if (!ops) {
+		pr_err("diag: Invalid pointers ops: %pK ctxt: %d\n", ops, ctxt);
+		return -EINVAL;
+	}
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+		return -EINVAL;
+
+	ch = &bridge_info[id];
+	ch->ctxt = ctxt;
+	ch->dev_ops = ops;
+	switch (ch->type) {
+	case DIAG_DATA_TYPE:
+		err = diag_mux_register(BRIDGE_TO_MUX(id), id,
+					&diagfwd_bridge_mux_ops);
+		if (err)
+			return err;
+		break;
+	case DIAG_DCI_TYPE:
+		ch->dci_read_buf = kzalloc(DIAG_MDM_BUF_SIZE, GFP_KERNEL);
+		if (!ch->dci_read_buf)
+			return -ENOMEM;
+		ch->dci_read_len = 0;
+		/*
+		 * Size both bounded string operations by the destination
+		 * buffer. The previous code passed 10 to strlcpy and
+		 * sizeof(ch->name) to strlcat, i.e. the size of the wrong
+		 * buffer, truncating the workqueue name.
+		 */
+		strlcpy(wq_name, "diag_dci_", sizeof(wq_name));
+		strlcat(wq_name, ch->name, sizeof(wq_name));
+		INIT_WORK(&(ch->dci_read_work), bridge_dci_read_work_fn);
+		ch->dci_wq = create_singlethread_workqueue(wq_name);
+		if (!ch->dci_wq) {
+			kfree(ch->dci_read_buf);
+			return -ENOMEM;
+		}
+		break;
+	default:
+		pr_err("diag: Invalid channel type %d in %s\n", ch->type,
+		       __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Called by a remote device once its transport is up. Marks the channel
+ * initialized; data channels kick off the first mux read, DCI channels
+ * send the DCI handshake.
+ */
+int diag_remote_dev_open(int id)
+{
+	struct diagfwd_bridge_info *ch;
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+		return -EINVAL;
+	ch = &bridge_info[id];
+	ch->inited = 1;
+	switch (ch->type) {
+	case DIAG_DATA_TYPE:
+		return diag_mux_queue_read(BRIDGE_TO_MUX(id));
+	case DIAG_DCI_TYPE:
+		return diag_dci_send_handshake_pkt(ch->id);
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Intentionally empty. NOTE(review): inited is never cleared on close,
+ * so diag_get_remote_device_mask() keeps reporting the device after its
+ * transport goes down — confirm this is the intended behavior.
+ */
+void diag_remote_dev_close(int id)
+{
+}
+
+/*
+ * diag_remote_dev_read_done() - called by a remote device when a read
+ * from the device completes.
+ *
+ * Data channels: forward the buffer to the mux and immediately queue
+ * another device read. DCI channels: copy into the channel buffer and
+ * process on the DCI workqueue; the next read is queued only after
+ * processing completes (fwd_complete in bridge_dci_read_work_fn).
+ */
+int diag_remote_dev_read_done(int id, unsigned char *buf, int len)
+{
+	int err = 0;
+	struct diagfwd_bridge_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+		return -EINVAL;
+	ch = &bridge_info[id];
+	if (ch->type == DIAG_DATA_TYPE) {
+		err = diag_mux_write(BRIDGE_TO_MUX(id), buf, len, id);
+		if (ch->dev_ops && ch->dev_ops->queue_read)
+			ch->dev_ops->queue_read(ch->ctxt);
+		return err;
+	}
+	/*
+	 * For DCI channels copy to the internal buffer. Don't queue any
+	 * further reads. A read should be queued once we are done processing
+	 * the current packet
+	 */
+	if (len <= 0 || len > DIAG_MDM_BUF_SIZE) {
+		pr_err_ratelimited("diag: Invalid len %d in %s, ch: %s\n",
+				   len, __func__, ch->name);
+		return -EINVAL;
+	}
+	/* Remember the device's buffer so the worker can ack it later. */
+	ch->dci_read_ptr = buf;
+	memcpy(ch->dci_read_buf, buf, len);
+	ch->dci_read_len = len;
+	queue_work(ch->dci_wq, &ch->dci_read_work);
+	return 0;
+}
+
+/*
+ * diag_remote_dev_write_done() - called by a remote device when a write
+ * to the device completes. Releases apps-side buffers that were in
+ * flight and queues the next mux read (data) or forwards the ack to the
+ * DCI layer.
+ */
+int diag_remote_dev_write_done(int id, unsigned char *buf, int len, int ctxt)
+{
+	int err = 0;
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+		return -EINVAL;
+
+	if (bridge_info[id].type == DIAG_DATA_TYPE) {
+		if (buf == driver->hdlc_encode_buf)
+			driver->hdlc_encode_buf_len = 0;
+		/*
+		 * For remote processor, the token offset is stripped from the
+		 * buffer. Account for the token offset while checking against
+		 * the original buffer
+		 */
+		if (buf == (driver->user_space_data_buf + sizeof(int)))
+			driver->user_space_data_busy = 0;
+		err = diag_mux_queue_read(BRIDGE_TO_MUX(id));
+	} else {
+		err = diag_dci_write_done_bridge(id, buf, len);
+	}
+	return err;
+}
+
+/*
+ * diagfwd_bridge_init() - bring up the remote-device transports: MHI or
+ * HSIC for the MDM link (diag_mdm_init), plus SMUX when the USB diag
+ * bridge is enabled. Returns 0 on success or the transport's error.
+ */
+int diagfwd_bridge_init(void)
+{
+	int err = 0;
+
+	err = diag_mdm_init();
+	if (err)
+		goto fail;
+#ifdef USB_QCOM_DIAG_BRIDGE
+	err = diag_smux_init();
+	if (err)
+		goto fail;
+#endif
+	return 0;
+
+fail:
+	pr_err("diag: Unable to initialize diagfwd bridge, err: %d\n", err);
+	return err;
+}
+
+/* Tear down the bridge transports set up by diagfwd_bridge_init(). */
+void diagfwd_bridge_exit(void)
+{
+	/*
+	 * NOTE(review): only HSIC/SMUX are torn down here; no MHI cleanup
+	 * is done even when diag_mdm_init maps to diag_mhi_init — confirm
+	 * MHI teardown happens elsewhere.
+	 */
+	#ifdef USB_QCOM_DIAG_BRIDGE
+	diag_hsic_exit();
+	diag_smux_exit();
+	#endif
+}
+
+/* Close the remote channel @id through its registered close op; returns
+ * 0 when no close callback is registered. */
+int diagfwd_bridge_close(int id)
+{
+	struct diagfwd_bridge_info *ch;
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+		return -EINVAL;
+	ch = &bridge_info[id];
+	if (!ch->dev_ops || !ch->dev_ops->close)
+		return 0;
+	return ch->dev_ops->close(ch->ctxt);
+}
+
+/* Write @len bytes at @buf to the remote channel @id via its write op;
+ * returns 0 when no write callback is registered. */
+int diagfwd_bridge_write(int id, unsigned char *buf, int len)
+{
+	struct diagfwd_bridge_info *ch;
+
+	if (id < 0 || id >= NUM_REMOTE_DEV)
+		return -EINVAL;
+	ch = &bridge_info[id];
+	if (!ch->dev_ops || !ch->dev_ops->write)
+		return 0;
+	return ch->dev_ops->write(ch->ctxt, buf, len, 0);
+}
+
+/* Build a bitmask with one bit per initialized remote DATA channel
+ * (DCI channels are excluded). */
+uint16_t diag_get_remote_device_mask(void)
+{
+	uint16_t mask = 0;
+	int dev;
+
+	for (dev = 0; dev < NUM_REMOTE_DEV; dev++) {
+		if (!bridge_info[dev].inited)
+			continue;
+		if (bridge_info[dev].type != DIAG_DATA_TYPE)
+			continue;
+		mask |= (uint16_t)(1 << dev);
+	}
+
+	return mask;
+}
+
diff --git a/drivers/char/diag/diagfwd_bridge.h b/drivers/char/diag/diagfwd_bridge.h
new file mode 100644
index 0000000..62d6b08
--- /dev/null
+++ b/drivers/char/diag/diagfwd_bridge.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_BRIDGE_H
+#define DIAGFWD_BRIDGE_H
+
+/*
+ * Add Data channels at the top half and the DCI channels at the
+ * bottom half of this list.
+ */
+#define DIAGFWD_MDM		0
+#define DIAGFWD_SMUX		1
+#define NUM_REMOTE_DATA_DEV	2
+#define DIAGFWD_MDM_DCI		NUM_REMOTE_DATA_DEV
+#define NUM_REMOTE_DCI_DEV	(DIAGFWD_MDM_DCI - NUM_REMOTE_DATA_DEV + 1)
+#define NUM_REMOTE_DEV		(NUM_REMOTE_DATA_DEV + NUM_REMOTE_DCI_DEV)
+
+#define DIAG_BRIDGE_NAME_SZ	24
+#define DIAG_BRIDGE_GET_NAME(x)	(bridge_info[x].name)
+
+/* Callbacks a remote device (MHI/HSIC/SMUX) implements for the bridge. */
+struct diag_remote_dev_ops {
+	int (*open)(int id);
+	int (*close)(int id);
+	int (*queue_read)(int id);
+	int (*write)(int id, unsigned char *buf, int len, int ctxt);
+	int (*fwd_complete)(int id, unsigned char *buf, int len, int ctxt);
+};
+
+/* Per-channel bridge state; one entry per remote device in bridge_info[]. */
+struct diagfwd_bridge_info {
+	int id;
+	int type;
+	int inited;
+	int ctxt;
+	char name[DIAG_BRIDGE_NAME_SZ];
+	struct diag_remote_dev_ops *dev_ops;
+	/* DCI related variables. These would be NULL for data channels */
+	void *dci_read_ptr;
+	unsigned char *dci_read_buf;
+	int dci_read_len;
+	struct workqueue_struct *dci_wq;
+	struct work_struct dci_read_work;
+};
+
+extern struct diagfwd_bridge_info bridge_info[NUM_REMOTE_DEV];
+int diagfwd_bridge_init(void);
+void diagfwd_bridge_exit(void);
+int diagfwd_bridge_close(int id);
+int diagfwd_bridge_write(int id, unsigned char *buf, int len);
+uint16_t diag_get_remote_device_mask(void);
+
+/* The following functions must be called by Diag remote devices only. */
+int diagfwd_bridge_register(int id, int ctxt, struct diag_remote_dev_ops *ops);
+int diag_remote_dev_open(int id);
+void diag_remote_dev_close(int id);
+int diag_remote_dev_read_done(int id, unsigned char *buf, int len);
+int diag_remote_dev_write_done(int id, unsigned char *buf, int len, int ctxt);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
new file mode 100644
index 0000000..4cbd9da
--- /dev/null
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -0,0 +1,1321 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/diagchar.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_cntl.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_bridge.h"
+#include "diag_dci.h"
+#include "diagmem.h"
+#include "diag_masks.h"
+#include "diag_ipc_logging.h"
+#include "diag_mux.h"
+
+/*
+ * Test bit @x of the peripheral's feature mask. Relies on the local
+ * variables 'feature_mask' (current mask byte) and 'i' (byte index) in
+ * process_incoming_feature_mask() — only usable in that scope.
+ */
+#define FEATURE_SUPPORTED(x)	((feature_mask << (i * 8)) & (1 << x))
+
+/* tracks which peripheral is undergoing SSR */
+static uint16_t reg_dirty;
+static void diag_notify_md_client(uint8_t peripheral, int data);
+
+/* Worker: send mask updates to every peripheral whose bit is pending in
+ * driver->mask_update; each bit is cleared under cntl_lock before the
+ * (potentially sleeping) update is sent. */
+static void diag_mask_update_work_fn(struct work_struct *work)
+{
+	uint8_t peripheral;
+
+	/*
+	 * Valid peripheral ids are 0..NUM_PERIPHERALS-1 (mask bits are set
+	 * with PERIPHERAL_MASK(p) for p < NUM_PERIPHERALS). Iterating with
+	 * "<=" would pass the out-of-range id NUM_PERIPHERALS to
+	 * diag_send_updates_peripheral() if that bit were ever set.
+	 */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (!(driver->mask_update & PERIPHERAL_MASK(peripheral)))
+			continue;
+		mutex_lock(&driver->cntl_lock);
+		driver->mask_update ^= PERIPHERAL_MASK(peripheral);
+		mutex_unlock(&driver->cntl_lock);
+		diag_send_updates_peripheral(peripheral);
+	}
+}
+
+/* Called when a peripheral's control channel opens: schedule a mask
+ * resync for that peripheral and notify any memory-device client. */
+void diag_cntl_channel_open(struct diagfwd_info *p_info)
+{
+	if (!p_info)
+		return;
+	/* NOTE(review): p_info->peripheral is not range-checked here,
+	 * unlike diag_cntl_channel_close() — confirm callers always pass
+	 * a valid id. */
+	driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
+	queue_work(driver->cntl_wq, &driver->mask_update_work);
+	diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
+}
+
+/* Called when a peripheral's control channel goes down (e.g. SSR):
+ * reset its feature/STM state, drop its command registrations, and
+ * notify any memory-device client. */
+void diag_cntl_channel_close(struct diagfwd_info *p_info)
+{
+	uint8_t peripheral;
+
+	if (!p_info)
+		return;
+
+	peripheral = p_info->peripheral;
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	driver->feature[peripheral].sent_feature_mask = 0;
+	driver->feature[peripheral].rcvd_feature_mask = 0;
+	/* Let in-flight mask/STM work finish before tearing down state. */
+	flush_workqueue(driver->cntl_wq);
+	/* Mark the peripheral dirty so stray registrations arriving while
+	 * the table is cleaned are dropped (see
+	 * diag_cntl_process_read_data). */
+	reg_dirty |= PERIPHERAL_MASK(peripheral);
+	diag_cmd_remove_reg_by_proc(peripheral);
+	driver->feature[peripheral].stm_support = DISABLE_STM;
+	driver->feature[peripheral].log_on_demand = 0;
+	driver->stm_state[peripheral] = DISABLE_STM;
+	driver->stm_state_requested[peripheral] = DISABLE_STM;
+	reg_dirty ^= PERIPHERAL_MASK(peripheral);
+	diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED);
+}
+
+/* Worker: push the requested STM state to each peripheral whose bit is
+ * set in driver->stm_peripheral. The pending mask is snapshotted and
+ * cleared under cntl_lock so concurrent requests are not lost. */
+static void diag_stm_update_work_fn(struct work_struct *work)
+{
+	uint8_t i;
+	uint16_t peripheral_mask = 0;
+	int err = 0;
+
+	mutex_lock(&driver->cntl_lock);
+	peripheral_mask = driver->stm_peripheral;
+	driver->stm_peripheral = 0;
+	mutex_unlock(&driver->cntl_lock);
+
+	if (peripheral_mask == 0)
+		return;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!driver->feature[i].stm_support)
+			continue;
+		if (peripheral_mask & PERIPHERAL_MASK(i)) {
+			err = diag_send_stm_state(i,
+				(uint8_t)(driver->stm_state_requested[i]));
+			/* Mirror the requested state only on success. */
+			if (!err) {
+				driver->stm_state[i] =
+					driver->stm_state_requested[i];
+			}
+		}
+	}
+}
+
+/*
+ * Signal the memory-device (md) client session attached to @peripheral
+ * that the peripheral's status changed; @data is DIAG_STATUS_OPEN or
+ * DIAG_STATUS_CLOSED. No-op unless logging in memory-device mode.
+ */
+void diag_notify_md_client(uint8_t peripheral, int data)
+{
+	int stat = 0;
+	struct siginfo info;
+
+	/*
+	 * md_session_map is indexed by peripheral below; reject
+	 * peripheral == NUM_PERIPHERALS too ("> " was off by one and
+	 * inconsistent with every other ">=" guard in this file).
+	 */
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
+		return;
+
+	mutex_lock(&driver->md_session_lock);
+	memset(&info, 0, sizeof(struct siginfo));
+	info.si_code = SI_QUEUE;
+	info.si_int = (PERIPHERAL_MASK(peripheral) | data);
+	info.si_signo = SIGCONT;
+	if (driver->md_session_map[peripheral] &&
+		driver->md_session_map[peripheral]->task) {
+		/* Only signal when the stored pid still matches the task's
+		 * thread group — guards against a stale/corrupted entry. */
+		if (driver->md_session_map[peripheral]->pid ==
+			driver->md_session_map[peripheral]->task->tgid) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"md_session %d pid = %d, md_session %d task tgid = %d\n",
+				peripheral,
+				driver->md_session_map[peripheral]->pid,
+				peripheral,
+				driver->md_session_map[peripheral]->task->tgid);
+			stat = send_sig_info(info.si_signo, &info,
+				driver->md_session_map[peripheral]->task);
+			if (stat)
+				pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
+					info.si_int, stat);
+		} else
+			pr_err("diag: md_session_map[%d] data is corrupted, signal data: 0x%x, stat: %d\n",
+				peripheral, info.si_int, stat);
+	}
+	mutex_unlock(&driver->md_session_lock);
+}
+
+/*
+ * Handle a PD (protection domain) status control packet: translate the
+ * peripheral's status field into an open/closed notification for the
+ * memory-device client. (The previously read pd_id was never used; the
+ * set-but-unused local has been removed.)
+ */
+static void process_pd_status(uint8_t *buf, uint32_t len,
+			      uint8_t peripheral)
+{
+	struct diag_ctrl_msg_pd_status *pd_msg = NULL;
+	int status = DIAG_STATUS_CLOSED;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg))
+		return;
+
+	pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
+	status = (pd_msg->status == 0) ? DIAG_STATUS_OPEN : DIAG_STATUS_CLOSED;
+	diag_notify_md_client(peripheral, status);
+}
+
+/* Record STM support for @peripheral and schedule the STM update worker
+ * to push the requested state to it. */
+static void enable_stm_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	mutex_lock(&driver->cntl_lock);
+	driver->feature[peripheral].stm_support = ENABLE_STM;
+	driver->stm_peripheral |= PERIPHERAL_MASK(peripheral);
+	mutex_unlock(&driver->cntl_lock);
+
+	queue_work(driver->cntl_wq, &(driver->stm_update_work));
+}
+
+/* Mirror the apps-side socket-transport capability into the
+ * peripheral's feature record. */
+static void enable_socket_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	driver->feature[peripheral].sockets_enabled =
+			driver->supports_sockets ? 1 : 0;
+}
+
+/* Mirror the apps-side HDLC-encoding capability into the peripheral's
+ * feature record. */
+static void process_hdlc_encoding_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	driver->feature[peripheral].encode_hdlc =
+			driver->supports_apps_hdlc_encoding ?
+			ENABLE_APPS_HDLC_ENCODING :
+			DISABLE_APPS_HDLC_ENCODING;
+}
+
+/* Parse a command-deregistration control packet from @peripheral and
+ * remove each advertised command-code range from the registration
+ * table. @len is the payload size (header excluded). */
+static void process_command_deregistration(uint8_t *buf, uint32_t len,
+					   uint8_t peripheral)
+{
+	uint8_t *ptr = buf;
+	int i;
+	int header_len = sizeof(struct diag_ctrl_cmd_dereg);
+	int read_len = 0;
+	struct diag_ctrl_cmd_dereg *dereg = NULL;
+	struct cmd_code_range *range = NULL;
+	struct diag_cmd_reg_entry_t del_entry;
+
+	/*
+	 * Perform Basic sanity. The len field is the size of the data payload.
+	 * This doesn't include the header size.
+	 */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	dereg = (struct diag_ctrl_cmd_dereg *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	if (dereg->count_entries == 0) {
+		pr_debug("diag: In %s, received reg tbl with no entries\n",
+			 __func__);
+		return;
+	}
+
+	for (i = 0; i < dereg->count_entries && read_len < len; i++) {
+		range = (struct cmd_code_range *)ptr;
+		/* NOTE(review): dereg ranges advance by sizeof(range) minus
+		 * one uint32_t, unlike registration — presumably the wire
+		 * format omits a field here; confirm against the peripheral
+		 * control protocol. */
+		ptr += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+		read_len += sizeof(struct cmd_code_range) - sizeof(uint32_t);
+		del_entry.cmd_code = dereg->cmd_code;
+		del_entry.subsys_id = dereg->subsysid;
+		del_entry.cmd_code_hi = range->cmd_code_hi;
+		del_entry.cmd_code_lo = range->cmd_code_lo;
+		diag_cmd_remove_reg(&del_entry, peripheral);
+	}
+
+	if (i != dereg->count_entries) {
+		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+		       __func__, read_len, len, dereg->count_entries);
+	}
+}
+/* Parse a command-registration control packet from @peripheral and add
+ * each advertised command-code range to the registration table. @len is
+ * the payload size (header excluded). */
+static void process_command_registration(uint8_t *buf, uint32_t len,
+					 uint8_t peripheral)
+{
+	uint8_t *ptr = buf;
+	int i;
+	int header_len = sizeof(struct diag_ctrl_cmd_reg);
+	int read_len = 0;
+	struct diag_ctrl_cmd_reg *reg = NULL;
+	struct cmd_code_range *range = NULL;
+	struct diag_cmd_reg_entry_t new_entry;
+
+	/*
+	 * Perform Basic sanity. The len field is the size of the data payload.
+	 * This doesn't include the header size.
+	 */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	reg = (struct diag_ctrl_cmd_reg *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	if (reg->count_entries == 0) {
+		pr_debug("diag: In %s, received reg tbl with no entries\n",
+			 __func__);
+		return;
+	}
+
+	for (i = 0; i < reg->count_entries && read_len < len; i++) {
+		range = (struct cmd_code_range *)ptr;
+		ptr += sizeof(struct cmd_code_range);
+		read_len += sizeof(struct cmd_code_range);
+		new_entry.cmd_code = reg->cmd_code;
+		new_entry.subsys_id = reg->subsysid;
+		new_entry.cmd_code_hi = range->cmd_code_hi;
+		new_entry.cmd_code_lo = range->cmd_code_lo;
+		diag_cmd_add_reg(&new_entry, peripheral, INVALID_PID);
+	}
+
+	if (i != reg->count_entries) {
+		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
+		       __func__, read_len, len, reg->count_entries);
+	}
+}
+
+/* Worker: for each peripheral flagged in driver->close_transport, close
+ * the transport it is no longer using (glink vs socket). */
+static void diag_close_transport_work_fn(struct work_struct *work)
+{
+	uint8_t transport;
+	uint8_t peripheral;
+
+	mutex_lock(&driver->cntl_lock);
+	/*
+	 * close_transport bits are only set for ids < NUM_PERIPHERALS
+	 * (process_socket_feature), and driver->feature[] has exactly
+	 * NUM_PERIPHERALS entries, so iterate with "<" — the previous
+	 * "<=" was off by one.
+	 */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		if (!(driver->close_transport & PERIPHERAL_MASK(peripheral)))
+			continue;
+		driver->close_transport ^= PERIPHERAL_MASK(peripheral);
+		transport = driver->feature[peripheral].sockets_enabled ?
+					TRANSPORT_GLINK : TRANSPORT_SOCKET;
+		diagfwd_close_transport(transport, peripheral);
+	}
+	mutex_unlock(&driver->cntl_lock);
+}
+
+/* Flag @peripheral so the cntl workqueue closes its now-unused
+ * transport asynchronously (see diag_close_transport_work_fn). */
+static void process_socket_feature(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	mutex_lock(&driver->cntl_lock);
+	driver->close_transport |= PERIPHERAL_MASK(peripheral);
+	queue_work(driver->cntl_wq, &driver->close_transport_work);
+	mutex_unlock(&driver->cntl_lock);
+}
+
+/* Log-on-demand is registered only by the Modem; mirror its feature
+ * flag into the global support flag. */
+static void process_log_on_demand_feature(uint8_t peripheral)
+{
+	if (peripheral != PERIPHERAL_MODEM)
+		return;
+
+	driver->log_on_demand_support =
+		driver->feature[PERIPHERAL_MODEM].log_on_demand ? 1 : 0;
+}
+
+/* Parse a feature-mask control packet from @peripheral: store each mask
+ * byte and enable the corresponding apps-side handling for every
+ * supported feature bit. */
+static void process_incoming_feature_mask(uint8_t *buf, uint32_t len,
+					  uint8_t peripheral)
+{
+	int i;
+	int header_len = sizeof(struct diag_ctrl_feature_mask);
+	int read_len = 0;
+	struct diag_ctrl_feature_mask *header = NULL;
+	uint32_t feature_mask_len = 0;
+	uint32_t feature_mask = 0;
+	uint8_t *ptr = buf;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	header = (struct diag_ctrl_feature_mask *)ptr;
+	ptr += header_len;
+	feature_mask_len = header->feature_mask_len;
+
+	if (feature_mask_len == 0) {
+		pr_debug("diag: In %s, received invalid feature mask from peripheral %d\n",
+			 __func__, peripheral);
+		return;
+	}
+
+	if (feature_mask_len > FEATURE_MASK_LEN) {
+		pr_alert("diag: Receiving feature mask length more than Apps support\n");
+		feature_mask_len = FEATURE_MASK_LEN;
+	}
+
+	driver->feature[peripheral].rcvd_feature_mask = 1;
+
+	/* One mask byte per iteration; FEATURE_SUPPORTED() below uses the
+	 * locals 'feature_mask' and 'i' to test absolute bit positions. */
+	for (i = 0; i < feature_mask_len && read_len < len; i++) {
+		feature_mask = *(uint8_t *)ptr;
+		driver->feature[peripheral].feature_mask[i] = feature_mask;
+		ptr += sizeof(uint8_t);
+		read_len += sizeof(uint8_t);
+
+		if (FEATURE_SUPPORTED(F_DIAG_LOG_ON_DEMAND_APPS))
+			driver->feature[peripheral].log_on_demand = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_REQ_RSP_SUPPORT))
+			driver->feature[peripheral].separate_cmd_rsp = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_APPS_HDLC_ENCODE))
+			process_hdlc_encoding_feature(peripheral);
+		if (FEATURE_SUPPORTED(F_DIAG_STM))
+			enable_stm_feature(peripheral);
+		if (FEATURE_SUPPORTED(F_DIAG_MASK_CENTRALIZATION))
+			driver->feature[peripheral].mask_centralization = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_PERIPHERAL_BUFFERING))
+			driver->feature[peripheral].peripheral_buffering = 1;
+		if (FEATURE_SUPPORTED(F_DIAG_SOCKETS_ENABLED))
+			enable_socket_feature(peripheral);
+	}
+
+	process_socket_feature(peripheral);
+	process_log_on_demand_feature(peripheral);
+}
+
+/* Handle a last-event-id report from @peripheral: grow the apps event
+ * mask (krealloc) if the peripheral reports more events than the mask
+ * currently covers, and track the global maximum event id. */
+static void process_last_event_report(uint8_t *buf, uint32_t len,
+				      uint8_t peripheral)
+{
+	struct diag_ctrl_last_event_report *header = NULL;
+	uint8_t *ptr = buf;
+	uint8_t *temp = NULL;
+	uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
+	uint16_t event_size = 0;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len)
+		return;
+
+	mutex_lock(&event_mask.lock);
+	header = (struct diag_ctrl_last_event_report *)ptr;
+	/* One bit per event id, rounded up to whole bytes. */
+	event_size = ((header->event_last_id / 8) + 1);
+	if (event_size >= driver->event_mask_size) {
+		pr_debug("diag: In %s, receiving event mask size more that Apps can handle\n",
+			 __func__);
+		temp = krealloc(driver->event_mask->ptr, event_size,
+				GFP_KERNEL);
+		if (!temp) {
+			pr_err("diag: In %s, unable to reallocate event mask to support events from %d\n",
+			       __func__, peripheral);
+			goto err;
+		}
+		driver->event_mask->ptr = temp;
+		driver->event_mask_size = event_size;
+	}
+
+	driver->num_event_id[peripheral] = header->event_last_id;
+	if (header->event_last_id > driver->last_event_id)
+		driver->last_event_id = header->event_last_id;
+err:
+	mutex_unlock(&event_mask.lock);
+}
+
+/* Parse a log-range report from @peripheral and record the number of
+ * items per equipment id in the log mask table. */
+static void process_log_range_report(uint8_t *buf, uint32_t len,
+				     uint8_t peripheral)
+{
+	int i;
+	int read_len = 0;
+	int header_len = sizeof(struct diag_ctrl_log_range_report);
+	uint8_t *ptr = buf;
+	struct diag_ctrl_log_range_report *header = NULL;
+	struct diag_ctrl_log_range *log_range = NULL;
+	struct diag_log_mask_t *mask_ptr = NULL;
+
+	/*
+	 * len is unsigned, so the original "len < 0" check was always
+	 * false; reject empty payloads instead.
+	 */
+	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
+		return;
+
+	header = (struct diag_ctrl_log_range_report *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	driver->num_equip_id[peripheral] = header->num_ranges;
+	for (i = 0; i < header->num_ranges && read_len < len; i++) {
+		log_range = (struct diag_ctrl_log_range *)ptr;
+		ptr += sizeof(struct diag_ctrl_log_range);
+		read_len += sizeof(struct diag_ctrl_log_range);
+
+		if (log_range->equip_id >= MAX_EQUIP_ID) {
+			pr_err("diag: receiving log equip id %d more than supported equip id: %d from peripheral: %d\n",
+			       log_range->equip_id, MAX_EQUIP_ID, peripheral);
+			continue;
+		}
+		mask_ptr = (struct diag_log_mask_t *)log_mask.ptr;
+		mask_ptr = &mask_ptr[log_range->equip_id];
+
+		mutex_lock(&(mask_ptr->lock));
+		mask_ptr->num_items = log_range->num_items;
+		mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
+		mutex_unlock(&(mask_ptr->lock));
+	}
+}
+
+/* Extend an existing msg mask table entry so it covers @range; the
+ * entry's ssid_first is assumed to already match the range. Returns 0,
+ * -EIO on NULL args, or -EINVAL on an inverted range.
+ * NOTE(review): callers test for -ENOMEM, which this function never
+ * returns — confirm the intended error contract. */
+static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
+				     struct diag_ssid_range_t *range)
+{
+	uint32_t temp_range;
+
+	if (!mask || !range)
+		return -EIO;
+	if (range->ssid_last < range->ssid_first) {
+		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+		       __func__, range->ssid_first, range->ssid_last);
+		return -EINVAL;
+	}
+	/* Only grow the entry; never shrink an existing range. */
+	if (range->ssid_last >= mask->ssid_last) {
+		temp_range = range->ssid_last - mask->ssid_first + 1;
+		mask->ssid_last = range->ssid_last;
+		mask->range = temp_range;
+	}
+
+	return 0;
+}
+
+/* Parse an SSID range report from @peripheral: widen existing msg mask
+ * table entries that match, and append new entries (growing the table
+ * with krealloc) for unknown SSID ranges. */
+static void process_ssid_range_report(uint8_t *buf, uint32_t len,
+				      uint8_t peripheral)
+{
+	int i;
+	int j;
+	int read_len = 0;
+	int found = 0;
+	int new_size = 0;
+	int err = 0;
+	struct diag_ctrl_ssid_range_report *header = NULL;
+	struct diag_ssid_range_t *ssid_range = NULL;
+	int header_len = sizeof(struct diag_ctrl_ssid_range_report);
+	struct diag_msg_mask_t *mask_ptr = NULL;
+	uint8_t *ptr = buf;
+	uint8_t *temp = NULL;
+	uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len)
+		return;
+
+	header = (struct diag_ctrl_ssid_range_report *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	driver->max_ssid_count[peripheral] = header->count;
+	for (i = 0; i < header->count && read_len < len; i++) {
+		ssid_range = (struct diag_ssid_range_t *)ptr;
+		ptr += sizeof(struct diag_ssid_range_t);
+		read_len += sizeof(struct diag_ssid_range_t);
+		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
+		found = 0;
+		for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
+			if (mask_ptr->ssid_first != ssid_range->ssid_first)
+				continue;
+			mutex_lock(&mask_ptr->lock);
+			err = update_msg_mask_tbl_entry(mask_ptr, ssid_range);
+			mutex_unlock(&mask_ptr->lock);
+			if (err == -ENOMEM) {
+				pr_err("diag: In %s, unable to increase the msg mask table range\n",
+				       __func__);
+			}
+			found = 1;
+			break;
+		}
+
+		if (found)
+			continue;
+
+		new_size = (driver->msg_mask_tbl_count + 1) *
+			   sizeof(struct diag_msg_mask_t);
+		temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL);
+		if (!temp) {
+			pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n",
+			       __func__, ssid_range->ssid_first,
+			       ssid_range->ssid_last);
+			continue;
+		}
+		msg_mask.ptr = temp;
+		/*
+		 * krealloc() may have moved the table; mask_ptr still
+		 * points one past the last entry of the OLD (possibly
+		 * freed) allocation. Repoint it at the new entry's slot in
+		 * the reallocated table before writing through it.
+		 */
+		mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr +
+			   driver->msg_mask_tbl_count;
+		err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range);
+		if (err) {
+			pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n",
+			       __func__, ssid_range->ssid_first,
+			       ssid_range->ssid_last, err);
+			continue;
+		}
+		driver->msg_mask_tbl_count += 1;
+	}
+}
+
+/* OR the build-time mask bits at @buf into the build-time msg mask
+ * entry that covers @range, creating (and growing the table for) a new
+ * entry when none matches. */
+static void diag_build_time_mask_update(uint8_t *buf,
+					struct diag_ssid_range_t *range)
+{
+	int i;
+	int j;
+	int err = 0;
+	int found = 0;
+	int new_size = 0;
+	uint8_t *temp = NULL;
+	uint32_t *mask_ptr = (uint32_t *)buf;
+	uint32_t *dest_ptr = NULL;
+	struct diag_msg_mask_t *build_mask = NULL;
+
+	if (!range || !buf)
+		return;
+
+	if (range->ssid_last < range->ssid_first) {
+		pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
+		       __func__, range->ssid_first, range->ssid_last);
+		return;
+	}
+
+	build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
+
+	for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
+		if (build_mask->ssid_first != range->ssid_first)
+			continue;
+		found = 1;
+		mutex_lock(&build_mask->lock);
+		err = update_msg_mask_tbl_entry(build_mask, range);
+		if (err == -ENOMEM) {
+			pr_err("diag: In %s, unable to increase the msg build mask table range\n",
+			       __func__);
+		}
+		/* OR the reported build-time bits into the stored mask. */
+		dest_ptr = build_mask->ptr;
+		for (j = 0; j < build_mask->range; j++, mask_ptr++, dest_ptr++)
+			*(uint32_t *)dest_ptr |= *mask_ptr;
+		mutex_unlock(&build_mask->lock);
+		break;
+	}
+
+	if (found)
+		goto end;
+	new_size = (driver->msg_mask_tbl_count + 1) *
+		   sizeof(struct diag_msg_mask_t);
+	temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
+	if (!temp) {
+		pr_err("diag: In %s, unable to create a new entry for build time mask\n",
+		       __func__);
+		goto end;
+	}
+	driver->build_time_mask->ptr = temp;
+	/*
+	 * krealloc() may have moved the table; build_mask still points one
+	 * past the last entry of the OLD (possibly freed) allocation.
+	 * Repoint it at the new entry's slot in the reallocated table.
+	 * (Also removed the set-but-unused local 'num_items'.)
+	 */
+	build_mask = (struct diag_msg_mask_t *)driver->build_time_mask->ptr +
+		     driver->msg_mask_tbl_count;
+	err = diag_create_msg_mask_table_entry(build_mask, range);
+	if (err) {
+		pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n",
+		       __func__, err);
+		goto end;
+	}
+	driver->msg_mask_tbl_count += 1;
+end:
+	return;
+}
+
+/*
+ * Handle a DIAG_CTRL_MSG_BUILD_MASK_REPORT packet: the payload is a
+ * sequence of { ssid range header, per-SSID uint32 masks } records which
+ * are merged into the build-time mask table.
+ */
+static void process_build_mask_report(uint8_t *buf, uint32_t len,
+				      uint8_t peripheral)
+{
+	int i;
+	int read_len = 0;
+	int num_items = 0;
+	int header_len = sizeof(struct diag_ctrl_build_mask_report);
+	uint8_t *ptr = buf;
+	struct diag_ctrl_build_mask_report *header = NULL;
+	struct diag_ssid_range_t *range = NULL;
+
+	if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len)
+		return;
+
+	header = (struct diag_ctrl_build_mask_report *)ptr;
+	ptr += header_len;
+	/* Don't account for pkt_id and length */
+	read_len += header_len - (2 * sizeof(uint32_t));
+
+	for (i = 0; i < header->count && read_len < len; i++) {
+		/* Make sure the SSID range header itself fits */
+		if (read_len + sizeof(struct diag_ssid_range_t) > len)
+			break;
+		range = (struct diag_ssid_range_t *)ptr;
+		ptr += sizeof(struct diag_ssid_range_t);
+		read_len += sizeof(struct diag_ssid_range_t);
+		num_items = range->ssid_last - range->ssid_first + 1;
+		/* ... and the mask payload that follows it */
+		if (read_len + num_items * sizeof(uint32_t) > len)
+			break;
+		diag_build_time_mask_update(ptr, range);
+		ptr += num_items * sizeof(uint32_t);
+		read_len += num_items * sizeof(uint32_t);
+	}
+}
+
+/*
+ * Entry point for data read from a peripheral's control channel. The
+ * buffer holds back-to-back control packets, each prefixed with a
+ * { pkt_id, len } header; dispatch each one to its handler by pkt_id.
+ */
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+				 int len)
+{
+	uint32_t read_len = 0;
+	uint32_t header_len = sizeof(struct diag_ctrl_pkt_header_t);
+	uint8_t *ptr = buf;
+	struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;
+
+	if (!buf || len <= 0 || !p_info)
+		return;
+
+	/* While this peripheral's registrations are marked dirty, its
+	 * control traffic is dropped entirely.
+	 */
+	if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
+		pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
+		       p_info->peripheral);
+		return;
+	}
+
+	while (read_len + header_len < len) {
+		ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
+		switch (ctrl_pkt->pkt_id) {
+		case DIAG_CTRL_MSG_REG:
+			process_command_registration(ptr, ctrl_pkt->len,
+						     p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_DEREG:
+			process_command_deregistration(ptr, ctrl_pkt->len,
+						       p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_FEATURE:
+			process_incoming_feature_mask(ptr, ctrl_pkt->len,
+						      p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_LAST_EVENT_REPORT:
+			process_last_event_report(ptr, ctrl_pkt->len,
+						  p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_LOG_RANGE_REPORT:
+			process_log_range_report(ptr, ctrl_pkt->len,
+						 p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_SSID_RANGE_REPORT:
+			process_ssid_range_report(ptr, ctrl_pkt->len,
+						  p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_BUILD_MASK_REPORT:
+			process_build_mask_report(ptr, ctrl_pkt->len,
+						  p_info->peripheral);
+			break;
+		case DIAG_CTRL_MSG_PD_STATUS:
+			process_pd_status(ptr, ctrl_pkt->len,
+						p_info->peripheral);
+			break;
+		default:
+			pr_debug("diag: Control packet %d not supported\n",
+				 ctrl_pkt->pkt_id);
+		}
+		/*
+		 * NOTE(review): ctrl_pkt->len comes from the peripheral and
+		 * is not validated against the bytes remaining in 'buf'; a
+		 * corrupt length advances ptr past the buffer. Confirm the
+		 * transport bounds it upstream.
+		 */
+		ptr += header_len + ctrl_pkt->len;
+		read_len += header_len + ctrl_pkt->len;
+	}
+}
+
+/*
+ * Decide the real time mode for processor index 'idx' from the set of
+ * active client processes, their real time votes and the USB state.
+ */
+static int diag_compute_real_time(int idx)
+{
+	/*
+	 * No DCI or Memory Device process is active: Diag stays in real
+	 * time mode irrespective of the USB connection.
+	 */
+	if (driver->proc_active_mask == 0)
+		return MODE_REALTIME;
+
+	/*
+	 * At least one live process votes for real time data, so real time
+	 * mode wins irrespective of the USB connection.
+	 */
+	if (driver->proc_rt_vote_mask[idx] & driver->proc_active_mask)
+		return MODE_REALTIME;
+
+	/*
+	 * USB connected: honour the Memory Device process vote if that mode
+	 * is active, default to real time otherwise.
+	 */
+	if (driver->usb_connected)
+		return (driver->proc_rt_vote_mask[idx] &
+			DIAG_PROC_MEMORY_DEVICE) ? MODE_REALTIME :
+						   MODE_NONREALTIME;
+
+	/* USB disconnected and every active process votes non real time. */
+	return MODE_NONREALTIME;
+}
+
+/*
+ * Serialize a DIAGMODE control packet into dest_buf. The sleep vote
+ * follows the real time flag:
+ *   0 - real-time logging disabled (prevents frequent APPS wake-ups)
+ *   1 - real-time logging enabled
+ */
+static void diag_create_diag_mode_ctrl_pkt(unsigned char *dest_buf,
+					   int real_time)
+{
+	struct diag_ctrl_msg_diagmode diagmode = {
+		.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE,
+		.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN,
+		.version = 1,
+		.sleep_vote = real_time ? 1 : 0,
+		.real_time = real_time,
+		.use_nrt_values = 0,
+		.commit_threshold = 0,
+		.sleep_threshold = 0,
+		.sleep_time = 0,
+		.drain_timer_val = 0,
+		.event_stale_timer_val = 0,
+	};
+
+	if (!dest_buf)
+		return;
+
+	memcpy(dest_buf, &diagmode, sizeof(diagmode));
+}
+
+/*
+ * Record whether client process type 'proc' is active. When the process
+ * goes away, its real time vote is restored to the default (real time)
+ * for the requested processor index, or for all processors on ALL_PROC.
+ */
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index)
+{
+	int i;
+
+	mutex_lock(&driver->real_time_mutex);
+	if (vote) {
+		driver->proc_active_mask |= proc;
+	} else {
+		driver->proc_active_mask &= ~proc;
+		if (index == ALL_PROC) {
+			for (i = 0; i < DIAG_NUM_PROC; i++)
+				driver->proc_rt_vote_mask[i] |= proc;
+		} else {
+			driver->proc_rt_vote_mask[index] |= proc;
+		}
+	}
+	mutex_unlock(&driver->real_time_mutex);
+}
+
+/*
+ * Record process type 'proc''s real time vote for one processor index,
+ * or for every processor when index is ALL_PROC.
+ */
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index)
+{
+	int i;
+
+	/*
+	 * Reject out-of-range indices. A negative index other than the
+	 * ALL_PROC sentinel would index proc_rt_vote_mask[] out of bounds
+	 * below, so guard both ends of the range.
+	 */
+	if (index >= DIAG_NUM_PROC || (index < 0 && index != ALL_PROC)) {
+		pr_err("diag: In %s, invalid index %d\n", __func__, index);
+		return;
+	}
+
+	mutex_lock(&driver->real_time_mutex);
+	if (index == ALL_PROC) {
+		for (i = 0; i < DIAG_NUM_PROC; i++) {
+			if (real_time)
+				driver->proc_rt_vote_mask[i] |= proc;
+			else
+				driver->proc_rt_vote_mask[i] &= ~proc;
+		}
+	} else {
+		if (real_time)
+			driver->proc_rt_vote_mask[index] |= proc;
+		else
+			driver->proc_rt_vote_mask[index] &= ~proc;
+	}
+	mutex_unlock(&driver->real_time_mutex);
+}
+
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+/*
+ * Push a DIAGMODE control packet to a remote processor over the DCI
+ * bridge channel identified by 'token'. The packet is framed as
+ * [dci header][diagmode payload][CONTROL_CHAR terminator].
+ */
+static void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+	unsigned char *buf = NULL;
+	int err = 0;
+	struct diag_dci_header_t dci_header;
+	int dci_header_size = sizeof(struct diag_dci_header_t);
+	int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
+	uint32_t write_len = 0;
+
+	if (token < 0 || token >= NUM_DCI_PROC) {
+		pr_err("diag: Invalid remote device channel in %s, token: %d\n",
+							__func__, token);
+		return;
+	}
+
+	if (real_time != MODE_REALTIME && real_time != MODE_NONREALTIME) {
+		pr_err("diag: Invalid real time value in %s, type: %d\n",
+							__func__, real_time);
+		return;
+	}
+
+	/* Buffer is owned by the bridge on success; freed here on failure */
+	buf = dci_get_buffer_from_bridge(token);
+	if (!buf) {
+		pr_err("diag: In %s, unable to get dci buffers to write data\n",
+			__func__);
+		return;
+	}
+	/* Frame the DCI header */
+	dci_header.start = CONTROL_CHAR;
+	dci_header.version = 1;
+	/* Length covers the diagmode payload plus the end terminator */
+	dci_header.length = msg_size + 1;
+	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;
+
+	memcpy(buf + write_len, &dci_header, dci_header_size);
+	write_len += dci_header_size;
+	diag_create_diag_mode_ctrl_pkt(buf + write_len, real_time);
+	write_len += msg_size;
+	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
+	write_len += sizeof(uint8_t);
+	err = diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, write_len);
+	if (err != write_len) {
+		/* Write failed: return the buffer to its mempool ourselves */
+		pr_err("diag: cannot send nrt mode ctrl pkt, err: %d\n", err);
+		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+	} else {
+		/*
+		 * Track the mode just pushed; remote token 0 maps to index 1
+		 * since index 0 (DIAG_LOCAL_PROC) is the local processor.
+		 */
+		driver->real_time_mode[token + 1] = real_time;
+	}
+}
+#else
+static inline void diag_send_diag_mode_update_remote(int token, int real_time)
+{
+}
+#endif
+
+#ifdef CONFIG_DIAG_OVER_USB
+/*
+ * Deferred work: recompute the desired real time mode per processor and
+ * push updates to local peripherals and remote processors as needed.
+ */
+void diag_real_time_work_fn(struct work_struct *work)
+{
+	int temp_real_time = MODE_REALTIME, i, j;
+	uint8_t send_update = 1;
+
+	/*
+	 * If any peripheral in the local processor is in either threshold or
+	 * circular buffering mode, don't send the real time mode control
+	 * packet.
+	 */
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		if (!driver->feature[i].peripheral_buffering)
+			continue;
+		switch (driver->buffering_mode[i].mode) {
+		case DIAG_BUFFERING_MODE_THRESHOLD:
+		case DIAG_BUFFERING_MODE_CIRCULAR:
+			/* break exits only the switch; remaining
+			 * peripherals are still scanned (harmless).
+			 */
+			send_update = 0;
+			break;
+		}
+	}
+
+	mutex_lock(&driver->mode_lock);
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		temp_real_time = diag_compute_real_time(i);
+		if (temp_real_time == driver->real_time_mode[i]) {
+			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+				i, temp_real_time);
+			continue;
+		}
+
+		if (i == DIAG_LOCAL_PROC) {
+			if (!send_update) {
+				/*
+				 * NOTE(review): this break also skips the
+				 * remote processors' updates, not just the
+				 * local one — confirm that is intended.
+				 */
+				pr_debug("diag: In %s, cannot send real time mode pkt since one of the periperhal is in buffering mode\n",
+					 __func__);
+				break;
+			}
+			for (j = 0; j < NUM_PERIPHERALS; j++)
+				diag_send_real_time_update(j,
+						temp_real_time);
+		} else {
+			diag_send_diag_mode_update_remote(i - 1,
+							   temp_real_time);
+		}
+	}
+	mutex_unlock(&driver->mode_lock);
+
+	if (driver->real_time_update_busy > 0)
+		driver->real_time_update_busy--;
+}
+#else
+/*
+ * Deferred work (no USB build): recompute the desired real time mode per
+ * processor from the active process set and votes, and push updates.
+ */
+void diag_real_time_work_fn(struct work_struct *work)
+{
+	int temp_real_time = MODE_REALTIME, i, j;
+
+	for (i = 0; i < DIAG_NUM_PROC; i++) {
+		/*
+		 * Recompute from scratch for every processor. Without this
+		 * reset, a MODE_NONREALTIME result from a previous iteration
+		 * would leak into a processor whose votes demand real time
+		 * (the case where neither branch below assigns).
+		 */
+		temp_real_time = MODE_REALTIME;
+		if (driver->proc_active_mask == 0) {
+			/*
+			 * There are no DCI or Memory Device processes.
+			 * Diag should be in Real Time mode.
+			 */
+			temp_real_time = MODE_REALTIME;
+		} else if (!(driver->proc_rt_vote_mask[i] &
+						driver->proc_active_mask)) {
+			/* No active process is voting for real time mode */
+			temp_real_time = MODE_NONREALTIME;
+		}
+		if (temp_real_time == driver->real_time_mode[i]) {
+			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
+				i, temp_real_time);
+			continue;
+		}
+
+		if (i == DIAG_LOCAL_PROC) {
+			for (j = 0; j < NUM_PERIPHERALS; j++)
+				diag_send_real_time_update(
+					j, temp_real_time);
+		} else {
+			diag_send_diag_mode_update_remote(i - 1,
+							  temp_real_time);
+		}
+	}
+
+	if (driver->real_time_update_busy > 0)
+		driver->real_time_update_busy--;
+}
+#endif
+
+/*
+ * Send a DIAGMODE packet carrying 'real_time' to one peripheral over its
+ * control channel and, on success (or -ENODEV), record the local mode.
+ * Returns 0 when the channel is not yet open (nothing to do).
+ */
+static int __diag_send_real_time_update(uint8_t peripheral, int real_time)
+{
+	char buf[sizeof(struct diag_ctrl_msg_diagmode)];
+	int ret = 0;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return 0;
+	}
+
+	if (real_time != MODE_NONREALTIME && real_time != MODE_REALTIME) {
+		pr_err("diag: In %s, invalid real time mode %d, peripheral: %d\n",
+		       __func__, real_time, peripheral);
+		return -EINVAL;
+	}
+
+	diag_create_diag_mode_ctrl_pkt(buf, real_time);
+
+	mutex_lock(&driver->diag_cntl_mutex);
+	ret = diagfwd_write(peripheral, TYPE_CNTL, buf, (int)sizeof(buf));
+	if (ret && ret != -ENODEV)
+		pr_err("diag: In %s, unable to write to socket, peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, TYPE_CNTL,
+		       (int)sizeof(buf), ret);
+	else
+		driver->real_time_mode[DIAG_LOCAL_PROC] = real_time;
+	mutex_unlock(&driver->diag_cntl_mutex);
+
+	return ret;
+}
+
+/*
+ * Public wrapper for __diag_send_real_time_update: refuses to change the
+ * real time state while any peripheral is in buffering mode, since the
+ * buffering transition owns the RT value at that point.
+ */
+int diag_send_real_time_update(uint8_t peripheral, int real_time)
+{
+	int i;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		if (driver->buffering_flag[i])
+			return -EINVAL;
+
+	return __diag_send_real_time_update(peripheral, real_time);
+}
+
+/*
+ * Apply a buffering-mode configuration to one peripheral: validate the
+ * request, then send (in order) the tx-mode packet, the watermark values
+ * and the corresponding real time mode. Only after all three succeed is
+ * the new state recorded in driver->buffering_mode[].
+ */
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
+{
+	int err = 0;
+	int mode = MODE_REALTIME;
+	uint8_t peripheral = 0;
+
+	if (!params)
+		return -EIO;
+
+	peripheral = params->peripheral;
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
+		       peripheral);
+		return -EINVAL;
+	}
+
+	/* Only honoured while a buffering transition is in flight */
+	if (!driver->buffering_flag[peripheral])
+		return -EINVAL;
+
+	/* Map the requested buffering mode to a real time mode */
+	switch (params->mode) {
+	case DIAG_BUFFERING_MODE_STREAMING:
+		mode = MODE_REALTIME;
+		break;
+	case DIAG_BUFFERING_MODE_THRESHOLD:
+	case DIAG_BUFFERING_MODE_CIRCULAR:
+		mode = MODE_NONREALTIME;
+		break;
+	default:
+		pr_err("diag: In %s, invalid tx mode %d\n", __func__,
+		       params->mode);
+		return -EINVAL;
+	}
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
+			 __func__, peripheral);
+		driver->buffering_flag[peripheral] = 0;
+		return -EIO;
+	}
+
+	/*
+	 * Perform sanity on watermark values. These values must be
+	 * checked irrespective of the buffering mode.
+	 */
+	if (((params->high_wm_val > DIAG_MAX_WM_VAL) ||
+	     (params->low_wm_val > DIAG_MAX_WM_VAL)) ||
+	    (params->low_wm_val > params->high_wm_val) ||
+	    ((params->low_wm_val == params->high_wm_val) &&
+	     (params->low_wm_val != DIAG_MIN_WM_VAL))) {
+		pr_err("diag: In %s, invalid watermark values, high: %d, low: %d, peripheral: %d\n",
+		       __func__, params->high_wm_val, params->low_wm_val,
+		       peripheral);
+		return -EINVAL;
+	}
+
+	mutex_lock(&driver->mode_lock);
+	err = diag_send_buffering_tx_mode_pkt(peripheral, params);
+	if (err) {
+		pr_err("diag: In %s, unable to send buffering mode packet to peripheral %d, err: %d\n",
+		       __func__, peripheral, err);
+		goto fail;
+	}
+	err = diag_send_buffering_wm_values(peripheral, params);
+	if (err) {
+		pr_err("diag: In %s, unable to send buffering wm value packet to peripheral %d, err: %d\n",
+		       __func__, peripheral, err);
+		goto fail;
+	}
+	err = __diag_send_real_time_update(peripheral, mode);
+	if (err) {
+		pr_err("diag: In %s, unable to send mode update to peripheral %d, mode: %d, err: %d\n",
+		       __func__, peripheral, mode, err);
+		goto fail;
+	}
+	/* Commit the state only after all three sends succeeded */
+	driver->buffering_mode[peripheral].peripheral = peripheral;
+	driver->buffering_mode[peripheral].mode = params->mode;
+	driver->buffering_mode[peripheral].low_wm_val = params->low_wm_val;
+	driver->buffering_mode[peripheral].high_wm_val = params->high_wm_val;
+	/* Streaming ends the buffering transition */
+	if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
+		driver->buffering_flag[peripheral] = 0;
+fail:
+	mutex_unlock(&driver->mode_lock);
+	return err;
+}
+
+/*
+ * Send an STM control packet to one peripheral over its control channel.
+ * Returns -ENODEV when the channel is not open, -EINVAL when the
+ * peripheral reported no STM support in its feature mask.
+ */
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data)
+{
+	struct diag_ctrl_msg_stm stm_msg;
+	int msg_size = sizeof(struct diag_ctrl_msg_stm);
+	int err = 0;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EIO;
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return -ENODEV;
+	}
+
+	if (driver->feature[peripheral].stm_support == DISABLE_STM)
+		return -EINVAL;
+
+	/* STM control packet id; no named DIAG_CTRL_MSG_* define exists yet */
+	stm_msg.ctrl_pkt_id = 21;
+	/* Payload is version plus the one-byte control_data */
+	stm_msg.ctrl_pkt_data_len = sizeof(uint32_t) + sizeof(uint8_t);
+	stm_msg.version = 1;
+	stm_msg.control_data = stm_control_data;
+	err = diagfwd_write(peripheral, TYPE_CNTL, &stm_msg, msg_size);
+	if (err && err != -ENODEV) {
+		pr_err("diag: In %s, unable to write to socket, peripheral: %d, type: %d, len: %d, err: %d\n",
+		       __func__, peripheral, TYPE_CNTL,
+		       msg_size, err);
+	}
+
+	return err;
+}
+
+/*
+ * Ask a buffering-capable peripheral to drain its diag buffer
+ * immediately on stream 1.
+ */
+int diag_send_peripheral_drain_immediate(uint8_t peripheral)
+{
+	int err = 0;
+	struct diag_ctrl_drain_immediate ctrl_pkt;
+
+	/*
+	 * Bound the index before touching driver->feature[]; every sibling
+	 * sender performs this check and it was missing here.
+	 */
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
+			 __func__, peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return -ENODEV;
+	}
+
+	ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
+	/* The length of the ctrl pkt is size of version and stream id */
+	ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
+	ctrl_pkt.version = 1;
+	ctrl_pkt.stream_id = 1;
+
+	err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
+	if (err && err != -ENODEV) {
+		pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
+		       peripheral, err);
+	}
+
+	return err;
+}
+
+/*
+ * Send the tx-mode control packet for stream 1 to one peripheral and,
+ * unless the write failed outright, record the mode locally.
+ */
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+				    struct diag_buffering_mode_t *params)
+{
+	struct diag_ctrl_peripheral_tx_mode ctrl_pkt;
+	int ret;
+
+	if (!params)
+		return -EIO;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
+			 __func__, peripheral);
+		return -EINVAL;
+	}
+
+	/* The request must name the same peripheral it is sent to */
+	if (params->peripheral != peripheral)
+		return -EINVAL;
+
+	if (params->mode != DIAG_BUFFERING_MODE_STREAMING &&
+	    params->mode != DIAG_BUFFERING_MODE_THRESHOLD &&
+	    params->mode != DIAG_BUFFERING_MODE_CIRCULAR) {
+		pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+		       params->mode);
+		return -EINVAL;
+	}
+
+	ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
+	/* Control packet length is size of version, stream_id and tx_mode */
+	ctrl_pkt.len = sizeof(uint32_t) +  (2 * sizeof(uint8_t));
+	ctrl_pkt.version = 1;
+	ctrl_pkt.stream_id = 1;
+	ctrl_pkt.tx_mode = params->mode;
+
+	ret = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
+	if (ret && ret != -ENODEV) {
+		pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
+		       peripheral, ret);
+		return ret;
+	}
+	driver->buffering_mode[peripheral].mode = params->mode;
+
+	return ret;
+}
+
+/*
+ * Send the high/low watermark values for stream 1 to one peripheral over
+ * its control channel.
+ */
+int diag_send_buffering_wm_values(uint8_t peripheral,
+				  struct diag_buffering_mode_t *params)
+{
+	struct diag_ctrl_set_wq_val ctrl_pkt;
+	int ret;
+
+	if (!params)
+		return -EIO;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	if (!driver->feature[peripheral].peripheral_buffering) {
+		pr_debug("diag: In %s, peripheral  %d doesn't support buffering\n",
+			 __func__, peripheral);
+		return -EINVAL;
+	}
+
+	if (!driver->diagfwd_cntl[peripheral] ||
+	    !driver->diagfwd_cntl[peripheral]->ch_open) {
+		pr_debug("diag: In %s, control channel is not open, p: %d\n",
+			 __func__, peripheral);
+		return -ENODEV;
+	}
+
+	/* The request must name the same peripheral it is sent to */
+	if (params->peripheral != peripheral)
+		return -EINVAL;
+
+	if (params->mode != DIAG_BUFFERING_MODE_STREAMING &&
+	    params->mode != DIAG_BUFFERING_MODE_THRESHOLD &&
+	    params->mode != DIAG_BUFFERING_MODE_CIRCULAR) {
+		pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
+		       params->mode);
+		return -EINVAL;
+	}
+
+	ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
+	/* Control packet length is size of version, stream_id and wmq values */
+	ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
+	ctrl_pkt.version = 1;
+	ctrl_pkt.stream_id = 1;
+	ctrl_pkt.high_wm_val = params->high_wm_val;
+	ctrl_pkt.low_wm_val = params->low_wm_val;
+
+	ret = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
+			    sizeof(ctrl_pkt));
+	if (ret && ret != -ENODEV)
+		pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
+		       peripheral, ret);
+
+	return ret;
+}
+
+/*
+ * One-time init of the control-channel layer: reset bookkeeping state,
+ * set up the deferred-work items and create the control workqueue.
+ * Returns -ENOMEM when the workqueue cannot be created.
+ */
+int diagfwd_cntl_init(void)
+{
+	uint8_t p;
+
+	reg_dirty = 0;
+	driver->polling_reg_flag = 0;
+	driver->log_on_demand_support = 1;
+	driver->stm_peripheral = 0;
+	driver->close_transport = 0;
+	for (p = 0; p < NUM_PERIPHERALS; p++)
+		driver->buffering_flag[p] = 0;
+
+	mutex_init(&driver->cntl_lock);
+	INIT_WORK(&driver->stm_update_work, diag_stm_update_work_fn);
+	INIT_WORK(&driver->mask_update_work, diag_mask_update_work_fn);
+	INIT_WORK(&driver->close_transport_work,
+		  diag_close_transport_work_fn);
+
+	driver->cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
+	return driver->cntl_wq ? 0 : -ENOMEM;
+}
+
+/* Open the control channel (and its early transport) for every peripheral */
+void diagfwd_cntl_channel_init(void)
+{
+	uint8_t p;
+
+	for (p = 0; p < NUM_PERIPHERALS; p++) {
+		diagfwd_early_open(p);
+		diagfwd_open(p, TYPE_CNTL);
+	}
+}
+
+/* Tear down the control workqueue created by diagfwd_cntl_init() */
+void diagfwd_cntl_exit(void)
+{
+	struct workqueue_struct *wq = driver->cntl_wq;
+
+	if (wq)
+		destroy_workqueue(wq);
+}
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
new file mode 100644
index 0000000..129cb1f
--- /dev/null
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -0,0 +1,282 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_CNTL_H
+#define DIAGFWD_CNTL_H
+
+/* Message registration commands */
+#define DIAG_CTRL_MSG_REG		1
+/* Message passing for DTR events */
+#define DIAG_CTRL_MSG_DTR		2
+/* Control Diag sleep vote, buffering etc */
+#define DIAG_CTRL_MSG_DIAGMODE		3
+/* Diag data based on "light" diag mask */
+#define DIAG_CTRL_MSG_DIAGDATA		4
+/* Send diag internal feature mask 'diag_int_feature_mask' */
+#define DIAG_CTRL_MSG_FEATURE		8
+/* Send Diag log mask for a particular equip id */
+#define DIAG_CTRL_MSG_EQUIP_LOG_MASK	9
+/* Send Diag event mask */
+#define DIAG_CTRL_MSG_EVENT_MASK_V2	10
+/* Send Diag F3 mask */
+#define DIAG_CTRL_MSG_F3_MASK_V2	11
+/* Preset-indexed variants of the mask commands */
+#define DIAG_CTRL_MSG_NUM_PRESETS	12
+#define DIAG_CTRL_MSG_SET_PRESET_ID	13
+#define DIAG_CTRL_MSG_LOG_MASK_WITH_PRESET_ID	14
+#define DIAG_CTRL_MSG_EVENT_MASK_WITH_PRESET_ID	15
+#define DIAG_CTRL_MSG_F3_MASK_WITH_PRESET_ID	16
+/* Peripheral buffering: tx mode, immediate drain, watermark config */
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE	17
+#define DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM	18
+#define DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL	19
+#define DIAG_CTRL_MSG_DCI_CONNECTION_STATUS	20
+/* Capability/range reports sent by peripherals (see process_*_report) */
+#define DIAG_CTRL_MSG_LAST_EVENT_REPORT		22
+#define DIAG_CTRL_MSG_LOG_RANGE_REPORT		23
+#define DIAG_CTRL_MSG_SSID_RANGE_REPORT		24
+#define DIAG_CTRL_MSG_BUILD_MASK_REPORT		25
+#define DIAG_CTRL_MSG_DEREG		27
+#define DIAG_CTRL_MSG_DCI_HANDSHAKE_PKT		29
+#define DIAG_CTRL_MSG_PD_STATUS			30
+#define DIAG_CTRL_MSG_TIME_SYNC_PKT		31
+
+/*
+ * Feature Mask Definitions: Feature mask is used to specify Diag features
+ * supported by the Apps processor
+ *
+ * F_DIAG_FEATURE_MASK_SUPPORT - Denotes we support sending and receiving
+ *                               feature masks
+ * F_DIAG_LOG_ON_DEMAND_APPS - Apps responds to Log on Demand request
+ * F_DIAG_REQ_RSP_SUPPORT - Apps supported dedicated request response Channel
+ * F_DIAG_APPS_HDLC_ENCODE - HDLC encoding is done on the forward channel
+ * F_DIAG_STM - Denotes Apps supports Diag over STM
+ */
+#define F_DIAG_FEATURE_MASK_SUPPORT		0
+#define F_DIAG_LOG_ON_DEMAND_APPS		2
+#define F_DIAG_REQ_RSP_SUPPORT			4
+#define F_DIAG_APPS_HDLC_ENCODE			6
+#define F_DIAG_STM				9
+/* Peripheral can buffer its traffic and honour drain/watermark control */
+#define F_DIAG_PERIPHERAL_BUFFERING		10
+/* NOTE(review): bits 11/13/14 are presumably mask centralization, socket
+ * transport and DCI extended headers as named — confirm against the
+ * peripheral-side feature documentation.
+ */
+#define F_DIAG_MASK_CENTRALIZATION		11
+#define F_DIAG_SOCKETS_ENABLED			13
+#define F_DIAG_DCI_EXTENDED_HEADER_SUPPORT	14
+
+#define ENABLE_SEPARATE_CMDRSP	1
+#define DISABLE_SEPARATE_CMDRSP	0
+
+#define DISABLE_STM	0
+#define ENABLE_STM	1
+#define STATUS_STM	2
+
+#define UPDATE_PERIPHERAL_STM_STATE	1
+#define CLEAR_PERIPHERAL_STM_STATE	2
+
+#define ENABLE_APPS_HDLC_ENCODING	1
+#define DISABLE_APPS_HDLC_ENCODING	0
+
+/* DIAGMODE payload length: the 9 uint32 fields after the id/len words */
+#define DIAG_MODE_PKT_LEN	36
+
+/* Common on-the-wire header: every control packet begins with these */
+struct diag_ctrl_pkt_header_t {
+	uint32_t pkt_id;	/* One of the DIAG_CTRL_MSG_* ids */
+	uint32_t len;		/* Payload length, excluding this header */
+};
+
+/* One contiguous range of command codes in a registration entry */
+struct cmd_code_range {
+	uint16_t cmd_code_lo;
+	uint16_t cmd_code_hi;
+	uint32_t data;
+};
+
+/*
+ * Command registration request (DIAG_CTRL_MSG_REG).
+ * NOTE(review): unlike its siblings this struct is not tagged __packed;
+ * the members happen to be naturally aligned, but confirm before relying
+ * on the wire layout.
+ */
+struct diag_ctrl_cmd_reg {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint16_t cmd_code;
+	uint16_t subsysid;
+	uint16_t count_entries;
+	uint16_t port;
+};
+
+/* Command deregistration request (DIAG_CTRL_MSG_DEREG) */
+struct diag_ctrl_cmd_dereg {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint16_t cmd_code;
+	uint16_t subsysid;
+	uint16_t count_entries;
+} __packed;
+
+struct diag_ctrl_event_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t event_config;
+	uint32_t event_mask_size;
+	/* Copy event mask here */
+} __packed;
+
+struct diag_ctrl_log_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t equip_id;
+	uint32_t num_items; /* Last log code for this equip_id */
+	uint32_t log_mask_size; /* Size of log mask stored in log_mask[] */
+	/* Copy log mask here */
+} __packed;
+
+struct diag_ctrl_msg_mask {
+	uint32_t cmd_type;
+	uint32_t data_len;
+	uint8_t stream_id;
+	uint8_t status;
+	uint8_t msg_mode;
+	uint16_t ssid_first; /* Start of range of supported SSIDs */
+	uint16_t ssid_last; /* Last SSID in range */
+	uint32_t msg_mask_size; /* ssid_last - ssid_first + 1 */
+	/* Copy msg mask here */
+} __packed;
+
+struct diag_ctrl_feature_mask {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t feature_mask_len;
+	/* Copy feature mask here */
+} __packed;
+
+/* DIAGMODE payload; built by diag_create_diag_mode_ctrl_pkt() */
+struct diag_ctrl_msg_diagmode {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;	/* DIAG_MODE_PKT_LEN */
+	uint32_t version;
+	uint32_t sleep_vote;
+	uint32_t real_time;	/* MODE_REALTIME / MODE_NONREALTIME */
+	uint32_t use_nrt_values;
+	uint32_t commit_threshold;
+	uint32_t sleep_threshold;
+	uint32_t sleep_time;
+	uint32_t drain_timer_val;
+	uint32_t event_stale_timer_val;
+} __packed;
+
+/* STM on/off control; built by diag_send_stm_state() */
+struct diag_ctrl_msg_stm {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint8_t  control_data;
+} __packed;
+
+struct diag_ctrl_msg_time_sync {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint8_t  time_api;
+} __packed;
+
+struct diag_ctrl_dci_status {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint8_t count;
+} __packed;
+
+struct diag_ctrl_dci_handshake_pkt {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint32_t magic;
+} __packed;
+
+/* PD (protection domain) up/down status notification */
+struct diag_ctrl_msg_pd_status {
+	uint32_t ctrl_pkt_id;
+	uint32_t ctrl_pkt_data_len;
+	uint32_t version;
+	uint32_t pd_id;
+	uint8_t status;
+} __packed;
+
+/* Peripheral reports below are parsed by the process_*_report handlers */
+struct diag_ctrl_last_event_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint16_t event_last_id;
+} __packed;
+
+struct diag_ctrl_log_range_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint32_t last_equip_id;
+	uint32_t num_ranges;	/* Number of diag_ctrl_log_range that follow */
+} __packed;
+
+struct diag_ctrl_log_range {
+	uint32_t equip_id;
+	uint32_t num_items;
+} __packed;
+
+struct diag_ctrl_ssid_range_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint32_t count;	/* Number of SSID ranges that follow */
+} __packed;
+
+struct diag_ctrl_build_mask_report {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint32_t count;	/* Number of { range, masks } records that follow */
+} __packed;
+
+/* Buffering control packets; built by the diag_send_buffering_* helpers */
+struct diag_ctrl_peripheral_tx_mode {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t stream_id;
+	uint8_t tx_mode;	/* DIAG_BUFFERING_MODE_* */
+} __packed;
+
+struct diag_ctrl_drain_immediate {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t stream_id;
+} __packed;
+
+struct diag_ctrl_set_wq_val {
+	uint32_t pkt_id;
+	uint32_t len;
+	uint32_t version;
+	uint8_t stream_id;
+	uint8_t high_wm_val;
+	uint8_t low_wm_val;
+} __packed;
+
+int diagfwd_cntl_init(void);
+void diagfwd_cntl_channel_init(void);
+void diagfwd_cntl_exit(void);
+void diag_cntl_channel_open(struct diagfwd_info *p_info);
+void diag_cntl_channel_close(struct diagfwd_info *p_info);
+void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
+				 int len);
+int diag_send_real_time_update(uint8_t peripheral, int real_time);
+int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params);
+void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index);
+void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index);
+void diag_real_time_work_fn(struct work_struct *work);
+int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data);
+int diag_send_peripheral_drain_immediate(uint8_t peripheral);
+int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
+				    struct diag_buffering_mode_t *params);
+int diag_send_buffering_wm_values(uint8_t peripheral,
+				  struct diag_buffering_mode_t *params);
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
new file mode 100644
index 0000000..74f7dc7
--- /dev/null
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -0,0 +1,706 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <soc/qcom/glink.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_glink.h"
+#include "diag_ipc_logging.h"
+
+/*
+ * Static per-peripheral channel descriptors, one table per channel type
+ * (data, control, DCI, command, DCI command).  'edge' is the glink edge
+ * name and 'name' the glink channel name; 'hdl' is filled in when the
+ * channel is opened.  Indexed by the PERIPHERAL_* constants.
+ */
+struct diag_glink_info glink_data[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DATA,
+		.edge = "mpss",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DATA,
+		.edge = "lpass",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DATA,
+		.edge = "wcnss",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DATA,
+		.edge = "dsps",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DATA,
+		.edge = "wdsp",
+		.name = "DIAG_DATA",
+		.hdl = NULL
+	}
+};
+
+/* Control channels: mask updates and control packets per peripheral. */
+struct diag_glink_info glink_cntl[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CNTL,
+		.edge = "mpss",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CNTL,
+		.edge = "lpass",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CNTL,
+		.edge = "wcnss",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CNTL,
+		.edge = "dsps",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CNTL,
+		.edge = "wdsp",
+		.name = "DIAG_CTRL",
+		.hdl = NULL
+	}
+};
+
+/* DCI data channels. */
+struct diag_glink_info glink_dci[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI,
+		.edge = "mpss",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI,
+		.edge = "lpass",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI,
+		.edge = "wcnss",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI,
+		.edge = "dsps",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI,
+		.edge = "wdsp",
+		.name = "DIAG_DCI_DATA",
+		.hdl = NULL
+	}
+};
+
+/* Command channels. */
+struct diag_glink_info glink_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CMD,
+		.edge = "mpss",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CMD,
+		.edge = "lpass",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CMD,
+		.edge = "wcnss",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CMD,
+		.edge = "dsps",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CMD,
+		.edge = "wdsp",
+		.name = "DIAG_CMD",
+		.hdl = NULL
+	}
+};
+
+/* DCI command channels. */
+struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI_CMD,
+		.edge = "mpss",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI_CMD,
+		.edge = "lpass",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI_CMD,
+		.edge = "wcnss",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI_CMD,
+		.edge = "dsps",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI_CMD,
+		.edge = "wdsp",
+		.name = "DIAG_DCI_CMD",
+		.hdl = NULL
+	}
+};
+
+static void diag_state_open_glink(void *ctxt);
+static void diag_state_close_glink(void *ctxt);
+static int diag_glink_write(void *ctxt, unsigned char *buf, int len);
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_glink_queue_read(void *ctxt);
+
+/* Transport ops handed to the diagfwd core; ctxt is a diag_glink_info. */
+static struct diag_peripheral_ops glink_ops = {
+	.open = diag_state_open_glink,
+	.close = diag_state_close_glink,
+	.write = diag_glink_write,
+	.read = diag_glink_read,
+	.queue_read = diag_glink_queue_read
+};
+
+/* Mark the channel's diag state as active (called by the diagfwd core). */
+static void diag_state_open_glink(void *ctxt)
+{
+	struct diag_glink_info *glink_info = NULL;
+
+	if (!ctxt)
+		return;
+
+	glink_info = (struct diag_glink_info *)(ctxt);
+	atomic_set(&glink_info->diag_state, 1);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s setting diag state to 1", glink_info->name);
+}
+
+/* Schedule a read on the channel's workqueue if the channel is open. */
+static void diag_glink_queue_read(void *ctxt)
+{
+	struct diag_glink_info *glink_info = NULL;
+
+	if (!ctxt)
+		return;
+
+	glink_info = (struct diag_glink_info *)ctxt;
+	if (glink_info->hdl && glink_info->wq &&
+		atomic_read(&glink_info->opened))
+		queue_work(glink_info->wq, &(glink_info->read_work));
+}
+
+/*
+ * Mark the channel's diag state inactive, wake any blocked readers and
+ * wait for in-flight work items to finish.
+ */
+static void diag_state_close_glink(void *ctxt)
+{
+	struct diag_glink_info *glink_info = NULL;
+
+	if (!ctxt)
+		return;
+
+	glink_info = (struct diag_glink_info *)(ctxt);
+	atomic_set(&glink_info->diag_state, 0);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s setting diag state to 0", glink_info->name);
+	wake_up_interruptible(&glink_info->read_wait_q);
+	flush_workqueue(glink_info->wq);
+}
+
+/* Return 1 if the channel's diag state is active, 0 otherwise. */
+int diag_glink_check_state(void *ctxt)
+{
+	struct diag_glink_info *info = NULL;
+
+	if (!ctxt)
+		return 0;
+
+	info = (struct diag_glink_info *)ctxt;
+	return (int)(atomic_read(&info->diag_state));
+}
+
+/*
+ * "Read" for glink is asynchronous: queue 'buf' as an rx intent; the data
+ * itself is delivered later via diag_glink_notify_rx().  Returns 0 on a
+ * successfully queued intent, negative error otherwise.
+ */
+static int diag_glink_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+	struct diag_glink_info *glink_info =  NULL;
+	int ret_val = 0;
+
+	if (!ctxt || !buf || buf_len <= 0)
+		return -EIO;
+
+	glink_info = (struct diag_glink_info *)ctxt;
+	if (!glink_info || !atomic_read(&glink_info->opened) ||
+		!glink_info->hdl || !glink_info->inited) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"diag:Glink channel not opened");
+		return -EIO;
+	}
+
+	ret_val = glink_queue_rx_intent(glink_info->hdl, buf, buf_len);
+	if (ret_val == 0)
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: queued an rx intent ch:%s perip:%d buf:%pK of len:%d\n",
+		glink_info->name, glink_info->peripheral, buf, buf_len);
+
+	return ret_val;
+}
+
+/* Workqueue handler: ask the diagfwd core to start a read on this channel. */
+static void diag_glink_read_work_fn(struct work_struct *work)
+{
+	struct diag_glink_info *glink_info = container_of(work,
+							struct diag_glink_info,
+							read_work);
+
+	if (!glink_info || !atomic_read(&glink_info->opened))
+		return;
+
+	if (!glink_info->inited) {
+		/* Channel not registered yet; drop the wakeup source vote. */
+		diag_ws_release();
+		return;
+	}
+
+	diagfwd_channel_read(glink_info->fwd_ctxt);
+}
+
+/*
+ * glink rx callback: copy the received packet into the buffer we queued as
+ * the rx intent (pkt_priv), hand it to the diagfwd core and release the
+ * glink-owned buffer.
+ * NOTE(review): assumes pkt_priv is the intent buffer queued in
+ * diag_glink_read() with capacity >= size — confirm glink guarantees rx
+ * never exceeds the queued intent size.  'err' is set but never checked.
+ */
+static void diag_glink_notify_rx(void *hdl, const void *priv,
+				const void *pkt_priv, const void *ptr,
+				size_t size)
+{
+	struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+	int err = 0;
+
+	if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl)
+		return;
+
+	/* size is unsigned, so this only filters size == 0 */
+	if (size <= 0)
+		return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: received a packet %pK of len:%d from periph:%d ch:%d\n",
+		ptr, (int)size, glink_info->peripheral, glink_info->type);
+
+	memcpy((void *)pkt_priv, ptr, size);
+	err = diagfwd_channel_read_done(glink_info->fwd_ctxt,
+					(unsigned char *)pkt_priv, size);
+	glink_rx_done(glink_info->hdl, ptr, false);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Rx done for packet %pK of len:%d periph:%d ch:%d\n",
+		ptr, (int)size, glink_info->peripheral, glink_info->type);
+}
+
+/*
+ * Remote side queued an rx intent: count it and wake writers waiting for
+ * tx credit.
+ */
+static void diag_glink_notify_remote_rx_intent(void *hdl, const void *priv,
+						size_t size)
+{
+	struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+	if (!glink_info)
+		return;
+
+	atomic_inc(&glink_info->tx_intent_ready);
+	wake_up_interruptible(&glink_info->wait_q);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag:received remote rx intent for %d type %d\n",
+		glink_info->peripheral, glink_info->type);
+}
+
+/* Tx-done callback: return the written buffer to the diagfwd core. */
+static void diag_glink_notify_tx_done(void *hdl, const void *priv,
+					const void *pkt_priv,
+					const void *ptr)
+{
+	struct diag_glink_info *glink_info = NULL;
+	struct diagfwd_info *fwd_info = NULL;
+	int found = 0;
+
+	glink_info = (struct diag_glink_info *)priv;
+	if (!glink_info)
+		return;
+
+	fwd_info = glink_info->fwd_ctxt;
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"diag: Received glink tx done notify for ptr%pK pkt_priv %pK\n",
+		ptr, pkt_priv);
+	found = diagfwd_write_buffer_done(fwd_info, ptr);
+	if (!found)
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Received Tx done on invalid buffer ptr %pK\n", ptr);
+}
+
+/*
+ * Write 'len' bytes to the channel.  Only attempts the transmit when a
+ * remote rx intent is available; otherwise returns -ENOMEM so the caller
+ * retries.  Completion is signalled via diag_glink_notify_tx_done().
+ * NOTE(review): if glink_tx() fails, the tx_intent_ready count decremented
+ * above is not restored — an available intent is effectively leaked;
+ * confirm whether that is intended.
+ */
+static int  diag_glink_write(void *ctxt, unsigned char *buf, int len)
+{
+	struct diag_glink_info *glink_info = NULL;
+	int err = 0;
+	uint32_t tx_flags = GLINK_TX_REQ_INTENT;
+
+	if (!ctxt || !buf)
+		return -EIO;
+
+	glink_info = (struct diag_glink_info *)ctxt;
+	if (!glink_info || len <= 0) {
+		pr_err_ratelimited("diag: In %s, invalid params, glink_info: %pK, buf: %pK, len: %d\n",
+				__func__, glink_info, buf, len);
+		return -EINVAL;
+	}
+
+	if (!glink_info->inited || !glink_info->hdl ||
+		!atomic_read(&glink_info->opened)) {
+		pr_err_ratelimited("diag: In %s, glink not inited, glink_info: %pK, buf: %pK, len: %d\n",
+				 __func__, glink_info, buf, len);
+		return -ENODEV;
+	}
+
+	if (atomic_read(&glink_info->tx_intent_ready)) {
+		atomic_dec(&glink_info->tx_intent_ready);
+		err = glink_tx(glink_info->hdl, glink_info, buf, len, tx_flags);
+		if (!err) {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				"%s wrote to glink, len: %d\n",
+				glink_info->name, len);
+		}
+	} else
+		err = -ENOMEM;
+
+	return err;
+
+}
+/*
+ * Channel state callback: track open/close in 'opened' and propagate the
+ * transition to the diagfwd core.  On remote disconnect, outstanding tx
+ * intents are invalidated.
+ */
+static void diag_glink_transport_notify_state(void *handle, const void *priv,
+						unsigned int event)
+{
+	struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
+
+	if (!glink_info)
+		return;
+
+	switch (event) {
+	case GLINK_CONNECTED:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s received channel connect for periph:%d\n",
+			 glink_info->name, glink_info->peripheral);
+		atomic_set(&glink_info->opened, 1);
+		diagfwd_channel_open(glink_info->fwd_ctxt);
+		diagfwd_late_open(glink_info->fwd_ctxt);
+		break;
+	case GLINK_LOCAL_DISCONNECTED:
+		/* Local close already handled in diag_glink_close_work_fn() */
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s received channel disconnect for periph:%d\n",
+			glink_info->name, glink_info->peripheral);
+
+		break;
+	case GLINK_REMOTE_DISCONNECTED:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s received channel remote disconnect for periph:%d\n",
+			 glink_info->name, glink_info->peripheral);
+		atomic_set(&glink_info->opened, 0);
+		diagfwd_channel_close(glink_info->fwd_ctxt);
+		atomic_set(&glink_info->tx_intent_ready, 0);
+		break;
+	default:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s received invalid notification\n",
+			glink_info->name);
+		break;
+	}
+
+}
+/* Workqueue handler: open the glink channel once the link is up. */
+static void diag_glink_open_work_fn(struct work_struct *work)
+{
+	struct diag_glink_info *glink_info = container_of(work,
+							struct diag_glink_info,
+							open_work);
+	struct glink_open_config open_cfg;
+	void *handle = NULL;
+
+	/* Nothing to do when already opened */
+	if (!glink_info || glink_info->hdl)
+		return;
+
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+	open_cfg.priv = glink_info;
+	open_cfg.edge = glink_info->edge;
+	open_cfg.name = glink_info->name;
+	open_cfg.notify_rx = diag_glink_notify_rx;
+	open_cfg.notify_tx_done = diag_glink_notify_tx_done;
+	open_cfg.notify_state = diag_glink_transport_notify_state;
+	open_cfg.notify_remote_rx_intent = diag_glink_notify_remote_rx_intent;
+	handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(handle)) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "error opening channel %s",
+			glink_info->name);
+	} else
+		glink_info->hdl = handle;
+}
+
+/* Workqueue handler: close the glink channel when the link goes down. */
+static void diag_glink_close_work_fn(struct work_struct *work)
+{
+	struct diag_glink_info *glink_info = container_of(work,
+							struct diag_glink_info,
+							close_work);
+	if (!glink_info || !glink_info->inited || !glink_info->hdl)
+		return;
+
+	glink_close(glink_info->hdl);
+	atomic_set(&glink_info->opened, 0);
+	atomic_set(&glink_info->tx_intent_ready, 0);
+	glink_info->hdl = NULL;
+	diagfwd_channel_close(glink_info->fwd_ctxt);
+}
+
+/*
+ * Link-state callback registered in __diag_glink_init(): schedule channel
+ * open/close work when the underlying transport comes up or goes down.
+ */
+static void diag_glink_notify_cb(struct glink_link_state_cb_info *cb_info,
+				void *priv)
+{
+	struct diag_glink_info *glink_info = NULL;
+
+	glink_info = (struct diag_glink_info *)priv;
+	if (!glink_info)
+		return;
+	if (!cb_info)
+		return;
+
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s channel opened for periph:%d\n",
+			glink_info->name, glink_info->peripheral);
+		queue_work(glink_info->wq, &glink_info->open_work);
+		break;
+	case GLINK_LINK_STATE_DOWN:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"%s channel closed for periph:%d\n",
+			glink_info->name, glink_info->peripheral);
+		queue_work(glink_info->wq, &glink_info->close_work);
+		break;
+	default:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			"Invalid link state notification for ch:%s\n",
+			glink_info->name);
+		break;
+
+	}
+}
+
+/*
+ * Register a non-control channel with the diagfwd core and mark it inited.
+ * If the channel already connected before registration, report it open now.
+ */
+static void glink_late_init(struct diag_glink_info *glink_info)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (!glink_info)
+		return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+		 glink_info->name);
+
+	diagfwd_register(TRANSPORT_GLINK, glink_info->peripheral,
+			glink_info->type, (void *)glink_info,
+			&glink_ops, &glink_info->fwd_ctxt);
+	fwd_info = glink_info->fwd_ctxt;
+	if (!fwd_info)
+		return;
+
+	glink_info->inited = 1;
+
+	if (atomic_read(&glink_info->opened))
+		diagfwd_channel_open(glink_info->fwd_ctxt);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+		 glink_info->name);
+}
+
+/*
+ * Late init of all non-control channels for one peripheral.  Called once
+ * the control channel has established which transport the peripheral uses.
+ */
+int diag_glink_init_peripheral(uint8_t peripheral)
+{
+	if (peripheral >= NUM_PERIPHERALS) {
+		pr_err("diag: In %s, invalid peripheral %d\n",
+		       __func__, peripheral);
+		return -EINVAL;
+	}
+
+	glink_late_init(&glink_data[peripheral]);
+	glink_late_init(&glink_dci[peripheral]);
+	glink_late_init(&glink_cmd[peripheral]);
+	glink_late_init(&glink_dci_cmd[peripheral]);
+
+	return 0;
+}
+
+/*
+ * One-time init of a single glink channel descriptor: wait queues, lock,
+ * a dedicated single-threaded workqueue and registration of the link-state
+ * callback.  On failure the channel is left uninitialized ('inited' = 0).
+ */
+static void __diag_glink_init(struct diag_glink_info *glink_info)
+{
+	char wq_name[DIAG_GLINK_NAME_SZ + 12];
+	struct glink_link_info link_info;
+	void *link_state_handle = NULL;
+
+	if (!glink_info)
+		return;
+
+	init_waitqueue_head(&glink_info->wait_q);
+	init_waitqueue_head(&glink_info->read_wait_q);
+	mutex_init(&glink_info->lock);
+	/*
+	 * Build "DIAG_GLINK_<name>".  The size passed to strlcpy/strlcat
+	 * must be the full destination buffer size; the previous code passed
+	 * sizeof(glink_info->name), which truncated the workqueue name.
+	 */
+	strlcpy(wq_name, "DIAG_GLINK_", sizeof(wq_name));
+	strlcat(wq_name, glink_info->name, sizeof(wq_name));
+	glink_info->wq = create_singlethread_workqueue(wq_name);
+	if (!glink_info->wq) {
+		pr_err("diag: In %s, unable to create workqueue for glink ch:%s\n",
+			   __func__, glink_info->name);
+		return;
+	}
+	INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
+	INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
+	INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+	link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
+	link_info.transport = NULL;
+	link_info.edge = glink_info->edge;
+	glink_info->link_state_handle = NULL;
+	link_state_handle = glink_register_link_state_cb(&link_info,
+							(void *)glink_info);
+	if (IS_ERR_OR_NULL(link_state_handle)) {
+		pr_err("diag: In %s, unable to register for glink channel %s\n",
+			   __func__, glink_info->name);
+		destroy_workqueue(glink_info->wq);
+		return;
+	}
+	glink_info->link_state_handle = link_state_handle;
+	glink_info->fwd_ctxt = NULL;
+	atomic_set(&glink_info->tx_intent_ready, 0);
+	atomic_set(&glink_info->opened, 0);
+	atomic_set(&glink_info->diag_state, 0);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		"%s initialized fwd_ctxt: %pK hdl: %pK\n",
+		glink_info->name, glink_info->fwd_ctxt,
+		glink_info->link_state_handle);
+}
+
+/* Re-bind the channel to a new diagfwd context (e.g. after re-registration). */
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+	struct diag_glink_info *info = NULL;
+
+	if (!ctxt || !fwd_ctxt)
+		return;
+
+	info = (struct diag_glink_info *)ctxt;
+	info->fwd_ctxt = fwd_ctxt;
+}
+
+/*
+ * Module init: set up control channels and register them with the cntl
+ * layer, then pre-initialize the remaining channel types (they are
+ * registered later via diag_glink_init_peripheral()).
+ * NOTE(review): 'inited' is set even if __diag_glink_init() failed
+ * (no workqueue / no link-state cb) — confirm that is intended.
+ */
+int diag_glink_init(void)
+{
+	uint8_t peripheral;
+	struct diag_glink_info *glink_info = NULL;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		glink_info = &glink_cntl[peripheral];
+		__diag_glink_init(glink_info);
+		diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
+					(void *)glink_info, &glink_ops,
+					&(glink_info->fwd_ctxt));
+		glink_info->inited = 1;
+		__diag_glink_init(&glink_data[peripheral]);
+		__diag_glink_init(&glink_cmd[peripheral]);
+		__diag_glink_init(&glink_dci[peripheral]);
+		__diag_glink_init(&glink_dci_cmd[peripheral]);
+	}
+	return 0;
+}
+
+/*
+ * Deregister one channel from the diagfwd core and destroy its workqueue.
+ * NOTE(review): 'hdl' is NULLed without calling glink_close(); presumably
+ * the close work has already run (or the link is down) — confirm no open
+ * handle can be leaked here.
+ */
+static void __diag_glink_exit(struct diag_glink_info *glink_info)
+{
+	if (!glink_info)
+		return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s entering\n",
+			 glink_info->name);
+
+	diagfwd_deregister(glink_info->peripheral, glink_info->type,
+					   (void *)glink_info);
+	glink_info->fwd_ctxt = NULL;
+	glink_info->hdl = NULL;
+	if (glink_info->wq)
+		destroy_workqueue(glink_info->wq);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n",
+			 glink_info->name);
+}
+
+/*
+ * Tear down the control channels.  The link-state callback must be
+ * unregistered with the notifier handle returned by
+ * glink_register_link_state_cb() (saved in 'link_state_handle'); the
+ * previous code passed &...hdl — the address of the channel handle —
+ * which is not the registered notifier handle.
+ */
+void diag_glink_early_exit(void)
+{
+	int peripheral = 0;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		__diag_glink_exit(&glink_cntl[peripheral]);
+		glink_unregister_link_state_cb(
+			glink_cntl[peripheral].link_state_handle);
+	}
+}
+
+/*
+ * Tear down the data/cmd/dci channels and unregister their link-state
+ * callbacks (see diag_glink_early_exit() for the handle fix).
+ */
+void diag_glink_exit(void)
+{
+	int peripheral = 0;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		__diag_glink_exit(&glink_data[peripheral]);
+		__diag_glink_exit(&glink_cmd[peripheral]);
+		__diag_glink_exit(&glink_dci[peripheral]);
+		__diag_glink_exit(&glink_dci_cmd[peripheral]);
+		glink_unregister_link_state_cb(
+			glink_data[peripheral].link_state_handle);
+		glink_unregister_link_state_cb(
+			glink_cmd[peripheral].link_state_handle);
+		glink_unregister_link_state_cb(
+			glink_dci[peripheral].link_state_handle);
+		glink_unregister_link_state_cb(
+			glink_dci_cmd[peripheral].link_state_handle);
+	}
+}
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
new file mode 100644
index 0000000..bad4629
--- /dev/null
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_GLINK_H
+#define DIAGFWD_GLINK_H
+
+#define DIAG_GLINK_NAME_SZ	24
+#define GLINK_DRAIN_BUF_SIZE	4096
+
+/* Per-channel state for one glink diag channel (one per peripheral/type). */
+struct diag_glink_info {
+	uint8_t peripheral;	/* PERIPHERAL_* index */
+	uint8_t type;		/* TYPE_* channel type */
+	uint8_t inited;		/* registered with the diagfwd core */
+	atomic_t opened;	/* glink channel is connected */
+	atomic_t diag_state;	/* diag forwarding enabled by the core */
+	uint32_t fifo_size;
+	atomic_t tx_intent_ready;	/* remote rx intents available for tx */
+	void *hdl;			/* glink channel handle */
+	void *link_state_handle;	/* handle from link-state cb register */
+	char edge[DIAG_GLINK_NAME_SZ];
+	char name[DIAG_GLINK_NAME_SZ];
+	struct mutex lock;
+	wait_queue_head_t read_wait_q;
+	wait_queue_head_t wait_q;
+	struct workqueue_struct *wq;
+	struct work_struct open_work;
+	struct work_struct close_work;
+	struct work_struct read_work;
+	struct diagfwd_info *fwd_ctxt;	/* back-pointer to diagfwd core ctxt */
+};
+
+extern struct diag_glink_info glink_data[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cntl[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci_cmd[NUM_PERIPHERALS];
+extern struct diag_glink_info glink_dci[NUM_PERIPHERALS];
+
+int diag_glink_init_peripheral(uint8_t peripheral);
+void diag_glink_exit(void);
+int diag_glink_init(void);
+void diag_glink_early_exit(void);
+void diag_glink_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_glink_check_state(void *ctxt);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_hsic.c b/drivers/char/diag/diagfwd_hsic.c
new file mode 100644
index 0000000..81afcae
--- /dev/null
+++ b/drivers/char/diag/diagfwd_hsic.c
@@ -0,0 +1,451 @@
+/* Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <asm/current.h>
+#include "diagmem.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_hsic.h"
+
+/* Length of the "DIAG_HSIC_" workqueue-name prefix incl. terminator */
+#define DIAG_HSIC_STRING_SZ	11
+
+/* Static descriptors for the two HSIC bridge channels (data and DCI). */
+struct diag_hsic_info diag_hsic[NUM_HSIC_DEV] = {
+	{
+		.id = HSIC_1,
+		.dev_id = DIAGFWD_MDM,
+		.name = "MDM",
+		.mempool = POOL_TYPE_MDM,
+		.opened = 0,
+		.enabled = 0,
+		.suspended = 0,
+		.hsic_wq = NULL
+	},
+	{
+		.id = HSIC_2,
+		.dev_id = DIAGFWD_MDM_DCI,
+		.name = "MDM_DCI",
+		.mempool = POOL_TYPE_MDM_DCI,
+		.opened = 0,
+		.enabled = 0,
+		.suspended = 0,
+		.hsic_wq = NULL
+	}
+};
+
+/*
+ * Bridge read-completion callback: forward the buffer to the remote-dev
+ * layer, or return it to the mempool and requeue a read on failure.
+ * ctxt encodes the HSIC index as an integer.
+ */
+static void diag_hsic_read_complete(void *ctxt, char *buf, int len,
+				    int actual_size)
+{
+	int err = 0;
+	int index = (int)(uintptr_t)ctxt;
+	struct diag_hsic_info *ch = NULL;
+
+	if (index < 0 || index >= NUM_HSIC_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+				   __func__, index);
+		return;
+	}
+	ch = &diag_hsic[index];
+
+	/*
+	 * Don't pass on the buffer if the channel is closed when a pending read
+	 * completes. Also, actual size can be negative error codes - do not
+	 * pass on the buffer.
+	 */
+	if (!ch->opened || actual_size <= 0)
+		goto fail;
+	err = diag_remote_dev_read_done(ch->dev_id, buf, actual_size);
+	if (err)
+		goto fail;
+	return;
+
+fail:
+	diagmem_free(driver, buf, ch->mempool);
+	queue_work(ch->hsic_wq, &ch->read_work);
+}
+
+/* Bridge write-completion callback: notify the remote-dev layer. */
+static void diag_hsic_write_complete(void *ctxt, char *buf, int len,
+				     int actual_size)
+{
+	int index = (int)(uintptr_t)ctxt;
+	struct diag_hsic_info *ch = NULL;
+
+	if (index < 0 || index >= NUM_HSIC_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+				   __func__, index);
+		return;
+	}
+
+	ch = &diag_hsic[index];
+	diag_remote_dev_write_done(ch->dev_id, buf, actual_size, ch->id);
+}
+
+/* Bridge suspend callback: mark the channel suspended. */
+static int diag_hsic_suspend(void *ctxt)
+{
+	int index = (int)(uintptr_t)ctxt;
+	unsigned long flags;
+	struct diag_hsic_info *ch = NULL;
+
+	if (index < 0 || index >= NUM_HSIC_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+				   __func__, index);
+		return -EINVAL;
+	}
+
+	ch = &diag_hsic[index];
+	spin_lock_irqsave(&ch->lock, flags);
+	ch->suspended = 1;
+	spin_unlock_irqrestore(&ch->lock, flags);
+	return 0;
+}
+
+/* Bridge resume callback: clear suspended state and restart reads. */
+static void diag_hsic_resume(void *ctxt)
+{
+	int index = (int)(uintptr_t)ctxt;
+	unsigned long flags;
+	struct diag_hsic_info *ch = NULL;
+
+	if (index < 0 || index >= NUM_HSIC_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid HSIC index %d\n",
+				   __func__, index);
+		return;
+	}
+	ch = &diag_hsic[index];
+	spin_lock_irqsave(&ch->lock, flags);
+	ch->suspended = 0;
+	spin_unlock_irqrestore(&ch->lock, flags);
+	queue_work(ch->hsic_wq, &(ch->read_work));
+}
+
+/* Per-channel callback sets registered with the HSIC bridge driver. */
+static struct diag_bridge_ops diag_hsic_ops[NUM_HSIC_DEV] = {
+	{
+		.ctxt = (void *)HSIC_1,
+		.read_complete_cb = diag_hsic_read_complete,
+		.write_complete_cb = diag_hsic_write_complete,
+		.suspend = diag_hsic_suspend,
+		.resume = diag_hsic_resume,
+	},
+	{
+		.ctxt = (void *)HSIC_2,
+		.read_complete_cb = diag_hsic_read_complete,
+		.write_complete_cb = diag_hsic_write_complete,
+		.suspend = diag_hsic_suspend,
+		.resume = diag_hsic_resume,
+	}
+};
+
+/*
+ * Open HSIC channel 'id': open the bridge, mark the channel opened,
+ * initialize its mempool, notify the remote-dev layer and kick off reads.
+ */
+static int hsic_open(int id)
+{
+	int err = 0;
+	unsigned long flags;
+	struct diag_hsic_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_HSIC_DEV) {
+		pr_err("diag: Invalid index %d in %s\n", id, __func__);
+		return -EINVAL;
+	}
+
+	ch = &diag_hsic[id];
+	if (!ch->enabled)
+		return -ENODEV;
+
+	if (ch->opened) {
+		pr_debug("diag: HSIC channel %d is already opened\n", ch->id);
+		return -ENODEV;
+	}
+
+	err = diag_bridge_open(ch->id, &diag_hsic_ops[ch->id]);
+	if (err) {
+		pr_err("diag: Unable to open HSIC channel %d, err: %d",
+		       ch->id, err);
+		return err;
+	}
+	spin_lock_irqsave(&ch->lock, flags);
+	ch->opened = 1;
+	spin_unlock_irqrestore(&ch->lock, flags);
+	diagmem_init(driver, ch->mempool);
+	/* Notify the bridge that the channel is open */
+	diag_remote_dev_open(ch->dev_id);
+	queue_work(ch->hsic_wq, &(ch->read_work));
+	return 0;
+}
+
+/* Workqueue wrapper around hsic_open(). */
+static void hsic_open_work_fn(struct work_struct *work)
+{
+	struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
+						 open_work);
+	if (ch)
+		hsic_open(ch->id);
+}
+
+/*
+ * Close HSIC channel 'id': mark closed first so in-flight completions are
+ * dropped, then close the bridge, tear down the mempool and notify the
+ * remote-dev layer.
+ */
+static int hsic_close(int id)
+{
+	unsigned long flags;
+	struct diag_hsic_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_HSIC_DEV) {
+		pr_err("diag: Invalid index %d in %s\n", id, __func__);
+		return -EINVAL;
+	}
+
+	ch = &diag_hsic[id];
+	if (!ch->enabled)
+		return -ENODEV;
+
+	if (!ch->opened) {
+		pr_debug("diag: HSIC channel %d is already closed\n", ch->id);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&ch->lock, flags);
+	ch->opened = 0;
+	spin_unlock_irqrestore(&ch->lock, flags);
+	diag_bridge_close(ch->id);
+	diagmem_exit(driver, ch->mempool);
+	diag_remote_dev_close(ch->dev_id);
+	return 0;
+}
+
+/* Workqueue wrapper around hsic_close(). */
+static void hsic_close_work_fn(struct work_struct *work)
+{
+	struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
+						 close_work);
+	if (ch)
+		hsic_close(ch->id);
+}
+
+/*
+ * Workqueue handler: keep submitting async bridge reads until the mempool
+ * is exhausted or a read submission fails.  Completed buffers come back
+ * via diag_hsic_read_complete(), which refills the pipeline.
+ */
+static void hsic_read_work_fn(struct work_struct *work)
+{
+	int err = 0;
+	unsigned char *buf = NULL;
+	struct diag_hsic_info *ch = container_of(work, struct diag_hsic_info,
+						 read_work);
+	if (!ch || !ch->enabled || !ch->opened)
+		return;
+
+	do {
+		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE, ch->mempool);
+		if (!buf) {
+			err = -ENOMEM;
+			break;
+		}
+
+		err = diag_bridge_read(ch->id, buf, DIAG_MDM_BUF_SIZE);
+		if (err) {
+			diagmem_free(driver, buf, ch->mempool);
+			pr_err_ratelimited("diag: Unable to read from HSIC channel %d, err: %d\n",
+					   ch->id, err);
+			break;
+		}
+	} while (buf);
+
+	/* Read from the HSIC channel continuously if the channel is present */
+	if (!err)
+		queue_work(ch->hsic_wq, &ch->read_work);
+}
+
+/*
+ * Platform probe: enable the channel and schedule an open.
+ * NOTE(review): pdev->id is only checked against the upper bound; a
+ * negative id (e.g. PLATFORM_DEVID_NONE) would index out of bounds —
+ * confirm the bridge driver always assigns non-negative ids.
+ */
+static int diag_hsic_probe(struct platform_device *pdev)
+{
+	unsigned long flags;
+	struct diag_hsic_info *ch = NULL;
+
+	if (!pdev)
+		return -EIO;
+
+	pr_debug("diag: hsic probe pdev: %d\n", pdev->id);
+	if (pdev->id >= NUM_HSIC_DEV) {
+		pr_err("diag: No support for HSIC device %d\n", pdev->id);
+		return -EIO;
+	}
+
+	ch = &diag_hsic[pdev->id];
+	if (!ch->enabled) {
+		spin_lock_irqsave(&ch->lock, flags);
+		ch->enabled = 1;
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+	queue_work(ch->hsic_wq, &(ch->open_work));
+	return 0;
+}
+
+/* Platform remove: schedule a close of the channel. */
+static int diag_hsic_remove(struct platform_device *pdev)
+{
+	struct diag_hsic_info *ch = NULL;
+
+	if (!pdev)
+		return -EIO;
+
+	pr_debug("diag: hsic close pdev: %d\n", pdev->id);
+	if (pdev->id >= NUM_HSIC_DEV) {
+		pr_err("diag: No support for HSIC device %d\n", pdev->id);
+		return -EIO;
+	}
+
+	ch = &diag_hsic[pdev->id];
+	queue_work(ch->hsic_wq, &(ch->close_work));
+	return 0;
+}
+
+/* Runtime-PM hooks: no device state to save, log only. */
+static int diagfwd_hsic_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int diagfwd_hsic_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops diagfwd_hsic_dev_pm_ops = {
+	.runtime_suspend = diagfwd_hsic_runtime_suspend,
+	.runtime_resume = diagfwd_hsic_runtime_resume,
+};
+
+/* Platform driver matched against the "diag_bridge" device. */
+static struct platform_driver msm_hsic_ch_driver = {
+	.probe = diag_hsic_probe,
+	.remove = diag_hsic_remove,
+	.driver = {
+		   .name = "diag_bridge",
+		   .owner = THIS_MODULE,
+		   .pm   = &diagfwd_hsic_dev_pm_ops,
+		   },
+};
+
+/* Remote-dev op: schedule the channel's read work. */
+static int hsic_queue_read(int id)
+{
+	if (id < 0 || id >= NUM_HSIC_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid index %d\n",
+				   __func__, id);
+		return -EINVAL;
+	}
+	queue_work(diag_hsic[id].hsic_wq, &(diag_hsic[id].read_work));
+	return 0;
+}
+
+/*
+ * Remote-dev op: submit an async write to the bridge.  Completion is
+ * reported via diag_hsic_write_complete().
+ */
+static int hsic_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	int err = 0;
+	struct diag_hsic_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_HSIC_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid index %d\n",
+				   __func__, id);
+		return -EINVAL;
+	}
+	if (!buf || len <= 0) {
+		pr_err_ratelimited("diag: In %s, ch %d, invalid buf %pK len %d\n",
+				   __func__, id, buf, len);
+		return -EINVAL;
+	}
+
+	ch = &diag_hsic[id];
+	if (!ch->opened || !ch->enabled) {
+		pr_debug_ratelimited("diag: In %s, ch %d is disabled. opened %d enabled: %d\n",
+				     __func__, id, ch->opened, ch->enabled);
+		return -EIO;
+	}
+
+	err = diag_bridge_write(ch->id, buf, len);
+	if (err) {
+		pr_err_ratelimited("diag: cannot write to HSIC ch %d, err: %d\n",
+				   ch->id, err);
+	}
+	return err;
+}
+
+/*
+ * Remote-dev op: a forwarded read buffer has been consumed — return it to
+ * the mempool and restart the read pipeline.
+ */
+static int hsic_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+	if (id < 0 || id >= NUM_HSIC_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid index %d\n",
+				   __func__, id);
+		return -EINVAL;
+	}
+	if (!buf)
+		return -EIO;
+	diagmem_free(driver, buf, diag_hsic[id].mempool);
+	queue_work(diag_hsic[id].hsic_wq, &(diag_hsic[id].read_work));
+	return 0;
+}
+
+/* Ops handed to the diagfwd bridge layer for the HSIC transport. */
+static struct diag_remote_dev_ops diag_hsic_fwd_ops = {
+	.open = hsic_open,
+	.close = hsic_close,
+	.queue_read = hsic_queue_read,
+	.write = hsic_write,
+	.fwd_complete = hsic_fwd_complete,
+};
+
+/*
+ * Initialize both HSIC channels: locks, work items, a dedicated workqueue
+ * each, bridge registration, then the platform driver.  On failure all
+ * channels are torn down and the error is propagated.
+ */
+int diag_hsic_init(void)
+{
+	int i;
+	int err = 0;
+	struct diag_hsic_info *ch = NULL;
+	char wq_name[DIAG_HSIC_NAME_SZ + DIAG_HSIC_STRING_SZ];
+
+	for (i = 0; i < NUM_HSIC_DEV; i++) {
+		ch = &diag_hsic[i];
+		spin_lock_init(&ch->lock);
+		INIT_WORK(&(ch->read_work), hsic_read_work_fn);
+		INIT_WORK(&(ch->open_work), hsic_open_work_fn);
+		INIT_WORK(&(ch->close_work), hsic_close_work_fn);
+		/*
+		 * Build "DIAG_HSIC_<name>".  The size passed to
+		 * strlcpy/strlcat must be the full destination buffer size;
+		 * the previous code passed sizeof(ch->name), truncating the
+		 * workqueue name.
+		 */
+		strlcpy(wq_name, "DIAG_HSIC_", sizeof(wq_name));
+		strlcat(wq_name, ch->name, sizeof(wq_name));
+		ch->hsic_wq = create_singlethread_workqueue(wq_name);
+		if (!ch->hsic_wq)
+			goto fail;
+		err = diagfwd_bridge_register(ch->dev_id, ch->id,
+					      &diag_hsic_fwd_ops);
+		if (err) {
+			pr_err("diag: Unable to register HSIC channel %d with bridge, err: %d\n",
+			       i, err);
+			goto fail;
+		}
+	}
+
+	err = platform_driver_register(&msm_hsic_ch_driver);
+	if (err) {
+		pr_err("diag: could not register HSIC device, err: %d\n", err);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	diag_hsic_exit();
+	/* Workqueue allocation failures arrive here with err still 0 */
+	return err ? err : -ENOMEM;
+}
+
+/* Disable both channels, destroy their workqueues and drop the driver. */
+void diag_hsic_exit(void)
+{
+	int i;
+	struct diag_hsic_info *ch = NULL;
+
+	for (i = 0; i < NUM_HSIC_DEV; i++) {
+		ch = &diag_hsic[i];
+		ch->enabled = 0;
+		ch->opened = 0;
+		ch->suspended = 0;
+		if (ch->hsic_wq)
+			destroy_workqueue(ch->hsic_wq);
+	}
+	platform_driver_unregister(&msm_hsic_ch_driver);
+}
+
diff --git a/drivers/char/diag/diagfwd_hsic.h b/drivers/char/diag/diagfwd_hsic.h
new file mode 100644
index 0000000..c4d87a2
--- /dev/null
+++ b/drivers/char/diag/diagfwd_hsic.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_HSIC_H
+#define DIAGFWD_HSIC_H
+#ifdef CONFIG_DIAG_OVER_USB
+#include <linux/usb/usbdiag.h>
+#endif
+#include <linux/usb/diag_bridge.h>
+
+#define HSIC_1			0
+#define HSIC_2			1
+#define NUM_HSIC_DEV		2
+
+#define DIAG_HSIC_NAME_SZ	24
+
+struct diag_hsic_info {
+	int id;
+	int dev_id;
+	int mempool;
+	uint8_t opened;
+	uint8_t enabled;
+	uint8_t suspended;
+	char name[DIAG_HSIC_NAME_SZ];
+	struct work_struct read_work;
+	struct work_struct open_work;
+	struct work_struct close_work;
+	struct workqueue_struct *hsic_wq;
+	spinlock_t lock;
+};
+
+extern struct diag_hsic_info diag_hsic[NUM_HSIC_DEV];
+
+int diag_hsic_init(void);
+void diag_hsic_exit(void);
+
+#endif
+
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
new file mode 100644
index 0000000..f27f358
--- /dev/null
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -0,0 +1,733 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/diagchar.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/msm_mhi.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <asm/current.h>
+#include <linux/atomic.h>
+#include "diagmem.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_mhi.h"
+#include "diag_ipc_logging.h"
+
+#define SET_CH_CTXT(index, type)	(((index & 0xFF) << 8) | (type & 0xFF))
+#define GET_INFO_INDEX(val)		((val & 0xFF00) >> 8)
+#define GET_CH_TYPE(val)		((val & 0x00FF))
+
+#define CHANNELS_OPENED			0
+#define OPEN_CHANNELS			1
+#define CHANNELS_CLOSED			0
+#define CLOSE_CHANNELS			1
+
+#define DIAG_MHI_STRING_SZ		11
+
+struct diag_mhi_info diag_mhi[NUM_MHI_DEV] = {
+	{
+		.id = MHI_1,
+		.dev_id = DIAGFWD_MDM,
+		.name = "MDM",
+		.enabled = 0,
+		.num_read = 0,
+		.mempool = POOL_TYPE_MDM,
+		.mempool_init = 0,
+		.mhi_wq = NULL,
+		.read_ch = {
+			.chan = MHI_CLIENT_DIAG_IN,
+			.type = TYPE_MHI_READ_CH,
+			.hdl = NULL,
+		},
+		.write_ch = {
+			.chan = MHI_CLIENT_DIAG_OUT,
+			.type = TYPE_MHI_WRITE_CH,
+			.hdl = NULL,
+		}
+	},
+	{
+		.id = MHI_DCI_1,
+		.dev_id = DIAGFWD_MDM_DCI,
+		.name = "MDM_DCI",
+		.enabled = 0,
+		.num_read = 0,
+		.mempool = POOL_TYPE_MDM_DCI,
+		.mempool_init = 0,
+		.mhi_wq = NULL,
+		.read_ch = {
+			.chan = MHI_CLIENT_DCI_IN,
+			.type = TYPE_MHI_READ_CH,
+			.hdl = NULL,
+		},
+		.write_ch = {
+			.chan = MHI_CLIENT_DCI_OUT,
+			.type = TYPE_MHI_WRITE_CH,
+			.hdl = NULL,
+		}
+	}
+};
+
+static int mhi_ch_open(struct diag_mhi_ch_t *ch)
+{
+	int err = 0;
+
+	if (!ch)
+		return -EINVAL;
+
+	if (atomic_read(&ch->opened)) {
+		pr_debug("diag: In %s, channel is already opened, id: %d\n",
+			 __func__, ch->type);
+		return 0;
+	}
+	err = mhi_open_channel(ch->hdl);
+	if (err) {
+		pr_err("diag: In %s, unable to open ch, type: %d, err: %d\n",
+		       __func__, ch->type, err);
+		return err;
+	}
+
+	atomic_set(&ch->opened, 1);
+	INIT_LIST_HEAD(&ch->buf_tbl);
+	return 0;
+}
+
+static int mhi_buf_tbl_add(struct diag_mhi_info *mhi_info, int type,
+			   void *buf, int len)
+{
+	unsigned long flags;
+	struct diag_mhi_buf_tbl_t *item;
+	struct diag_mhi_ch_t *ch = NULL;
+
+	if (!mhi_info || !buf || len < 0)
+		return -EINVAL;
+
+	switch (type) {
+	case TYPE_MHI_READ_CH:
+		ch = &mhi_info->read_ch;
+		break;
+	case TYPE_MHI_WRITE_CH:
+		ch = &mhi_info->write_ch;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return -EINVAL;
+	}
+
+	item = kzalloc(sizeof(struct diag_mhi_buf_tbl_t), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+	kmemleak_not_leak(item);
+
+	spin_lock_irqsave(&ch->lock, flags);
+	item->buf = buf;
+	item->len = len;
+	list_add_tail(&item->link, &ch->buf_tbl);
+	spin_unlock_irqrestore(&ch->lock, flags);
+
+	return 0;
+}
+
+static void mhi_buf_tbl_remove(struct diag_mhi_info *mhi_info, int type,
+			       void *buf, int len)
+{
+	int found = 0;
+	unsigned long flags;
+	struct list_head *start, *temp;
+	struct diag_mhi_buf_tbl_t *item = NULL;
+	struct diag_mhi_ch_t *ch = NULL;
+
+	if (!mhi_info || !buf || len < 0)
+		return;
+
+	switch (type) {
+	case TYPE_MHI_READ_CH:
+		ch = &mhi_info->read_ch;
+		break;
+	case TYPE_MHI_WRITE_CH:
+		ch = &mhi_info->write_ch;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type: %d\n",
+				   __func__, type);
+		return;
+	}
+
+	spin_lock_irqsave(&ch->lock, flags);
+	list_for_each_safe(start, temp, &ch->buf_tbl) {
+		item = list_entry(start, struct diag_mhi_buf_tbl_t, link);
+		if (item->buf != buf)
+			continue;
+		list_del(&item->link);
+		if (type == TYPE_MHI_READ_CH)
+			diagmem_free(driver, item->buf, mhi_info->mempool);
+		kfree(item);
+		found = 1;
+	}
+	spin_unlock_irqrestore(&ch->lock, flags);
+
+	if (!found) {
+		pr_err_ratelimited("diag: In %s, unable to find buffer, ch: %pK, type: %d, buf: %pK\n",
+				   __func__, ch, ch->type, buf);
+	}
+}
+
+/*
+ * mhi_buf_tbl_clear() - drain all buffers still queued on the read and
+ * write channels of @mhi_info after the channels have been closed.
+ * Read buffers go back to the diag mempool; write buffers are completed
+ * back to the bridge layer so their owners can reclaim them.
+ */
+static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info)
+{
+	unsigned long flags;
+	struct list_head *start, *temp;
+	struct diag_mhi_buf_tbl_t *item = NULL;
+	struct diag_mhi_ch_t *ch = NULL;
+
+	if (!mhi_info || !mhi_info->enabled)
+		return;
+
+	/* Clear all the pending reads */
+	ch = &mhi_info->read_ch;
+	/* At this point, the channel should already be closed */
+	if (!(atomic_read(&ch->opened))) {
+		spin_lock_irqsave(&ch->lock, flags);
+		list_for_each_safe(start, temp, &ch->buf_tbl) {
+			item = list_entry(start, struct diag_mhi_buf_tbl_t,
+					  link);
+			list_del(&item->link);
+			diagmem_free(driver, item->buf, mhi_info->mempool);
+			kfree(item);
+
+		}
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+
+	/* Clear all the pending writes */
+	ch = &mhi_info->write_ch;
+	/* At this point, the channel should already be closed */
+	if (!(atomic_read(&ch->opened))) {
+		spin_lock_irqsave(&ch->lock, flags);
+		list_for_each_safe(start, temp, &ch->buf_tbl) {
+			item = list_entry(start, struct diag_mhi_buf_tbl_t,
+					  link);
+			list_del(&item->link);
+			diag_remote_dev_write_done(mhi_info->dev_id, item->buf,
+						   item->len, mhi_info->id);
+			kfree(item);
+
+		}
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+}
+
+static int __mhi_close(struct diag_mhi_info *mhi_info, int close_flag)
+{
+	if (!mhi_info)
+		return -EIO;
+
+	if (!mhi_info->enabled)
+		return -ENODEV;
+
+	if (close_flag == CLOSE_CHANNELS) {
+		atomic_set(&(mhi_info->read_ch.opened), 0);
+		atomic_set(&(mhi_info->write_ch.opened), 0);
+	}
+
+	if (!(atomic_read(&(mhi_info->read_ch.opened)))) {
+		flush_workqueue(mhi_info->mhi_wq);
+		mhi_close_channel(mhi_info->read_ch.hdl);
+	}
+
+	if (!(atomic_read(&(mhi_info->write_ch.opened)))) {
+		flush_workqueue(mhi_info->mhi_wq);
+		mhi_close_channel(mhi_info->write_ch.hdl);
+	}
+
+	mhi_buf_tbl_clear(mhi_info);
+	diag_remote_dev_close(mhi_info->dev_id);
+	return 0;
+}
+
+static int mhi_close(int id)
+{
+	if (id < 0 || id >= NUM_MHI_DEV) {
+		pr_err("diag: In %s, invalid index %d\n", __func__, id);
+		return -EINVAL;
+	}
+
+	if (!diag_mhi[id].enabled)
+		return -ENODEV;
+	/*
+	 * This function is called whenever the channel needs to be closed
+	 * explicitly by Diag. Close both the read and write channels (denoted
+	 * by CLOSE_CHANNELS flag)
+	 */
+	return __mhi_close(&diag_mhi[id], CLOSE_CHANNELS);
+}
+
+static void mhi_close_work_fn(struct work_struct *work)
+{
+	struct diag_mhi_info *mhi_info = container_of(work,
+						      struct diag_mhi_info,
+						      close_work);
+	/*
+	 * This is a part of work function which is queued after the channels
+	 * are explicitly closed. Do not close channels again (denoted by
+	 * CHANNELS_CLOSED flag)
+	 */
+	if (mhi_info)
+		__mhi_close(mhi_info, CHANNELS_CLOSED);
+}
+
+static int __mhi_open(struct diag_mhi_info *mhi_info, int open_flag)
+{
+	int err = 0;
+	unsigned long flags;
+
+	if (!mhi_info)
+		return -EIO;
+
+	if (open_flag == OPEN_CHANNELS) {
+		if (!atomic_read(&mhi_info->read_ch.opened)) {
+			err = mhi_ch_open(&mhi_info->read_ch);
+			if (err)
+				goto fail;
+			DIAG_LOG(DIAG_DEBUG_BRIDGE,
+				 "opened mhi read channel, port: %d\n",
+				 mhi_info->id);
+		}
+		if (!atomic_read(&mhi_info->write_ch.opened)) {
+			err = mhi_ch_open(&mhi_info->write_ch);
+			if (err)
+				goto fail;
+			DIAG_LOG(DIAG_DEBUG_BRIDGE,
+				 "opened mhi write channel, port: %d\n",
+				 mhi_info->id);
+		}
+	} else if (open_flag == CHANNELS_OPENED) {
+		if (!atomic_read(&(mhi_info->read_ch.opened)) ||
+		    !atomic_read(&(mhi_info->write_ch.opened))) {
+			return -ENODEV;
+		}
+	}
+
+	spin_lock_irqsave(&mhi_info->lock, flags);
+	mhi_info->enabled = 1;
+	spin_unlock_irqrestore(&mhi_info->lock, flags);
+	diag_remote_dev_open(mhi_info->dev_id);
+	queue_work(mhi_info->mhi_wq, &(mhi_info->read_work));
+	return 0;
+
+fail:
+	pr_err("diag: Failed to open mhi channlels, err: %d\n", err);
+	mhi_close(mhi_info->id);
+	return err;
+}
+
+/*
+ * mhi_open() - explicitly open both the read and write channels of the
+ * MHI device at index @id (denoted by the OPEN_CHANNELS flag).
+ *
+ * On success __mhi_open() already notifies the bridge layer via
+ * diag_remote_dev_open() and queues the read work, so neither call is
+ * repeated here.  Returns 0 on success or a negative errno.
+ */
+static int mhi_open(int id)
+{
+	if (id < 0 || id >= NUM_MHI_DEV) {
+		pr_err("diag: In %s, invalid index %d\n", __func__, id);
+		return -EINVAL;
+	}
+
+	if (!diag_mhi[id].enabled)
+		return -ENODEV;
+	/*
+	 * Propagate the result of __mhi_open(); the previous code ignored
+	 * it and reported success even when opening the channels failed.
+	 */
+	return __mhi_open(&diag_mhi[id], OPEN_CHANNELS);
+}
+
+static void mhi_open_work_fn(struct work_struct *work)
+{
+	struct diag_mhi_info *mhi_info = container_of(work,
+						      struct diag_mhi_info,
+						      open_work);
+	/*
+	 * This is a part of work function which is queued after the channels
+	 * are explicitly opened. Do not open channels again (denoted by
+	 * CHANNELS_OPENED flag)
+	 */
+	if (mhi_info) {
+		diag_remote_dev_open(mhi_info->dev_id);
+		queue_work(mhi_info->mhi_wq, &(mhi_info->read_work));
+	}
+}
+
+static void mhi_read_done_work_fn(struct work_struct *work)
+{
+	unsigned char *buf = NULL;
+	struct mhi_result result;
+	int err = 0;
+	struct diag_mhi_info *mhi_info = container_of(work,
+						      struct diag_mhi_info,
+						      read_done_work);
+	if (!mhi_info)
+		return;
+
+	do {
+		if (!(atomic_read(&(mhi_info->read_ch.opened))))
+			break;
+		err = mhi_poll_inbound(mhi_info->read_ch.hdl, &result);
+		if (err) {
+			pr_debug("diag: In %s, err %d\n", __func__, err);
+			break;
+		}
+		buf = result.buf_addr;
+		if (!buf)
+			break;
+		DIAG_LOG(DIAG_DEBUG_BRIDGE,
+			 "read from mhi port %d buf %pK\n",
+			 mhi_info->id, buf);
+		/*
+		 * The read buffers can come after the MHI channels are closed.
+		 * If the channels are closed at the time of read, discard the
+		 * buffers here and do not forward them to the mux layer.
+		 */
+		if ((atomic_read(&(mhi_info->read_ch.opened)))) {
+			err = diag_remote_dev_read_done(mhi_info->dev_id, buf,
+						  result.bytes_xferd);
+			if (err)
+				mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH,
+					buf, result.bytes_xferd);
+		} else {
+			mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf,
+					   result.bytes_xferd);
+		}
+	} while (buf);
+}
+
+static void mhi_read_work_fn(struct work_struct *work)
+{
+	int err = 0;
+	unsigned char *buf = NULL;
+	enum MHI_FLAGS mhi_flags = MHI_EOT;
+	struct diag_mhi_ch_t *read_ch = NULL;
+	unsigned long flags;
+	struct diag_mhi_info *mhi_info = container_of(work,
+						      struct diag_mhi_info,
+						      read_work);
+	if (!mhi_info)
+		return;
+
+	read_ch = &mhi_info->read_ch;
+	do {
+		if (!(atomic_read(&(read_ch->opened))))
+			break;
+
+		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
+				    mhi_info->mempool);
+		if (!buf)
+			break;
+
+		err = mhi_buf_tbl_add(mhi_info, TYPE_MHI_READ_CH, buf,
+				      DIAG_MDM_BUF_SIZE);
+		if (err)
+			goto fail;
+
+		DIAG_LOG(DIAG_DEBUG_BRIDGE,
+			 "queueing a read buf %pK, ch: %s\n",
+			 buf, mhi_info->name);
+		spin_lock_irqsave(&read_ch->lock, flags);
+		err = mhi_queue_xfer(read_ch->hdl, buf, DIAG_MDM_BUF_SIZE,
+				     mhi_flags);
+		spin_unlock_irqrestore(&read_ch->lock, flags);
+		if (err) {
+			pr_err_ratelimited("diag: Unable to read from MHI channel %s, err: %d\n",
+					   mhi_info->name, err);
+			goto fail;
+		}
+	} while (buf);
+
+	return;
+fail:
+	mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf, DIAG_MDM_BUF_SIZE);
+	queue_work(mhi_info->mhi_wq, &mhi_info->read_work);
+}
+
+static int mhi_queue_read(int id)
+{
+	if (id < 0 || id >= NUM_MHI_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
+				   id);
+		return -EINVAL;
+	}
+	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+	return 0;
+}
+
+/*
+ * mhi_write() - queue @buf of @len bytes on the write channel of the
+ * MHI device at index @id.  The buffer is tracked in the channel's
+ * buffer table until the transfer-complete callback removes it.
+ *
+ * Returns 0 on success or a negative errno.  @ctxt is unused but part
+ * of the diag_remote_dev_ops write signature.
+ */
+static int mhi_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	int err = 0;
+	enum MHI_FLAGS mhi_flags = MHI_EOT;
+	unsigned long flags;
+	struct diag_mhi_ch_t *ch = NULL;
+
+	if (id < 0 || id >= NUM_MHI_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
+				   id);
+		return -EINVAL;
+	}
+
+	if (!buf || len <= 0) {
+		pr_err("diag: In %s, ch %d, invalid buf %pK len %d\n",
+			__func__, id, buf, len);
+		return -EINVAL;
+	}
+
+	if (!diag_mhi[id].enabled) {
+		pr_err_ratelimited("diag: In %s, MHI channel %s is not enabled\n",
+				   __func__, diag_mhi[id].name);
+		return -EIO;
+	}
+
+	ch = &diag_mhi[id].write_ch;
+	if (!(atomic_read(&(ch->opened)))) {
+		pr_err_ratelimited("diag: In %s, MHI write channel %s is not open\n",
+				   __func__, diag_mhi[id].name);
+		return -EIO;
+	}
+
+	err = mhi_buf_tbl_add(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf,
+			      len);
+	if (err)
+		return err;
+
+	spin_lock_irqsave(&ch->lock, flags);
+	err = mhi_queue_xfer(ch->hdl, buf, len, mhi_flags);
+	spin_unlock_irqrestore(&ch->lock, flags);
+	if (err) {
+		/*
+		 * diag_mhi[id].name is a char array; print it with %s.
+		 * The previous %pK printed a hashed pointer, not the name.
+		 */
+		pr_err_ratelimited("diag: In %s, cannot write to MHI channel %s, len %d, err: %d\n",
+				   __func__, diag_mhi[id].name, len, err);
+		mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_WRITE_CH, buf, len);
+		return err;
+	}
+
+	return 0;
+}
+
+static int mhi_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+	if (id < 0 || id >= NUM_MHI_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid index %d\n", __func__,
+				   id);
+		return -EINVAL;
+	}
+
+	if (!buf)
+		return -EINVAL;
+
+	mhi_buf_tbl_remove(&diag_mhi[id], TYPE_MHI_READ_CH, buf, len);
+	queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
+	return 0;
+}
+
+static void mhi_notifier(struct mhi_cb_info *cb_info)
+{
+	int index;
+	int type;
+	int err = 0;
+	struct mhi_result *result = NULL;
+	struct diag_mhi_ch_t *ch = NULL;
+	void *buf = NULL;
+
+	if (!cb_info)
+		return;
+
+	result = cb_info->result;
+	if (!result) {
+		pr_err_ratelimited("diag: failed to obtain mhi result from callback\n");
+		return;
+	}
+
+	index = GET_INFO_INDEX((uintptr_t)cb_info->result->user_data);
+	if (index < 0 || index >= NUM_MHI_DEV) {
+		pr_err_ratelimited("diag: In %s, invalid MHI index %d\n",
+				   __func__, index);
+		return;
+	}
+
+	type = GET_CH_TYPE((uintptr_t)cb_info->result->user_data);
+	switch (type) {
+	case TYPE_MHI_READ_CH:
+		ch = &diag_mhi[index].read_ch;
+		break;
+	case TYPE_MHI_WRITE_CH:
+		ch = &diag_mhi[index].write_ch;
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid channel type %d\n",
+				   __func__, type);
+		return;
+	}
+
+	switch (cb_info->cb_reason) {
+	case MHI_CB_MHI_ENABLED:
+		DIAG_LOG(DIAG_DEBUG_BRIDGE,
+			 "received mhi enabled notifiation port: %d ch: %d\n",
+			 index, ch->type);
+		err = mhi_ch_open(ch);
+		if (err)
+			break;
+		if (ch->type == TYPE_MHI_READ_CH) {
+			diag_mhi[index].num_read = mhi_get_free_desc(ch->hdl);
+			if (diag_mhi[index].num_read <= 0) {
+				pr_err("diag: In %s, invalid number of descriptors %d\n",
+				       __func__, diag_mhi[index].num_read);
+				break;
+			}
+		}
+		__mhi_open(&diag_mhi[index], CHANNELS_OPENED);
+		queue_work(diag_mhi[index].mhi_wq,
+			   &(diag_mhi[index].open_work));
+		break;
+	case MHI_CB_MHI_DISABLED:
+		DIAG_LOG(DIAG_DEBUG_BRIDGE,
+			 "received mhi disabled notifiation port: %d ch: %d\n",
+			 index, ch->type);
+		atomic_set(&(ch->opened), 0);
+		__mhi_close(&diag_mhi[index], CHANNELS_CLOSED);
+		break;
+	case MHI_CB_XFER:
+		/*
+		 * If the channel is a read channel, this is a read
+		 * complete notification - write complete if the channel is
+		 * a write channel.
+		 */
+		if (type == TYPE_MHI_READ_CH) {
+			if (!atomic_read(&(diag_mhi[index].read_ch.opened)))
+				break;
+
+			queue_work(diag_mhi[index].mhi_wq,
+				   &(diag_mhi[index].read_done_work));
+			break;
+		}
+		buf = result->buf_addr;
+		if (!buf) {
+			pr_err_ratelimited("diag: In %s, unable to de-serialize the data\n",
+					   __func__);
+			break;
+		}
+		mhi_buf_tbl_remove(&diag_mhi[index], TYPE_MHI_WRITE_CH, buf,
+				   result->bytes_xferd);
+		diag_remote_dev_write_done(diag_mhi[index].dev_id, buf,
+					   result->bytes_xferd,
+					   diag_mhi[index].id);
+		break;
+	default:
+		pr_err("diag: In %s, invalid cb reason 0x%x\n", __func__,
+		       cb_info->cb_reason);
+		break;
+	}
+}
+
+static struct diag_remote_dev_ops diag_mhi_fwd_ops = {
+	.open = mhi_open,
+	.close = mhi_close,
+	.queue_read = mhi_queue_read,
+	.write = mhi_write,
+	.fwd_complete = mhi_fwd_complete,
+};
+
+static int diag_mhi_register_ch(int id, struct diag_mhi_ch_t *ch)
+{
+	int ctxt = 0;
+
+	if (!ch)
+		return -EIO;
+	if (id < 0 || id >= NUM_MHI_DEV)
+		return -EINVAL;
+	spin_lock_init(&ch->lock);
+	atomic_set(&(ch->opened), 0);
+	ctxt = SET_CH_CTXT(id, ch->type);
+	ch->client_info.mhi_client_cb = mhi_notifier;
+	return mhi_register_channel(&ch->hdl, ch->chan, 0, &ch->client_info,
+				    (void *)(uintptr_t)ctxt);
+}
+
+/*
+ * diag_mhi_init() - initialize all diag MHI devices.
+ *
+ * Sets up per-device locks, work items and mempools, creates one
+ * dedicated workqueue per device, and registers the device plus its
+ * read and write channels with the bridge and MHI layers.
+ *
+ * Returns 0 on success or a negative errno; partially initialized
+ * state is torn down via diag_mhi_exit() on failure.
+ */
+int diag_mhi_init(void)
+{
+	int i;
+	int err = 0;
+	struct diag_mhi_info *mhi_info = NULL;
+	char wq_name[DIAG_MHI_NAME_SZ + DIAG_MHI_STRING_SZ];
+
+	for (i = 0; i < NUM_MHI_DEV; i++) {
+		mhi_info = &diag_mhi[i];
+		spin_lock_init(&mhi_info->lock);
+		INIT_WORK(&(mhi_info->read_work), mhi_read_work_fn);
+		INIT_WORK(&(mhi_info->read_done_work), mhi_read_done_work_fn);
+		INIT_WORK(&(mhi_info->open_work), mhi_open_work_fn);
+		INIT_WORK(&(mhi_info->close_work), mhi_close_work_fn);
+		/*
+		 * Bound both copies by the full destination buffer. The
+		 * previous bounds (DIAG_MHI_STRING_SZ for strlcpy and
+		 * sizeof(mhi_info->name) for strlcat) truncated the
+		 * workqueue name even though wq_name has room for both.
+		 */
+		strlcpy(wq_name, "diag_mhi_", sizeof(wq_name));
+		strlcat(wq_name, mhi_info->name, sizeof(wq_name));
+		diagmem_init(driver, mhi_info->mempool);
+		mhi_info->mempool_init = 1;
+		mhi_info->mhi_wq = create_singlethread_workqueue(wq_name);
+		if (!mhi_info->mhi_wq) {
+			err = -ENOMEM;
+			goto fail;
+		}
+		err = diagfwd_bridge_register(mhi_info->dev_id, mhi_info->id,
+					      &diag_mhi_fwd_ops);
+		if (err) {
+			pr_err("diag: Unable to register MHI channel %d with bridge, err: %d\n",
+			       i, err);
+			goto fail;
+		}
+		err = diag_mhi_register_ch(mhi_info->id, &mhi_info->read_ch);
+		if (err) {
+			pr_err("diag: Unable to register MHI read channel for %d, err: %d\n",
+			       i, err);
+			goto fail;
+		}
+		err = diag_mhi_register_ch(mhi_info->id, &mhi_info->write_ch);
+		if (err) {
+			pr_err("diag: Unable to register MHI write channel for %d, err: %d\n",
+			       i, err);
+			goto fail;
+		}
+		DIAG_LOG(DIAG_DEBUG_BRIDGE, "mhi port %d is initialized\n", i);
+	}
+
+	return 0;
+fail:
+	diag_mhi_exit();
+	/* Propagate the actual failure instead of a blanket -ENOMEM. */
+	return err;
+}
+
+void diag_mhi_exit(void)
+{
+	int i;
+	struct diag_mhi_info *mhi_info = NULL;
+
+	for (i = 0; i < NUM_MHI_DEV; i++) {
+		mhi_info = &diag_mhi[i];
+		if (mhi_info->mhi_wq)
+			destroy_workqueue(mhi_info->mhi_wq);
+		mhi_close(mhi_info->id);
+		if (mhi_info->mempool_init)
+			diagmem_exit(driver, mhi_info->mempool);
+	}
+}
+
diff --git a/drivers/char/diag/diagfwd_mhi.h b/drivers/char/diag/diagfwd_mhi.h
new file mode 100644
index 0000000..a446697
--- /dev/null
+++ b/drivers/char/diag/diagfwd_mhi.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_MHI_H
+#define DIAGFWD_MHI_H
+
+#include "diagchar.h"
+#include <linux/msm_mhi.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/ipc_logging.h>
+#include <linux/msm_mhi.h>
+
+#define MHI_1			0
+#define MHI_DCI_1		1
+#define NUM_MHI_DEV		2
+
+#define TYPE_MHI_READ_CH	0
+#define TYPE_MHI_WRITE_CH	1
+
+#define DIAG_MHI_NAME_SZ	24
+
+struct diag_mhi_buf_tbl_t {
+	struct list_head link;
+	unsigned char *buf;
+	int len;
+};
+
+struct diag_mhi_ch_t {
+	uint8_t type;
+	u32 channel;
+	enum MHI_CLIENT_CHANNEL chan;
+	atomic_t opened;
+	spinlock_t lock;
+	struct mhi_client_info_t client_info;
+	struct mhi_client_handle *hdl;
+	struct list_head buf_tbl;
+};
+
+struct diag_mhi_info {
+	int id;
+	int dev_id;
+	int mempool;
+	int mempool_init;
+	int num_read;
+	uint8_t enabled;
+	char name[DIAG_MHI_NAME_SZ];
+	struct work_struct read_work;
+	struct work_struct read_done_work;
+	struct work_struct open_work;
+	struct work_struct close_work;
+	struct workqueue_struct *mhi_wq;
+	wait_queue_head_t mhi_wait_q;
+	struct diag_mhi_ch_t read_ch;
+	struct diag_mhi_ch_t write_ch;
+	spinlock_t lock;
+};
+
+extern struct diag_mhi_info diag_mhi[NUM_MHI_DEV];
+
+int diag_mhi_init(void);
+void diag_mhi_exit(void);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
new file mode 100644
index 0000000..4f7c1e0
--- /dev/null
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -0,0 +1,1250 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include "diagchar.h"
+#include "diagchar_hdlc.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_cntl.h"
+#include "diag_masks.h"
+#include "diag_dci.h"
+#include "diagfwd.h"
+#include "diagfwd_socket.h"
+#include "diag_mux.h"
+#include "diag_ipc_logging.h"
+#include "diagfwd_glink.h"
+
+struct data_header {
+	uint8_t control_char;
+	uint8_t version;
+	uint16_t length;
+};
+
+static struct diagfwd_info *early_init_info[NUM_TRANSPORT];
+
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info);
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info);
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info);
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info);
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len);
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len);
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+				  unsigned char *buf, int len);
+static void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info);
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info);
+struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+static struct diag_channel_ops data_ch_ops = {
+	.open = NULL,
+	.close = NULL,
+	.read_done = diagfwd_data_read_done
+};
+
+static struct diag_channel_ops cntl_ch_ops = {
+	.open = diagfwd_cntl_open,
+	.close = diagfwd_cntl_close,
+	.read_done = diagfwd_cntl_read_done
+};
+
+static struct diag_channel_ops dci_ch_ops = {
+	.open = diagfwd_dci_open,
+	.close = diagfwd_dci_close,
+	.read_done = diagfwd_dci_read_done
+};
+
+static void diagfwd_cntl_open(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+	diag_cntl_channel_open(fwd_info);
+}
+
+static void diagfwd_cntl_close(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+	diag_cntl_channel_close(fwd_info);
+}
+
+static void diagfwd_dci_open(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+			       DIAG_STATUS_OPEN, DCI_LOCAL_PROC);
+}
+
+static void diagfwd_dci_close(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	diag_dci_notify_client(PERIPHERAL_MASK(fwd_info->peripheral),
+			       DIAG_STATUS_CLOSED, DCI_LOCAL_PROC);
+}
+
+static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
+				  unsigned char *buf, int len)
+{
+	struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
+	struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
+	struct data_header *header;
+	int header_size = sizeof(struct data_header);
+	uint8_t *end_control_char = NULL;
+	uint8_t *payload = NULL;
+	uint8_t *temp_buf = NULL;
+	uint8_t *temp_encode_buf = NULL;
+	int src_pkt_len;
+	int encoded_pkt_length;
+	int max_size;
+	int total_processed = 0;
+	int bytes_remaining;
+	int err = 0;
+	uint8_t loop_count = 0;
+
+	if (!dest_buf || !dest_len || !buf)
+		return -EIO;
+
+	temp_buf = buf;
+	temp_encode_buf = dest_buf;
+	bytes_remaining = *dest_len;
+
+	while (total_processed < len) {
+		loop_count++;
+		header = (struct data_header *)temp_buf;
+		/* Perform initial error checking */
+		if (header->control_char != CONTROL_CHAR ||
+		    header->version != 1) {
+			err = -EINVAL;
+			break;
+		}
+
+		if (header->length >= bytes_remaining)
+			break;
+
+		payload = temp_buf + header_size;
+		end_control_char = payload + header->length;
+		if (*end_control_char != CONTROL_CHAR) {
+			err = -EINVAL;
+			break;
+		}
+
+		max_size = 2 * header->length + 3;
+		if (bytes_remaining < max_size) {
+			err = -EINVAL;
+			break;
+		}
+
+		/* Prepare for encoding the data */
+		send.state = DIAG_STATE_START;
+		send.pkt = payload;
+		send.last = (void *)(payload + header->length - 1);
+		send.terminate = 1;
+
+		enc.dest = temp_encode_buf;
+		enc.dest_last = (void *)(temp_encode_buf + max_size);
+		enc.crc = 0;
+		diag_hdlc_encode(&send, &enc);
+
+		/* Prepare for next packet */
+		src_pkt_len = (header_size + header->length + 1);
+		total_processed += src_pkt_len;
+		temp_buf += src_pkt_len;
+
+		encoded_pkt_length = (uint8_t *)enc.dest - temp_encode_buf;
+		bytes_remaining -= encoded_pkt_length;
+		temp_encode_buf = enc.dest;
+	}
+
+	*dest_len = (int)(temp_encode_buf - dest_buf);
+
+	return err;
+}
+
+static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
+{
+	uint32_t max_size = 0;
+	unsigned char *temp_buf = NULL;
+
+	if (!buf || len == 0)
+		return -EINVAL;
+
+	max_size = (2 * len) + 3;
+	if (max_size > PERIPHERAL_BUF_SZ) {
+		if (max_size > MAX_PERIPHERAL_HDLC_BUF_SZ) {
+			pr_err("diag: In %s, max_size is going beyond limit %d\n",
+			       __func__, max_size);
+			max_size = MAX_PERIPHERAL_HDLC_BUF_SZ;
+		}
+
+		if (buf->len < max_size) {
+			temp_buf = krealloc(buf->data, max_size +
+						APF_DIAG_PADDING,
+					    GFP_KERNEL);
+			if (!temp_buf)
+				return -ENOMEM;
+			buf->data = temp_buf;
+			buf->len = max_size;
+		}
+	}
+
+	return buf->len;
+}
+
+static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len)
+{
+	int err = 0;
+	int write_len = 0;
+	unsigned char *write_buf = NULL;
+	struct diagfwd_buf_t *temp_buf = NULL;
+	struct diag_md_session_t *session_info = NULL;
+	uint8_t hdlc_disabled = 0;
+
+	if (!fwd_info || !buf || len <= 0) {
+		diag_ws_release();
+		return;
+	}
+
+	switch (fwd_info->type) {
+	case TYPE_DATA:
+	case TYPE_CMD:
+		break;
+	default:
+		pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+				   __func__, fwd_info->type,
+				   fwd_info->peripheral);
+		diag_ws_release();
+		return;
+	}
+
+	mutex_lock(&driver->hdlc_disable_mutex);
+	mutex_lock(&fwd_info->data_mutex);
+	session_info = diag_md_session_get_peripheral(fwd_info->peripheral);
+	if (session_info)
+		hdlc_disabled = session_info->hdlc_disabled;
+	else
+		hdlc_disabled = driver->hdlc_disabled;
+
+	if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
+			temp_buf = fwd_info->buf_1;
+			write_buf = fwd_info->buf_1->data;
+		} else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf) {
+			temp_buf = fwd_info->buf_2;
+			write_buf = fwd_info->buf_2->data;
+		} else {
+			pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
+			       __func__, buf, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		write_len = len;
+	} else if (hdlc_disabled) {
+		/* The data is raw and and on APPS side HDLC is disabled */
+		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+			temp_buf = fwd_info->buf_1;
+		} else if (fwd_info->buf_2 &&
+			   fwd_info->buf_2->data_raw == buf) {
+			temp_buf = fwd_info->buf_2;
+		} else {
+			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+			       __func__, buf, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		if (len > PERIPHERAL_BUF_SZ) {
+			pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
+			       __func__, len, fwd_info->peripheral,
+			       fwd_info->type);
+			goto end;
+		}
+		write_len = len;
+		write_buf = buf;
+	} else {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) {
+			temp_buf = fwd_info->buf_1;
+		} else if (fwd_info->buf_2 &&
+			   fwd_info->buf_2->data_raw == buf) {
+			temp_buf = fwd_info->buf_2;
+		} else {
+			pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+				__func__, buf, fwd_info->peripheral,
+				fwd_info->type);
+			goto end;
+		}
+		write_len = check_bufsize_for_encoding(temp_buf, len);
+		if (write_len <= 0) {
+			pr_err("diag: error in checking buf for encoding\n");
+			goto end;
+		}
+		write_buf = temp_buf->data;
+		err = diag_add_hdlc_encoding(write_buf, &write_len, buf, len);
+		if (err) {
+			pr_err("diag: error in adding hdlc encoding\n");
+			goto end;
+		}
+	}
+
+	if (write_len > 0) {
+		err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
+				     temp_buf->ctxt);
+		if (err) {
+			pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
+					   __func__, err);
+			goto end;
+		}
+	}
+	mutex_unlock(&fwd_info->data_mutex);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+	diagfwd_queue_read(fwd_info);
+	return;
+
+end:
+	diag_ws_release();
+	mutex_unlock(&fwd_info->data_mutex);
+	mutex_unlock(&driver->hdlc_disable_mutex);
+	if (temp_buf) {
+		diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+				   GET_BUF_NUM(temp_buf->ctxt));
+	}
+	diagfwd_queue_read(fwd_info);
+}
+
+/*
+ * Read-done callback for a peripheral's control (CNTL) channel.
+ * Hands the payload to the control packet parser and then re-queues
+ * reads on the control, data and command channels of this peripheral,
+ * since a control packet (e.g. feature mask) may unblock the others.
+ */
+static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
+				   unsigned char *buf, int len)
+{
+	if (!fwd_info) {
+		diag_ws_release();
+		return;
+	}
+
+	/* This handler is wired only to CNTL channels; anything else is a bug */
+	if (fwd_info->type != TYPE_CNTL) {
+		pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+		       __func__, fwd_info->type, fwd_info->peripheral);
+		diag_ws_release();
+		return;
+	}
+
+	diag_ws_on_read(DIAG_WS_MUX, len);
+	diag_cntl_process_read_data(fwd_info, buf, len);
+	/*
+	 * Control packets are not consumed by the clients. Mimic
+	 * consumption by setting and clearing the wakeup source copy_count
+	 * explicitly.
+	 */
+	diag_ws_on_copy_fail(DIAG_WS_MUX);
+	/* Reset the buffer in_busy value after processing the data */
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+	diagfwd_queue_read(fwd_info);
+	diagfwd_queue_read(&peripheral_info[TYPE_DATA][fwd_info->peripheral]);
+	diagfwd_queue_read(&peripheral_info[TYPE_CMD][fwd_info->peripheral]);
+}
+
+/*
+ * Read-done callback for a peripheral's DCI and DCI command channels.
+ * Forwards the payload to the DCI layer, marks the read buffer free
+ * again and queues the next read.
+ */
+static void diagfwd_dci_read_done(struct diagfwd_info *fwd_info,
+				  unsigned char *buf, int len)
+{
+	if (!fwd_info)
+		return;
+
+	switch (fwd_info->type) {
+	case TYPE_DCI:
+	case TYPE_DCI_CMD:
+		break;
+	default:
+		pr_err("diag: In %s, invalid type %d for peripheral %d\n",
+		       __func__, fwd_info->type, fwd_info->peripheral);
+		return;
+	}
+
+	diag_dci_process_peripheral_data(fwd_info, (void *)buf, len);
+	/* Reset the buffer in_busy value after processing the data */
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+	diagfwd_queue_read(fwd_info);
+}
+
+/*
+ * Clear the in_busy flag of whichever channel buffer owns @buf, so the
+ * buffer can be reused after a failed/empty read. When the peripheral
+ * supports APPS-side HDLC encoding, reads land in data_raw rather than
+ * data, so the match is done against the corresponding pointer.
+ */
+static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
+				  unsigned char *buf)
+{
+	if (!fwd_info || !buf)
+		return;
+
+	if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data == buf)
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+		else if (fwd_info->buf_2 && fwd_info->buf_2->data == buf)
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+	} else {
+		if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf)
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
+		else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
+	}
+}
+
+/*
+ * Allocate and initialize the forwarding state for every peripheral:
+ * the per-transport early-init control channel tables and the
+ * peripheral_info[type][peripheral] matrix, then bring up the
+ * transport layers (sockets and, unconditionally, glink).
+ *
+ * Returns 0 on success, -ENOMEM if a transport table cannot be
+ * allocated (in which case any tables already allocated are freed
+ * again so a failed init does not leak memory).
+ */
+int diagfwd_peripheral_init(void)
+{
+	uint8_t peripheral;
+	uint8_t transport;
+	uint8_t type;
+	struct diagfwd_info *fwd_info = NULL;
+
+	for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+		/* kcalloc checks the n * size multiplication for overflow */
+		early_init_info[transport] = kcalloc(NUM_PERIPHERALS,
+				sizeof(struct diagfwd_info),
+				GFP_KERNEL);
+		if (!early_init_info[transport]) {
+			/* Roll back tables allocated by earlier iterations */
+			while (transport-- > 0) {
+				kfree(early_init_info[transport]);
+				early_init_info[transport] = NULL;
+			}
+			return -ENOMEM;
+		}
+		kmemleak_not_leak(early_init_info[transport]);
+	}
+
+	/* Early-init entries are always control channels, one per transport */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+			fwd_info = &early_init_info[transport][peripheral];
+			fwd_info->peripheral = peripheral;
+			fwd_info->type = TYPE_CNTL;
+			fwd_info->transport = transport;
+			fwd_info->ctxt = NULL;
+			fwd_info->p_ops = NULL;
+			fwd_info->ch_open = 0;
+			fwd_info->inited = 1;
+			fwd_info->read_bytes = 0;
+			fwd_info->write_bytes = 0;
+			spin_lock_init(&fwd_info->buf_lock);
+			spin_lock_init(&fwd_info->write_buf_lock);
+			mutex_init(&fwd_info->data_mutex);
+		}
+	}
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		for (type = 0; type < NUM_TYPES; type++) {
+			fwd_info = &peripheral_info[type][peripheral];
+			fwd_info->peripheral = peripheral;
+			fwd_info->type = type;
+			fwd_info->ctxt = NULL;
+			fwd_info->p_ops = NULL;
+			fwd_info->ch_open = 0;
+			fwd_info->read_bytes = 0;
+			fwd_info->write_bytes = 0;
+			spin_lock_init(&fwd_info->buf_lock);
+			spin_lock_init(&fwd_info->write_buf_lock);
+			mutex_init(&fwd_info->data_mutex);
+			/*
+			 * This state shouldn't be set for Control channels
+			 * during initialization. This is set when the feature
+			 * mask is received for the first time.
+			 */
+			if (type != TYPE_CNTL)
+				fwd_info->inited = 1;
+		}
+		/* Convenience shortcuts into peripheral_info, one per type */
+		driver->diagfwd_data[peripheral] =
+			&peripheral_info[TYPE_DATA][peripheral];
+		driver->diagfwd_cntl[peripheral] =
+			&peripheral_info[TYPE_CNTL][peripheral];
+		driver->diagfwd_dci[peripheral] =
+			&peripheral_info[TYPE_DCI][peripheral];
+		driver->diagfwd_cmd[peripheral] =
+			&peripheral_info[TYPE_CMD][peripheral];
+		driver->diagfwd_dci_cmd[peripheral] =
+			&peripheral_info[TYPE_DCI_CMD][peripheral];
+	}
+
+	if (driver->supports_sockets)
+		diag_socket_init();
+	diag_glink_init();
+
+	return 0;
+}
+
+/*
+ * Tear down all forwarding state created by diagfwd_peripheral_init():
+ * close the socket layer, release every channel's buffers, clear the
+ * driver shortcut pointers and free the per-transport early-init tables.
+ */
+void diagfwd_peripheral_exit(void)
+{
+	uint8_t peripheral;
+	uint8_t transport;
+	uint8_t type;
+	struct diagfwd_info *fwd_info = NULL;
+
+	diag_socket_exit();
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		for (type = 0; type < NUM_TYPES; type++) {
+			fwd_info = &peripheral_info[type][peripheral];
+			fwd_info->ctxt = NULL;
+			fwd_info->p_ops = NULL;
+			fwd_info->ch_open = 0;
+			diagfwd_buffers_exit(fwd_info);
+		}
+	}
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		driver->diagfwd_data[peripheral] = NULL;
+		driver->diagfwd_cntl[peripheral] = NULL;
+		driver->diagfwd_dci[peripheral] = NULL;
+		driver->diagfwd_cmd[peripheral] = NULL;
+		driver->diagfwd_dci_cmd[peripheral] = NULL;
+	}
+
+	/*
+	 * init allocates one table per transport slot into the pre-existing
+	 * early_init_info array (it is indexed without being allocated
+	 * itself). Free each per-transport table rather than passing the
+	 * array to kfree, which would both leak the tables and hand kfree
+	 * a pointer it never allocated.
+	 */
+	for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+		kfree(early_init_info[transport]);
+		early_init_info[transport] = NULL;
+	}
+}
+
+/*
+ * Register a transport-provided control channel for @peripheral.
+ * Stores the transport context and ops in the matching early-init slot
+ * and returns that slot to the caller through @fwd_ctxt.
+ *
+ * Returns 0 on success, -EIO for missing pointers, -EINVAL for
+ * out-of-range transport/peripheral.
+ */
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+			  struct diag_peripheral_ops *ops,
+			  struct diagfwd_info **fwd_ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	/* fwd_ctxt is dereferenced below, so reject a NULL out-pointer too */
+	if (!ctxt || !ops || !fwd_ctxt)
+		return -EIO;
+
+	if (transport >= NUM_TRANSPORT || peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	fwd_info = &early_init_info[transport][peripheral];
+	*fwd_ctxt = fwd_info;
+	fwd_info->ctxt = ctxt;
+	fwd_info->p_ops = ops;
+	fwd_info->c_ops = &cntl_ch_ops;
+
+	return 0;
+}
+
+/*
+ * Register a transport-provided data/command/DCI channel for
+ * @peripheral. Validates the type before touching any state so an
+ * invalid registration (e.g. TYPE_CNTL, which must go through
+ * diagfwd_cntl_register) leaves the slot untouched. If the channel was
+ * already marked open (diag_state), propagate the open to the
+ * peripheral immediately, since registration can happen late (sockets).
+ *
+ * Returns 0 on success, -EIO for bad pointers/ranges, -EINVAL for an
+ * unsupported channel type.
+ */
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+		     void *ctxt, struct diag_peripheral_ops *ops,
+		     struct diagfwd_info **fwd_ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+	struct diag_channel_ops *ch_ops = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES ||
+	    !ctxt || !ops || !fwd_ctxt || transport >= NUM_TRANSPORT) {
+		pr_err("diag: In %s, returning error\n", __func__);
+		return -EIO;
+	}
+
+	/* Resolve channel ops first: fail before mutating the slot */
+	switch (type) {
+	case TYPE_DATA:
+	case TYPE_CMD:
+		ch_ops = &data_ch_ops;
+		break;
+	case TYPE_DCI:
+	case TYPE_DCI_CMD:
+		ch_ops = &dci_ch_ops;
+		break;
+	default:
+		pr_err("diag: In %s, invalid type: %d\n", __func__, type);
+		return -EINVAL;
+	}
+
+	fwd_info = &peripheral_info[type][peripheral];
+	*fwd_ctxt = fwd_info;
+	fwd_info->ctxt = ctxt;
+	fwd_info->p_ops = ops;
+	fwd_info->c_ops = ch_ops;
+	fwd_info->transport = transport;
+	fwd_info->ch_open = 0;
+
+	if (atomic_read(&fwd_info->opened) &&
+	    fwd_info->p_ops && fwd_info->p_ops->open) {
+		/*
+		 * The registration can happen late, like in the case of
+		 * sockets. fwd_info->opened reflects diag_state. Propagate the
+		 * state to the peripherals.
+		 */
+		fwd_info->p_ops->open(fwd_info->ctxt);
+	}
+
+	return 0;
+}
+
+/*
+ * Undo diagfwd_register() for the given channel: clear the transport
+ * context/ops, release the channel buffers and drop the matching
+ * driver shortcut pointer. @ctxt must match the registered context,
+ * otherwise the call is ignored with an error log.
+ */
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !ctxt)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	if (fwd_info->ctxt != ctxt) {
+		pr_err("diag: In %s, unable to find a match for p: %d t: %d\n",
+		       __func__, peripheral, type);
+		return;
+	}
+	fwd_info->ctxt = NULL;
+	fwd_info->p_ops = NULL;
+	fwd_info->ch_open = 0;
+	diagfwd_buffers_exit(fwd_info);
+
+	switch (type) {
+	case TYPE_DATA:
+		driver->diagfwd_data[peripheral] = NULL;
+		break;
+	case TYPE_CNTL:
+		driver->diagfwd_cntl[peripheral] = NULL;
+		break;
+	case TYPE_DCI:
+		driver->diagfwd_dci[peripheral] = NULL;
+		break;
+	case TYPE_CMD:
+		driver->diagfwd_cmd[peripheral] = NULL;
+		break;
+	case TYPE_DCI_CMD:
+		driver->diagfwd_dci_cmd[peripheral] = NULL;
+		break;
+	}
+}
+
+/*
+ * Called when one transport (socket or glink) closes for @peripheral.
+ * Migrates the control channel to the other transport: closes the dying
+ * transport's channel, copies the surviving early-init state into the
+ * live peripheral_info CNTL slot, lets the surviving transport
+ * invalidate/refresh its context, then re-initializes the closed
+ * transport so it can come back later. All under diagfwd_channel_mutex
+ * to keep the copy atomic with respect to channel callbacks.
+ */
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral)
+{
+	struct diagfwd_info *fwd_info = NULL;
+	struct diagfwd_info *dest_info = NULL;
+	int (*init_fn)(uint8_t) = NULL;
+	void (*invalidate_fn)(void *, struct diagfwd_info *) = NULL;
+	int (*check_channel_state)(void *) = NULL;
+	uint8_t transport_open = 0;
+	int i = 0;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	/* Pick the surviving transport and its helper functions */
+	switch (transport) {
+	case TRANSPORT_GLINK:
+		transport_open = TRANSPORT_SOCKET;
+		init_fn = diag_socket_init_peripheral;
+		invalidate_fn = diag_socket_invalidate;
+		check_channel_state = diag_socket_check_state;
+		break;
+	case TRANSPORT_SOCKET:
+		transport_open = TRANSPORT_GLINK;
+		init_fn = diag_glink_init_peripheral;
+		invalidate_fn = diag_glink_invalidate;
+		check_channel_state = diag_glink_check_state;
+		break;
+	default:
+		return;
+
+	}
+
+	mutex_lock(&driver->diagfwd_channel_mutex);
+	/* Close the transport that is going away */
+	fwd_info = &early_init_info[transport][peripheral];
+	if (fwd_info->p_ops && fwd_info->p_ops->close)
+		fwd_info->p_ops->close(fwd_info->ctxt);
+	/* Promote the surviving transport's state into the live CNTL slot */
+	fwd_info = &early_init_info[transport_open][peripheral];
+	dest_info = &peripheral_info[TYPE_CNTL][peripheral];
+	dest_info->inited = 1;
+	dest_info->ctxt = fwd_info->ctxt;
+	dest_info->p_ops = fwd_info->p_ops;
+	dest_info->c_ops = fwd_info->c_ops;
+	dest_info->ch_open = fwd_info->ch_open;
+	dest_info->read_bytes = fwd_info->read_bytes;
+	dest_info->write_bytes = fwd_info->write_bytes;
+	dest_info->inited = fwd_info->inited;
+	dest_info->buf_1 = fwd_info->buf_1;
+	dest_info->buf_2 = fwd_info->buf_2;
+	dest_info->transport = fwd_info->transport;
+	invalidate_fn(dest_info->ctxt, dest_info);
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++)
+		dest_info->buf_ptr[i] = fwd_info->buf_ptr[i];
+	/* If the surviving channel is not already up, open it late */
+	if (!check_channel_state(dest_info->ctxt))
+		diagfwd_late_open(dest_info);
+	diagfwd_cntl_open(dest_info);
+	/* Re-arm the closed transport so it can reconnect later */
+	init_fn(peripheral);
+	mutex_unlock(&driver->diagfwd_channel_mutex);
+	diagfwd_queue_read(&peripheral_info[TYPE_DATA][peripheral]);
+	diagfwd_queue_read(&peripheral_info[TYPE_CMD][peripheral]);
+}
+
+/*
+ * Claim a free write buffer for @fwd_info under write_buf_lock.
+ * Returns the buffer's data pointer, or NULL if all buffers are busy
+ * or the free buffer has no data allocated.
+ *
+ * Fix: the original returned NULL from inside the critical section
+ * when ->data was NULL, leaking the spinlock (with IRQs still
+ * disabled) and leaving in_busy set. Release the claim and fall
+ * through to the unlock instead.
+ */
+void *diagfwd_request_write_buf(struct diagfwd_info *fwd_info)
+{
+	void *buf = NULL;
+	int index;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+	for (index = 0 ; index < NUM_WRITE_BUFFERS; index++) {
+		if (!atomic_read(&(fwd_info->buf_ptr[index]->in_busy))) {
+			atomic_set(&(fwd_info->buf_ptr[index]->in_busy), 1);
+			buf = fwd_info->buf_ptr[index]->data;
+			if (!buf) {
+				/* No backing storage: release the claim */
+				atomic_set(
+					&(fwd_info->buf_ptr[index]->in_busy),
+					0);
+			}
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+	return buf;
+}
+
+/*
+ * Write @len bytes to the given peripheral channel through its
+ * transport ops, retrying up to 3 times on transient errors. Command
+ * traffic is silently dropped (returns 0) until the feature mask
+ * handshake completes, and is redirected onto the data/DCI channel if
+ * the peripheral does not support a separate command/response channel.
+ * GLINK transports copy the payload into a claimed write buffer first;
+ * that buffer is released again if the write ultimately fails.
+ *
+ * Returns 0 on success or a negative errno from the transport.
+ */
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
+{
+	struct diagfwd_info *fwd_info = NULL;
+	int err = 0;
+	uint8_t retry_count = 0;
+	uint8_t max_retries = 3;
+	void *buf_ptr = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return -EINVAL;
+
+	if (type == TYPE_CMD || type == TYPE_DCI_CMD) {
+		if (!driver->feature[peripheral].rcvd_feature_mask ||
+			!driver->feature[peripheral].sent_feature_mask) {
+			pr_debug_ratelimited("diag: In %s, feature mask for peripheral: %d not received or sent yet\n",
+					     __func__, peripheral);
+			return 0;
+		}
+		/* No dedicated cmd/rsp channel: fold into data/DCI channel */
+		if (!driver->feature[peripheral].separate_cmd_rsp)
+			type = (type == TYPE_CMD) ? TYPE_DATA : TYPE_DCI;
+	}
+
+	fwd_info = &peripheral_info[type][peripheral];
+	if (!fwd_info->inited || !atomic_read(&fwd_info->opened))
+		return -ENODEV;
+
+	if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
+		return -EIO;
+
+	/* GLINK requires the payload to live in one of our write buffers */
+	if (fwd_info->transport == TRANSPORT_GLINK) {
+		buf_ptr = diagfwd_request_write_buf(fwd_info);
+		if (buf_ptr)
+			memcpy(buf_ptr, buf, len);
+		else {
+			DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+				 "diag: buffer not found for writing\n");
+			return -EIO;
+		}
+	} else
+		buf_ptr = buf;
+
+	/* Retry transient failures; -ENODEV means the channel is gone */
+	while (retry_count < max_retries) {
+		err = 0;
+		err = fwd_info->p_ops->write(fwd_info->ctxt, buf_ptr, len);
+		if (err && err != -ENODEV) {
+			usleep_range(100000, 101000);
+			retry_count++;
+			continue;
+		}
+		break;
+	}
+
+	if (!err)
+		fwd_info->write_bytes += len;
+	else
+		/* Failed GLINK write: release the claimed write buffer */
+		if (fwd_info->transport == TRANSPORT_GLINK)
+			diagfwd_write_buffer_done(fwd_info, buf_ptr);
+	return err;
+}
+
+/*
+ * Common open path: mark the channel logically opened, and if it has
+ * been inited, free both read buffers, notify the transport and queue
+ * the first read. Channels not yet inited only record the open intent
+ * (picked up later by diagfwd_register / diagfwd_channel_open).
+ */
+static void __diag_fwd_open(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	atomic_set(&fwd_info->opened, 1);
+	if (!fwd_info->inited)
+		return;
+
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+	if (fwd_info->buf_2)
+		atomic_set(&fwd_info->buf_2->in_busy, 0);
+
+	if (fwd_info->p_ops && fwd_info->p_ops->open)
+		fwd_info->p_ops->open(fwd_info->ctxt);
+
+	diagfwd_queue_read(fwd_info);
+}
+
+/*
+ * Open the early-init (pre feature mask) control channels of
+ * @peripheral on every transport.
+ */
+void diagfwd_early_open(uint8_t peripheral)
+{
+	uint8_t transport = 0;
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return;
+
+	for (transport = 0; transport < NUM_TRANSPORT; transport++) {
+		fwd_info = &early_init_info[transport][peripheral];
+		__diag_fwd_open(fwd_info);
+	}
+}
+
+/* Open one specific channel (type) of @peripheral. */
+void diagfwd_open(uint8_t peripheral, uint8_t type)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	__diag_fwd_open(fwd_info);
+}
+
+/* Open a channel whose registration arrived after diag was opened. */
+void diagfwd_late_open(struct diagfwd_info *fwd_info)
+{
+	__diag_fwd_open(fwd_info);
+}
+
+/*
+ * Close one channel of @peripheral: clear the opened flag, notify the
+ * transport and mark the read buffers busy so no further reads are
+ * issued until the channel is reopened.
+ */
+void diagfwd_close(uint8_t peripheral, uint8_t type)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	atomic_set(&fwd_info->opened, 0);
+	if (!fwd_info->inited)
+		return;
+
+	if (fwd_info->p_ops && fwd_info->p_ops->close)
+		fwd_info->p_ops->close(fwd_info->ctxt);
+
+	if (fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 1);
+	/*
+	 * Only Data channels have two buffers. Set both the buffers
+	 * to busy on close.
+	 */
+	if (fwd_info->buf_2)
+		atomic_set(&fwd_info->buf_2->in_busy, 1);
+}
+
+/*
+ * Called by a transport when its underlying channel comes up.
+ * Allocates read/write buffers, runs the channel-type open hook, frees
+ * all write buffers and queues the first read. If diag itself is
+ * already open (fwd_info->opened), the open is propagated to the
+ * peripheral right away.
+ *
+ * Returns 0 on success (or if already open), -EIO/-EINVAL on bad or
+ * uninitialized channel.
+ */
+int diagfwd_channel_open(struct diagfwd_info *fwd_info)
+{
+	int i;
+
+	if (!fwd_info)
+		return -EIO;
+
+	if (!fwd_info->inited) {
+		pr_debug("diag: In %s, channel is not inited, p: %d, t: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type);
+		return -EINVAL;
+	}
+
+	if (fwd_info->ch_open) {
+		pr_debug("diag: In %s, channel is already open, p: %d, t: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type);
+		return 0;
+	}
+
+	fwd_info->ch_open = 1;
+	diagfwd_buffers_init(fwd_info);
+	diagfwd_write_buffers_init(fwd_info);
+	/* fwd_info is non-NULL (checked above); only c_ops may be unset */
+	if (fwd_info->c_ops && fwd_info->c_ops->open)
+		fwd_info->c_ops->open(fwd_info);
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+		if (fwd_info->buf_ptr[i])
+			atomic_set(&fwd_info->buf_ptr[i]->in_busy, 0);
+	}
+	diagfwd_queue_read(fwd_info);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered opened\n",
+		 fwd_info->peripheral, fwd_info->type);
+
+	if (atomic_read(&fwd_info->opened)) {
+		if (fwd_info->p_ops && fwd_info->p_ops->open)
+			fwd_info->p_ops->open(fwd_info->ctxt);
+	}
+
+	return 0;
+}
+
+/*
+ * Called by a transport when its underlying channel goes down.
+ * Runs the channel-type close hook, frees the read buffers for reuse
+ * and marks all write buffers busy so nothing is written to the dead
+ * channel. Returns 0, or -EIO on a NULL channel.
+ */
+int diagfwd_channel_close(struct diagfwd_info *fwd_info)
+{
+	int i;
+
+	if (!fwd_info)
+		return -EIO;
+
+	fwd_info->ch_open = 0;
+	/* fwd_info is non-NULL (checked above); only c_ops may be unset */
+	if (fwd_info->c_ops && fwd_info->c_ops->close)
+		fwd_info->c_ops->close(fwd_info);
+
+	if (fwd_info->buf_1 && fwd_info->buf_1->data)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+	if (fwd_info->buf_2 && fwd_info->buf_2->data)
+		atomic_set(&fwd_info->buf_2->in_busy, 0);
+
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+		if (fwd_info->buf_ptr[i])
+			atomic_set(&fwd_info->buf_ptr[i]->in_busy, 1);
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n",
+		 fwd_info->peripheral, fwd_info->type);
+
+	return 0;
+}
+
+/*
+ * Completion callback for a channel read issued by
+ * diagfwd_channel_read(). Dispatches the payload to the channel-type
+ * read_done hook and accounts the bytes. Returns 0, or -EIO on a NULL
+ * channel.
+ */
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+			      unsigned char *buf, uint32_t len)
+{
+	if (!fwd_info) {
+		diag_ws_release();
+		return -EIO;
+	}
+
+	/*
+	 * Diag peripheral layers should send len as 0 if there is any error
+	 * in reading data from the transport. Use this information to reset the
+	 * in_busy flags. No need to queue read in this case.
+	 */
+	if (len == 0) {
+		diagfwd_reset_buffers(fwd_info, buf);
+		diag_ws_release();
+		return 0;
+	}
+
+	/* fwd_info is non-NULL (checked above); only c_ops may be unset */
+	if (fwd_info->c_ops && fwd_info->c_ops->read_done)
+		fwd_info->c_ops->read_done(fwd_info, buf, len);
+	fwd_info->read_bytes += len;
+
+	return 0;
+}
+
+/*
+ * Called when the mux layer has consumed a channel buffer. @ctxt is
+ * the buffer number (1 or 2) extracted from the buffer context; the
+ * matching buffer is freed and the next read is queued.
+ */
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
+{
+	struct diagfwd_info *fwd_info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+		return;
+
+	fwd_info = &peripheral_info[type][peripheral];
+	if (ctxt == 1 && fwd_info->buf_1)
+		atomic_set(&fwd_info->buf_1->in_busy, 0);
+	else if (ctxt == 2 && fwd_info->buf_2)
+		atomic_set(&fwd_info->buf_2->in_busy, 0);
+	else
+		pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
+
+	diagfwd_queue_read(fwd_info);
+}
+
+/*
+ * Release the write buffer whose data pointer is @ptr, making it
+ * claimable again by diagfwd_request_write_buf(). Returns 1 if a
+ * matching buffer was found, 0 otherwise.
+ *
+ * Fix: guard against NULL buf_ptr[] entries — write buffers are only
+ * allocated once the channel opens, and every other buf_ptr loop in
+ * this file carries the same NULL check.
+ */
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr)
+{
+	int found = 0;
+	int index = 0;
+	unsigned long flags;
+
+	if (!fwd_info || !ptr)
+		return found;
+	spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+	for (index = 0; index < NUM_WRITE_BUFFERS; index++) {
+		if (fwd_info->buf_ptr[index] &&
+		    fwd_info->buf_ptr[index]->data == ptr) {
+			atomic_set(&fwd_info->buf_ptr[index]->in_busy, 0);
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+	return found;
+}
+
+/*
+ * Issue a read on the first free channel buffer. The buffer is marked
+ * busy before the read is handed to the transport; on any failure the
+ * busy flag and the wakeup source are released again. When the
+ * peripheral supports APPS-side HDLC encoding on data/cmd channels,
+ * the raw (unencoded) buffer is used instead of the HDLC one.
+ */
+void diagfwd_channel_read(struct diagfwd_info *fwd_info)
+{
+	int err = 0;
+	uint32_t read_len = 0;
+	unsigned char *read_buf = NULL;
+	struct diagfwd_buf_t *temp_buf = NULL;
+
+	if (!fwd_info) {
+		diag_ws_release();
+		return;
+	}
+
+	if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+		pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d  ch_open: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type,
+			 fwd_info->inited, atomic_read(&fwd_info->opened),
+			 fwd_info->ch_open);
+		diag_ws_release();
+		return;
+	}
+
+	if (fwd_info->buf_1 && !atomic_read(&fwd_info->buf_1->in_busy)) {
+		temp_buf = fwd_info->buf_1;
+		atomic_set(&temp_buf->in_busy, 1);
+		if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+		    (fwd_info->type == TYPE_DATA ||
+		     fwd_info->type == TYPE_CMD)) {
+			read_buf = fwd_info->buf_1->data_raw;
+			read_len = fwd_info->buf_1->len_raw;
+		} else {
+			read_buf = fwd_info->buf_1->data;
+			read_len = fwd_info->buf_1->len;
+		}
+	} else if (fwd_info->buf_2 && !atomic_read(&fwd_info->buf_2->in_busy)) {
+		temp_buf = fwd_info->buf_2;
+		atomic_set(&temp_buf->in_busy, 1);
+		if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+		    (fwd_info->type == TYPE_DATA ||
+		     fwd_info->type == TYPE_CMD)) {
+			read_buf = fwd_info->buf_2->data_raw;
+			read_len = fwd_info->buf_2->len_raw;
+		} else {
+			read_buf = fwd_info->buf_2->data;
+			read_len = fwd_info->buf_2->len;
+		}
+	} else {
+		pr_debug("diag: In %s, both buffers are empty for p: %d, t: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type);
+	}
+
+	if (!read_buf) {
+		diag_ws_release();
+		return;
+	}
+
+	if (!(fwd_info->p_ops && fwd_info->p_ops->read && fwd_info->ctxt))
+		goto fail_return;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "issued a read p: %d t: %d buf: %pK\n",
+		 fwd_info->peripheral, fwd_info->type, read_buf);
+	err = fwd_info->p_ops->read(fwd_info->ctxt, read_buf, read_len);
+	if (err)
+		goto fail_return;
+
+	return;
+
+fail_return:
+	/* read_buf != NULL here implies temp_buf was set above */
+	diag_ws_release();
+	atomic_set(&temp_buf->in_busy, 0);
+}
+
+/*
+ * Ask the transport to schedule a read on this channel, if the channel
+ * is inited, opened, and (for non-control channels) the peripheral's
+ * feature mask has been received.
+ */
+static void diagfwd_queue_read(struct diagfwd_info *fwd_info)
+{
+	if (!fwd_info)
+		return;
+
+	if (!fwd_info->inited || !atomic_read(&fwd_info->opened)) {
+		pr_debug("diag: In %s, p: %d, t: %d, inited: %d, opened: %d  ch_open: %d\n",
+			 __func__, fwd_info->peripheral, fwd_info->type,
+			 fwd_info->inited, atomic_read(&fwd_info->opened),
+			 fwd_info->ch_open);
+		return;
+	}
+
+	/*
+	 * Don't queue a read on the data and command channels before receiving
+	 * the feature mask from the peripheral. We won't know which buffer to
+	 * use - HDLC or non HDLC buffer for reading.
+	 */
+	if ((!driver->feature[fwd_info->peripheral].rcvd_feature_mask) &&
+	    (fwd_info->type != TYPE_CNTL)) {
+		return;
+	}
+
+	if (fwd_info->p_ops && fwd_info->p_ops->queue_read && fwd_info->ctxt)
+		fwd_info->p_ops->queue_read(fwd_info->ctxt);
+}
+
+/*
+ * Lazily allocate the channel's read buffers under buf_lock:
+ * buf_1 for every channel, buf_2 additionally for TYPE_DATA, and the
+ * data_raw companions for data/cmd channels when APPS-side HDLC
+ * encoding is supported. GFP_ATOMIC is used because allocation
+ * happens inside the spinlock. On any failure all buffers of the
+ * channel are torn down again.
+ */
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
+{
+	unsigned long flags;
+
+	if (!fwd_info)
+		return;
+
+	if (!fwd_info->inited) {
+		pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+		       __func__, fwd_info->peripheral, fwd_info->type);
+		return;
+	}
+
+	spin_lock_irqsave(&fwd_info->buf_lock, flags);
+	if (!fwd_info->buf_1) {
+		fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t),
+					  GFP_ATOMIC);
+		if (!fwd_info->buf_1)
+			goto err;
+		kmemleak_not_leak(fwd_info->buf_1);
+	}
+	if (!fwd_info->buf_1->data) {
+		fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ +
+					APF_DIAG_PADDING,
+					GFP_ATOMIC);
+		if (!fwd_info->buf_1->data)
+			goto err;
+		fwd_info->buf_1->len = PERIPHERAL_BUF_SZ;
+		kmemleak_not_leak(fwd_info->buf_1->data);
+		/* ctxt encodes peripheral/type/buffer-number for write_done */
+		fwd_info->buf_1->ctxt = SET_BUF_CTXT(fwd_info->peripheral,
+						     fwd_info->type, 1);
+	}
+
+	/* Only data channels double-buffer their reads */
+	if (fwd_info->type == TYPE_DATA) {
+		if (!fwd_info->buf_2) {
+			fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t),
+					      GFP_ATOMIC);
+			if (!fwd_info->buf_2)
+				goto err;
+			kmemleak_not_leak(fwd_info->buf_2);
+		}
+
+		if (!fwd_info->buf_2->data) {
+			fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ +
+							APF_DIAG_PADDING,
+						    GFP_ATOMIC);
+			if (!fwd_info->buf_2->data)
+				goto err;
+			fwd_info->buf_2->len = PERIPHERAL_BUF_SZ;
+			kmemleak_not_leak(fwd_info->buf_2->data);
+			fwd_info->buf_2->ctxt = SET_BUF_CTXT(
+							fwd_info->peripheral,
+							fwd_info->type, 2);
+		}
+
+		if (driver->supports_apps_hdlc_encoding) {
+			/* In support of hdlc encoding */
+			if (!fwd_info->buf_1->data_raw) {
+				fwd_info->buf_1->data_raw =
+					kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+						GFP_ATOMIC);
+				if (!fwd_info->buf_1->data_raw)
+					goto err;
+				fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
+				kmemleak_not_leak(fwd_info->buf_1->data_raw);
+			}
+			if (!fwd_info->buf_2->data_raw) {
+				fwd_info->buf_2->data_raw =
+					kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+						GFP_ATOMIC);
+				if (!fwd_info->buf_2->data_raw)
+					goto err;
+				fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ;
+				kmemleak_not_leak(fwd_info->buf_2->data_raw);
+			}
+		}
+	}
+
+	if (fwd_info->type == TYPE_CMD && driver->supports_apps_hdlc_encoding) {
+		/* In support of hdlc encoding */
+		if (!fwd_info->buf_1->data_raw) {
+			fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
+						APF_DIAG_PADDING,
+							GFP_ATOMIC);
+			if (!fwd_info->buf_1->data_raw)
+				goto err;
+			fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
+			kmemleak_not_leak(fwd_info->buf_1->data_raw);
+		}
+	}
+
+	spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+	return;
+
+err:
+	spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+	diagfwd_buffers_exit(fwd_info);
+}
+
+/*
+ * Free both read buffers (and their raw companions) of a channel,
+ * nulling each pointer so diagfwd_buffers_init() can re-allocate them.
+ */
+static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
+{
+	unsigned long flags;
+
+	if (!fwd_info)
+		return;
+
+	spin_lock_irqsave(&fwd_info->buf_lock, flags);
+	if (fwd_info->buf_1) {
+		kfree(fwd_info->buf_1->data);
+		fwd_info->buf_1->data = NULL;
+		kfree(fwd_info->buf_1->data_raw);
+		fwd_info->buf_1->data_raw = NULL;
+		kfree(fwd_info->buf_1);
+		fwd_info->buf_1 = NULL;
+	}
+	if (fwd_info->buf_2) {
+		kfree(fwd_info->buf_2->data);
+		fwd_info->buf_2->data = NULL;
+		kfree(fwd_info->buf_2->data_raw);
+		fwd_info->buf_2->data_raw = NULL;
+		kfree(fwd_info->buf_2);
+		fwd_info->buf_2 = NULL;
+	}
+	spin_unlock_irqrestore(&fwd_info->buf_lock, flags);
+}
+
+/*
+ * Lazily allocate the NUM_WRITE_BUFFERS write buffers used by GLINK
+ * writes, under write_buf_lock (hence GFP_ATOMIC). On failure all
+ * write buffers are torn down again.
+ */
+void diagfwd_write_buffers_init(struct diagfwd_info *fwd_info)
+{
+	unsigned long flags;
+	int i;
+
+	if (!fwd_info)
+		return;
+
+	if (!fwd_info->inited) {
+		pr_err("diag: In %s, channel not inited, p: %d, t: %d\n",
+		       __func__, fwd_info->peripheral, fwd_info->type);
+		return;
+	}
+
+	spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+		if (!fwd_info->buf_ptr[i])
+			fwd_info->buf_ptr[i] =
+					kzalloc(sizeof(struct diagfwd_buf_t),
+						GFP_ATOMIC);
+		if (!fwd_info->buf_ptr[i])
+			goto err;
+		kmemleak_not_leak(fwd_info->buf_ptr[i]);
+		if (!fwd_info->buf_ptr[i]->data) {
+			fwd_info->buf_ptr[i]->data = kzalloc(PERIPHERAL_BUF_SZ,
+								GFP_ATOMIC);
+			if (!fwd_info->buf_ptr[i]->data)
+				goto err;
+			fwd_info->buf_ptr[i]->len = PERIPHERAL_BUF_SZ;
+			kmemleak_not_leak(fwd_info->buf_ptr[i]->data);
+		}
+	}
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+	return;
+
+err:
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+	pr_err("diag:unable to allocate write buffers\n");
+	diagfwd_write_buffers_exit(fwd_info);
+
+}
+
+/* Free all write buffers of a channel, nulling the slots for re-init. */
+static void diagfwd_write_buffers_exit(struct diagfwd_info *fwd_info)
+{
+	unsigned long flags;
+	int i;
+
+	if (!fwd_info)
+		return;
+
+	spin_lock_irqsave(&fwd_info->write_buf_lock, flags);
+	for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
+		if (fwd_info->buf_ptr[i]) {
+			kfree(fwd_info->buf_ptr[i]->data);
+			fwd_info->buf_ptr[i]->data = NULL;
+			kfree(fwd_info->buf_ptr[i]);
+			fwd_info->buf_ptr[i] = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&fwd_info->write_buf_lock, flags);
+}
+}
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
new file mode 100644
index 0000000..ed4bd76
--- /dev/null
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -0,0 +1,117 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_PERIPHERAL_H
+#define DIAGFWD_PERIPHERAL_H
+
+/* Per-channel read buffer sizes, in bytes */
+#define PERIPHERAL_BUF_SZ		16384
+#define MAX_PERIPHERAL_BUF_SZ		32768
+#define MAX_PERIPHERAL_HDLC_BUF_SZ	65539
+
+/* IPC transports a peripheral channel can ride on */
+#define TRANSPORT_UNKNOWN		-1
+#define TRANSPORT_SOCKET		0
+#define TRANSPORT_GLINK			1
+#define NUM_TRANSPORT			2
+#define NUM_WRITE_BUFFERS		2
+/* Map a peripheral id to its DIAG_CON_* connection mask bit */
+#define PERIPHERAL_MASK(x)					\
+	((x == PERIPHERAL_MODEM) ? DIAG_CON_MPSS :		\
+	((x == PERIPHERAL_LPASS) ? DIAG_CON_LPASS :		\
+	((x == PERIPHERAL_WCNSS) ? DIAG_CON_WCNSS :		\
+	((x == PERIPHERAL_SENSORS) ? DIAG_CON_SENSORS : \
+	((x == PERIPHERAL_WDSP) ? DIAG_CON_WDSP : 0)))))	\
+
+/* Human-readable peripheral name, for logging */
+#define PERIPHERAL_STRING(x)					\
+	((x == PERIPHERAL_MODEM) ? "MODEM" :			\
+	((x == PERIPHERAL_LPASS) ? "LPASS" :			\
+	((x == PERIPHERAL_WCNSS) ? "WCNSS" :			\
+	((x == PERIPHERAL_SENSORS) ? "SENSORS" :		\
+	((x == PERIPHERAL_WDSP) ? "WDSP" : "UNKNOWN")))))	\
+
+/*
+ * One channel buffer: data holds (possibly HDLC-encoded) payload,
+ * data_raw the unencoded companion used when APPS-side HDLC encoding
+ * is active. in_busy serializes ownership between reader and consumer;
+ * ctxt packs peripheral/type/buffer-number (see SET_BUF_CTXT).
+ */
+struct diagfwd_buf_t {
+	unsigned char *data;
+	unsigned char *data_raw;
+	uint32_t len;
+	uint32_t len_raw;
+	atomic_t in_busy;
+	int ctxt;
+};
+
+/* Per-channel-type hooks (data/cntl/dci), invoked by the core */
+struct diag_channel_ops {
+	void (*open)(struct diagfwd_info *fwd_info);
+	void (*close)(struct diagfwd_info *fwd_info);
+	void (*read_done)(struct diagfwd_info *fwd_info,
+			  unsigned char *buf, int len);
+};
+
+/* Per-transport hooks (socket/glink), invoked with the transport ctxt */
+struct diag_peripheral_ops {
+	void (*open)(void *ctxt);
+	void (*close)(void *ctxt);
+	int (*write)(void *ctxt, unsigned char *buf, int len);
+	int (*read)(void *ctxt, unsigned char *buf, int len);
+	void (*queue_read)(void *ctxt);
+};
+
+/*
+ * State of one forwarding channel (one peripheral x one type).
+ * inited/ch_open/opened track init, transport-up and diag-open state
+ * respectively; buf_1/buf_2 are read buffers, buf_ptr[] write buffers.
+ */
+struct diagfwd_info {
+	uint8_t peripheral;
+	uint8_t type;
+	uint8_t transport;
+	uint8_t inited;
+	uint8_t ch_open;
+	atomic_t opened;
+	unsigned long read_bytes;
+	unsigned long write_bytes;
+	spinlock_t buf_lock;
+	spinlock_t write_buf_lock;
+	struct mutex data_mutex;
+	void *ctxt;
+	struct diagfwd_buf_t *buf_1;
+	struct diagfwd_buf_t *buf_2;
+	struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS];
+	struct diag_peripheral_ops *p_ops;
+	struct diag_channel_ops *c_ops;
+};
+
+extern struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
+
+int diagfwd_peripheral_init(void);
+void diagfwd_peripheral_exit(void);
+
+void diagfwd_close_transport(uint8_t transport, uint8_t peripheral);
+
+void diagfwd_open(uint8_t peripheral, uint8_t type);
+void diagfwd_early_open(uint8_t peripheral);
+
+void diagfwd_late_open(struct diagfwd_info *fwd_info);
+void diagfwd_close(uint8_t peripheral, uint8_t type);
+int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
+		     void *ctxt, struct diag_peripheral_ops *ops,
+		     struct diagfwd_info **fwd_ctxt);
+int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
+			  struct diag_peripheral_ops *ops,
+			  struct diagfwd_info **fwd_ctxt);
+void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt);
+
+int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len);
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt);
+void diagfwd_buffers_init(struct diagfwd_info *fwd_info);
+
+/*
+ * The following functions are called by the channels
+ */
+int diagfwd_channel_open(struct diagfwd_info *fwd_info);
+int diagfwd_channel_close(struct diagfwd_info *fwd_info);
+void diagfwd_channel_read(struct diagfwd_info *fwd_info);
+int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
+			      unsigned char *buf, uint32_t len);
+int diagfwd_write_buffer_done(struct diagfwd_info *fwd_info, const void *ptr);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_smux.c b/drivers/char/diag/diagfwd_smux.c
new file mode 100644
index 0000000..33f91d1
--- /dev/null
+++ b/drivers/char/diag/diagfwd_smux.c
@@ -0,0 +1,331 @@
+/* Copyright (c) 2012, 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/termios.h>
+#include <linux/slab.h>
+#include <linux/diagchar.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/usb/usbdiag.h>
+
+#include "diagchar.h"
+#include "diagfwd_bridge.h"
+#include "diagfwd_smux.h"
+
+struct diag_smux_info diag_smux[NUM_SMUX_DEV] = {
+	{
+		.id = SMUX_1,
+		.lcid = SMUX_USB_DIAG_0,
+		.dev_id = DIAGFWD_SMUX,
+		.name = "SMUX_1",
+		.read_buf = NULL,
+		.read_len = 0,
+		.in_busy = 0,
+		.enabled = 0,
+		.opened = 0,
+	},
+};
+
+static void diag_smux_event(void *priv, int event_type, const void *metadata)
+{
+	int len = 0;
+	int id = (int)(uintptr_t)priv;
+	unsigned char *rx_buf = NULL;
+	struct diag_smux_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_SMUX_DEV)
+		return;
+
+	ch = &diag_smux[id];
+	if (metadata) {
+		len = ((struct smux_meta_read *)metadata)->len;
+		rx_buf = ((struct smux_meta_read *)metadata)->buffer;
+	}
+
+	switch (event_type) {
+	case SMUX_CONNECTED:
+		pr_info("diag: SMUX_CONNECTED received, ch: %d\n", ch->id);
+		ch->opened = 1;
+		ch->in_busy = 0;
+		break;
+	case SMUX_DISCONNECTED:
+		ch->opened = 0;
+		msm_smux_close(ch->lcid);
+		pr_info("diag: SMUX_DISCONNECTED received, ch: %d\n", ch->id);
+		break;
+	case SMUX_WRITE_DONE:
+		pr_debug("diag: SMUX Write done, ch: %d\n", ch->id);
+		diag_remote_dev_write_done(ch->dev_id, rx_buf, len, ch->id);
+		break;
+	case SMUX_WRITE_FAIL:
+		pr_info("diag: SMUX Write Failed, ch: %d\n", ch->id);
+		break;
+	case SMUX_READ_FAIL:
+		pr_info("diag: SMUX Read Failed, ch: %d\n", ch->id);
+		break;
+	case SMUX_READ_DONE:
+		ch->read_buf = rx_buf;
+		ch->read_len = len;
+		ch->in_busy = 1;
+		diag_remote_dev_read_done(ch->dev_id, ch->read_buf,
+					  ch->read_len);
+		break;
+	}
+}
+
+static int diag_smux_init_ch(struct diag_smux_info *ch)
+{
+	if (!ch)
+		return -EINVAL;
+
+	if (!ch->enabled) {
+		pr_debug("diag: SMUX channel is not enabled id: %d\n", ch->id);
+		return -ENODEV;
+	}
+
+	if (ch->inited) {
+		pr_debug("diag: SMUX channel %d is already initialize\n",
+			 ch->id);
+		return 0;
+	}
+
+	ch->read_buf = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
+	if (!ch->read_buf)
+		return -ENOMEM;
+
+	ch->inited = 1;
+
+	return 0;
+}
+
+static int smux_get_rx_buffer(void *priv, void **pkt_priv, void **buf,
+			      int size)
+{
+	int id = (int)(uintptr_t)priv;
+	struct diag_smux_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_SMUX_DEV)
+		return -EINVAL;
+
+	ch = &diag_smux[id];
+
+	if (ch->in_busy) {
+		pr_debug("diag: read buffer for SMUX is BUSY\n");
+		return -EAGAIN;
+	}
+
+	*pkt_priv = (void *)0x1234;
+	*buf = ch->read_buf;
+	ch->in_busy = 1;
+	return 0;
+}
+
+static int smux_open(int id)
+{
+	int err = 0;
+	struct diag_smux_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_SMUX_DEV)
+		return -EINVAL;
+
+	ch = &diag_smux[id];
+	if (ch->opened) {
+		pr_debug("diag: SMUX channel %d is already connected\n",
+			 ch->id);
+		return 0;
+	}
+
+	err = diag_smux_init_ch(ch);
+	if (err) {
+		pr_err("diag: Unable to initialize SMUX channel %d, err: %d\n",
+		       ch->id, err);
+		return err;
+	}
+
+	err = msm_smux_open(ch->lcid, (void *)(uintptr_t)ch->id,
+			    diag_smux_event, smux_get_rx_buffer);
+	if (err) {
+		pr_err("diag: failed to open SMUX ch %d, err: %d\n",
+		       ch->id, err);
+		return err;
+	}
+	msm_smux_tiocm_set(ch->lcid, TIOCM_DTR, 0);
+	ch->opened = 1;
+	pr_info("diag: SMUX ch %d is connected\n", ch->id);
+	return 0;
+}
+
+static int smux_close(int id)
+{
+	struct diag_smux_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_SMUX_DEV)
+		return -EINVAL;
+
+	ch = &diag_smux[id];
+	if (!ch->enabled) {
+		pr_debug("diag: SMUX channel is not enabled id: %d\n", ch->id);
+		return -ENODEV;
+	}
+
+	msm_smux_close(ch->lcid);
+	ch->opened = 0;
+	ch->in_busy = 1;
+	kfree(ch->read_buf);
+	ch->read_buf = NULL;
+	return 0;
+}
+
+static int smux_queue_read(int id)
+{
+	return 0;
+}
+
+static int smux_write(int id, unsigned char *buf, int len, int ctxt)
+{
+	struct diag_smux_info *ch = NULL;
+
+	if (id < 0 || id >= NUM_SMUX_DEV)
+		return -EINVAL;
+
+	ch = &diag_smux[id];
+	return  msm_smux_write(ch->lcid, NULL, buf, len);
+}
+
+static int smux_fwd_complete(int id, unsigned char *buf, int len, int ctxt)
+{
+	if (id < 0 || id >= NUM_SMUX_DEV)
+		return -EINVAL;
+
+	diag_smux[id].in_busy = 0;
+	return 0;
+}
+
+static int diagfwd_smux_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int diagfwd_smux_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops diagfwd_smux_dev_pm_ops = {
+	.runtime_suspend = diagfwd_smux_runtime_suspend,
+	.runtime_resume = diagfwd_smux_runtime_resume,
+};
+
+static int diagfwd_smux_probe(struct platform_device *pdev)
+{
+	if (!pdev)
+		return -EINVAL;
+
+	pr_debug("diag: SMUX probe called, pdev->id: %d\n", pdev->id);
+	if (pdev->id < 0 || pdev->id >= NUM_SMUX_DEV) {
+		pr_err("diag: No support for SMUX device %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	diag_smux[pdev->id].enabled = 1;
+	return smux_open(pdev->id);
+}
+
+static int diagfwd_smux_remove(struct platform_device *pdev)
+{
+	if (!pdev)
+		return -EINVAL;
+
+	pr_debug("diag: SMUX remove called, pdev->id: %d\n", pdev->id);
+	if (pdev->id < 0 || pdev->id >= NUM_SMUX_DEV) {
+		pr_err("diag: No support for SMUX device %d\n", pdev->id);
+		return -EINVAL;
+	}
+	if (!diag_smux[pdev->id].enabled) {
+		pr_err("diag: SMUX channel %d is not enabled\n",
+		       diag_smux[pdev->id].id);
+		return -ENODEV;
+	}
+	return smux_close(pdev->id);
+}
+
+static struct platform_driver msm_diagfwd_smux_driver = {
+	.probe = diagfwd_smux_probe,
+	.remove = diagfwd_smux_remove,
+	.driver = {
+		   .name = "SMUX_DIAG",
+		   .owner = THIS_MODULE,
+		   .pm   = &diagfwd_smux_dev_pm_ops,
+		   },
+};
+
+static struct diag_remote_dev_ops diag_smux_fwd_ops = {
+	.open = smux_open,
+	.close = smux_close,
+	.queue_read = smux_queue_read,
+	.write = smux_write,
+	.fwd_complete = smux_fwd_complete,
+};
+
+int diag_smux_init(void)
+{
+	int i;
+	int err = 0;
+	struct diag_smux_info *ch = NULL;
+	char wq_name[DIAG_SMUX_NAME_SZ + 11];
+
+	for (i = 0; i < NUM_SMUX_DEV; i++) {
+		ch = &diag_smux[i];
+		strlcpy(wq_name, "DIAG_SMUX_", sizeof(wq_name));
+		strlcat(wq_name, ch->name, sizeof(wq_name));
+		ch->smux_wq = create_singlethread_workqueue(wq_name);
+		if (!ch->smux_wq) {
+			err = -ENOMEM;
+			goto fail;
+		}
+		err = diagfwd_bridge_register(ch->dev_id, ch->id,
+					      &diag_smux_fwd_ops);
+		if (err) {
+			pr_err("diag: Unable to register SMUX ch %d with bridge\n",
+			       ch->id);
+			goto fail;
+		}
+	}
+
+	err = platform_driver_register(&msm_diagfwd_smux_driver);
+	if (err) {
+		pr_err("diag: Unable to register SMUX device, err: %d\n", err);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	diag_smux_exit();
+	return err;
+}
+
+void diag_smux_exit(void)
+{
+	int i;
+	struct diag_smux_info *ch = NULL;
+
+	for (i = 0; i < NUM_SMUX_DEV; i++) {
+		ch = &diag_smux[i];
+		kfree(ch->read_buf);
+		ch->read_buf = NULL;
+		ch->enabled = 0;
+		ch->opened = 0;
+		ch->read_len = 0;
+	}
+	platform_driver_unregister(&msm_diagfwd_smux_driver);
+}
diff --git a/drivers/char/diag/diagfwd_smux.h b/drivers/char/diag/diagfwd_smux.h
new file mode 100644
index 0000000..f2514a2
--- /dev/null
+++ b/drivers/char/diag/diagfwd_smux.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2012,2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SMUX_H
+#define DIAGFWD_SMUX_H
+
+#include <linux/smux.h>
+
+#define SMUX_1			0
+#define NUM_SMUX_DEV		1
+
+#define DIAG_SMUX_NAME_SZ	24
+
+struct diag_smux_info {
+	int id;
+	int lcid;
+	int dev_id;
+	char name[DIAG_SMUX_NAME_SZ];
+	unsigned char *read_buf;
+	int read_len;
+	int in_busy;
+	int enabled;
+	int inited;
+	int opened;
+	struct work_struct read_work;
+	struct workqueue_struct *smux_wq;
+};
+
+extern struct diag_smux_info diag_smux[NUM_SMUX_DEV];
+
+int diag_smux_init(void);
+void diag_smux_exit(void);
+
+#endif
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
new file mode 100644
index 0000000..c82c918
--- /dev/null
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -0,0 +1,1107 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/msm_ipc.h>
+#include <linux/socket.h>
+#include <linux/pm_runtime.h>
+#include <linux/delay.h>
+#include <linux/diagchar.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <asm/current.h>
+#include <net/sock.h>
+#include <linux/ipc_router.h>
+#include <linux/notifier.h>
+#include "diagchar.h"
+#include "diagfwd.h"
+#include "diagfwd_peripheral.h"
+#include "diagfwd_socket.h"
+#include "diag_ipc_logging.h"
+
+#define DIAG_SVC_ID		0x1001
+
+#define MODEM_INST_BASE		0
+#define LPASS_INST_BASE		64
+#define WCNSS_INST_BASE		128
+#define SENSORS_INST_BASE	192
+#define WDSP_INST_BASE	256
+
+#define INST_ID_CNTL		0
+#define INST_ID_CMD		1
+#define INST_ID_DATA		2
+#define INST_ID_DCI_CMD		3
+#define INST_ID_DCI		4
+
+struct diag_cntl_socket_info *cntl_socket;
+
+struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DATA,
+		.name = "MODEM_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DATA,
+		.name = "LPASS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DATA,
+		.name = "WCNSS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DATA,
+		.name = "SENSORS_DATA"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DATA,
+		.name = "DIAG_DATA"
+	}
+};
+
+struct diag_socket_info socket_cntl[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CNTL,
+		.name = "MODEM_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CNTL,
+		.name = "LPASS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CNTL,
+		.name = "WCNSS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CNTL,
+		.name = "SENSORS_CNTL"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CNTL,
+		.name = "DIAG_CTRL"
+	}
+};
+
+struct diag_socket_info socket_dci[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI,
+		.name = "MODEM_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI,
+		.name = "LPASS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI,
+		.name = "WCNSS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI,
+		.name = "SENSORS_DCI"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI,
+		.name = "DIAG_DCI_DATA"
+	}
+};
+
+struct diag_socket_info socket_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_CMD,
+		.name = "MODEM_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_CMD,
+		.name = "LPASS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_CMD,
+		.name = "WCNSS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_CMD,
+		.name = "SENSORS_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_CMD,
+		.name = "DIAG_CMD"
+	}
+
+};
+
+struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
+	{
+		.peripheral = PERIPHERAL_MODEM,
+		.type = TYPE_DCI_CMD,
+		.name = "MODEM_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_LPASS,
+		.type = TYPE_DCI_CMD,
+		.name = "LPASS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WCNSS,
+		.type = TYPE_DCI_CMD,
+		.name = "WCNSS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_SENSORS,
+		.type = TYPE_DCI_CMD,
+		.name = "SENSORS_DCI_CMD"
+	},
+	{
+		.peripheral = PERIPHERAL_WDSP,
+		.type = TYPE_DCI_CMD,
+		.name = "DIAG_DCI_CMD"
+	}
+};
+
+static void diag_state_open_socket(void *ctxt);
+static void diag_state_close_socket(void *ctxt);
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len);
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_socket_queue_read(void *ctxt);
+static void socket_init_work_fn(struct work_struct *work);
+static int socket_ready_notify(struct notifier_block *nb,
+			       unsigned long action, void *data);
+
+static struct diag_peripheral_ops socket_ops = {
+	.open = diag_state_open_socket,
+	.close = diag_state_close_socket,
+	.write = diag_socket_write,
+	.read = diag_socket_read,
+	.queue_read = diag_socket_queue_read
+};
+
+static struct notifier_block socket_notify = {
+	.notifier_call = socket_ready_notify,
+};
+
+static void diag_state_open_socket(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return;
+
+	info = (struct diag_socket_info *)(ctxt);
+	atomic_set(&info->diag_state, 1);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		 "%s setting diag state to 1", info->name);
+}
+
+static void diag_state_close_socket(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return;
+
+	info = (struct diag_socket_info *)(ctxt);
+	atomic_set(&info->diag_state, 0);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+		 "%s setting diag state to 0", info->name);
+	wake_up_interruptible(&info->read_wait_q);
+	flush_workqueue(info->wq);
+}
+
+static void socket_data_ready(struct sock *sk_ptr)
+{
+	unsigned long flags;
+	struct diag_socket_info *info = NULL;
+
+	if (!sk_ptr) {
+		pr_err_ratelimited("diag: In %s, invalid sk_ptr", __func__);
+		return;
+	}
+
+	info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
+	if (!info) {
+		pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&info->lock, flags);
+	info->data_ready++;
+	spin_unlock_irqrestore(&info->lock, flags);
+	diag_ws_on_notify();
+
+	/*
+	 * Initialize read buffers for the servers. The servers must read data
+	 * first to get the address of its clients.
+	 */
+	if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
+		diagfwd_buffers_init(info->fwd_ctxt);
+
+	queue_work(info->wq, &(info->read_work));
+	wake_up_interruptible(&info->read_wait_q);
+}
+
+static void cntl_socket_data_ready(struct sock *sk_ptr)
+{
+	if (!sk_ptr || !cntl_socket) {
+		pr_err_ratelimited("diag: In %s, invalid ptrs. sk_ptr: %pK cntl_socket: %pK\n",
+				   __func__, sk_ptr, cntl_socket);
+		return;
+	}
+
+	atomic_inc(&cntl_socket->data_ready);
+	wake_up_interruptible(&cntl_socket->read_wait_q);
+	queue_work(cntl_socket->wq, &(cntl_socket->read_work));
+}
+
+static void socket_flow_cntl(struct sock *sk_ptr)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!sk_ptr)
+		return;
+
+	info = (struct diag_socket_info *)(sk_ptr->sk_user_data);
+	if (!info) {
+		pr_err_ratelimited("diag: In %s, invalid info\n", __func__);
+		return;
+	}
+
+	atomic_inc(&info->flow_cnt);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s flow controlled\n", info->name);
+	pr_debug("diag: In %s, channel %s flow controlled\n",
+		 __func__, info->name);
+}
+
+static int lookup_server(struct diag_socket_info *info)
+{
+	int ret = 0;
+	struct server_lookup_args *args = NULL;
+	struct sockaddr_msm_ipc *srv_addr = NULL;
+
+	if (!info)
+		return -EINVAL;
+
+	args = kzalloc((sizeof(struct server_lookup_args) +
+			sizeof(struct msm_ipc_server_info)), GFP_KERNEL);
+	if (!args)
+		return -ENOMEM;
+	kmemleak_not_leak(args);
+
+	args->lookup_mask = 0xFFFFFFFF;
+	args->port_name.service = info->svc_id;
+	args->port_name.instance = info->ins_id;
+	args->num_entries_in_array = 1;
+	args->num_entries_found = 0;
+
+	ret = kernel_sock_ioctl(info->hdl, IPC_ROUTER_IOCTL_LOOKUP_SERVER,
+				(unsigned long)args);
+	if (ret < 0) {
+		pr_err("diag: In %s, cannot find service for %s\n", __func__,
+		       info->name);
+		kfree(args);
+		return -EFAULT;
+	}
+
+	srv_addr = &info->remote_addr;
+	srv_addr->family = AF_MSM_IPC;
+	srv_addr->address.addrtype = MSM_IPC_ADDR_ID;
+	srv_addr->address.addr.port_addr.node_id = args->srv_info[0].node_id;
+	srv_addr->address.addr.port_addr.port_id = args->srv_info[0].port_id;
+	ret = args->num_entries_found;
+	kfree(args);
+	if (ret < 1)
+		return -EIO;
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s found server node: %d port: %d",
+		 info->name, srv_addr->address.addr.port_addr.node_id,
+		 srv_addr->address.addr.port_addr.port_id);
+	return 0;
+}
+
+static void __socket_open_channel(struct diag_socket_info *info)
+{
+	if (!info)
+		return;
+
+	if (!info->inited) {
+		pr_debug("diag: In %s, socket %s is not initialized\n",
+			 __func__, info->name);
+		return;
+	}
+
+	if (atomic_read(&info->opened)) {
+		pr_debug("diag: In %s, socket %s already opened\n",
+			 __func__, info->name);
+		return;
+	}
+
+	atomic_set(&info->opened, 1);
+	diagfwd_channel_open(info->fwd_ctxt);
+}
+
+static void socket_open_client(struct diag_socket_info *info)
+{
+	int ret = 0;
+
+	if (!info || info->port_type != PORT_TYPE_CLIENT)
+		return;
+
+	ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &info->hdl);
+	if (ret < 0 || !info->hdl) {
+		pr_err("diag: In %s, socket not initialized for %s\n", __func__,
+		       info->name);
+		return;
+	}
+
+	write_lock_bh(&info->hdl->sk->sk_callback_lock);
+	info->hdl->sk->sk_user_data = (void *)(info);
+	info->hdl->sk->sk_data_ready = socket_data_ready;
+	info->hdl->sk->sk_write_space = socket_flow_cntl;
+	write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+	ret = lookup_server(info);
+	if (ret) {
+		pr_err("diag: In %s, failed to lookup server, ret: %d\n",
+		       __func__, ret);
+		return;
+	}
+	__socket_open_channel(info);
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static void socket_open_server(struct diag_socket_info *info)
+{
+	int ret = 0;
+	struct sockaddr_msm_ipc srv_addr = { 0 };
+
+	if (!info)
+		return;
+
+	ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &info->hdl);
+	if (ret < 0 || !info->hdl) {
+		pr_err("diag: In %s, socket not initialized for %s\n", __func__,
+		       info->name);
+		return;
+	}
+
+	write_lock_bh(&info->hdl->sk->sk_callback_lock);
+	info->hdl->sk->sk_user_data = (void *)(info);
+	info->hdl->sk->sk_data_ready = socket_data_ready;
+	info->hdl->sk->sk_write_space = socket_flow_cntl;
+	write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+
+	srv_addr.family = AF_MSM_IPC;
+	srv_addr.address.addrtype = MSM_IPC_ADDR_NAME;
+	srv_addr.address.addr.port_name.service = info->svc_id;
+	srv_addr.address.addr.port_name.instance = info->ins_id;
+
+	ret = kernel_bind(info->hdl, (struct sockaddr *)&srv_addr,
+			  sizeof(srv_addr));
+	if (ret) {
+		pr_err("diag: In %s, failed to bind, ch: %s, svc_id: %d ins_id: %d, err: %d\n",
+		       __func__, info->name, info->svc_id, info->ins_id, ret);
+		return;
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s opened server svc: %d ins: %d",
+		 info->name, info->svc_id, info->ins_id);
+}
+
+static void socket_init_work_fn(struct work_struct *work)
+{
+	struct diag_socket_info *info = container_of(work,
+						     struct diag_socket_info,
+						     init_work);
+	if (!info)
+		return;
+
+	if (!info->inited) {
+		pr_debug("diag: In %s, socket %s is not initialized\n",
+			 __func__, info->name);
+		return;
+	}
+
+	switch (info->port_type) {
+	case PORT_TYPE_SERVER:
+		socket_open_server(info);
+		break;
+	case PORT_TYPE_CLIENT:
+		socket_open_client(info);
+		break;
+	default:
+		pr_err("diag: In %s, unknown type %d\n", __func__,
+		       info->port_type);
+		break;
+	}
+}
+
+static void __socket_close_channel(struct diag_socket_info *info)
+{
+	if (!info || !info->hdl)
+		return;
+
+	if (!atomic_read(&info->opened))
+		return;
+
+	memset(&info->remote_addr, 0, sizeof(struct sockaddr_msm_ipc));
+	diagfwd_channel_close(info->fwd_ctxt);
+
+	atomic_set(&info->opened, 0);
+
+	/* Don't close the server. Server should always remain open */
+	if (info->port_type != PORT_TYPE_SERVER) {
+		write_lock_bh(&info->hdl->sk->sk_callback_lock);
+		info->hdl->sk->sk_user_data = NULL;
+		info->hdl->sk->sk_data_ready = NULL;
+		write_unlock_bh(&info->hdl->sk->sk_callback_lock);
+		sock_release(info->hdl);
+		info->hdl = NULL;
+		wake_up_interruptible(&info->read_wait_q);
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static void socket_close_channel(struct diag_socket_info *info)
+{
+	if (!info)
+		return;
+
+	__socket_close_channel(info);
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s exiting\n", info->name);
+}
+
+static int cntl_socket_process_msg_server(uint32_t cmd, uint32_t svc_id,
+					  uint32_t ins_id)
+{
+	uint8_t peripheral;
+	uint8_t found = 0;
+	struct diag_socket_info *info = NULL;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		info = &socket_cmd[peripheral];
+		if ((svc_id == info->svc_id) &&
+		    (ins_id == info->ins_id)) {
+			found = 1;
+			break;
+		}
+
+		info = &socket_dci_cmd[peripheral];
+		if ((svc_id == info->svc_id) &&
+		    (ins_id == info->ins_id)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EIO;
+
+	switch (cmd) {
+	case CNTL_CMD_NEW_SERVER:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received new server\n",
+			 info->name);
+		diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+				 info->type, (void *)info, &socket_ops,
+				 &info->fwd_ctxt);
+		queue_work(info->wq, &(info->init_work));
+		break;
+	case CNTL_CMD_REMOVE_SERVER:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove server\n",
+			 info->name);
+		socket_close_channel(info);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cntl_socket_process_msg_client(uint32_t cmd, uint32_t node_id,
+					  uint32_t port_id)
+{
+	uint8_t peripheral;
+	uint8_t found = 0;
+	struct diag_socket_info *info = NULL;
+	struct msm_ipc_port_addr remote_port = {0};
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		info = &socket_data[peripheral];
+		remote_port = info->remote_addr.address.addr.port_addr;
+		if ((remote_port.node_id == node_id) &&
+		    (remote_port.port_id == port_id)) {
+			found = 1;
+			break;
+		}
+
+		info = &socket_cntl[peripheral];
+		remote_port = info->remote_addr.address.addr.port_addr;
+		if ((remote_port.node_id == node_id) &&
+		    (remote_port.port_id == port_id)) {
+			found = 1;
+			break;
+		}
+
+		info = &socket_dci[peripheral];
+		remote_port = info->remote_addr.address.addr.port_addr;
+		if ((remote_port.node_id == node_id) &&
+		    (remote_port.port_id == port_id)) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EIO;
+
+	switch (cmd) {
+	case CNTL_CMD_REMOVE_CLIENT:
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s received remove client\n",
+			 info->name);
+		socket_close_channel(info);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void cntl_socket_read_work_fn(struct work_struct *work)
+{
+	union cntl_port_msg msg;
+	int ret = 0;
+	struct kvec iov = { 0 };
+	struct msghdr read_msg = { 0 };
+
+
+	if (!cntl_socket)
+		return;
+
+	ret = wait_event_interruptible(cntl_socket->read_wait_q,
+				(atomic_read(&cntl_socket->data_ready) > 0));
+	if (ret)
+		return;
+
+	do {
+		iov.iov_base = &msg;
+		iov.iov_len = sizeof(msg);
+		read_msg.msg_name = NULL;
+		read_msg.msg_namelen = 0;
+		ret = kernel_recvmsg(cntl_socket->hdl, &read_msg, &iov, 1,
+				     sizeof(msg), MSG_DONTWAIT);
+		if (ret < 0) {
+			pr_debug("diag: In %s, Error recving data %d\n",
+				 __func__, ret);
+			break;
+		}
+
+		atomic_dec(&cntl_socket->data_ready);
+
+		switch (msg.srv.cmd) {
+		case CNTL_CMD_NEW_SERVER:
+		case CNTL_CMD_REMOVE_SERVER:
+			cntl_socket_process_msg_server(msg.srv.cmd,
+						       msg.srv.service,
+						       msg.srv.instance);
+			break;
+		case CNTL_CMD_REMOVE_CLIENT:
+			cntl_socket_process_msg_client(msg.cli.cmd,
+						       msg.cli.node_id,
+						       msg.cli.port_id);
+			break;
+		}
+	} while (atomic_read(&cntl_socket->data_ready) > 0);
+}
+
+static void socket_read_work_fn(struct work_struct *work)
+{
+	struct diag_socket_info *info = container_of(work,
+						     struct diag_socket_info,
+						     read_work);
+
+	if (!info)
+		return;
+
+	diagfwd_channel_read(info->fwd_ctxt);
+}
+
+static void diag_socket_queue_read(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return;
+
+	info = (struct diag_socket_info *)ctxt;
+	if (info->hdl && info->wq)
+		queue_work(info->wq, &(info->read_work));
+}
+
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt || !fwd_ctxt)
+		return;
+
+	info = (struct diag_socket_info *)ctxt;
+	info->fwd_ctxt = fwd_ctxt;
+}
+
+int diag_socket_check_state(void *ctxt)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt)
+		return 0;
+
+	info = (struct diag_socket_info *)ctxt;
+	return (int)(atomic_read(&info->diag_state));
+}
+
+static void __diag_socket_init(struct diag_socket_info *info)
+{
+	uint16_t ins_base = 0;
+	uint16_t ins_offset = 0;
+
+	char wq_name[DIAG_SOCKET_NAME_SZ + 10];
+
+	if (!info)
+		return;
+
+	init_waitqueue_head(&info->wait_q);
+	info->inited = 0;
+	atomic_set(&info->opened, 0);
+	atomic_set(&info->diag_state, 0);
+	info->pkt_len = 0;
+	info->pkt_read = 0;
+	info->hdl = NULL;
+	info->fwd_ctxt = NULL;
+	info->data_ready = 0;
+	atomic_set(&info->flow_cnt, 0);
+	spin_lock_init(&info->lock);
+	strlcpy(wq_name, "DIAG_SOCKET_", sizeof(wq_name));
+	strlcat(wq_name, info->name, sizeof(wq_name));
+	init_waitqueue_head(&info->read_wait_q);
+	info->wq = create_singlethread_workqueue(wq_name);
+	if (!info->wq) {
+		pr_err("diag: In %s, unable to create workqueue for socket channel %s\n",
+		       __func__, info->name);
+		return;
+	}
+	INIT_WORK(&(info->init_work), socket_init_work_fn);
+	INIT_WORK(&(info->read_work), socket_read_work_fn);
+
+	switch (info->peripheral) {
+	case PERIPHERAL_MODEM:
+		ins_base = MODEM_INST_BASE;
+		break;
+	case PERIPHERAL_LPASS:
+		ins_base = LPASS_INST_BASE;
+		break;
+	case PERIPHERAL_WCNSS:
+		ins_base = WCNSS_INST_BASE;
+		break;
+	case PERIPHERAL_SENSORS:
+		ins_base = SENSORS_INST_BASE;
+		break;
+	case PERIPHERAL_WDSP:
+		ins_base = WDSP_INST_BASE;
+		break;
+	}
+
+	switch (info->type) {
+	case TYPE_DATA:
+		ins_offset = INST_ID_DATA;
+		info->port_type = PORT_TYPE_SERVER;
+		break;
+	case TYPE_CNTL:
+		ins_offset = INST_ID_CNTL;
+		info->port_type = PORT_TYPE_SERVER;
+		break;
+	case TYPE_DCI:
+		ins_offset = INST_ID_DCI;
+		info->port_type = PORT_TYPE_SERVER;
+		break;
+	case TYPE_CMD:
+		ins_offset = INST_ID_CMD;
+		info->port_type = PORT_TYPE_CLIENT;
+		break;
+	case TYPE_DCI_CMD:
+		ins_offset = INST_ID_DCI_CMD;
+		info->port_type = PORT_TYPE_CLIENT;
+		break;
+	}
+
+	info->svc_id = DIAG_SVC_ID;
+	info->ins_id = ins_base + ins_offset;
+	info->inited = 1;
+}
+
+static void cntl_socket_init_work_fn(struct work_struct *work)
+{
+	int ret = 0;
+
+	if (!cntl_socket)
+		return;
+
+	ret = sock_create(AF_MSM_IPC, SOCK_DGRAM, 0, &cntl_socket->hdl);
+	if (ret < 0 || !cntl_socket->hdl) {
+		pr_err("diag: In %s, cntl socket is not initialized, ret: %d\n",
+		       __func__, ret);
+		return;
+	}
+
+	write_lock_bh(&cntl_socket->hdl->sk->sk_callback_lock);
+	cntl_socket->hdl->sk->sk_user_data = (void *)cntl_socket;
+	cntl_socket->hdl->sk->sk_data_ready = cntl_socket_data_ready;
+	write_unlock_bh(&cntl_socket->hdl->sk->sk_callback_lock);
+
+	ret = kernel_sock_ioctl(cntl_socket->hdl,
+				IPC_ROUTER_IOCTL_BIND_CONTROL_PORT, 0);
+	if (ret < 0) {
+		pr_err("diag: In %s Could not bind as control port, ret: %d\n",
+		       __func__, ret);
+	}
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Initialized control sockets");
+}
+
+static int __diag_cntl_socket_init(void)
+{
+	cntl_socket = kzalloc(sizeof(struct diag_cntl_socket_info), GFP_KERNEL);
+	if (!cntl_socket)
+		return -ENOMEM;
+
+	cntl_socket->svc_id = DIAG_SVC_ID;
+	cntl_socket->ins_id = 1;
+	atomic_set(&cntl_socket->data_ready, 0);
+	init_waitqueue_head(&cntl_socket->read_wait_q);
+	cntl_socket->wq = create_singlethread_workqueue("DIAG_CNTL_SOCKET");
+	INIT_WORK(&(cntl_socket->read_work), cntl_socket_read_work_fn);
+	INIT_WORK(&(cntl_socket->init_work), cntl_socket_init_work_fn);
+
+	return 0;
+}
+
+int diag_socket_init(void)
+{
+	int err = 0;
+	int peripheral = 0;
+	struct diag_socket_info *info = NULL;
+
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		info = &socket_cntl[peripheral];
+		__diag_socket_init(&socket_cntl[peripheral]);
+
+		diagfwd_cntl_register(TRANSPORT_SOCKET, peripheral,
+			(void *)info, &socket_ops, &(info->fwd_ctxt));
+
+		__diag_socket_init(&socket_data[peripheral]);
+		__diag_socket_init(&socket_cmd[peripheral]);
+		__diag_socket_init(&socket_dci[peripheral]);
+		__diag_socket_init(&socket_dci_cmd[peripheral]);
+	}
+
+	err = __diag_cntl_socket_init();
+	if (err) {
+		pr_err("diag: Unable to open control sockets, err: %d\n", err);
+		goto fail;
+	}
+
+	register_ipcrtr_af_init_notifier(&socket_notify);
+fail:
+	return err;
+}
+
+static int socket_ready_notify(struct notifier_block *nb,
+			       unsigned long action, void *data)
+{
+	uint8_t peripheral;
+	struct diag_socket_info *info = NULL;
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "received notification from IPCR");
+
+	if (action != IPCRTR_AF_INIT) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "action not recognized by diag %lu\n", action);
+		return 0;
+	}
+
+	/* Initialize only the servers */
+	for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+		info = &socket_cntl[peripheral];
+		queue_work(info->wq, &(info->init_work));
+		info = &socket_data[peripheral];
+		queue_work(info->wq, &(info->init_work));
+		info = &socket_dci[peripheral];
+		queue_work(info->wq, &(info->init_work));
+	}
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "Initialized all servers");
+
+	queue_work(cntl_socket->wq, &(cntl_socket->init_work));
+
+	return 0;
+}
+
+int diag_socket_init_peripheral(uint8_t peripheral)
+{
+	struct diag_socket_info *info = NULL;
+
+	if (peripheral >= NUM_PERIPHERALS)
+		return -EINVAL;
+
+	info = &socket_data[peripheral];
+	diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+			 info->type, (void *)info, &socket_ops,
+			 &info->fwd_ctxt);
+
+	info = &socket_dci[peripheral];
+	diagfwd_register(TRANSPORT_SOCKET, info->peripheral,
+			 info->type, (void *)info, &socket_ops,
+			 &info->fwd_ctxt);
+	return 0;
+}
+
+static void __diag_socket_exit(struct diag_socket_info *info)
+{
+	if (!info)
+		return;
+
+	diagfwd_deregister(info->peripheral, info->type, (void *)info);
+	info->fwd_ctxt = NULL;
+	info->hdl = NULL;
+	if (info->wq)
+		destroy_workqueue(info->wq);
+
+}
+
+/* Release only the control sockets; the rest go in diag_socket_exit(). */
+void diag_socket_early_exit(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++)
+		__diag_socket_exit(&socket_cntl[i]);
+}
+
+/*
+ * Tear down the data, cmd, dci and dci_cmd sockets for every
+ * peripheral. Control sockets are released separately via
+ * diag_socket_early_exit().
+ */
+void diag_socket_exit(void)
+{
+	int i = 0;
+
+	for (i = 0; i < NUM_PERIPHERALS; i++) {
+		__diag_socket_exit(&socket_data[i]);
+		__diag_socket_exit(&socket_cmd[i]);
+		__diag_socket_exit(&socket_dci[i]);
+		__diag_socket_exit(&socket_dci_cmd[i]);
+	}
+}
+
+/*
+ * Drain pending packets from the socket into @buf. Each packet's size
+ * is learned first with MSG_PEEK so a packet larger than the remaining
+ * space is left queued (buf_full) and re-read on the next pass. The
+ * filled buffer is handed to diagfwd via diagfwd_channel_read_done().
+ * Returns 0 on success, negative errno on failure.
+ */
+static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
+{
+	int err = 0;
+	int pkt_len = 0;
+	int read_len = 0;
+	int bytes_remaining = 0;
+	int total_recd = 0;
+	int loop_count = 0;
+	uint8_t buf_full = 0;
+	unsigned char *temp = NULL;
+	struct kvec iov = {0};
+	struct msghdr read_msg = {0};
+	struct sockaddr_msm_ipc src_addr = {0};
+	struct diag_socket_info *info = NULL;
+	unsigned long flags;
+
+	info = (struct diag_socket_info *)(ctxt);
+	if (!info)
+		return -ENODEV;
+
+	if (!buf || !ctxt || buf_len <= 0)
+		return -EINVAL;
+
+	temp = buf;
+	bytes_remaining = buf_len;
+
+	err = wait_event_interruptible(info->read_wait_q,
+				      (info->data_ready > 0) || (!info->hdl) ||
+				      (atomic_read(&info->diag_state) == 0));
+	if (err) {
+		/* Interrupted by a signal: release the buffer and bail out */
+		mutex_lock(&driver->diagfwd_channel_mutex);
+		diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+		mutex_unlock(&driver->diagfwd_channel_mutex);
+		return -ERESTARTSYS;
+	}
+
+	/*
+	 * There is no need to continue reading over peripheral in this case.
+	 * Release the wake source hold earlier.
+	 */
+	if (atomic_read(&info->diag_state) == 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+			 "%s closing read thread. diag state is closed\n",
+			 info->name);
+		mutex_lock(&driver->diagfwd_channel_mutex);
+		diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+		mutex_unlock(&driver->diagfwd_channel_mutex);
+		return 0;
+	}
+
+	if (!info->hdl) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s closing read thread\n",
+			 info->name);
+		goto fail;
+	}
+
+	do {
+		loop_count++;
+		iov.iov_base = temp;
+		iov.iov_len = bytes_remaining;
+		read_msg.msg_name = &src_addr;
+		read_msg.msg_namelen = sizeof(src_addr);
+
+		/* Peek to learn the packet size without dequeueing it */
+		pkt_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1, 0,
+					 MSG_PEEK);
+		if (pkt_len <= 0)
+			break;
+
+		if (pkt_len > bytes_remaining) {
+			buf_full = 1;
+			break;
+		}
+
+		spin_lock_irqsave(&info->lock, flags);
+		info->data_ready--;
+		spin_unlock_irqrestore(&info->lock, flags);
+
+		/*
+		 * read_len <= 0 covers every error case here; the former
+		 * "if (read_len < 0)" branch further below was unreachable
+		 * (and logged pkt_len instead of read_len), so it is gone.
+		 */
+		read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
+					  pkt_len, 0);
+		if (read_len <= 0)
+			goto fail;
+
+		if (!atomic_read(&info->opened) &&
+		    info->port_type == PORT_TYPE_SERVER) {
+			/*
+			 * This is the first packet from the client. Copy its
+			 * address to the connection object. Consider this
+			 * channel open for communication.
+			 */
+			memcpy(&info->remote_addr, &src_addr, sizeof(src_addr));
+			if (info->ins_id == INST_ID_DCI)
+				atomic_set(&info->opened, 1);
+			else
+				__socket_open_channel(info);
+		}
+
+		temp += read_len;
+		total_recd += read_len;
+		bytes_remaining -= read_len;
+	} while (info->data_ready > 0);
+
+	/* Data was left queued (buffer full): schedule another read pass */
+	if (buf_full || (info->type == TYPE_DATA && pkt_len))
+		queue_work(info->wq, &(info->read_work));
+
+	if (total_recd > 0) {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s read total bytes: %d\n",
+			 info->name, total_recd);
+		mutex_lock(&driver->diagfwd_channel_mutex);
+		err = diagfwd_channel_read_done(info->fwd_ctxt,
+						buf, total_recd);
+		mutex_unlock(&driver->diagfwd_channel_mutex);
+		if (err)
+			goto fail;
+	} else {
+		DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s error in read, err: %d\n",
+			 info->name, total_recd);
+		goto fail;
+	}
+
+	diag_socket_queue_read(info);
+	return 0;
+
+fail:
+	mutex_lock(&driver->diagfwd_channel_mutex);
+	diagfwd_channel_read_done(info->fwd_ctxt, buf, 0);
+	mutex_unlock(&driver->diagfwd_channel_mutex);
+	return -EIO;
+}
+
+/*
+ * Send @len bytes from @buf to the peer captured in info->remote_addr.
+ * Non-blocking (MSG_DONTWAIT). Returns 0 on a complete write, negative
+ * errno on send failure.
+ */
+static int diag_socket_write(void *ctxt, unsigned char *buf, int len)
+{
+	int err = 0;
+	int write_len = 0;
+	struct kvec iov = {0};
+	struct msghdr write_msg = {0};
+	struct diag_socket_info *info = NULL;
+
+	if (!ctxt || !buf || len <= 0)
+		return -EIO;
+
+	info = (struct diag_socket_info *)(ctxt);
+	if (!atomic_read(&info->opened) || !info->hdl)
+		return -ENODEV;
+
+	iov.iov_base = buf;
+	iov.iov_len = len;
+	write_msg.msg_name = &info->remote_addr;
+	write_msg.msg_namelen = sizeof(info->remote_addr);
+	write_msg.msg_flags |= MSG_DONTWAIT;
+	write_len = kernel_sendmsg(info->hdl, &write_msg, &iov, 1, len);
+	if (write_len < 0) {
+		err = write_len;
+		/*
+		 * -EAGAIN means that the number of packets in flight is at
+		 * max capacity and the peripheral hasn't read the data.
+		 */
+		if (err != -EAGAIN) {
+			pr_err_ratelimited("diag: In %s, error sending data, err: %d, ch: %s\n",
+					   __func__, err, info->name);
+		}
+	} else if (write_len != len) {
+		/*
+		 * NOTE(review): a short write returns the positive byte
+		 * count in err — confirm callers treat any non-zero return
+		 * as a failure.
+		 */
+		err = write_len;
+		pr_err_ratelimited("diag: In %s, wrote partial packet to %s, len: %d, wrote: %d\n",
+				   __func__, info->name, len, write_len);
+	}
+
+	DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to socket, len: %d\n",
+		 info->name, write_len);
+
+	return err;
+}
+
diff --git a/drivers/char/diag/diagfwd_socket.h b/drivers/char/diag/diagfwd_socket.h
new file mode 100644
index 0000000..a2b922a
--- /dev/null
+++ b/drivers/char/diag/diagfwd_socket.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGFWD_SOCKET_H
+#define DIAGFWD_SOCKET_H
+
+#include <linux/socket.h>
+#include <linux/msm_ipc.h>
+
+#define DIAG_SOCKET_NAME_SZ		24
+
+#define DIAG_SOCK_MODEM_SVC_ID		64
+#define DIAG_SOCK_MODEM_INS_ID		3
+
+/* A server socket waits for its peer; a client socket connects out */
+#define PORT_TYPE_SERVER		0
+#define PORT_TYPE_CLIENT		1
+
+/* IPC router control commands received over the control socket */
+#define CNTL_CMD_NEW_SERVER		4
+#define CNTL_CMD_REMOVE_SERVER		5
+#define CNTL_CMD_REMOVE_CLIENT		6
+
+/* State of one diag socket channel (cntl/data/cmd/dci/dci_cmd) */
+struct diag_socket_info {
+	uint8_t peripheral;
+	uint8_t type;
+	uint8_t port_type;	/* PORT_TYPE_SERVER or PORT_TYPE_CLIENT */
+	uint8_t inited;
+	atomic_t opened;	/* set once the peer's address is known */
+	atomic_t diag_state;
+	uint32_t pkt_len;
+	uint32_t pkt_read;
+	uint32_t svc_id;
+	uint32_t ins_id;
+	uint32_t data_ready;	/* pending packet count, guarded by lock */
+	atomic_t flow_cnt;
+	char name[DIAG_SOCKET_NAME_SZ];
+	spinlock_t lock;
+	wait_queue_head_t wait_q;
+	struct sockaddr_msm_ipc remote_addr;	/* peer addr, captured from
+						 * the first packet received */
+	struct socket *hdl;	/* kernel socket handle */
+	struct workqueue_struct *wq;
+	struct work_struct init_work;
+	struct work_struct read_work;
+	struct diagfwd_info *fwd_ctxt;
+	wait_queue_head_t read_wait_q;
+};
+
+/* Wire layout of server/client control messages from the IPC router */
+union cntl_port_msg {
+	struct {
+		uint32_t cmd;
+		uint32_t service;
+		uint32_t instance;
+		uint32_t node_id;
+		uint32_t port_id;
+	} srv;
+	struct {
+		uint32_t cmd;
+		uint32_t node_id;
+		uint32_t port_id;
+	} cli;
+};
+
+/* State of the dedicated IPC router control socket */
+struct diag_cntl_socket_info {
+	uint32_t svc_id;
+	uint32_t ins_id;
+	atomic_t data_ready;
+	struct workqueue_struct *wq;
+	struct work_struct read_work;
+	struct work_struct init_work;
+	wait_queue_head_t read_wait_q;
+	struct socket *hdl;
+};
+
+extern struct diag_socket_info socket_data[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cntl[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_cmd[NUM_PERIPHERALS];
+extern struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS];
+
+extern struct diag_cntl_socket_info *cntl_socket;
+
+int diag_socket_init(void);
+int diag_socket_init_peripheral(uint8_t peripheral);
+void diag_socket_exit(void);
+void diag_socket_early_exit(void);
+void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt);
+int diag_socket_check_state(void *ctxt);
+#endif
diff --git a/drivers/char/diag/diagmem.c b/drivers/char/diag/diagmem.c
new file mode 100644
index 0000000..ada645d
--- /dev/null
+++ b/drivers/char/diag/diagmem.c
@@ -0,0 +1,295 @@
+/* Copyright (c) 2008-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/kmemleak.h>
+#include <linux/ratelimit.h>
+#include <linux/atomic.h>
+
+#include "diagchar.h"
+#include "diagmem.h"
+
+/*
+ * Static descriptors for every diag memory pool. itemsize/poolsize are
+ * filled in later via diagmem_setsize() and the mempool itself created
+ * by diagmem_init(). The MDM/QSC pools exist only when the bridge code
+ * is compiled in.
+ */
+struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS] = {
+	{
+		.id = POOL_TYPE_COPY,
+		.name = "POOL_COPY",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_HDLC,
+		.name = "POOL_HDLC",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_USER,
+		.name = "POOL_USER",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MUX_APPS,
+		.name = "POOL_MUX_APPS",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_DCI,
+		.name = "POOL_DCI",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+	{
+		.id = POOL_TYPE_MDM,
+		.name = "POOL_MDM",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2,
+		.name = "POOL_MDM2",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM_DCI,
+		.name = "POOL_MDM_DCI",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2_DCI,
+		.name = "POOL_MDM2_DCI",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM_MUX,
+		.name = "POOL_MDM_MUX",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2_MUX,
+		.name = "POOL_MDM2_MUX",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM_DCI_WRITE,
+		.name = "POOL_MDM_DCI_WRITE",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_MDM2_DCI_WRITE,
+		.name = "POOL_MDM2_DCI_WRITE",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	},
+	{
+		.id = POOL_TYPE_QSC_MUX,
+		.name = "POOL_QSC_MUX",
+		.pool = NULL,
+		.itemsize = 0,
+		.poolsize = 0,
+		.count = 0
+	}
+#endif
+};
+
+/*
+ * Record the per-item size and item count for the pool at @pool_idx.
+ * Must be called before diagmem_init() for that pool.
+ */
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize)
+{
+	if (pool_idx < 0 || pool_idx >= NUM_MEMORY_POOLS) {
+		pr_err("diag: Invalid pool index %d in %s\n", pool_idx,
+		       __func__);
+		return;
+	}
+
+	diag_mempools[pool_idx].itemsize = itemsize;
+	diag_mempools[pool_idx].poolsize = poolsize;
+	/* itemsize/poolsize are unsigned int in struct diag_mempool_t,
+	 * so print them with %u, not %d.
+	 */
+	pr_debug("diag: Mempool %s sizes: itemsize %u poolsize %u\n",
+		 diag_mempools[pool_idx].name, diag_mempools[pool_idx].itemsize,
+		 diag_mempools[pool_idx].poolsize);
+}
+
+/*
+ * Allocate one item of up to @size bytes from the pool matching
+ * @pool_type. Safe from atomic context (GFP_ATOMIC). Returns NULL when
+ * the pool is missing, exhausted, or @size is out of range.
+ */
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type)
+{
+	void *buf = NULL;
+	int i = 0;
+	unsigned long flags;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver)
+		return NULL;
+
+	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+		mempool = &diag_mempools[i];
+		if (pool_type != mempool->id)
+			continue;
+		if (!mempool->pool) {
+			pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+					   mempool->name);
+			break;
+		}
+		if (size == 0 || size > mempool->itemsize) {
+			pr_err_ratelimited("diag: cannot alloc from mempool %s, invalid size: %d\n",
+					   mempool->name, size);
+			break;
+		}
+		spin_lock_irqsave(&mempool->lock, flags);
+		if (mempool->count < mempool->poolsize) {
+			/*
+			 * NOTE(review): count is already serialized by the
+			 * lock; the atomic_t cast on a plain int mirrors
+			 * diagmem_free() but looks redundant — consider
+			 * making count an atomic_t proper.
+			 */
+			atomic_add(1, (atomic_t *)&mempool->count);
+			buf = mempool_alloc(mempool->pool, GFP_ATOMIC);
+			kmemleak_not_leak(buf);
+		}
+		spin_unlock_irqrestore(&mempool->lock, flags);
+		if (!buf) {
+			pr_debug_ratelimited("diag: Unable to allocate buffer from memory pool %s, size: %d/%d count: %d/%d\n",
+					     mempool->name,
+					     size, mempool->itemsize,
+					     mempool->count,
+					     mempool->poolsize);
+		}
+		break;
+	}
+
+	return buf;
+}
+
+/*
+ * Return @buf to the pool matching @pool_type. Logs and drops the
+ * request if the pool is uninitialized or its count is already zero.
+ */
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type)
+{
+	int i = 0;
+	unsigned long flags;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver || !buf)
+		return;
+
+	for (i = 0; i < NUM_MEMORY_POOLS; i++) {
+		mempool = &diag_mempools[i];
+		if (pool_type != mempool->id)
+			continue;
+		if (!mempool->pool) {
+			pr_err_ratelimited("diag: %s mempool is not initialized yet\n",
+					   mempool->name);
+			break;
+		}
+		spin_lock_irqsave(&mempool->lock, flags);
+		if (mempool->count > 0) {
+			mempool_free(buf, mempool->pool);
+			atomic_add(-1, (atomic_t *)&mempool->count);
+		} else {
+			pr_err_ratelimited("diag: Attempting to free items from %s mempool which is already empty\n",
+					   mempool->name);
+		}
+		spin_unlock_irqrestore(&mempool->lock, flags);
+		break;
+	}
+}
+
+/*
+ * Create the kmalloc-backed mempool at @index using the sizes recorded
+ * by diagmem_setsize(). Idempotent: returns early if already created.
+ */
+void diagmem_init(struct diagchar_dev *driver, int index)
+{
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver)
+		return;
+
+	if (index < 0 || index >= NUM_MEMORY_POOLS) {
+		pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+		return;
+	}
+
+	mempool = &diag_mempools[index];
+	if (mempool->pool) {
+		pr_debug("diag: mempool %s is already initialized\n",
+			 mempool->name);
+		return;
+	}
+	if (mempool->itemsize <= 0 || mempool->poolsize <= 0) {
+		pr_err("diag: Unable to initialize %s mempool, itemsize: %d poolsize: %d\n",
+		       mempool->name, mempool->itemsize,
+		       mempool->poolsize);
+		return;
+	}
+
+	mempool->pool = mempool_create_kmalloc_pool(mempool->poolsize,
+						    mempool->itemsize);
+	if (!mempool->pool)
+		pr_err("diag: cannot allocate %s mempool\n", mempool->name);
+	else
+		kmemleak_not_leak(mempool->pool);
+
+	/*
+	 * NOTE(review): the lock is initialized after the pool pointer is
+	 * published — confirm no diagmem_alloc()/diagmem_free() can race
+	 * with initialization.
+	 */
+	spin_lock_init(&mempool->lock);
+}
+
+/*
+ * Destroy the pool at @index if no items are outstanding; otherwise
+ * leave it allocated and log an error (items are still in use).
+ */
+void diagmem_exit(struct diagchar_dev *driver, int index)
+{
+	unsigned long flags;
+	struct diag_mempool_t *mempool = NULL;
+
+	if (!driver)
+		return;
+
+	if (index < 0 || index >= NUM_MEMORY_POOLS) {
+		pr_err("diag: In %s, Invalid index %d\n", __func__, index);
+		return;
+	}
+
+	mempool = &diag_mempools[index];
+	spin_lock_irqsave(&mempool->lock, flags);
+	if (mempool->count == 0 && mempool->pool != NULL) {
+		mempool_destroy(mempool->pool);
+		mempool->pool = NULL;
+	} else {
+		pr_err("diag: Unable to destroy %s pool, count: %d\n",
+		       mempool->name, mempool->count);
+	}
+	spin_unlock_irqrestore(&mempool->lock, flags);
+}
+
diff --git a/drivers/char/diag/diagmem.h b/drivers/char/diag/diagmem.h
new file mode 100644
index 0000000..d097a37
--- /dev/null
+++ b/drivers/char/diag/diagmem.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGMEM_H
+#define DIAGMEM_H
+#include "diagchar.h"
+
+/* Pools local to the APPS processor */
+#define POOL_TYPE_COPY			0
+#define POOL_TYPE_HDLC			1
+#define POOL_TYPE_USER			2
+#define POOL_TYPE_MUX_APPS		3
+#define POOL_TYPE_DCI			4
+#define POOL_TYPE_LOCAL_LAST		5
+
+/* Pools for remote processors, present only with the bridge code */
+#define POOL_TYPE_REMOTE_BASE		POOL_TYPE_LOCAL_LAST
+#define POOL_TYPE_MDM			POOL_TYPE_REMOTE_BASE
+#define POOL_TYPE_MDM2			(POOL_TYPE_REMOTE_BASE + 1)
+#define POOL_TYPE_MDM_DCI		(POOL_TYPE_REMOTE_BASE + 2)
+#define POOL_TYPE_MDM2_DCI		(POOL_TYPE_REMOTE_BASE + 3)
+#define POOL_TYPE_MDM_MUX		(POOL_TYPE_REMOTE_BASE + 4)
+#define POOL_TYPE_MDM2_MUX		(POOL_TYPE_REMOTE_BASE + 5)
+#define POOL_TYPE_MDM_DCI_WRITE		(POOL_TYPE_REMOTE_BASE + 6)
+#define POOL_TYPE_MDM2_DCI_WRITE	(POOL_TYPE_REMOTE_BASE + 7)
+#define POOL_TYPE_QSC_MUX		(POOL_TYPE_REMOTE_BASE + 8)
+#define POOL_TYPE_REMOTE_LAST		(POOL_TYPE_REMOTE_BASE + 9)
+
+#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+#define NUM_MEMORY_POOLS		POOL_TYPE_REMOTE_LAST
+#else
+#define NUM_MEMORY_POOLS		POOL_TYPE_LOCAL_LAST
+#endif
+
+#define DIAG_MEMPOOL_NAME_SZ		24
+#define DIAG_MEMPOOL_GET_NAME(x)	(diag_mempools[x].name)
+
+/* Runtime state of one diag memory pool */
+struct diag_mempool_t {
+	int id;				/* POOL_TYPE_* identifier */
+	char name[DIAG_MEMPOOL_NAME_SZ];
+	mempool_t *pool;
+	unsigned int itemsize;		/* max size of one item */
+	unsigned int poolsize;		/* max outstanding items */
+	int count;			/* items allocated, under lock */
+	spinlock_t lock;		/* protects count */
+	/*
+	 * NOTE(review): __packed may misalign pool/lock on some
+	 * architectures — confirm the packing is really required.
+	 */
+} __packed;
+
+extern struct diag_mempool_t diag_mempools[NUM_MEMORY_POOLS];
+
+void diagmem_setsize(int pool_idx, int itemsize, int poolsize);
+void *diagmem_alloc(struct diagchar_dev *driver, int size, int pool_type);
+void diagmem_free(struct diagchar_dev *driver, void *buf, int pool_type);
+void diagmem_init(struct diagchar_dev *driver, int type);
+void diagmem_exit(struct diagchar_dev *driver, int type);
+
+#endif
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index f21e9b7..e3eed5a 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -20,7 +20,7 @@
 
 config COMMON_CLK_IPROC
 	bool "Broadcom iProc clock support"
-	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	depends on ARCH_BCM_IPROC || ARCH_BCM_63XX || COMPILE_TEST
 	depends on COMMON_CLK
 	default ARCH_BCM_IPROC
 	help
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 98eef6fe..33b28dd 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2278,6 +2278,7 @@
 
 static struct dentry *rootdir;
 static int inited = 0;
+static u32 debug_suspend;
 static DEFINE_MUTEX(clk_debug_lock);
 static HLIST_HEAD(clk_debug_list);
 
@@ -2420,6 +2421,309 @@
 	.release	= single_release,
 };
 
+/* debugfs clk_rate write handler: forwards the value to clk_set_rate() */
+static int clock_debug_rate_set(void *data, u64 val)
+{
+	struct clk_core *core = data;
+	int ret;
+
+	ret = clk_set_rate(core->hw->clk, val);
+	if (ret)
+		pr_err("clk_set_rate(%lu) failed (%d)\n",
+				(unsigned long)val, ret);
+
+	return ret;
+}
+
+/* debugfs clk_rate read handler: report the cached rate */
+static int clock_debug_rate_get(void *data, u64 *val)
+{
+	struct clk_core *core = data;
+
+	/* core->hw->core points back at core itself */
+	*val = core->hw->core->rate;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
+			clock_debug_rate_set, "%llu\n");
+
+/* debugfs clk_parent read handler: print the parent's name, or "None" */
+static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	char name[256] = {0};
+	struct clk_core *core = filp->private_data;
+	struct clk_core *p = core->hw->core->parent;
+
+	/*
+	 * The format string already supplies the newline; a bare "None"
+	 * avoids printing a trailing blank line in the no-parent case.
+	 */
+	snprintf(name, sizeof(name), "%s\n", p ? p->name : "None");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
+}
+
+static const struct file_operations clock_parent_fops = {
+	.open	= simple_open,
+	.read	= clock_parent_read,
+};
+
+/*
+ * debugfs clk_enable_count write handler: non-zero prepares and enables
+ * the clock, zero disables and unprepares it. Each write adds/removes a
+ * reference, so writes must be balanced by the user.
+ */
+static int clock_debug_enable_set(void *data, u64 val)
+{
+	struct clk_core *core = data;
+	int rc = 0;
+
+	if (val)
+		rc = clk_prepare_enable(core->hw->clk);
+	else
+		clk_disable_unprepare(core->hw->clk);
+
+	return rc;
+}
+
+/* debugfs clk_enable_count read handler: report the enable refcount */
+static int clock_debug_enable_get(void *data, u64 *val)
+{
+	struct clk_core *core = data;
+	int enabled = 0;
+
+	enabled = core->enable_count;
+
+	*val = enabled;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
+			clock_debug_enable_set, "%lld\n");
+
+/*
+ * Emit output to the seq_file m when one is given (debugfs read),
+ * otherwise to the kernel log; c selects pr_cont for continuation
+ * lines of a partially printed record.
+ */
+#define clock_debug_output(m, c, fmt, ...)		\
+do {							\
+	if (m)						\
+		seq_printf(m, fmt, ##__VA_ARGS__);	\
+	else if (c)					\
+		pr_cont(fmt, ##__VA_ARGS__);		\
+	else						\
+		pr_info(fmt, ##__VA_ARGS__);		\
+} while (0)
+
+/*
+ * Print one prepared clock and its parent chain in the form
+ * name:prepare_count:enable_count [rate] (plus the vdd level when a
+ * vdd_class exists). Returns 1 if printed, 0 if skipped (NULL or not
+ * prepared).
+ */
+int clock_debug_print_clock(struct clk_core *c, struct seq_file *s)
+{
+	char *start = "";
+	struct clk *clk;
+
+	if (!c || !c->prepare_count)
+		return 0;
+
+	clk = c->hw->clk;
+
+	clock_debug_output(s, 0, "\t");
+
+	do {
+		if (clk->core->vdd_class)
+			clock_debug_output(s, 1, "%s%s:%u:%u [%ld, %d]", start,
+					clk->core->name,
+					clk->core->prepare_count,
+					clk->core->enable_count,
+					clk->core->rate,
+				clk_find_vdd_level(clk->core, clk->core->rate));
+		else
+			clock_debug_output(s, 1, "%s%s:%u:%u [%ld]", start,
+					clk->core->name,
+					clk->core->prepare_count,
+					clk->core->enable_count,
+					clk->core->rate);
+		start = " -> ";
+	} while ((clk = clk_get_parent(clk)));
+
+	clock_debug_output(s, 1, "\n");
+
+	return 1;
+}
+
+/*
+ * clock_debug_print_enabled_clocks() - Print names of enabled clocks
+ * @s: seq_file to print into, or NULL to send output to the kernel log
+ */
+static void clock_debug_print_enabled_clocks(struct seq_file *s)
+{
+	struct clk_core *core;
+	int cnt = 0;
+
+	clock_debug_output(s, 0, "Enabled clocks:\n");
+
+	mutex_lock(&clk_debug_lock);
+
+	hlist_for_each_entry(core, &clk_debug_list, debug_node)
+		cnt += clock_debug_print_clock(core, s);
+
+	mutex_unlock(&clk_debug_lock);
+
+	if (cnt)
+		clock_debug_output(s, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(s, 0, "No clocks enabled.\n");
+}
+
+/* debugfs clk_enabled_list: dump every currently enabled clock */
+static int enabled_clocks_show(struct seq_file *s, void *unused)
+{
+	clock_debug_print_enabled_clocks(s);
+
+	return 0;
+}
+
+static int enabled_clocks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, enabled_clocks_show, inode->i_private);
+}
+
+static const struct file_operations clk_enabled_list_fops = {
+	.open		= enabled_clocks_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * Recursively print register contents for @clk and its ancestors,
+ * root-most parent first, via each clock's list_registers op (if any).
+ */
+static void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return;
+
+	clk_debug_print_hw(clk->parent, f);
+
+	clock_debug_output(f, false, "%s\n", clk->name);
+
+	if (!clk->ops->list_registers)
+		return;
+
+	clk->ops->list_registers(f, clk->hw);
+}
+
+/* debugfs clk_print_regs: dump this clock's and its ancestors' registers */
+static int print_hw_show(struct seq_file *m, void *unused)
+{
+	struct clk_core *c = m->private;
+
+	clk_debug_print_hw(c, m);
+
+	return 0;
+}
+
+static int print_hw_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, print_hw_show, inode->i_private);
+}
+
+static const struct file_operations clock_print_hw_fops = {
+	.open		= print_hw_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * debugfs clk_list_rates: list the frequencies this clock supports,
+ * bounded by the highest rate permitted by its voltage constraints.
+ */
+static int list_rates_show(struct seq_file *s, void *unused)
+{
+	struct clk_core *core = s->private;
+	int level = 0, i = 0;
+	unsigned long rate, rate_max = 0;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!core->vdd_class) {
+		rate_max = ULONG_MAX;
+	} else {
+		for (level = 0; level < core->num_rate_max; level++)
+			if (core->rate_max[level])
+				rate_max = core->rate_max[level];
+	}
+
+	/*
+	 * List supported frequencies <= rate_max. Higher frequencies may
+	 * appear in the frequency table, but are not valid and should not
+	 * be listed.
+	 */
+	/* rate is unsigned, so "rate <= 0" only terminates on rate == 0 */
+	while (!IS_ERR_VALUE(rate =
+			core->ops->list_rate(core->hw, i++, rate_max))) {
+		if (rate <= 0)
+			break;
+		if (rate <= rate_max)
+			seq_printf(s, "%lu\n", rate);
+	}
+
+	return 0;
+}
+
+static int list_rates_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, list_rates_show, inode->i_private);
+}
+
+static const struct file_operations list_rates_fops = {
+	.open		= list_rates_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * Print one row of the clk_rate_max table: the max rate at @level plus
+ * each regulator's voltage for that level, with brackets marking the
+ * clock's current vdd level. Only reached for clocks with a vdd_class
+ * (the file is created conditionally in clk_debug_create_one()).
+ */
+static void clock_print_rate_max_by_level(struct seq_file *s, int level)
+{
+	struct clk_core *core = s->private;
+	struct clk_vdd_class *vdd_class = core->vdd_class;
+	int off, i, vdd_level, nregs = vdd_class->num_regulators;
+
+	vdd_level = clk_find_vdd_level(core, core->rate);
+
+	seq_printf(s, "%2s%10lu", vdd_level == level ? "[" : "",
+		core->rate_max[level]);
+
+	for (i = 0; i < nregs; i++) {
+		off = nregs*level + i;
+		if (vdd_class->vdd_uv)
+			seq_printf(s, "%10u", vdd_class->vdd_uv[off]);
+	}
+
+	if (vdd_level == level)
+		seq_puts(s, "]");
+
+	seq_puts(s, "\n");
+}
+
+/*
+ * debugfs clk_rate_max: print the rate/voltage table for this clock.
+ * The file is only created for clocks with a vdd_class (see
+ * clk_debug_create_one()), so vdd_class is non-NULL here.
+ */
+static int rate_max_show(struct seq_file *s, void *unused)
+{
+	struct clk_core *core = s->private;
+	struct clk_vdd_class *vdd_class = core->vdd_class;
+	int level = 0, i, nregs = vdd_class->num_regulators;
+	char reg_name[10];
+
+	int vdd_level = clk_find_vdd_level(core, core->rate);
+
+	if (vdd_level < 0) {
+		seq_printf(s, "could not find_vdd_level for %s, %ld\n",
+			core->name, core->rate);
+		return 0;
+	}
+
+	/* Header row: one column per regulator */
+	seq_printf(s, "%12s", "");
+	for (i = 0; i < nregs; i++) {
+		snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
+		seq_printf(s, "%10s", reg_name);
+	}
+
+	seq_printf(s, "\n%12s", "freq");
+	for (i = 0; i < nregs; i++)
+		seq_printf(s, "%10s", "uV");
+
+	seq_puts(s, "\n");
+
+	for (level = 0; level < core->num_rate_max; level++)
+		clock_print_rate_max_by_level(s, level);
+
+	return 0;
+}
+
+static int rate_max_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rate_max_show, inode->i_private);
+}
+
+static const struct file_operations rate_max_fops = {
+	.open		= rate_max_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
 static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
 {
 	struct dentry *d;
@@ -2436,41 +2740,61 @@
 
 	core->dentry = d;
 
-	d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
-			(u32 *)&core->rate);
+	d = debugfs_create_file("clk_rate", 0444, core->dentry, core,
+			&clock_rate_fops);
 	if (!d)
 		goto err_out;
 
-	d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
+	if (core->ops->list_rate) {
+		if (!debugfs_create_file("clk_list_rates",
+				0444, core->dentry, core, &list_rates_fops))
+			goto err_out;
+	}
+
+	if (core->vdd_class && !debugfs_create_file("clk_rate_max",
+				0444, core->dentry, core, &rate_max_fops))
+		goto err_out;
+
+	d = debugfs_create_u32("clk_accuracy", 0444, core->dentry,
 			(u32 *)&core->accuracy);
 	if (!d)
 		goto err_out;
 
-	d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
+	d = debugfs_create_u32("clk_phase", 0444, core->dentry,
 			(u32 *)&core->phase);
 	if (!d)
 		goto err_out;
 
-	d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
+	d = debugfs_create_x32("clk_flags", 0444, core->dentry,
 			(u32 *)&core->flags);
 	if (!d)
 		goto err_out;
 
-	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
+	d = debugfs_create_u32("clk_prepare_count", 0444, core->dentry,
 			(u32 *)&core->prepare_count);
 	if (!d)
 		goto err_out;
 
-	d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
-			(u32 *)&core->enable_count);
+	d = debugfs_create_file("clk_enable_count", 0444, core->dentry,
+			core, &clock_enable_fops);
 	if (!d)
 		goto err_out;
 
-	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
+	d = debugfs_create_u32("clk_notifier_count", 0444, core->dentry,
 			(u32 *)&core->notifier_count);
 	if (!d)
 		goto err_out;
 
+	d = debugfs_create_file("clk_parent", 0444, core->dentry, core,
+			&clock_parent_fops);
+	if (!d)
+		goto err_out;
+
+	d = debugfs_create_file("clk_print_regs", 0444, core->dentry,
+			core, &clock_print_hw_fops);
+	if (!d)
+		goto err_out;
+
 	if (core->ops->debug_init) {
 		ret = core->ops->debug_init(core->hw, core->dentry);
 		if (ret)
@@ -2542,6 +2866,19 @@
 }
 EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
 
+/*
+ * Print the names of all enabled clocks and their parents if
+ * debug_suspend is set from debugfs. Output goes to the kernel log
+ * (NULL seq_file).
+ */
+void clock_debug_print_enabled(void)
+{
+	if (likely(!debug_suspend))
+		return;
+
+	clock_debug_print_enabled_clocks(NULL);
+}
+EXPORT_SYMBOL_GPL(clock_debug_print_enabled);
+
 /**
  * clk_debug_init - lazily populate the debugfs clk directory
  *
@@ -2561,26 +2898,36 @@
 	if (!rootdir)
 		return -ENOMEM;
 
-	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
+	d = debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
 				&clk_summary_fops);
 	if (!d)
 		return -ENOMEM;
 
-	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
+	d = debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
 				&clk_dump_fops);
 	if (!d)
 		return -ENOMEM;
 
-	d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
+	d = debugfs_create_file("clk_orphan_summary", 0444, rootdir,
 				&orphan_list, &clk_summary_fops);
 	if (!d)
 		return -ENOMEM;
 
-	d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
+	d = debugfs_create_file("clk_orphan_dump", 0444, rootdir,
 				&orphan_list, &clk_dump_fops);
 	if (!d)
 		return -ENOMEM;
 
+	d = debugfs_create_file("clk_enabled_list", 0444, rootdir,
+				&clk_debug_list, &clk_enabled_list_fops);
+	if (!d)
+		return -ENOMEM;
+
+
+	d = debugfs_create_u32("debug_suspend", 0644, rootdir, &debug_suspend);
+	if (!d)
+		return -ENOMEM;
+
 	mutex_lock(&clk_debug_lock);
 	hlist_for_each_entry(core, &clk_debug_list, debug_node)
 		clk_debug_create_one(core, rootdir);
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index 00b35a1..331e086 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -20,6 +20,10 @@
 struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
 			     const char *con_id);
 void __clk_free_clk(struct clk *clk);
+
+/* Debugfs API to print the enabled clocks */
+void clock_debug_print_enabled(void);
+
 #else
 /* All these casts to avoid ifdefs in clkdev... */
 static inline struct clk *
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 6ee6a6b..43c69ac 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -414,6 +414,41 @@
 	return clamp(rate, min_freq, max_freq);
 }
 
+/* Dump the alpha PLL control registers into the debugfs seq_file. */
+static void clk_alpha_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	int size, i;
+	/* regmap_read() takes an unsigned int *, so val must not be int */
+	unsigned int val;
+
+	static struct clk_register_data data[] = {
+		{"PLL_MODE", 0x0},
+		{"PLL_L_VAL", 0x4},
+		{"PLL_ALPHA_VAL", 0x8},
+		{"PLL_ALPHA_VAL_U", 0xC},
+		{"PLL_USER_CTL", 0x10},
+		{"PLL_CONFIG_CTL", 0x18},
+	};
+
+	static struct clk_register_data data1[] = {
+		{"APSS_PLL_VOTE", 0x0},
+	};
+
+	size = ARRAY_SIZE(data);
+
+	for (i = 0; i < size; i++) {
+		regmap_read(pll->clkr.regmap, pll->offset + data[i].offset,
+					&val);
+		seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
+	}
+
+	/* If the PLL is under FSM voting control, also show the vote reg */
+	regmap_read(pll->clkr.regmap, pll->offset + data[0].offset, &val);
+
+	if (val & PLL_FSM_ENA) {
+		regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
+					data1[0].offset, &val);
+		seq_printf(f, "%20s: 0x%.8x\n", data1[0].name, val);
+	}
+}
+
 void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 				const struct pll_config *config)
 {
@@ -617,12 +652,48 @@
 	return 0;
 }
 
+/* Dump the Fabia PLL control registers into the debugfs seq_file. */
+static void clk_fabia_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	int size, i;
+	/* regmap_read() takes an unsigned int *, so val must not be int */
+	unsigned int val;
+
+	static struct clk_register_data data[] = {
+		{"PLL_MODE", 0x0},
+		{"PLL_L_VAL", 0x4},
+		{"PLL_FRAC_VAL", 0x38},
+		{"PLL_USER_CTL", 0xc},
+		{"PLL_CONFIG_CTL", 0x14},
+		{"PLL_OPMODE", 0x2c},
+	};
+
+	static struct clk_register_data data1[] = {
+		{"APSS_PLL_VOTE", 0x0},
+	};
+
+	size = ARRAY_SIZE(data);
+
+	for (i = 0; i < size; i++) {
+		regmap_read(pll->clkr.regmap, pll->offset + data[i].offset,
+					&val);
+		seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
+	}
+
+	/* If the PLL is under FSM voting control, also show the vote reg */
+	regmap_read(pll->clkr.regmap, pll->offset + data[0].offset, &val);
+
+	if (val & PLL_FSM_ENA) {
+		regmap_read(pll->clkr.regmap, pll->clkr.enable_reg +
+					data1[0].offset, &val);
+		seq_printf(f, "%20s: 0x%.8x\n", data1[0].name, val);
+	}
+}
+
 const struct clk_ops clk_alpha_pll_ops = {
 	.enable = clk_alpha_pll_enable,
 	.disable = clk_alpha_pll_disable,
 	.recalc_rate = clk_alpha_pll_recalc_rate,
 	.round_rate = clk_alpha_pll_round_rate,
 	.set_rate = clk_alpha_pll_set_rate,
+	.list_registers = clk_alpha_pll_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
 
@@ -632,6 +703,7 @@
 	.recalc_rate = clk_alpha_pll_recalc_rate,
 	.round_rate = clk_alpha_pll_round_rate,
 	.set_rate = clk_alpha_pll_set_rate,
+	.list_registers = clk_alpha_pll_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
 
@@ -641,6 +713,7 @@
 	.recalc_rate = clk_fabia_pll_recalc_rate,
 	.round_rate = clk_alpha_pll_round_rate,
 	.set_rate = clk_fabia_pll_set_rate,
+	.list_registers = clk_fabia_pll_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_fabia_pll_ops);
 
@@ -649,6 +722,7 @@
 	.disable = clk_fabia_pll_disable,
 	.recalc_rate = clk_fabia_pll_recalc_rate,
 	.round_rate = clk_alpha_pll_round_rate,
+	.list_registers = clk_fabia_pll_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_fabia_fixed_pll_ops);
 
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index f9d3f86..bd56314 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -16,11 +16,13 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/export.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/regmap.h>
 #include <linux/clk/qcom.h>
 
 #include "clk-branch.h"
+#include "clk-regmap.h"
 
 static bool clk_branch_in_hwcg_mode(const struct clk_branch *br)
 {
@@ -182,6 +184,43 @@
 };
 EXPORT_SYMBOL_GPL(clk_branch_ops);
 
+static void clk_branch2_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_branch *br = to_clk_branch(hw);
+	struct clk_regmap *rclk = to_clk_regmap(hw);
+	int size, i, val;
+
+	static struct clk_register_data data[] = {
+		{"CBCR", 0x0},
+	};
+
+	static struct clk_register_data data1[] = {
+		{"APSS_VOTE", 0x0},
+		{"APSS_SLEEP_VOTE", 0x4},
+	};
+
+	size = ARRAY_SIZE(data);
+
+	for (i = 0; i < size; i++) {
+		regmap_read(br->clkr.regmap, br->halt_reg + data[i].offset,
+					&val);
+		seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
+	}
+
+	if ((br->halt_check & BRANCH_HALT_VOTED) &&
+			!(br->halt_check & BRANCH_VOTED)) {
+		if (rclk->enable_reg) {
+			size = ARRAY_SIZE(data1);
+			for (i = 0; i < size; i++) {
+				regmap_read(br->clkr.regmap, rclk->enable_reg +
+						data1[i].offset, &val);
+				seq_printf(f, "%20s: 0x%.8x\n",
+						data1[i].name, val);
+			}
+		}
+	}
+}
+
 static int clk_branch2_enable(struct clk_hw *hw)
 {
 	return clk_branch_toggle(hw, true, clk_branch2_check_halt);
@@ -197,6 +236,7 @@
 	.disable = clk_branch2_disable,
 	.is_enabled = clk_is_enabled_regmap,
 	.set_flags = clk_branch_set_flags,
+	.list_registers = clk_branch2_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_branch2_ops);
 
@@ -229,10 +269,29 @@
 	clk_gate_toggle(hw, false);
 }
 
+static void clk_gate2_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_gate2 *gt = to_clk_gate2(hw);
+	int size, i, val;
+
+	static struct clk_register_data data[] = {
+		{"EN_REG", 0x0},
+	};
+
+	size = ARRAY_SIZE(data);
+
+	for (i = 0; i < size; i++) {
+		regmap_read(gt->clkr.regmap, gt->clkr.enable_reg +
+					data[i].offset, &val);
+		seq_printf(f, "%20s: 0x%.8x\n", data[i].name, val);
+	}
+}
+
 const struct clk_ops clk_gate2_ops = {
 	.enable = clk_gate2_enable,
 	.disable = clk_gate2_disable,
 	.is_enabled = clk_is_enabled_regmap,
+	.list_registers = clk_gate2_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_gate2_ops);
 
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 590cf45..c8e317a 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -310,6 +310,53 @@
 	return update_config(rcg);
 }
 
+static void clk_rcg2_list_registers(struct seq_file *f, struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	int i = 0, size = 0, val;
+
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+
+	static struct clk_register_data data1[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+		{"M_VAL", 0x8},
+		{"N_VAL", 0xC},
+		{"D_VAL", 0x10},
+	};
+
+	if (rcg->mnd_width) {
+		size = ARRAY_SIZE(data1);
+		for (i = 0; i < size; i++) {
+			regmap_read(rcg->clkr.regmap, (rcg->cmd_rcgr +
+					data1[i].offset), &val);
+			seq_printf(f, "%20s: 0x%.8x\n",	data1[i].name, val);
+		}
+	} else {
+		size = ARRAY_SIZE(data);
+		for (i = 0; i < size; i++) {
+			regmap_read(rcg->clkr.regmap, (rcg->cmd_rcgr +
+				data[i].offset), &val);
+			seq_printf(f, "%20s: 0x%.8x\n",	data[i].name, val);
+		}
+	}
+}
+
+/* Return the nth supported frequency for a given clock. */
+static long clk_rcg2_list_rate(struct clk_hw *hw, unsigned int n,
+		unsigned long fmax)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	if (!rcg->freq_tbl)
+		return -ENXIO;
+
+	return (rcg->freq_tbl + n)->freq;
+}
+
 static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -408,6 +455,8 @@
 	.determine_rate = clk_rcg2_determine_rate,
 	.set_rate = clk_rcg2_set_rate,
 	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
+	.list_rate = clk_rcg2_list_rate,
+	.list_registers = clk_rcg2_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_rcg2_ops);
 
@@ -614,6 +663,7 @@
 	.set_rate = clk_edp_pixel_set_rate,
 	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
 	.determine_rate = clk_edp_pixel_determine_rate,
+	.list_registers = clk_rcg2_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
 
@@ -672,6 +722,7 @@
 	.set_rate = clk_byte_set_rate,
 	.set_rate_and_parent = clk_byte_set_rate_and_parent,
 	.determine_rate = clk_byte_determine_rate,
+	.list_registers = clk_rcg2_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_byte_ops);
 
@@ -742,6 +793,7 @@
 	.set_rate = clk_byte2_set_rate,
 	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
 	.determine_rate = clk_byte2_determine_rate,
+	.list_registers = clk_rcg2_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_byte2_ops);
 
@@ -832,6 +884,7 @@
 	.set_rate = clk_pixel_set_rate,
 	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
 	.determine_rate = clk_pixel_determine_rate,
+	.list_registers = clk_rcg2_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_pixel_ops);
 
@@ -919,5 +972,6 @@
 	.set_rate = clk_gfx3d_set_rate,
 	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
 	.determine_rate = clk_gfx3d_determine_rate,
+	.list_registers = clk_rcg2_list_registers,
 };
 EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
diff --git a/drivers/clk/qcom/clk-regmap.h b/drivers/clk/qcom/clk-regmap.h
index 90d95cd..45e9f93 100644
--- a/drivers/clk/qcom/clk-regmap.h
+++ b/drivers/clk/qcom/clk-regmap.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -15,6 +15,7 @@
 #define __QCOM_CLK_REGMAP_H__
 
 #include <linux/clk-provider.h>
+#include <linux/debugfs.h>
 
 struct regmap;
 
@@ -41,4 +42,9 @@
 void clk_disable_regmap(struct clk_hw *hw);
 int devm_clk_register_regmap(struct device *dev, struct clk_regmap *rclk);
 
+struct clk_register_data {
+	char *name;
+	u32 offset;
+};
+
 #endif
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 4a82a49..fc75a33 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -143,7 +143,7 @@
 					4, 2,	/* K */
 					0, 4,	/* M */
 					21, 0,	/* mux */
-					BIT(31),	/* gate */
+					BIT(31) | BIT(23) | BIT(22), /* gate */
 					BIT(28),	/* lock */
 					CLK_SET_RATE_UNGATE);
 
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 96b40ca..9bd1f78 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -131,7 +131,7 @@
 				    8, 4,		/* N */
 				    4, 2,		/* K */
 				    0, 4,		/* M */
-				    BIT(31),		/* gate */
+				    BIT(31) | BIT(23) | BIT(22), /* gate */
 				    BIT(28),		/* lock */
 				    CLK_SET_RATE_UNGATE);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 02ca5dd..6c343a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -485,7 +485,6 @@
  */
 static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
 {
-	struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
 	acpi_handle dhandle, atpx_handle;
 	acpi_status status;
 
@@ -500,7 +499,6 @@
 	}
 	amdgpu_atpx_priv.dhandle = dhandle;
 	amdgpu_atpx_priv.atpx.handle = atpx_handle;
-	amdgpu_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
 	return true;
 }
 
@@ -562,17 +560,25 @@
 	struct pci_dev *pdev = NULL;
 	bool has_atpx = false;
 	int vga_count = 0;
+	bool d3_supported = false;
+	struct pci_dev *parent_pdev;
 
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
 		vga_count++;
 
 		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+		parent_pdev = pci_upstream_bridge(pdev);
+		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
 	}
 
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
 		vga_count++;
 
 		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+		parent_pdev = pci_upstream_bridge(pdev);
+		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
 	}
 
 	if (has_atpx && vga_count == 2) {
@@ -580,6 +586,7 @@
 		printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
 		       acpi_method_name);
 		amdgpu_atpx_priv.atpx_detected = true;
+		amdgpu_atpx_priv.bridge_pm_usable = d3_supported;
 		amdgpu_atpx_init();
 		return true;
 	}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 4ccc0b7..71bb2f8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -2214,6 +2214,7 @@
 int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
 {
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	uint32_t tmp;
 	int result;
 	bool error = false;
@@ -2233,8 +2234,10 @@
 			offsetof(SMU74_Firmware_Header, SoftRegisters),
 			&tmp, SMC_RAM_END);
 
-	if (!result)
+	if (!result) {
+		data->soft_regs_start = tmp;
 		smu_data->smu7_data.soft_regs_start = tmp;
+	}
 
 	error |= (0 != result);
 
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index fb6a418..e138fb5 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -375,7 +375,6 @@
 
 err_fbdev:
 	drm_kms_helper_poll_fini(drm);
-	drm_mode_config_cleanup(drm);
 	drm_vblank_cleanup(drm);
 err_vblank:
 	pm_runtime_disable(drm->dev);
@@ -387,6 +386,7 @@
 	drm_irq_uninstall(drm);
 	of_reserved_mem_device_release(drm->dev);
 err_free:
+	drm_mode_config_cleanup(drm);
 	dev_set_drvdata(dev, NULL);
 	drm_dev_unref(drm);
 
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 0ad2c47..71c3473 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -254,10 +254,12 @@
 		req->value = dev->mode_config.async_page_flip;
 		break;
 	case DRM_CAP_PAGE_FLIP_TARGET:
-		req->value = 1;
-		drm_for_each_crtc(crtc, dev) {
-			if (!crtc->funcs->page_flip_target)
-				req->value = 0;
+		if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+			req->value = 1;
+			drm_for_each_crtc(crtc, dev) {
+				if (!crtc->funcs->page_flip_target)
+					req->value = 0;
+			}
 		}
 		break;
 	case DRM_CAP_CURSOR_WIDTH:
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 91ab7e9..00eb481 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2268,7 +2268,7 @@
 			page = shmem_read_mapping_page(mapping, i);
 			if (IS_ERR(page)) {
 				ret = PTR_ERR(page);
-				goto err_pages;
+				goto err_sg;
 			}
 		}
 #ifdef CONFIG_SWIOTLB
@@ -2311,8 +2311,9 @@
 
 	return 0;
 
-err_pages:
+err_sg:
 	sg_mark_end(sg);
+err_pages:
 	for_each_sgt_page(page, sgt_iter, st)
 		put_page(page);
 	sg_free_table(st);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 81c1149..3cb70d7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12260,7 +12260,7 @@
 	intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
 	if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
 		ret = -EIO;
-		goto cleanup;
+		goto unlock;
 	}
 
 	atomic_inc(&intel_crtc->unpin_work_count);
@@ -12352,6 +12352,7 @@
 	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
+unlock:
 	mutex_unlock(&dev->struct_mutex);
 cleanup:
 	crtc->primary->fb = old_fb;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 4129b12..0ae13cd2 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -479,7 +479,6 @@
  */
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
 {
-	struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
 	acpi_handle dhandle, atpx_handle;
 	acpi_status status;
 
@@ -493,7 +492,6 @@
 
 	radeon_atpx_priv.dhandle = dhandle;
 	radeon_atpx_priv.atpx.handle = atpx_handle;
-	radeon_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
 	return true;
 }
 
@@ -555,11 +553,16 @@
 	struct pci_dev *pdev = NULL;
 	bool has_atpx = false;
 	int vga_count = 0;
+	bool d3_supported = false;
+	struct pci_dev *parent_pdev;
 
 	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
 		vga_count++;
 
 		has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+		parent_pdev = pci_upstream_bridge(pdev);
+		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
 	}
 
 	/* some newer PX laptops mark the dGPU as a non-VGA display device */
@@ -567,6 +570,9 @@
 		vga_count++;
 
 		has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+
+		parent_pdev = pci_upstream_bridge(pdev);
+		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
 	}
 
 	if (has_atpx && vga_count == 2) {
@@ -574,6 +580,7 @@
 		printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
 		       acpi_method_name);
 		radeon_atpx_priv.atpx_detected = true;
+		radeon_atpx_priv.bridge_pm_usable = d3_supported;
 		radeon_atpx_init();
 		return true;
 	}
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c
index 419b54b..5e63b17 100644
--- a/drivers/i2c/busses/i2c-octeon-core.c
+++ b/drivers/i2c/busses/i2c-octeon-core.c
@@ -381,9 +381,7 @@
 		if (result)
 			return result;
 
-		data[i] = octeon_i2c_data_read(i2c, &result);
-		if (result)
-			return result;
+		data[i] = octeon_i2c_data_read(i2c);
 		if (recv_len && i == 0) {
 			if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
 				return -EPROTO;
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h
index 1db7c83..87151ea 100644
--- a/drivers/i2c/busses/i2c-octeon-core.h
+++ b/drivers/i2c/busses/i2c-octeon-core.h
@@ -5,7 +5,6 @@
 #include <linux/i2c.h>
 #include <linux/i2c-smbus.h>
 #include <linux/io.h>
-#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 
@@ -145,9 +144,9 @@
 	u64 tmp;
 
 	__raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c));
-
-	readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, tmp & SW_TWSI_V,
-			   I2C_OCTEON_EVENT_WAIT, i2c->adap.timeout);
+	do {
+		tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+	} while ((tmp & SW_TWSI_V) != 0);
 }
 
 #define octeon_i2c_ctl_write(i2c, val)					\
@@ -164,28 +163,24 @@
  *
  * The I2C core registers are accessed indirectly via the SW_TWSI CSR.
  */
-static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg,
-				      int *error)
+static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
 {
 	u64 tmp;
-	int ret;
 
 	__raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c));
+	do {
+		tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c));
+	} while ((tmp & SW_TWSI_V) != 0);
 
-	ret = readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp,
-				 tmp & SW_TWSI_V, I2C_OCTEON_EVENT_WAIT,
-				 i2c->adap.timeout);
-	if (error)
-		*error = ret;
 	return tmp & 0xFF;
 }
 
 #define octeon_i2c_ctl_read(i2c)					\
-	octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL)
-#define octeon_i2c_data_read(i2c, error)				\
-	octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error)
+	octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
+#define octeon_i2c_data_read(i2c)					\
+	octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
 #define octeon_i2c_stat_read(i2c)					\
-	octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL)
+	octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
 
 /**
  * octeon_i2c_read_int - read the TWSI_INT register
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index fb4b185..bee2674 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1115,10 +1115,6 @@
 		if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
 					 &max_proto, set_properties, true))
 			return PSMOUSE_TOUCHKIT_PS2;
-
-		if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
-					 &max_proto, set_properties, true))
-			return PSMOUSE_BYD;
 	}
 
 	/*
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
index e8fc495..2147678 100644
--- a/drivers/net/can/usb/peak_usb/pcan_ucan.h
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -43,11 +43,22 @@
 	u16	args[3];
 };
 
+#define PUCAN_TSLOW_BRP_BITS		10
+#define PUCAN_TSLOW_TSGEG1_BITS		8
+#define PUCAN_TSLOW_TSGEG2_BITS		7
+#define PUCAN_TSLOW_SJW_BITS		7
+
+#define PUCAN_TSLOW_BRP_MASK		((1 << PUCAN_TSLOW_BRP_BITS) - 1)
+#define PUCAN_TSLOW_TSEG1_MASK		((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1)
+#define PUCAN_TSLOW_TSEG2_MASK		((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1)
+#define PUCAN_TSLOW_SJW_MASK		((1 << PUCAN_TSLOW_SJW_BITS) - 1)
+
 /* uCAN TIMING_SLOW command fields */
-#define PUCAN_TSLOW_SJW_T(s, t)		(((s) & 0xf) | ((!!(t)) << 7))
-#define PUCAN_TSLOW_TSEG2(t)		((t) & 0xf)
-#define PUCAN_TSLOW_TSEG1(t)		((t) & 0x3f)
-#define PUCAN_TSLOW_BRP(b)		((b) & 0x3ff)
+#define PUCAN_TSLOW_SJW_T(s, t)		(((s) & PUCAN_TSLOW_SJW_MASK) | \
+								((!!(t)) << 7))
+#define PUCAN_TSLOW_TSEG2(t)		((t) & PUCAN_TSLOW_TSEG2_MASK)
+#define PUCAN_TSLOW_TSEG1(t)		((t) & PUCAN_TSLOW_TSEG1_MASK)
+#define PUCAN_TSLOW_BRP(b)		((b) & PUCAN_TSLOW_BRP_MASK)
 
 struct __packed pucan_timing_slow {
 	__le16	opcode_channel;
@@ -60,11 +71,21 @@
 	__le16	brp;		/* BaudRate Prescaler */
 };
 
+#define PUCAN_TFAST_BRP_BITS		10
+#define PUCAN_TFAST_TSGEG1_BITS		5
+#define PUCAN_TFAST_TSGEG2_BITS		4
+#define PUCAN_TFAST_SJW_BITS		4
+
+#define PUCAN_TFAST_BRP_MASK		((1 << PUCAN_TFAST_BRP_BITS) - 1)
+#define PUCAN_TFAST_TSEG1_MASK		((1 << PUCAN_TFAST_TSGEG1_BITS) - 1)
+#define PUCAN_TFAST_TSEG2_MASK		((1 << PUCAN_TFAST_TSGEG2_BITS) - 1)
+#define PUCAN_TFAST_SJW_MASK		((1 << PUCAN_TFAST_SJW_BITS) - 1)
+
 /* uCAN TIMING_FAST command fields */
-#define PUCAN_TFAST_SJW(s)		((s) & 0x3)
-#define PUCAN_TFAST_TSEG2(t)		((t) & 0x7)
-#define PUCAN_TFAST_TSEG1(t)		((t) & 0xf)
-#define PUCAN_TFAST_BRP(b)		((b) & 0x3ff)
+#define PUCAN_TFAST_SJW(s)		((s) & PUCAN_TFAST_SJW_MASK)
+#define PUCAN_TFAST_TSEG2(t)		((t) & PUCAN_TFAST_TSEG2_MASK)
+#define PUCAN_TFAST_TSEG1(t)		((t) & PUCAN_TFAST_TSEG1_MASK)
+#define PUCAN_TFAST_BRP(b)		((b) & PUCAN_TFAST_BRP_MASK)
 
 struct __packed pucan_timing_fast {
 	__le16	opcode_channel;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index c06382c..f3141ca 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -39,6 +39,7 @@
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)},
 	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)},
+	{USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)},
 	{} /* Terminating entry */
 };
 
@@ -50,6 +51,7 @@
 	&pcan_usb_pro,
 	&pcan_usb_fd,
 	&pcan_usb_pro_fd,
+	&pcan_usb_x6,
 };
 
 /*
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 506fe50..3cbfb06 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -27,6 +27,7 @@
 #define PCAN_USBPRO_PRODUCT_ID		0x000d
 #define PCAN_USBPROFD_PRODUCT_ID	0x0011
 #define PCAN_USBFD_PRODUCT_ID		0x0012
+#define PCAN_USBX6_PRODUCT_ID		0x0014
 
 #define PCAN_USB_DRIVER_NAME		"peak_usb"
 
@@ -90,6 +91,7 @@
 extern const struct peak_usb_adapter pcan_usb_pro;
 extern const struct peak_usb_adapter pcan_usb_fd;
 extern const struct peak_usb_adapter pcan_usb_pro_fd;
+extern const struct peak_usb_adapter pcan_usb_x6;
 
 struct peak_time_ref {
 	struct timeval tv_host_0, tv_host;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index ce44a03..3047325 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -993,24 +993,24 @@
 static const struct can_bittiming_const pcan_usb_fd_const = {
 	.name = "pcan_usb_fd",
 	.tseg1_min = 1,
-	.tseg1_max = 64,
+	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
 	.tseg2_min = 1,
-	.tseg2_max = 16,
-	.sjw_max = 16,
+	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
 	.brp_min = 1,
-	.brp_max = 1024,
+	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
 	.brp_inc = 1,
 };
 
 static const struct can_bittiming_const pcan_usb_fd_data_const = {
 	.name = "pcan_usb_fd",
 	.tseg1_min = 1,
-	.tseg1_max = 16,
+	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
 	.tseg2_min = 1,
-	.tseg2_max = 8,
-	.sjw_max = 4,
+	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
 	.brp_min = 1,
-	.brp_max = 1024,
+	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
 	.brp_inc = 1,
 };
 
@@ -1065,24 +1065,24 @@
 static const struct can_bittiming_const pcan_usb_pro_fd_const = {
 	.name = "pcan_usb_pro_fd",
 	.tseg1_min = 1,
-	.tseg1_max = 64,
+	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
 	.tseg2_min = 1,
-	.tseg2_max = 16,
-	.sjw_max = 16,
+	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
 	.brp_min = 1,
-	.brp_max = 1024,
+	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
 	.brp_inc = 1,
 };
 
 static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
 	.name = "pcan_usb_pro_fd",
 	.tseg1_min = 1,
-	.tseg1_max = 16,
+	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
 	.tseg2_min = 1,
-	.tseg2_max = 8,
-	.sjw_max = 4,
+	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
 	.brp_min = 1,
-	.brp_max = 1024,
+	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
 	.brp_inc = 1,
 };
 
@@ -1132,3 +1132,75 @@
 
 	.do_get_berr_counter = pcan_usb_fd_get_berr_counter,
 };
+
+/* describes the PCAN-USB X6 adapter */
+static const struct can_bittiming_const pcan_usb_x6_const = {
+	.name = "pcan_usb_x6",
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+	.brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_x6_data_const = {
+	.name = "pcan_usb_x6",
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
+	.brp_inc = 1,
+};
+
+const struct peak_usb_adapter pcan_usb_x6 = {
+	.name = "PCAN-USB X6",
+	.device_id = PCAN_USBX6_PRODUCT_ID,
+	.ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
+	.ctrlmode_supported = CAN_CTRLMODE_FD |
+			CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+	.clock = {
+		.freq = PCAN_UFD_CRYSTAL_HZ,
+	},
+	.bittiming_const = &pcan_usb_x6_const,
+	.data_bittiming_const = &pcan_usb_x6_data_const,
+
+	/* size of device private data */
+	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+
+	/* timestamps usage */
+	.ts_used_bits = 32,
+	.ts_period = 1000000, /* calibration period in ts. */
+	.us_per_ts_scale = 1, /* us = (ts * scale) >> shift */
+	.us_per_ts_shift = 0,
+
+	/* give here messages in/out endpoints */
+	.ep_msg_in = PCAN_USBPRO_EP_MSGIN,
+	.ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1},
+
+	/* size of rx/tx usb buffers */
+	.rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE,
+	.tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
+
+	/* device callbacks */
+	.intf_probe = pcan_usb_pro_probe,	/* same as PCAN-USB Pro */
+	.dev_init = pcan_usb_fd_init,
+
+	.dev_exit = pcan_usb_fd_exit,
+	.dev_free = pcan_usb_fd_free,
+	.dev_set_bus = pcan_usb_fd_set_bus,
+	.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
+	.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+	.dev_decode_buf = pcan_usb_fd_decode_buf,
+	.dev_start = pcan_usb_fd_start,
+	.dev_stop = pcan_usb_fd_stop,
+	.dev_restart_async = pcan_usb_fd_restart_async,
+	.dev_encode_msg = pcan_usb_fd_encode_msg,
+
+	.do_get_berr_counter = pcan_usb_fd_get_berr_counter,
+};
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index bda31f3..a0eee72 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -400,12 +400,6 @@
 
 		skb_put(skb, pktlength);
 
-		/* make cache consistent with receive packet buffer */
-		dma_sync_single_for_cpu(priv->device,
-					priv->rx_ring[entry].dma_addr,
-					priv->rx_ring[entry].len,
-					DMA_FROM_DEVICE);
-
 		dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
 				 priv->rx_ring[entry].len, DMA_FROM_DEVICE);
 
@@ -469,7 +463,6 @@
 
 	if (unlikely(netif_queue_stopped(priv->dev) &&
 		     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
-		netif_tx_lock(priv->dev);
 		if (netif_queue_stopped(priv->dev) &&
 		    tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
 			if (netif_msg_tx_done(priv))
@@ -477,7 +470,6 @@
 					   __func__);
 			netif_wake_queue(priv->dev);
 		}
-		netif_tx_unlock(priv->dev);
 	}
 
 	spin_unlock(&priv->tx_lock);
@@ -592,10 +584,6 @@
 	buffer->dma_addr = dma_addr;
 	buffer->len = nopaged_len;
 
-	/* Push data out of the cache hierarchy into main memory */
-	dma_sync_single_for_device(priv->device, buffer->dma_addr,
-				   buffer->len, DMA_TO_DEVICE);
-
 	priv->dmaops->tx_buffer(priv, buffer);
 
 	skb_tx_timestamp(skb);
@@ -819,6 +807,8 @@
 
 	if (!phydev) {
 		netdev_err(dev, "Could not find the PHY\n");
+		if (fixed_link)
+			of_phy_deregister_fixed_link(priv->device->of_node);
 		return -ENODEV;
 	}
 
@@ -1545,10 +1535,15 @@
 static int altera_tse_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct altera_tse_private *priv = netdev_priv(ndev);
 
-	if (ndev->phydev)
+	if (ndev->phydev) {
 		phy_disconnect(ndev->phydev);
 
+		if (of_phy_is_fixed_link(priv->device->of_node))
+			of_phy_deregister_fixed_link(priv->device->of_node);
+	}
+
 	platform_set_drvdata(pdev, NULL);
 	altera_tse_mdio_destroy(ndev);
 	unregister_netdev(ndev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 9de0788..4f76351 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -829,7 +829,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int xgbe_suspend(struct device *dev)
 {
 	struct net_device *netdev = dev_get_drvdata(dev);
@@ -874,7 +874,7 @@
 
 	return ret;
 }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id xgbe_acpi_match[] = {
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 00c38bf..e078d8d 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1466,12 +1466,12 @@
 
 	ret = nb8800_hw_init(dev);
 	if (ret)
-		goto err_free_bus;
+		goto err_deregister_fixed_link;
 
 	if (ops && ops->init) {
 		ret = ops->init(dev);
 		if (ret)
-			goto err_free_bus;
+			goto err_deregister_fixed_link;
 	}
 
 	dev->netdev_ops = &nb8800_netdev_ops;
@@ -1504,6 +1504,9 @@
 
 err_free_dma:
 	nb8800_dma_free(dev);
+err_deregister_fixed_link:
+	if (of_phy_is_fixed_link(pdev->dev.of_node))
+		of_phy_deregister_fixed_link(pdev->dev.of_node);
 err_free_bus:
 	of_node_put(priv->phy_node);
 	mdiobus_unregister(bus);
@@ -1521,6 +1524,8 @@
 	struct nb8800_priv *priv = netdev_priv(ndev);
 
 	unregister_netdev(ndev);
+	if (of_phy_is_fixed_link(pdev->dev.of_node))
+		of_phy_deregister_fixed_link(pdev->dev.of_node);
 	of_node_put(priv->phy_node);
 
 	mdiobus_unregister(priv->mii_bus);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c3354b9..25d1eb4 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1755,13 +1755,13 @@
 	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
 		dev_err(&pdev->dev, "invalid interrupts\n");
 		ret = -EINVAL;
-		goto err;
+		goto err_free_netdev;
 	}
 
 	priv->base = devm_ioremap_resource(&pdev->dev, r);
 	if (IS_ERR(priv->base)) {
 		ret = PTR_ERR(priv->base);
-		goto err;
+		goto err_free_netdev;
 	}
 
 	priv->netdev = dev;
@@ -1779,7 +1779,7 @@
 		ret = of_phy_register_fixed_link(dn);
 		if (ret) {
 			dev_err(&pdev->dev, "failed to register fixed PHY\n");
-			goto err;
+			goto err_free_netdev;
 		}
 
 		priv->phy_dn = dn;
@@ -1821,7 +1821,7 @@
 	ret = register_netdev(dev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register net_device\n");
-		goto err;
+		goto err_deregister_fixed_link;
 	}
 
 	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
@@ -1832,7 +1832,11 @@
 		 priv->base, priv->irq0, priv->irq1, txq, rxq);
 
 	return 0;
-err:
+
+err_deregister_fixed_link:
+	if (of_phy_is_fixed_link(dn))
+		of_phy_deregister_fixed_link(dn);
+err_free_netdev:
 	free_netdev(dev);
 	return ret;
 }
@@ -1840,11 +1844,14 @@
 static int bcm_sysport_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = dev_get_drvdata(&pdev->dev);
+	struct device_node *dn = pdev->dev.of_node;
 
 	/* Not much to do, ndo_close has been called
 	 * and we use managed allocations
 	 */
 	unregister_netdev(dev);
+	if (of_phy_is_fixed_link(dn))
+		of_phy_deregister_fixed_link(dn);
 	free_netdev(dev);
 	dev_set_drvdata(&pdev->dev, NULL);
 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 4464bc5..a4e60e5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1172,6 +1172,7 @@
 					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
 	unsigned int pkts_compl = 0;
@@ -1199,13 +1200,13 @@
 		if (tx_cb_ptr->skb) {
 			pkts_compl++;
 			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
-			dma_unmap_single(&dev->dev,
+			dma_unmap_single(kdev,
 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
 					 dma_unmap_len(tx_cb_ptr, dma_len),
 					 DMA_TO_DEVICE);
 			bcmgenet_free_cb(tx_cb_ptr);
 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
-			dma_unmap_page(&dev->dev,
+			dma_unmap_page(kdev,
 				       dma_unmap_addr(tx_cb_ptr, dma_addr),
 				       dma_unmap_len(tx_cb_ptr, dma_len),
 				       DMA_TO_DEVICE);
@@ -1775,6 +1776,7 @@
 
 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 {
+	struct device *kdev = &priv->pdev->dev;
 	struct enet_cb *cb;
 	int i;
 
@@ -1782,7 +1784,7 @@
 		cb = &priv->rx_cbs[i];
 
 		if (dma_unmap_addr(cb, dma_addr)) {
-			dma_unmap_single(&priv->dev->dev,
+			dma_unmap_single(kdev,
 					 dma_unmap_addr(cb, dma_addr),
 					 priv->rx_buf_len, DMA_FROM_DEVICE);
 			dma_unmap_addr_set(cb, dma_addr, 0);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 457c3bc..e876076 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -542,8 +542,10 @@
 	/* Make sure we initialize MoCA PHYs with a link down */
 	if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
 		phydev = of_phy_find_device(dn);
-		if (phydev)
+		if (phydev) {
 			phydev->link = 0;
+			put_device(&phydev->mdio.dev);
+		}
 	}
 
 	return 0;
@@ -625,6 +627,7 @@
 int bcmgenet_mii_init(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device_node *dn = priv->pdev->dev.of_node;
 	int ret;
 
 	ret = bcmgenet_mii_alloc(priv);
@@ -638,6 +641,8 @@
 	return 0;
 
 out:
+	if (of_phy_is_fixed_link(dn))
+		of_phy_deregister_fixed_link(dn);
 	of_node_put(priv->phy_dn);
 	mdiobus_unregister(priv->mii_bus);
 	mdiobus_free(priv->mii_bus);
@@ -647,7 +652,10 @@
 void bcmgenet_mii_exit(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device_node *dn = priv->pdev->dev.of_node;
 
+	if (of_phy_is_fixed_link(dn))
+		of_phy_deregister_fixed_link(dn);
 	of_node_put(priv->phy_dn);
 	mdiobus_unregister(priv->mii_bus);
 	mdiobus_free(priv->mii_bus);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 533653b..ec09fce 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -975,6 +975,7 @@
 		addr += bp->rx_buffer_size;
 	}
 	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+	bp->rx_tail = 0;
 }
 
 static int macb_rx(struct macb *bp, int budget)
@@ -1156,6 +1157,7 @@
 		if (status & MACB_BIT(RXUBR)) {
 			ctrl = macb_readl(bp, NCR);
 			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+			wmb();
 			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
 
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
@@ -1616,8 +1618,6 @@
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
 	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
-
-	bp->rx_tail = 0;
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -2770,6 +2770,7 @@
 	if (intstatus & MACB_BIT(RXUBR)) {
 		ctl = macb_readl(lp, NCR);
 		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+		wmb();
 		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
 	}
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index df1573c..ecf3ccc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -168,6 +168,7 @@
 	CH_PCI_ID_TABLE_FENTRY(0x509a),	/* Custom T520-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x509b),	/* Custom T540-CR LOM */
 	CH_PCI_ID_TABLE_FENTRY(0x509c),	/* Custom T520-CR*/
+	CH_PCI_ID_TABLE_FENTRY(0x509d),	/* Custom T540-CR*/
 
 	/* T6 adapters:
 	 */
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c865135..5ea740b 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -574,6 +574,8 @@
 	unsigned int reload_period;
 	int pps_enable;
 	unsigned int next_counter;
+
+	u64 ethtool_stats[0];
 };
 
 void fec_ptp_init(struct platform_device *pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 5aa9d4d..5f77caa 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2313,14 +2313,24 @@
 	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
 };
 
-static void fec_enet_get_ethtool_stats(struct net_device *dev,
-	struct ethtool_stats *stats, u64 *data)
+static void fec_enet_update_ethtool_stats(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
-		data[i] = readl(fep->hwp + fec_stats[i].offset);
+		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
+}
+
+static void fec_enet_get_ethtool_stats(struct net_device *dev,
+				       struct ethtool_stats *stats, u64 *data)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+
+	if (netif_running(dev))
+		fec_enet_update_ethtool_stats(dev);
+
+	memcpy(data, fep->ethtool_stats, ARRAY_SIZE(fec_stats) * sizeof(u64));
 }
 
 static void fec_enet_get_strings(struct net_device *netdev,
@@ -2874,6 +2884,8 @@
 	if (fep->quirks & FEC_QUIRK_ERR006687)
 		imx6q_cpuidle_fec_irqs_unused();
 
+	fec_enet_update_ethtool_stats(ndev);
+
 	fec_enet_clk_enable(ndev, false);
 	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
 	pm_runtime_mark_last_busy(&fep->pdev->dev);
@@ -3180,6 +3192,8 @@
 
 	fec_restart(ndev);
 
+	fec_enet_update_ethtool_stats(ndev);
+
 	return 0;
 }
 
@@ -3278,7 +3292,8 @@
 	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
 
 	/* Init network device */
-	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
+	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
+				  ARRAY_SIZE(fec_stats) * sizeof(u64),
 				  num_tx_qs, num_rx_qs);
 	if (!ndev)
 		return -ENOMEM;
@@ -3475,6 +3490,8 @@
 failed_clk_ipg:
 	fec_enet_clk_enable(ndev, false);
 failed_clk:
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
 failed_phy:
 	of_node_put(phy_node);
 failed_ioremap:
@@ -3488,6 +3505,7 @@
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct device_node *np = pdev->dev.of_node;
 
 	cancel_work_sync(&fep->tx_timeout_work);
 	fec_ptp_stop(pdev);
@@ -3495,6 +3513,8 @@
 	fec_enet_mii_remove(fep);
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
 	of_node_put(fep->phy_node);
 	free_netdev(ndev);
 
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 53ef51e..71a5ded 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1107,6 +1107,9 @@
 {
 	free_init_resources(memac);
 
+	if (memac->pcsphy)
+		put_device(&memac->pcsphy->mdio.dev);
+
 	kfree(memac->memac_drv_param);
 	kfree(memac);
 
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 8fe6b3e..736db9d 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -892,6 +892,8 @@
 		priv->fixed_link->duplex = phy->duplex;
 		priv->fixed_link->pause = phy->pause;
 		priv->fixed_link->asym_pause = phy->asym_pause;
+
+		put_device(&phy->mdio.dev);
 	}
 
 	err = mac_dev->init(mac_dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index dc120c1..4b86260 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -980,7 +980,7 @@
 		err = clk_prepare_enable(clk);
 		if (err) {
 			ret = err;
-			goto out_free_fpi;
+			goto out_deregister_fixed_link;
 		}
 		fpi->clk_per = clk;
 	}
@@ -1061,6 +1061,9 @@
 	of_node_put(fpi->phy_node);
 	if (fpi->clk_per)
 		clk_disable_unprepare(fpi->clk_per);
+out_deregister_fixed_link:
+	if (of_phy_is_fixed_link(ofdev->dev.of_node))
+		of_phy_deregister_fixed_link(ofdev->dev.of_node);
 out_free_fpi:
 	kfree(fpi);
 	return ret;
@@ -1079,6 +1082,8 @@
 	of_node_put(fep->fpi->phy_node);
 	if (fep->fpi->clk_per)
 		clk_disable_unprepare(fep->fpi->clk_per);
+	if (of_phy_is_fixed_link(ofdev->dev.of_node))
+		of_phy_deregister_fixed_link(ofdev->dev.of_node);
 	free_netdev(ndev);
 	return 0;
 }
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b4f5bc..9061c2f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1312,6 +1312,7 @@
  */
 static int gfar_probe(struct platform_device *ofdev)
 {
+	struct device_node *np = ofdev->dev.of_node;
 	struct net_device *dev = NULL;
 	struct gfar_private *priv = NULL;
 	int err = 0, i;
@@ -1462,6 +1463,8 @@
 	return 0;
 
 register_fail:
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
 	unmap_group_regs(priv);
 	gfar_free_rx_queues(priv);
 	gfar_free_tx_queues(priv);
@@ -1474,11 +1477,16 @@
 static int gfar_remove(struct platform_device *ofdev)
 {
 	struct gfar_private *priv = platform_get_drvdata(ofdev);
+	struct device_node *np = ofdev->dev.of_node;
 
 	of_node_put(priv->phy_node);
 	of_node_put(priv->tbi_node);
 
 	unregister_netdev(priv->ndev);
+
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+
 	unmap_group_regs(priv);
 	gfar_free_rx_queues(priv);
 	gfar_free_tx_queues(priv);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 186ef8f..f76d332 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3868,9 +3868,8 @@
 	dev = alloc_etherdev(sizeof(*ugeth));
 
 	if (dev == NULL) {
-		of_node_put(ug_info->tbi_node);
-		of_node_put(ug_info->phy_node);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto err_deregister_fixed_link;
 	}
 
 	ugeth = netdev_priv(dev);
@@ -3907,10 +3906,7 @@
 		if (netif_msg_probe(ugeth))
 			pr_err("%s: Cannot register net device, aborting\n",
 			       dev->name);
-		free_netdev(dev);
-		of_node_put(ug_info->tbi_node);
-		of_node_put(ug_info->phy_node);
-		return err;
+		goto err_free_netdev;
 	}
 
 	mac_addr = of_get_mac_address(np);
@@ -3923,16 +3919,29 @@
 	ugeth->node = np;
 
 	return 0;
+
+err_free_netdev:
+	free_netdev(dev);
+err_deregister_fixed_link:
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+	of_node_put(ug_info->tbi_node);
+	of_node_put(ug_info->phy_node);
+
+	return err;
 }
 
 static int ucc_geth_remove(struct platform_device* ofdev)
 {
 	struct net_device *dev = platform_get_drvdata(ofdev);
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
+	struct device_node *np = ofdev->dev.of_node;
 
 	unregister_netdev(dev);
 	free_netdev(dev);
 	ucc_geth_memclean(ugeth);
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
 	of_node_put(ugeth->ug_info->tbi_node);
 	of_node_put(ugeth->ug_info->phy_node);
 
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index edc9a6a..9affd7c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4931,11 +4931,15 @@
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
+		unsigned char *csum_start = skb_checksum_start(skb);
+		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
 		/* IP header will have to cancel out any data that
 		 * is not a part of the outer IP header
 		 */
-		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
-						  csum_unfold(l4.tcp->check)));
+		ip.v4->check = csum_fold(csum_partial(trans_start,
+						      csum_start - trans_start,
+						      0));
 		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
 
 		ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 12bb877..7dff7f6 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1965,11 +1965,15 @@
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
+		unsigned char *csum_start = skb_checksum_start(skb);
+		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
 		/* IP header will have to cancel out any data that
 		 * is not a part of the outer IP header
 		 */
-		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
-						  csum_unfold(l4.tcp->check)));
+		ip.v4->check = csum_fold(csum_partial(trans_start,
+						      csum_start - trans_start,
+						      0));
 		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
 
 		ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bd93d82..fee1f29 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7277,11 +7277,15 @@
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
+		unsigned char *csum_start = skb_checksum_start(skb);
+		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
 		/* IP header will have to cancel out any data that
 		 * is not a part of the outer IP header
 		 */
-		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
-						  csum_unfold(l4.tcp->check)));
+		ip.v4->check = csum_fold(csum_partial(trans_start,
+						      csum_start - trans_start,
+						      0));
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 
 		ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 7eaac32..cbf70fe 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3329,11 +3329,15 @@
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
+		unsigned char *csum_start = skb_checksum_start(skb);
+		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+
 		/* IP header will have to cancel out any data that
 		 * is not a part of the outer IP header
 		 */
-		ip.v4->check = csum_fold(csum_add(lco_csum(skb),
-						  csum_unfold(l4.tcp->check)));
+		ip.v4->check = csum_fold(csum_partial(trans_start,
+						      csum_start - trans_start,
+						      0));
 		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 
 		ip.v4->tot_len = 0;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0c0a45a..707bc46 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4191,6 +4191,8 @@
 	clk_disable_unprepare(pp->clk);
 err_put_phy_node:
 	of_node_put(phy_node);
+	if (of_phy_is_fixed_link(dn))
+		of_phy_deregister_fixed_link(dn);
 err_free_irq:
 	irq_dispose_mapping(dev->irq);
 err_free_netdev:
@@ -4202,6 +4204,7 @@
 static int mvneta_remove(struct platform_device *pdev)
 {
 	struct net_device  *dev = platform_get_drvdata(pdev);
+	struct device_node *dn = pdev->dev.of_node;
 	struct mvneta_port *pp = netdev_priv(dev);
 
 	unregister_netdev(dev);
@@ -4209,6 +4212,8 @@
 	clk_disable_unprepare(pp->clk);
 	free_percpu(pp->ports);
 	free_percpu(pp->stats);
+	if (of_phy_is_fixed_link(dn))
+		of_phy_deregister_fixed_link(dn);
 	irq_dispose_mapping(dev->irq);
 	of_node_put(pp->phy_node);
 	free_netdev(dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 4a62ffd..86a89cb 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -318,6 +318,8 @@
 	return 0;
 
 err_phy:
+	if (of_phy_is_fixed_link(mac->of_node))
+		of_phy_deregister_fixed_link(mac->of_node);
 	of_node_put(np);
 	dev_err(eth->dev, "%s: invalid phy\n", __func__);
 	return -EINVAL;
@@ -1923,6 +1925,8 @@
 	struct mtk_eth *eth = mac->hw;
 
 	phy_disconnect(dev->phydev);
+	if (of_phy_is_fixed_link(mac->of_node))
+		of_phy_deregister_fixed_link(mac->of_node);
 	mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
 	mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a60f635..fb8bb02 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2079,13 +2079,6 @@
 	return -ENOMEM;
 }
 
-static void mlx4_en_shutdown(struct net_device *dev)
-{
-	rtnl_lock();
-	netif_device_detach(dev);
-	mlx4_en_close(dev);
-	rtnl_unlock();
-}
 
 static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
 			     struct mlx4_en_priv *src,
@@ -2162,8 +2155,6 @@
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
-	bool shutdown = mdev->dev->persist->interface_state &
-					    MLX4_INTERFACE_STATE_SHUTDOWN;
 
 	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2171,10 +2162,7 @@
 	if (priv->registered) {
 		devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
 							      priv->port));
-		if (shutdown)
-			mlx4_en_shutdown(dev);
-		else
-			unregister_netdev(dev);
+		unregister_netdev(dev);
 	}
 
 	if (priv->allocated)
@@ -2203,8 +2191,7 @@
 	kfree(priv->tx_ring);
 	kfree(priv->tx_cq);
 
-	if (!shutdown)
-		free_netdev(dev);
+	free_netdev(dev);
 }
 
 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 6f4e67b..75d07fa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4147,11 +4147,8 @@
 
 	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
 	mutex_lock(&persist->interface_state_mutex);
-	if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
-		/* Notify mlx4 clients that the kernel is being shut down */
-		persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
+	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
 		mlx4_unload_one(pdev);
-	}
 	mutex_unlock(&persist->interface_state_mutex);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 94b891c..1a670b6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1457,7 +1457,12 @@
 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
 				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
 {
-	struct mlx4_net_trans_rule rule;
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+	};
+
 	u64 *regid_p;
 
 	switch (mode) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index da4e90d..99a14df 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -212,6 +212,7 @@
 
 		phy_np = of_parse_phandle(np, "phy-handle", 0);
 		adpt->phydev = of_phy_find_device(phy_np);
+		of_node_put(phy_np);
 	}
 
 	if (!adpt->phydev) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 4fede4b..57b35ae 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -711,6 +711,8 @@
 err_undo_napi:
 	netif_napi_del(&adpt->rx_q.napi);
 err_undo_mdiobus:
+	if (!has_acpi_companion(&pdev->dev))
+		put_device(&adpt->phydev->mdio.dev);
 	mdiobus_unregister(adpt->mii_bus);
 err_undo_clocks:
 	emac_clks_teardown(adpt);
@@ -730,6 +732,8 @@
 
 	emac_clks_teardown(adpt);
 
+	if (!has_acpi_companion(&pdev->dev))
+		put_device(&adpt->phydev->mdio.dev);
 	mdiobus_unregister(adpt->mii_bus);
 	free_netdev(netdev);
 
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 630536b..d6a2178 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1008,20 +1008,18 @@
 	of_node_put(pn);
 	if (!phydev) {
 		netdev_err(ndev, "failed to connect PHY\n");
-		return -ENOENT;
+		err = -ENOENT;
+		goto err_deregister_fixed_link;
 	}
 
 	/* This driver only support 10/100Mbit speeds on Gen3
 	 * at this time.
 	 */
 	if (priv->chip_id == RCAR_GEN3) {
-		int err;
-
 		err = phy_set_max_speed(phydev, SPEED_100);
 		if (err) {
 			netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
-			phy_disconnect(phydev);
-			return err;
+			goto err_phy_disconnect;
 		}
 
 		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
@@ -1033,6 +1031,14 @@
 	phy_attached_info(phydev);
 
 	return 0;
+
+err_phy_disconnect:
+	phy_disconnect(phydev);
+err_deregister_fixed_link:
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+
+	return err;
 }
 
 /* PHY control start function */
@@ -1634,6 +1640,7 @@
 /* Device close function for Ethernet AVB */
 static int ravb_close(struct net_device *ndev)
 {
+	struct device_node *np = ndev->dev.parent->of_node;
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct ravb_tstamp_skb *ts_skb, *ts_skb2;
 
@@ -1663,6 +1670,8 @@
 	if (ndev->phydev) {
 		phy_stop(ndev->phydev);
 		phy_disconnect(ndev->phydev);
+		if (of_phy_is_fixed_link(np))
+			of_phy_deregister_fixed_link(np);
 	}
 
 	if (priv->chip_id != RCAR_GEN2) {
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 05b0dc5..1a92de7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -518,7 +518,7 @@
 
 	.ecsr_value	= ECSR_ICD,
 	.ecsipr_value	= ECSIPR_ICDIP,
-	.eesipr_value	= 0xff7f009f,
+	.eesipr_value	= 0xe77f009f,
 
 	.tx_check	= EESR_TC1 | EESR_FTC,
 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index b1e5f24..e6e6c2f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -50,10 +50,23 @@
 	if (plat_dat->init) {
 		ret = plat_dat->init(pdev, plat_dat->bsp_priv);
 		if (ret)
-			return ret;
+			goto err_remove_config_dt;
 	}
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (ret)
+		goto err_exit;
+
+	return 0;
+
+err_exit:
+	if (plat_dat->exit)
+		plat_dat->exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+	if (pdev->dev.of_node)
+		stmmac_remove_config_dt(pdev, plat_dat);
+
+	return ret;
 }
 
 static const struct of_device_id dwmac_generic_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 36d3355..866444b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -271,15 +271,17 @@
 		return PTR_ERR(plat_dat);
 
 	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
-	if (!gmac)
-		return -ENOMEM;
+	if (!gmac) {
+		err = -ENOMEM;
+		goto err_remove_config_dt;
+	}
 
 	gmac->pdev = pdev;
 
 	err = ipq806x_gmac_of_parse(gmac);
 	if (err) {
 		dev_err(dev, "device tree parsing error\n");
-		return err;
+		goto err_remove_config_dt;
 	}
 
 	regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -300,7 +302,8 @@
 	default:
 		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
 			phy_modes(gmac->phy_mode));
-		return -EINVAL;
+		err = -EINVAL;
+		goto err_remove_config_dt;
 	}
 	regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
 
@@ -319,7 +322,8 @@
 	default:
 		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
 			phy_modes(gmac->phy_mode));
-		return -EINVAL;
+		err = -EINVAL;
+		goto err_remove_config_dt;
 	}
 	regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
 
@@ -346,7 +350,16 @@
 	plat_dat->bsp_priv = gmac;
 	plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (err)
+		goto err_remove_config_dt;
+
+	return 0;
+
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
+
+	return err;
 }
 
 static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index 78e9d18..3d3f43d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -46,7 +46,8 @@
 	reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
 	if (IS_ERR(reg)) {
 		dev_err(&pdev->dev, "syscon lookup failed\n");
-		return PTR_ERR(reg);
+		ret = PTR_ERR(reg);
+		goto err_remove_config_dt;
 	}
 
 	if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
@@ -55,13 +56,23 @@
 		ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
 	} else {
 		dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_remove_config_dt;
 	}
 
 	regmap_update_bits(reg, LPC18XX_CREG_CREG6,
 			   LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (ret)
+		goto err_remove_config_dt;
+
+	return 0;
+
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
+
+	return ret;
 }
 
 static const struct of_device_id lpc18xx_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 309d995..7fdd176 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -64,18 +64,31 @@
 		return PTR_ERR(plat_dat);
 
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return -ENOMEM;
+	if (!dwmac) {
+		ret = -ENOMEM;
+		goto err_remove_config_dt;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(dwmac->reg))
-		return PTR_ERR(dwmac->reg);
+	if (IS_ERR(dwmac->reg)) {
+		ret = PTR_ERR(dwmac->reg);
+		goto err_remove_config_dt;
+	}
 
 	plat_dat->bsp_priv = dwmac;
 	plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (ret)
+		goto err_remove_config_dt;
+
+	return 0;
+
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
+
+	return ret;
 }
 
 static const struct of_device_id meson6_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 250e4ce..ffaed1f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -264,32 +264,48 @@
 		return PTR_ERR(plat_dat);
 
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return -ENOMEM;
+	if (!dwmac) {
+		ret = -ENOMEM;
+		goto err_remove_config_dt;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(dwmac->regs))
-		return PTR_ERR(dwmac->regs);
+	if (IS_ERR(dwmac->regs)) {
+		ret = PTR_ERR(dwmac->regs);
+		goto err_remove_config_dt;
+	}
 
 	dwmac->pdev = pdev;
 	dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
 	if (dwmac->phy_mode < 0) {
 		dev_err(&pdev->dev, "missing phy-mode property\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_remove_config_dt;
 	}
 
 	ret = meson8b_init_clk(dwmac);
 	if (ret)
-		return ret;
+		goto err_remove_config_dt;
 
 	ret = meson8b_init_prg_eth(dwmac);
 	if (ret)
-		return ret;
+		goto err_remove_config_dt;
 
 	plat_dat->bsp_priv = dwmac;
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (ret)
+		goto err_clk_disable;
+
+	return 0;
+
+err_clk_disable:
+	clk_disable_unprepare(dwmac->m25_div_clk);
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
+
+	return ret;
 }
 
 static int meson8b_dwmac_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 3740a44..d80c88b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -981,14 +981,27 @@
 	plat_dat->resume = rk_gmac_resume;
 
 	plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
-	if (IS_ERR(plat_dat->bsp_priv))
-		return PTR_ERR(plat_dat->bsp_priv);
+	if (IS_ERR(plat_dat->bsp_priv)) {
+		ret = PTR_ERR(plat_dat->bsp_priv);
+		goto err_remove_config_dt;
+	}
 
 	ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
 	if (ret)
-		return ret;
+		goto err_remove_config_dt;
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (ret)
+		goto err_gmac_exit;
+
+	return 0;
+
+err_gmac_exit:
+	rk_gmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
+
+	return ret;
 }
 
 static const struct of_device_id rk_gmac_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index bec6963..0c420e9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -304,6 +304,8 @@
 	struct device		*dev = &pdev->dev;
 	int			ret;
 	struct socfpga_dwmac	*dwmac;
+	struct net_device	*ndev;
+	struct stmmac_priv	*stpriv;
 
 	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
 	if (ret)
@@ -314,32 +316,43 @@
 		return PTR_ERR(plat_dat);
 
 	dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return -ENOMEM;
+	if (!dwmac) {
+		ret = -ENOMEM;
+		goto err_remove_config_dt;
+	}
 
 	ret = socfpga_dwmac_parse_data(dwmac, dev);
 	if (ret) {
 		dev_err(dev, "Unable to parse OF data\n");
-		return ret;
+		goto err_remove_config_dt;
 	}
 
 	plat_dat->bsp_priv = dwmac;
 	plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
 
 	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (ret)
+		goto err_remove_config_dt;
 
-	if (!ret) {
-		struct net_device *ndev = platform_get_drvdata(pdev);
-		struct stmmac_priv *stpriv = netdev_priv(ndev);
+	ndev = platform_get_drvdata(pdev);
+	stpriv = netdev_priv(ndev);
 
-		/* The socfpga driver needs to control the stmmac reset to
-		 * set the phy mode. Create a copy of the core reset handel
-		 * so it can be used by the driver later.
-		 */
-		dwmac->stmmac_rst = stpriv->stmmac_rst;
+	/* The socfpga driver needs to control the stmmac reset to set the phy
+	 * mode. Create a copy of the core reset handle so it can be used by
+	 * the driver later.
+	 */
+	dwmac->stmmac_rst = stpriv->stmmac_rst;
 
-		ret = socfpga_dwmac_set_phy_mode(dwmac);
-	}
+	ret = socfpga_dwmac_set_phy_mode(dwmac);
+	if (ret)
+		goto err_dvr_remove;
+
+	return 0;
+
+err_dvr_remove:
+	stmmac_dvr_remove(&pdev->dev);
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
 
 	return ret;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 58c05ac..060b98c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -345,13 +345,15 @@
 		return PTR_ERR(plat_dat);
 
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return -ENOMEM;
+	if (!dwmac) {
+		ret = -ENOMEM;
+		goto err_remove_config_dt;
+	}
 
 	ret = sti_dwmac_parse_data(dwmac, pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to parse OF data\n");
-		return ret;
+		goto err_remove_config_dt;
 	}
 
 	dwmac->fix_retime_src = data->fix_retime_src;
@@ -363,9 +365,20 @@
 
 	ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
 	if (ret)
-		return ret;
+		goto err_remove_config_dt;
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (ret)
+		goto err_dwmac_exit;
+
+	return 0;
+
+err_dwmac_exit:
+	sti_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
+
+	return ret;
 }
 
 static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index e5a926b..61cb248 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -107,24 +107,33 @@
 		return PTR_ERR(plat_dat);
 
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return -ENOMEM;
+	if (!dwmac) {
+		ret = -ENOMEM;
+		goto err_remove_config_dt;
+	}
 
 	ret = stm32_dwmac_parse_data(dwmac, &pdev->dev);
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to parse OF data\n");
-		return ret;
+		goto err_remove_config_dt;
 	}
 
 	plat_dat->bsp_priv = dwmac;
 
 	ret = stm32_dwmac_init(plat_dat);
 	if (ret)
-		return ret;
+		goto err_remove_config_dt;
 
 	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 	if (ret)
-		stm32_dwmac_clk_disable(dwmac);
+		goto err_clk_disable;
+
+	return 0;
+
+err_clk_disable:
+	stm32_dwmac_clk_disable(dwmac);
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
 
 	return ret;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index adff463..d07520f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -120,22 +120,27 @@
 		return PTR_ERR(plat_dat);
 
 	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
-	if (!gmac)
-		return -ENOMEM;
+	if (!gmac) {
+		ret = -ENOMEM;
+		goto err_remove_config_dt;
+	}
 
 	gmac->interface = of_get_phy_mode(dev->of_node);
 
 	gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
 	if (IS_ERR(gmac->tx_clk)) {
 		dev_err(dev, "could not get tx clock\n");
-		return PTR_ERR(gmac->tx_clk);
+		ret = PTR_ERR(gmac->tx_clk);
+		goto err_remove_config_dt;
 	}
 
 	/* Optional regulator for PHY */
 	gmac->regulator = devm_regulator_get_optional(dev, "phy");
 	if (IS_ERR(gmac->regulator)) {
-		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto err_remove_config_dt;
+		}
 		dev_info(dev, "no regulator found\n");
 		gmac->regulator = NULL;
 	}
@@ -151,11 +156,18 @@
 
 	ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
 	if (ret)
-		return ret;
+		goto err_remove_config_dt;
 
 	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 	if (ret)
-		sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
+		goto err_gmac_exit;
+
+	return 0;
+
+err_gmac_exit:
+	sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);
 
 	return ret;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1f9ec02..caf069a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3416,7 +3416,6 @@
 	stmmac_set_mac(priv->ioaddr, false);
 	netif_carrier_off(ndev);
 	unregister_netdev(ndev);
-	of_node_put(priv->plat->phy_node);
 	if (priv->stmmac_rst)
 		reset_control_assert(priv->stmmac_rst);
 	clk_disable_unprepare(priv->pclk);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 0a0d6a8..a840818 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -200,7 +200,6 @@
 /**
  * stmmac_probe_config_dt - parse device-tree driver parameters
  * @pdev: platform_device structure
- * @plat: driver data platform structure
  * @mac: MAC address to use
  * Description:
  * this function is to read the driver parameters from device-tree and
@@ -306,7 +305,7 @@
 		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
 				       GFP_KERNEL);
 		if (!dma_cfg) {
-			of_node_put(plat->phy_node);
+			stmmac_remove_config_dt(pdev, plat);
 			return ERR_PTR(-ENOMEM);
 		}
 		plat->dma_cfg = dma_cfg;
@@ -329,14 +328,37 @@
 
 	return plat;
 }
+
+/**
+ * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
+ * @pdev: platform_device structure
+ * @plat: driver data platform structure
+ *
+ * Release resources claimed by stmmac_probe_config_dt().
+ */
+void stmmac_remove_config_dt(struct platform_device *pdev,
+			     struct plat_stmmacenet_data *plat)
+{
+	struct device_node *np = pdev->dev.of_node;
+
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+	of_node_put(plat->phy_node);
+}
 #else
 struct plat_stmmacenet_data *
 stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
 	return ERR_PTR(-ENOSYS);
 }
+
+void stmmac_remove_config_dt(struct platform_device *pdev,
+			     struct plat_stmmacenet_data *plat)
+{
+}
 #endif /* CONFIG_OF */
 EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
+EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
 
 int stmmac_get_platform_resources(struct platform_device *pdev,
 				  struct stmmac_resources *stmmac_res)
@@ -392,10 +414,13 @@
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct stmmac_priv *priv = netdev_priv(ndev);
+	struct plat_stmmacenet_data *plat = priv->plat;
 	int ret = stmmac_dvr_remove(&pdev->dev);
 
-	if (priv->plat->exit)
-		priv->plat->exit(pdev, priv->plat->bsp_priv);
+	if (plat->exit)
+		plat->exit(pdev, plat->bsp_priv);
+
+	stmmac_remove_config_dt(pdev, plat);
 
 	return ret;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 64e147f..b72eb0d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -23,6 +23,8 @@
 
 struct plat_stmmacenet_data *
 stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+void stmmac_remove_config_dt(struct platform_device *pdev,
+			     struct plat_stmmacenet_data *plat);
 
 int stmmac_get_platform_resources(struct platform_device *pdev,
 				  struct stmmac_resources *stmmac_res);
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 4ba2421..97d64bf 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -2881,7 +2881,7 @@
 	ret = of_get_phy_mode(lp->pdev->dev.of_node);
 	if (ret < 0) {
 		dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
-		goto err_out_clk_dis_phy;
+		goto err_out_deregister_fixed_link;
 	}
 
 	lp->phy_interface = ret;
@@ -2889,14 +2889,14 @@
 	ret = dwceqos_mii_init(lp);
 	if (ret) {
 		dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
-		goto err_out_clk_dis_phy;
+		goto err_out_deregister_fixed_link;
 	}
 
 	ret = dwceqos_mii_probe(ndev);
 	if (ret != 0) {
 		netdev_err(ndev, "mii_probe fail.\n");
 		ret = -ENXIO;
-		goto err_out_clk_dis_phy;
+		goto err_out_deregister_fixed_link;
 	}
 
 	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2914,7 +2914,7 @@
 	if (ret) {
 		dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
 			ret);
-		goto err_out_clk_dis_phy;
+		goto err_out_deregister_fixed_link;
 	}
 	dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
 		 pdev->id, ndev->base_addr, ndev->irq);
@@ -2924,7 +2924,7 @@
 	if (ret) {
 		dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
 			ndev->irq, ret);
-		goto err_out_clk_dis_phy;
+		goto err_out_deregister_fixed_link;
 	}
 
 	if (netif_msg_probe(lp))
@@ -2935,11 +2935,14 @@
 	ret = register_netdev(ndev);
 	if (ret) {
 		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-			goto err_out_clk_dis_phy;
+		goto err_out_deregister_fixed_link;
 	}
 
 	return 0;
 
+err_out_deregister_fixed_link:
+	if (of_phy_is_fixed_link(pdev->dev.of_node))
+		of_phy_deregister_fixed_link(pdev->dev.of_node);
 err_out_clk_dis_phy:
 	clk_disable_unprepare(lp->phy_ref_clk);
 err_out_clk_dis_aper:
@@ -2959,8 +2962,11 @@
 	if (ndev) {
 		lp = netdev_priv(ndev);
 
-		if (ndev->phydev)
+		if (ndev->phydev) {
 			phy_disconnect(ndev->phydev);
+			if (of_phy_is_fixed_link(pdev->dev.of_node))
+				of_phy_deregister_fixed_link(pdev->dev.of_node);
+		}
 		mdiobus_unregister(lp->mii_bus);
 		mdiobus_free(lp->mii_bus);
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 58947aa..b9087b8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2459,20 +2459,8 @@
 		if (strcmp(slave_node->name, "slave"))
 			continue;
 
-		if (of_phy_is_fixed_link(slave_node)) {
-			struct phy_device *phydev;
-
-			phydev = of_phy_find_device(slave_node);
-			if (phydev) {
-				fixed_phy_unregister(phydev);
-				/* Put references taken by
-				 * of_phy_find_device() and
-				 * of_phy_register_fixed_link().
-				 */
-				phy_device_free(phydev);
-				phy_device_free(phydev);
-			}
-		}
+		if (of_phy_is_fixed_link(slave_node))
+			of_phy_deregister_fixed_link(slave_node);
 
 		of_node_put(slave_data->phy_node);
 
@@ -2942,6 +2930,8 @@
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(dev);
 
+	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
+	rtnl_lock();
 	if (cpsw->data.dual_emac) {
 		int i;
 
@@ -2953,6 +2943,8 @@
 		if (netif_running(ndev))
 			cpsw_ndo_open(ndev);
 	}
+	rtnl_unlock();
+
 	return 0;
 }
 #endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 84fbe571..481c7bf 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1767,6 +1767,7 @@
  */
 static int davinci_emac_probe(struct platform_device *pdev)
 {
+	struct device_node *np = pdev->dev.of_node;
 	int rc = 0;
 	struct resource *res, *res_ctrl;
 	struct net_device *ndev;
@@ -1805,7 +1806,7 @@
 	if (!pdata) {
 		dev_err(&pdev->dev, "no platform data\n");
 		rc = -ENODEV;
-		goto no_pdata;
+		goto err_free_netdev;
 	}
 
 	/* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1941,6 +1942,10 @@
 		cpdma_chan_destroy(priv->rxchan);
 	cpdma_ctlr_destroy(priv->dma);
 no_pdata:
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+	of_node_put(priv->phy_node);
+err_free_netdev:
 	free_netdev(ndev);
 	return rc;
 }
@@ -1956,6 +1961,7 @@
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct emac_priv *priv = netdev_priv(ndev);
+	struct device_node *np = pdev->dev.of_node;
 
 	dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
 
@@ -1968,6 +1974,8 @@
 	unregister_netdev(ndev);
 	of_node_put(priv->phy_node);
 	pm_runtime_disable(&pdev->dev);
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
 	free_netdev(ndev);
 
 	return 0;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 42edd7b..8b4822a 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -859,7 +859,6 @@
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct geneve_sock *gs4;
 	struct rtable *rt = NULL;
-	const struct iphdr *iip; /* interior IP header */
 	int err = -EINVAL;
 	struct flowi4 fl4;
 	__u8 tos, ttl;
@@ -890,8 +889,6 @@
 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
 	skb_reset_mac_header(skb);
 
-	iip = ip_hdr(skb);
-
 	if (info) {
 		const struct ip_tunnel_key *key = &info->key;
 		u8 *opts = NULL;
@@ -911,7 +908,7 @@
 		if (unlikely(err))
 			goto tx_error;
 
-		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
+		tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
 		ttl = key->ttl;
 		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
 	} else {
@@ -920,7 +917,7 @@
 		if (unlikely(err))
 			goto tx_error;
 
-		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
+		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
 		ttl = geneve->ttl;
 		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
 			ttl = 1;
@@ -952,7 +949,6 @@
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct dst_entry *dst = NULL;
-	const struct iphdr *iip; /* interior IP header */
 	struct geneve_sock *gs6;
 	int err = -EINVAL;
 	struct flowi6 fl6;
@@ -982,8 +978,6 @@
 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
 	skb_reset_mac_header(skb);
 
-	iip = ip_hdr(skb);
-
 	if (info) {
 		const struct ip_tunnel_key *key = &info->key;
 		u8 *opts = NULL;
@@ -1004,7 +998,7 @@
 		if (unlikely(err))
 			goto tx_error;
 
-		prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
+		prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
 		ttl = key->ttl;
 		label = info->key.label;
 	} else {
@@ -1014,7 +1008,7 @@
 			goto tx_error;
 
 		prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
-					   iip, skb);
+					   ip_hdr(skb), skb);
 		ttl = geneve->ttl;
 		if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
 			ttl = 1;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f442eb3..0fef178 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -497,6 +497,7 @@
 	struct net_device *phy_dev;
 	int err;
 	u16 mode = IPVLAN_MODE_L3;
+	bool create = false;
 
 	if (!tb[IFLA_LINK])
 		return -EINVAL;
@@ -513,6 +514,7 @@
 		err = ipvlan_port_create(phy_dev);
 		if (err < 0)
 			return err;
+		create = true;
 	}
 
 	if (data && data[IFLA_IPVLAN_MODE])
@@ -536,22 +538,27 @@
 
 	err = register_netdevice(dev);
 	if (err < 0)
-		return err;
+		goto destroy_ipvlan_port;
 
 	err = netdev_upper_dev_link(phy_dev, dev);
 	if (err) {
-		unregister_netdevice(dev);
-		return err;
+		goto unregister_netdev;
 	}
 	err = ipvlan_set_port_mode(port, mode);
 	if (err) {
-		unregister_netdevice(dev);
-		return err;
+		goto unregister_netdev;
 	}
 
 	list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
 	netif_stacked_transfer_operstate(phy_dev, dev);
 	return 0;
+
+unregister_netdev:
+	unregister_netdevice(dev);
+destroy_ipvlan_port:
+	if (create)
+		ipvlan_port_destroy(phy_dev);
+	return err;
 }
 
 static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 4e3d2e7..e8c3a8c 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -518,7 +518,9 @@
 		
 		mtt = irda_get_mtt(skb);
 		pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
-			if (mtt)
+			if (mtt > 1000)
+				mdelay(mtt/1000);
+			else if (mtt)
 				udelay(mtt);
 
 			/* Enable DMA interrupt */
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 070e329..7869b06 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -491,7 +491,13 @@
 	/* Don't put anything that may fail after macvlan_common_newlink
 	 * because we can't undo what it does.
 	 */
-	return macvlan_common_newlink(src_net, dev, tb, data);
+	err = macvlan_common_newlink(src_net, dev, tb, data);
+	if (err) {
+		netdev_rx_handler_unregister(dev);
+		return err;
+	}
+
+	return 0;
 }
 
 static void macvtap_dellink(struct net_device *dev,
@@ -736,13 +742,8 @@
 
 	if (zerocopy)
 		err = zerocopy_sg_from_iter(skb, from);
-	else {
+	else
 		err = skb_copy_datagram_from_iter(skb, 0, from, len);
-		if (!err && m && m->msg_control) {
-			struct ubuf_info *uarg = m->msg_control;
-			uarg->callback(uarg, false);
-		}
-	}
 
 	if (err)
 		goto err_kfree;
@@ -773,7 +774,11 @@
 		skb_shinfo(skb)->destructor_arg = m->msg_control;
 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+	} else if (m && m->msg_control) {
+		struct ubuf_info *uarg = m->msg_control;
+		uarg->callback(uarg, false);
 	}
+
 	if (vlan) {
 		skb->dev = vlan->dev;
 		dev_queue_xmit(skb);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index aadd6e9..9cbe645 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -102,15 +102,19 @@
 	if (ret < 0)
 		return ret;
 
-	if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
-		/* enable TXDLY */
-		phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08);
-		reg = phy_read(phydev, 0x11);
+	phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08);
+	reg = phy_read(phydev, 0x11);
+
+	/* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
+	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
 		reg |= RTL8211F_TX_DELAY;
-		phy_write(phydev, 0x11, reg);
-		/* restore to default page 0 */
-		phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);
-	}
+	else
+		reg &= ~RTL8211F_TX_DELAY;
+
+	phy_write(phydev, 0x11, reg);
+	/* restore to default page 0 */
+	phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0);
 
 	return 0;
 }
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 115ce4e..929dafb8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1246,13 +1246,8 @@
 
 	if (zerocopy)
 		err = zerocopy_sg_from_iter(skb, from);
-	else {
+	else
 		err = skb_copy_datagram_from_iter(skb, 0, from, len);
-		if (!err && msg_control) {
-			struct ubuf_info *uarg = msg_control;
-			uarg->callback(uarg, false);
-		}
-	}
 
 	if (err) {
 		this_cpu_inc(tun->pcpu_stats->rx_dropped);
@@ -1298,6 +1293,9 @@
 		skb_shinfo(skb)->destructor_arg = msg_control;
 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+	} else if (msg_control) {
+		struct ubuf_info *uarg = msg_control;
+		uarg->callback(uarg, false);
 	}
 
 	skb_reset_network_header(skb);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index cce2495..dc7b639 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -603,12 +603,12 @@
 	u16 medium;
 
 	/* Stop MAC operation */
-	medium = asix_read_medium_status(dev, 0);
+	medium = asix_read_medium_status(dev, 1);
 	medium &= ~AX_MEDIUM_RE;
-	asix_write_medium_mode(dev, medium, 0);
+	asix_write_medium_mode(dev, medium, 1);
 
 	netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n",
-		   asix_read_medium_status(dev, 0));
+		   asix_read_medium_status(dev, 1));
 
 	/* Preserve BMCR for restoring */
 	priv->presvd_phy_bmcr =
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c47ec0a..dd623f6 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -388,12 +388,6 @@
 	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
 		netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
 			  event->wValue ? "on" : "off");
-
-		/* Work-around for devices with broken off-notifications */
-		if (event->wValue &&
-		    !test_bit(__LINK_STATE_NOCARRIER, &dev->net->state))
-			usbnet_link_change(dev, 0, 0);
-
 		usbnet_link_change(dev, !!event->wValue, 0);
 		break;
 	case USB_CDC_NOTIFY_SPEED_CHANGE:	/* tx/rx rates */
@@ -466,6 +460,36 @@
 	return 1;
 }
 
+/* Ensure correct link state
+ *
+ * Some devices (ZTE MF823/831/910) export two carrier on notifications when
+ * connected. This causes the link state to be incorrect. Work around this by
+ * always setting the state to off, then on.
+ */
+void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
+{
+	struct usb_cdc_notification *event;
+
+	if (urb->actual_length < sizeof(*event))
+		return;
+
+	event = urb->transfer_buffer;
+
+	if (event->bNotificationType != USB_CDC_NOTIFY_NETWORK_CONNECTION) {
+		usbnet_cdc_status(dev, urb);
+		return;
+	}
+
+	netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
+		  event->wValue ? "on" : "off");
+
+	if (event->wValue &&
+	    netif_carrier_ok(dev->net))
+		netif_carrier_off(dev->net);
+
+	usbnet_link_change(dev, !!event->wValue, 0);
+}
+
 static const struct driver_info	cdc_info = {
 	.description =	"CDC Ethernet Device",
 	.flags =	FLAG_ETHER | FLAG_POINTTOPOINT,
@@ -481,7 +505,7 @@
 	.flags =	FLAG_ETHER | FLAG_POINTTOPOINT,
 	.bind =		usbnet_cdc_zte_bind,
 	.unbind =	usbnet_cdc_unbind,
-	.status =	usbnet_cdc_status,
+	.status =	usbnet_cdc_zte_status,
 	.set_rx_mode =	usbnet_cdc_update_filter,
 	.manage_power =	usbnet_manage_power,
 	.rx_fixup = usbnet_cdc_zte_rx_fixup,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3ff76c6..6fe1cdb 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -894,6 +894,7 @@
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)},	/* Telit LE922A */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 24532cd..2ba01ca 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -611,6 +611,7 @@
 	struct vxlan_rdst *rd = NULL;
 	struct vxlan_fdb *f;
 	int notify = 0;
+	int rc;
 
 	f = __vxlan_find_mac(vxlan, mac);
 	if (f) {
@@ -641,8 +642,7 @@
 		if ((flags & NLM_F_APPEND) &&
 		    (is_multicast_ether_addr(f->eth_addr) ||
 		     is_zero_ether_addr(f->eth_addr))) {
-			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
-						  &rd);
+			rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
 
 			if (rc < 0)
 				return rc;
@@ -673,7 +673,11 @@
 		INIT_LIST_HEAD(&f->remotes);
 		memcpy(f->eth_addr, mac, ETH_ALEN);
 
-		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+		rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+		if (rc < 0) {
+			kfree(f);
+			return rc;
+		}
 
 		++vxlan->addrcnt;
 		hlist_add_head_rcu(&f->hlist,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 39ce76a..16241d2 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2222,8 +2222,9 @@
 			is_scanning_required = 1;
 		} else {
 			mwifiex_dbg(priv->adapter, MSG,
-				    "info: trying to associate to '%s' bssid %pM\n",
-				    (char *)req_ssid.ssid, bss->bssid);
+				    "info: trying to associate to '%.*s' bssid %pM\n",
+				    req_ssid.ssid_len, (char *)req_ssid.ssid,
+				    bss->bssid);
 			memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
 			break;
 		}
@@ -2283,8 +2284,8 @@
 	}
 
 	mwifiex_dbg(adapter, INFO,
-		    "info: Trying to associate to %s and bssid %pM\n",
-		    (char *)sme->ssid, sme->bssid);
+		    "info: Trying to associate to %.*s and bssid %pM\n",
+		    (int)sme->ssid_len, (char *)sme->ssid, sme->bssid);
 
 	if (!mwifiex_stop_bg_scan(priv))
 		cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
@@ -2417,8 +2418,8 @@
 	}
 
 	mwifiex_dbg(priv->adapter, MSG,
-		    "info: trying to join to %s and bssid %pM\n",
-		    (char *)params->ssid, params->bssid);
+		    "info: trying to join to %.*s and bssid %pM\n",
+		    params->ssid_len, (char *)params->ssid, params->bssid);
 
 	mwifiex_set_ibss_params(priv, params);
 
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 5a3145a..262281b 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -490,3 +490,18 @@
 	return -ENODEV;
 }
 EXPORT_SYMBOL(of_phy_register_fixed_link);
+
+void of_phy_deregister_fixed_link(struct device_node *np)
+{
+	struct phy_device *phydev;
+
+	phydev = of_phy_find_device(np);
+	if (!phydev)
+		return;
+
+	fixed_phy_unregister(phydev);
+
+	put_device(&phydev->mdio.dev);	/* of_phy_find_device() */
+	phy_device_free(phydev);	/* fixed_phy_register() */
+}
+EXPORT_SYMBOL(of_phy_deregister_fixed_link);
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 8df6312..1a02038 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
  *
- * Authors: Joao Pinto <jpmpinto@gmail.com>
+ * Authors: Joao Pinto <Joao.Pinto@synopsys.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index db553dc..2b6a592 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -307,20 +307,6 @@
 	return 0;
 }
 
-static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
-{
-	while (1) {
-		if (!pci_is_pcie(dev))
-			break;
-		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
-			return dev;
-		if (!dev->bus->self)
-			break;
-		dev = dev->bus->self;
-	}
-	return NULL;
-}
-
 static int find_aer_device_iter(struct device *device, void *data)
 {
 	struct pcie_device **result = data;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ab00267..104c46d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1439,6 +1439,21 @@
 		dev_warn(&dev->dev, "PCI-X settings not supported\n");
 }
 
+static bool pcie_root_rcb_set(struct pci_dev *dev)
+{
+	struct pci_dev *rp = pcie_find_root_port(dev);
+	u16 lnkctl;
+
+	if (!rp)
+		return false;
+
+	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
+	if (lnkctl & PCI_EXP_LNKCTL_RCB)
+		return true;
+
+	return false;
+}
+
 static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
 {
 	int pos;
@@ -1468,9 +1483,20 @@
 			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
 
 	/* Initialize Link Control Register */
-	if (pcie_cap_has_lnkctl(dev))
+	if (pcie_cap_has_lnkctl(dev)) {
+
+		/*
+		 * If the Root Port supports Read Completion Boundary of
+		 * 128, set RCB to 128.  Otherwise, clear it.
+		 */
+		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
+		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
+		if (pcie_root_rcb_set(dev))
+			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
+
 		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
 			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
+	}
 
 	/* Find Advanced Error Reporting Enhanced Capability */
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index f399bae..05d8da9 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -450,6 +450,10 @@
 	case IPA_RM_RESOURCE_MHI_CONS:
 		clients->names[i++] = IPA_CLIENT_MHI_CONS;
 		break;
+	case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
+		clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
+		clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
+		break;
 	case IPA_RM_RESOURCE_USB_PROD:
 		clients->names[i++] = IPA_CLIENT_USB_PROD;
 		break;
@@ -459,6 +463,8 @@
 	case IPA_RM_RESOURCE_MHI_PROD:
 		clients->names[i++] = IPA_CLIENT_MHI_PROD;
 		break;
+	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
+		clients->names[i++] = IPA_CLIENT_ODU_PROD;
 	default:
 		break;
 	}
@@ -490,13 +496,15 @@
 	if (ep->keep_ipa_awake)
 		return false;
 
-	if (client == IPA_CLIENT_USB_CONS   ||
-	    client == IPA_CLIENT_MHI_CONS   ||
-	    client == IPA_CLIENT_HSIC1_CONS ||
-	    client == IPA_CLIENT_WLAN1_CONS ||
-	    client == IPA_CLIENT_WLAN2_CONS ||
-	    client == IPA_CLIENT_WLAN3_CONS ||
-	    client == IPA_CLIENT_WLAN4_CONS)
+	if (client == IPA_CLIENT_USB_CONS     ||
+	    client == IPA_CLIENT_MHI_CONS     ||
+	    client == IPA_CLIENT_HSIC1_CONS   ||
+	    client == IPA_CLIENT_WLAN1_CONS   ||
+	    client == IPA_CLIENT_WLAN2_CONS   ||
+	    client == IPA_CLIENT_WLAN3_CONS   ||
+	    client == IPA_CLIENT_WLAN4_CONS   ||
+	    client == IPA_CLIENT_ODU_EMB_CONS ||
+	    client == IPA_CLIENT_ODU_TETH_CONS)
 		return true;
 
 	return false;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 2564b90..9b91537 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -818,6 +818,10 @@
 	case IPA_RM_RESOURCE_MHI_CONS:
 		clients->names[i++] = IPA_CLIENT_MHI_CONS;
 		break;
+	case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
+		clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
+		clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
+		break;
 	case IPA_RM_RESOURCE_USB_PROD:
 		clients->names[i++] = IPA_CLIENT_USB_PROD;
 		break;
@@ -827,6 +831,8 @@
 	case IPA_RM_RESOURCE_MHI_PROD:
 		clients->names[i++] = IPA_CLIENT_MHI_PROD;
 		break;
+	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
+		clients->names[i++] = IPA_CLIENT_ODU_PROD;
 	default:
 		break;
 	}
@@ -865,7 +871,9 @@
 	    client == IPA_CLIENT_WLAN1_CONS   ||
 	    client == IPA_CLIENT_WLAN2_CONS   ||
 	    client == IPA_CLIENT_WLAN3_CONS   ||
-	    client == IPA_CLIENT_WLAN4_CONS)
+	    client == IPA_CLIENT_WLAN4_CONS   ||
+	    client == IPA_CLIENT_ODU_EMB_CONS ||
+	    client == IPA_CLIENT_ODU_TETH_CONS)
 		return true;
 
 	return false;
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 381871b..9d5bd7d 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -474,6 +474,7 @@
 	if (IS_ERR(meson->base))
 		return PTR_ERR(meson->base);
 
+	spin_lock_init(&meson->lock);
 	meson->chip.dev = &pdev->dev;
 	meson->chip.ops = &meson_pwm_ops;
 	meson->chip.base = -1;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 0296d81..a813239 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -425,6 +425,8 @@
 		if (test_bit(PWMF_EXPORTED, &pwm->flags))
 			pwm_unexport_child(parent, pwm);
 	}
+
+	put_device(parent);
 }
 
 static int __init pwm_sysfs_init(void)
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index aebc4dd..ac05317 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1083,7 +1083,7 @@
 	nonemb_cmd = &phba->boot_struct.nonemb_cmd;
 	nonemb_cmd->size = sizeof(*resp);
 	nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
-					      sizeof(nonemb_cmd->size),
+					      nonemb_cmd->size,
 					      &nonemb_cmd->dma);
 	if (!nonemb_cmd->va) {
 		mutex_unlock(&ctrl->mbox_lock);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index d007ec1..a1d6ab7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2009,7 +2009,7 @@
 
 static int hpsa_slave_alloc(struct scsi_device *sdev)
 {
-	struct hpsa_scsi_dev_t *sd;
+	struct hpsa_scsi_dev_t *sd = NULL;
 	unsigned long flags;
 	struct ctlr_info *h;
 
@@ -2026,7 +2026,8 @@
 			sd->target = sdev_id(sdev);
 			sd->lun = sdev->lun;
 		}
-	} else
+	}
+	if (!sd)
 		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
 					sdev_id(sdev), sdev->lun);
 
@@ -3840,6 +3841,7 @@
 		sizeof(this_device->vendor));
 	memcpy(this_device->model, &inq_buff[16],
 		sizeof(this_device->model));
+	this_device->rev = inq_buff[2];
 	memset(this_device->device_id, 0,
 		sizeof(this_device->device_id));
 	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
@@ -3929,10 +3931,14 @@
 
 	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
 		/* physical device, target and lun filled in later */
-		if (is_hba_lunid(lunaddrbytes))
+		if (is_hba_lunid(lunaddrbytes)) {
+			int bus = HPSA_HBA_BUS;
+
+			if (!device->rev)
+				bus = HPSA_LEGACY_HBA_BUS;
 			hpsa_set_bus_target_lun(device,
-					HPSA_HBA_BUS, 0, lunid & 0x3fff);
-		else
+					bus, 0, lunid & 0x3fff);
+		} else
 			/* defer target, lun assignment for physical devices */
 			hpsa_set_bus_target_lun(device,
 					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 82cdfad..9ea162d 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -69,6 +69,7 @@
 	u64 sas_address;
 	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
 	unsigned char model[16];        /* bytes 16-31 of inquiry data */
+	unsigned char rev;		/* byte 2 of inquiry data */
 	unsigned char raid_level;	/* from inquiry page 0xC1 */
 	unsigned char volume_offline;	/* discovered via TUR or VPD */
 	u16 queue_depth;		/* max queue_depth for this device */
@@ -402,6 +403,7 @@
 #define HPSA_RAID_VOLUME_BUS		1
 #define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
 #define HPSA_HBA_BUS			0
+#define HPSA_LEGACY_HBA_BUS		3
 
 /*
 	Send the command to the hardware
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 04ce7cf..50c7167 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -308,7 +308,7 @@
 	fc_stats = &lport->host_stats;
 	memset(fc_stats, 0, sizeof(struct fc_host_statistics));
 
-	fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ;
+	fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
 
 	for_each_possible_cpu(cpu) {
 		struct fc_stats *stats;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 91b70bc..1c4744e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3885,6 +3885,11 @@
 	}
 }
 
+static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+{
+	return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+}
+
 /**
  * _scsih_flush_running_cmds - completing outstanding commands.
  * @ioc: per adapter object
@@ -3906,6 +3911,9 @@
 		if (!scmd)
 			continue;
 		count++;
+		if (ata_12_16_cmd(scmd))
+			scsi_internal_device_unblock(scmd->device,
+							SDEV_RUNNING);
 		mpt3sas_base_free_smid(ioc, smid);
 		scsi_dma_unmap(scmd);
 		if (ioc->pci_error_recovery)
@@ -4010,11 +4018,6 @@
 	    SAM_STAT_CHECK_CONDITION;
 }
 
-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
-{
-	return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
-}
-
 /**
  * scsih_qcmd - main scsi request entry point
  * @scmd: pointer to scsi command object
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 86eb199..c7cc803 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -791,8 +791,10 @@
 	slot->slot_tag = tag;
 
 	slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
-	if (!slot->buf)
+	if (!slot->buf) {
+		rc = -ENOMEM;
 		goto err_out_tag;
+	}
 	memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
 
 	tei.task = task;
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index 4377e87..892a0b0 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -356,8 +356,8 @@
 
 	/* The rest of the elements are unimportant for performance. */
 	struct qlogicpti         *next;
-	__u32                     res_dvma;             /* Ptr to RESPONSE bufs (DVMA)*/
-	__u32                     req_dvma;             /* Ptr to REQUEST bufs (DVMA) */
+	dma_addr_t                res_dvma;             /* Ptr to RESPONSE bufs (DVMA)*/
+	dma_addr_t                req_dvma;             /* Ptr to REQUEST bufs (DVMA) */
 	u_char	                  fware_majrev, fware_minrev, fware_micrev;
 	struct Scsi_Host         *qhost;
 	int                       qpti_id;
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index cca38aa..7957e83 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -541,6 +541,9 @@
 	/* Skunk ID */
 	[321] = {MSM_CPU_SKUNK, "MSMSKUNK"},
 
+	/* Bat ID */
+	[328] = {MSM_CPU_BAT, "SDMBAT"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -1222,6 +1225,10 @@
 		dummy_socinfo.id = 321;
 		strlcpy(dummy_socinfo.build_id, "msmskunk - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sdmbat()) {
+		dummy_socinfo.id = 328;
+		strlcpy(dummy_socinfo.build_id, "sdmbat - ",
+			sizeof(dummy_socinfo.build_id));
 	}
 
 	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 8347c90..5eb0412 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -808,7 +808,11 @@
 	struct crypto_skcipher *tfm_arc4;
 	struct scatterlist sgin, sgout;
 	struct skcipher_request *req;
-	unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */
+	unsigned char *sec_key;
+
+	sec_key = kmalloc(CIFS_SESS_KEY_SIZE, GFP_KERNEL);
+	if (sec_key == NULL)
+		return -ENOMEM;
 
 	get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
 
@@ -816,7 +820,7 @@
 	if (IS_ERR(tfm_arc4)) {
 		rc = PTR_ERR(tfm_arc4);
 		cifs_dbg(VFS, "could not allocate crypto API arc4\n");
-		return rc;
+		goto out;
 	}
 
 	rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response,
@@ -854,7 +858,8 @@
 
 out_free_cipher:
 	crypto_free_skcipher(tfm_arc4);
-
+out:
+	kfree(sec_key);
 	return rc;
 }
 
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 3f3185f..e3fed92 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -3427,6 +3427,7 @@
 	__u16 rc = 0;
 	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
 	struct posix_acl_xattr_header *local_acl = (void *)pACL;
+	struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
 	int count;
 	int i;
 
@@ -3453,8 +3454,7 @@
 		return 0;
 	}
 	for (i = 0; i < count; i++) {
-		rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i],
-			(struct posix_acl_xattr_entry *)(local_acl + 1));
+		rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], &ace[i]);
 		if (rc != 0) {
 			/* ACE not converted */
 			break;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index aab5227..4547aed 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -412,6 +412,9 @@
 		}
 	} while (server->tcpStatus == CifsNeedReconnect);
 
+	if (server->tcpStatus == CifsNeedNegotiate)
+		mod_delayed_work(cifsiod_wq, &server->echo, 0);
+
 	return rc;
 }
 
@@ -421,17 +424,25 @@
 	int rc;
 	struct TCP_Server_Info *server = container_of(work,
 					struct TCP_Server_Info, echo.work);
-	unsigned long echo_interval = server->echo_interval;
+	unsigned long echo_interval;
 
 	/*
-	 * We cannot send an echo if it is disabled or until the
-	 * NEGOTIATE_PROTOCOL request is done, which is indicated by
-	 * server->ops->need_neg() == true. Also, no need to ping if
-	 * we got a response recently.
+	 * If we need to renegotiate, set echo interval to zero to
+	 * immediately call echo service where we can renegotiate.
+	 */
+	if (server->tcpStatus == CifsNeedNegotiate)
+		echo_interval = 0;
+	else
+		echo_interval = server->echo_interval;
+
+	/*
+	 * We cannot send an echo if it is disabled.
+	 * Also, no need to ping if we got a response recently.
 	 */
 
 	if (server->tcpStatus == CifsNeedReconnect ||
-	    server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
+	    server->tcpStatus == CifsExiting ||
+	    server->tcpStatus == CifsNew ||
 	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
 	    time_before(jiffies, server->lstrp + echo_interval - HZ))
 		goto requeue_echo;
@@ -442,7 +453,7 @@
 			 server->hostname);
 
 requeue_echo:
-	queue_delayed_work(cifsiod_wq, &server->echo, echo_interval);
+	queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
 }
 
 static bool
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 98b3eb7..0ec1373 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -377,9 +377,9 @@
 			{
 				int p;
 				for (p = 0; p < rr->u.ER.len_id; p++)
-					printk("%c", rr->u.ER.data[p]);
+					printk(KERN_CONT "%c", rr->u.ER.data[p]);
 			}
-			printk("\n");
+			printk(KERN_CONT "\n");
 			break;
 		case SIG('P', 'X'):
 			inode->i_mode = isonum_733(rr->u.PX.mode);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index edd46a0..0e10085 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -328,11 +328,11 @@
 	if (!real)
 		goto bug;
 
+	/* Handle recursion */
+	real = d_real(real, inode, open_flags);
+
 	if (!inode || inode == d_inode(real))
 		return real;
-
-	/* Handle recursion */
-	return d_real(real, inode, open_flags);
 bug:
 	WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
 	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 63554e9..59a3b2f5 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -54,6 +54,7 @@
 KSYM(__kcrctab_\name):
 	__put KSYM(__crc_\name)
 	.weak KSYM(__crc_\name)
+	.set KSYM(__crc_\name), 0
 	.previous
 #endif
 #endif
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 994f52a..3dbcd5b 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -181,6 +181,13 @@
  * @set_flags: Set custom flags which deal with hardware specifics. Returns 0
  *	       on success, -EERROR otherwise.
  *
+ * @list_registers: Queries the hardware to get the current register contents.
+ *		    This callback is optional.
+ *
+ * @list_rate:  On success, return the nth supported frequency for a given
+ *		clock that is below rate_max. Return -ENXIO in case there is
+ *		no frequency table.
+ *
  * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
  * implementations to split any work between atomic (enable) and sleepable
  * (prepare) contexts.  If enabling a clock requires code that might sleep,
@@ -221,6 +228,10 @@
 	void		(*init)(struct clk_hw *hw);
 	int		(*debug_init)(struct clk_hw *hw, struct dentry *dentry);
 	int		(*set_flags)(struct clk_hw *hw, unsigned int flags);
+	void		(*list_registers)(struct seq_file *f,
+							struct clk_hw *hw);
+	long		(*list_rate)(struct clk_hw *hw, unsigned int n,
+							unsigned long rate_max);
 };
 
 /**
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 432f5c9..928e5ca 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -263,7 +263,9 @@
 #endif
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */
 
-#if GCC_VERSION >= 50000
+#if GCC_VERSION >= 70000
+#define KASAN_ABI_VERSION 5
+#elif GCC_VERSION >= 50000
 #define KASAN_ABI_VERSION 4
 #elif GCC_VERSION >= 40902
 #define KASAN_ABI_VERSION 3
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
new file mode 100644
index 0000000..3b5c7bf
--- /dev/null
+++ b/include/linux/diagchar.h
@@ -0,0 +1,895 @@
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DIAGCHAR_SHARED
+#define DIAGCHAR_SHARED
+
+#define MSG_MASKS_TYPE		0x00000001
+#define LOG_MASKS_TYPE		0x00000002
+#define EVENT_MASKS_TYPE	0x00000004
+#define PKT_TYPE		0x00000008
+#define DEINIT_TYPE		0x00000010
+#define USER_SPACE_DATA_TYPE	0x00000020
+#define DCI_DATA_TYPE		0x00000040
+#define USER_SPACE_RAW_DATA_TYPE	0x00000080
+#define DCI_LOG_MASKS_TYPE	0x00000100
+#define DCI_EVENT_MASKS_TYPE	0x00000200
+#define DCI_PKT_TYPE		0x00000400
+#define HDLC_SUPPORT_TYPE	0x00001000
+
+#define USB_MODE			1
+#define MEMORY_DEVICE_MODE		2
+#define NO_LOGGING_MODE			3
+#define UART_MODE			4
+#define SOCKET_MODE			5
+#define CALLBACK_MODE			6
+
+/* different values that go in for diag_data_type */
+#define DATA_TYPE_EVENT			0
+#define DATA_TYPE_F3			1
+#define DATA_TYPE_LOG			2
+#define DATA_TYPE_RESPONSE		3
+#define DATA_TYPE_DELAYED_RESPONSE	4
+#define DATA_TYPE_DCI_LOG		0x00000100
+#define DATA_TYPE_DCI_EVENT		0x00000200
+
+/* Different IOCTL values */
+#define DIAG_IOCTL_COMMAND_REG		0
+#define DIAG_IOCTL_COMMAND_DEREG	1
+#define DIAG_IOCTL_SWITCH_LOGGING	7
+#define DIAG_IOCTL_GET_DELAYED_RSP_ID	8
+#define DIAG_IOCTL_LSM_DEINIT		9
+#define DIAG_IOCTL_DCI_INIT		20
+#define DIAG_IOCTL_DCI_DEINIT		21
+#define DIAG_IOCTL_DCI_SUPPORT		22
+#define DIAG_IOCTL_DCI_REG		23
+#define DIAG_IOCTL_DCI_STREAM_INIT	24
+#define DIAG_IOCTL_DCI_HEALTH_STATS	25
+#define DIAG_IOCTL_DCI_LOG_STATUS	26
+#define DIAG_IOCTL_DCI_EVENT_STATUS	27
+#define DIAG_IOCTL_DCI_CLEAR_LOGS	28
+#define DIAG_IOCTL_DCI_CLEAR_EVENTS	29
+#define DIAG_IOCTL_REMOTE_DEV		32
+#define DIAG_IOCTL_VOTE_REAL_TIME	33
+#define DIAG_IOCTL_GET_REAL_TIME	34
+#define DIAG_IOCTL_PERIPHERAL_BUF_CONFIG	35
+#define DIAG_IOCTL_PERIPHERAL_BUF_DRAIN		36
+#define DIAG_IOCTL_REGISTER_CALLBACK	37
+#define DIAG_IOCTL_HDLC_TOGGLE	38
+
+/* PC Tools IDs */
+#define APQ8060_TOOLS_ID	4062
+#define AO8960_TOOLS_ID		4064
+#define APQ8064_TOOLS_ID	4072
+#define MSM8625_TOOLS_ID	4075
+#define MSM8930_TOOLS_ID	4076
+#define MSM8630_TOOLS_ID	4077
+#define MSM8230_TOOLS_ID	4078
+#define APQ8030_TOOLS_ID	4079
+#define MSM8627_TOOLS_ID	4080
+#define MSM8227_TOOLS_ID	4081
+#define MSM8974_TOOLS_ID	4083
+#define APQ8074_TOOLS_ID	4090
+#define MSM8916_TOOLS_ID	4094
+#define APQ8084_TOOLS_ID	4095
+#define MSM8994_TOOLS_ID	4097
+#define MSM8939_TOOLS_ID	4103
+#define APQ8026_TOOLS_ID	4104
+#define MSM8909_TOOLS_ID	4108
+#define MSM8992_TOOLS_ID	4111
+#define MSM8952_TOOLS_ID	4110
+#define MSM_8996_TOOLS_ID	4112
+
+#define MSG_MASK_0			(0x00000001)
+#define MSG_MASK_1			(0x00000002)
+#define MSG_MASK_2			(0x00000004)
+#define MSG_MASK_3			(0x00000008)
+#define MSG_MASK_4			(0x00000010)
+#define MSG_MASK_5			(0x00000020)
+#define MSG_MASK_6			(0x00000040)
+#define MSG_MASK_7			(0x00000080)
+#define MSG_MASK_8			(0x00000100)
+#define MSG_MASK_9			(0x00000200)
+#define MSG_MASK_10			(0x00000400)
+#define MSG_MASK_11			(0x00000800)
+#define MSG_MASK_12			(0x00001000)
+#define MSG_MASK_13			(0x00002000)
+#define MSG_MASK_14			(0x00004000)
+#define MSG_MASK_15			(0x00008000)
+#define MSG_MASK_16			(0x00010000)
+#define MSG_MASK_17			(0x00020000)
+#define MSG_MASK_18			(0x00040000)
+#define MSG_MASK_19			(0x00080000)
+#define MSG_MASK_20			(0x00100000)
+#define MSG_MASK_21			(0x00200000)
+#define MSG_MASK_22			(0x00400000)
+#define MSG_MASK_23			(0x00800000)
+#define MSG_MASK_24			(0x01000000)
+#define MSG_MASK_25			(0x02000000)
+#define MSG_MASK_26			(0x04000000)
+#define MSG_MASK_27			(0x08000000)
+#define MSG_MASK_28			(0x10000000)
+#define MSG_MASK_29			(0x20000000)
+#define MSG_MASK_30			(0x40000000)
+#define MSG_MASK_31			(0x80000000)
+
+/* These masks are to be used for support of all legacy messages in the sw.
+ * The user does not need to remember the names as they will be embedded in
+ * the appropriate macros.
+ */
+#define MSG_LEGACY_LOW			MSG_MASK_0
+#define MSG_LEGACY_MED			MSG_MASK_1
+#define MSG_LEGACY_HIGH			MSG_MASK_2
+#define MSG_LEGACY_ERROR		MSG_MASK_3
+#define MSG_LEGACY_FATAL		MSG_MASK_4
+
+/* Legacy Message Priorities */
+#define MSG_LVL_FATAL			(MSG_LEGACY_FATAL)
+#define MSG_LVL_ERROR			(MSG_LEGACY_ERROR | MSG_LVL_FATAL)
+#define MSG_LVL_HIGH			(MSG_LEGACY_HIGH | MSG_LVL_ERROR)
+#define MSG_LVL_MED			(MSG_LEGACY_MED | MSG_LVL_HIGH)
+#define MSG_LVL_LOW			(MSG_LEGACY_LOW | MSG_LVL_MED)
+
+#define MSG_LVL_NONE			0
+
+/* This needs to be modified manually now, when we add
+ * a new RANGE of SSIDs to the msg_mask_tbl.
+ */
+#define MSG_MASK_TBL_CNT		25
+#define APPS_EVENT_LAST_ID		0x0B14
+
+#define MSG_SSID_0			0
+#define MSG_SSID_0_LAST			118
+#define MSG_SSID_1			500
+#define MSG_SSID_1_LAST			506
+#define MSG_SSID_2			1000
+#define MSG_SSID_2_LAST			1007
+#define MSG_SSID_3			2000
+#define MSG_SSID_3_LAST			2008
+#define MSG_SSID_4			3000
+#define MSG_SSID_4_LAST			3014
+#define MSG_SSID_5			4000
+#define MSG_SSID_5_LAST			4010
+#define MSG_SSID_6			4500
+#define MSG_SSID_6_LAST			4573
+#define MSG_SSID_7			4600
+#define MSG_SSID_7_LAST			4615
+#define MSG_SSID_8			5000
+#define MSG_SSID_8_LAST			5032
+#define MSG_SSID_9			5500
+#define MSG_SSID_9_LAST			5516
+#define MSG_SSID_10			6000
+#define MSG_SSID_10_LAST		6081
+#define MSG_SSID_11			6500
+#define MSG_SSID_11_LAST		6521
+#define MSG_SSID_12			7000
+#define MSG_SSID_12_LAST		7003
+#define MSG_SSID_13			7100
+#define MSG_SSID_13_LAST		7111
+#define MSG_SSID_14			7200
+#define MSG_SSID_14_LAST		7201
+#define MSG_SSID_15			8000
+#define MSG_SSID_15_LAST		8000
+#define MSG_SSID_16			8500
+#define MSG_SSID_16_LAST		8529
+#define MSG_SSID_17			9000
+#define MSG_SSID_17_LAST		9008
+#define MSG_SSID_18			9500
+#define MSG_SSID_18_LAST		9510
+#define MSG_SSID_19			10200
+#define MSG_SSID_19_LAST		10210
+#define MSG_SSID_20			10251
+#define MSG_SSID_20_LAST		10255
+#define MSG_SSID_21			10300
+#define MSG_SSID_21_LAST		10300
+#define MSG_SSID_22			10350
+#define MSG_SSID_22_LAST		10377
+#define MSG_SSID_23			10400
+#define MSG_SSID_23_LAST		10415
+#define MSG_SSID_24			0xC000
+#define MSG_SSID_24_LAST		0xC063
+
+static const uint32_t msg_bld_masks_0[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_ERROR,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8,
+	MSG_LVL_LOW,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 |
+		MSG_MASK_11 | MSG_MASK_12 | MSG_MASK_13 | MSG_MASK_14 |
+		MSG_MASK_15 | MSG_MASK_16 | MSG_MASK_17 | MSG_MASK_18 |
+		MSG_MASK_19 | MSG_MASK_20 | MSG_MASK_21,
+	MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+		MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+		MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+		MSG_MASK_17,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+		MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+		MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+		MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20 |
+		MSG_MASK_21 | MSG_MASK_22 | MSG_MASK_23 | MSG_MASK_24|
+		MSG_MASK_25,
+	MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+		MSG_MASK_9 | MSG_MASK_10,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+		MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+		MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+		MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH | MSG_MASK_21,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR,
+	MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR,
+	MSG_LVL_MED | MSG_LVL_HIGH,
+	MSG_LVL_MED | MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_1[] = {
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_2[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_3[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+		MSG_MASK_9 | MSG_MASK_10,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_4[] = {
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_5[] = {
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 |
+		MSG_MASK_8 | MSG_MASK_9,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_6[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_7[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR | MSG_LVL_FATAL
+};
+
+static const uint32_t msg_bld_masks_8[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_9[] = {
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5,
+	MSG_LVL_MED | MSG_MASK_5
+};
+
+static const uint32_t msg_bld_masks_10[] =  {
+	MSG_LVL_MED,
+	MSG_LVL_ERROR,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+		MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+		MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+		MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20 |
+		MSG_MASK_21 | MSG_MASK_22,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW|MSG_MASK_0 | MSG_MASK_1 | MSG_MASK_2 | MSG_MASK_3 |
+		MSG_MASK_4 | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_HIGH,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_11[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+};
+
+static const uint32_t msg_bld_masks_12[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_13[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_14[] = {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_15[] = {
+	MSG_LVL_MED
+};
+
+static const uint32_t msg_bld_masks_16[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL,
+	MSG_LVL_LOW | MSG_LVL_MED | MSG_LVL_HIGH | MSG_LVL_ERROR |
+		MSG_LVL_FATAL
+};
+
+static const uint32_t msg_bld_masks_17[] =  {
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 | MSG_MASK_9,
+	MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+		MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+		MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+		MSG_MASK_17,
+	MSG_LVL_MED,
+	MSG_LVL_MED | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+		MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 | MSG_MASK_12 |
+		MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 | MSG_MASK_16 |
+		MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 | MSG_MASK_20 |
+		MSG_MASK_21 | MSG_MASK_22,
+	MSG_LVL_MED,
+	MSG_LVL_MED,
+};
+
+static const uint32_t msg_bld_masks_18[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_8 | MSG_MASK_9 | MSG_MASK_10 | MSG_MASK_11 |
+		MSG_MASK_12 | MSG_MASK_13 | MSG_MASK_14 | MSG_MASK_15 |
+		MSG_MASK_16 | MSG_MASK_17 | MSG_MASK_18 | MSG_MASK_19 |
+		MSG_MASK_20,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_LOW | MSG_MASK_5,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW | MSG_MASK_5 | MSG_MASK_6 | MSG_MASK_7 | MSG_MASK_8 |
+		MSG_MASK_9,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_19[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_20[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_21[] = {
+	MSG_LVL_HIGH
+};
+
+static const uint32_t msg_bld_masks_22[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+static const uint32_t msg_bld_masks_23[] = {
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW,
+	MSG_LVL_LOW
+};
+
+/* LOG CODES */
+static const uint32_t log_code_last_tbl[] = {
+	0x0,	/* EQUIP ID 0 */
+	0x1966,	/* EQUIP ID 1 */
+	0x0,	/* EQUIP ID 2 */
+	0x0,	/* EQUIP ID 3 */
+	0x4910,	/* EQUIP ID 4 */
+	0x5420,	/* EQUIP ID 5 */
+	0x0,	/* EQUIP ID 6 */
+	0x74FF,	/* EQUIP ID 7 */
+	0x0,	/* EQUIP ID 8 */
+	0x0,	/* EQUIP ID 9 */
+	0xA38A,	/* EQUIP ID 10 */
+	0xB201,	/* EQUIP ID 11 */
+	0x0,	/* EQUIP ID 12 */
+	0xD1FF,	/* EQUIP ID 13 */
+	0x0,	/* EQUIP ID 14 */
+	0x0,	/* EQUIP ID 15 */
+};
+
+#define LOG_GET_ITEM_NUM(xx_code)	(xx_code & 0x0FFF)
+#define LOG_GET_EQUIP_ID(xx_code)	((xx_code & 0xF000) >> 12)
+#define LOG_ITEMS_TO_SIZE(num_items)	((num_items+7)/8)
+#define LOG_SIZE_TO_ITEMS(size)		((8*size) - 7)
+#define EVENT_COUNT_TO_BYTES(count)	((count/8) + 1)
+
+#endif
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 3be7abd..c9f3796 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -476,7 +476,6 @@
 enum {
 	MLX4_INTERFACE_STATE_UP		= 1 << 0,
 	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
-	MLX4_INTERFACE_STATE_SHUTDOWN	= 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 2ab2336..a58cca8 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -29,6 +29,7 @@
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
 extern int of_phy_register_fixed_link(struct device_node *np);
+extern void of_phy_deregister_fixed_link(struct device_node *np);
 extern bool of_phy_is_fixed_link(struct device_node *np);
 
 #else /* CONFIG_OF */
@@ -83,6 +84,9 @@
 {
 	return -ENOSYS;
 }
+static inline void of_phy_deregister_fixed_link(struct device_node *np)
+{
+}
 static inline bool of_phy_is_fixed_link(struct device_node *np)
 {
 	return false;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index dd15d39..7dbe914 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -374,16 +374,13 @@
 }
 
 /*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get index of the page with in radix-tree
+ * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
  */
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_to_index(struct page *page)
 {
 	pgoff_t pgoff;
 
-	if (unlikely(PageHeadHuge(page)))
-		return page->index << compound_order(page);
-
 	if (likely(!PageTransTail(page)))
 		return page->index;
 
@@ -397,6 +394,18 @@
 }
 
 /*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+	if (unlikely(PageHeadHuge(page)))
+		return page->index << compound_order(page);
+
+	return page_to_index(page);
+}
+
+/*
  * Return byte-offset into filesystem object for page.
  */
 static inline loff_t page_offset(struct page *page)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0e49f70..a38772a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1928,6 +1928,20 @@
 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
 }
 
+static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+{
+	while (1) {
+		if (!pci_is_pcie(dev))
+			break;
+		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+			return dev;
+		if (!dev->bus->self)
+			break;
+		dev = dev->bus->self;
+	}
+	return NULL;
+}
+
 void pci_request_acs(void);
 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
 bool pci_acs_path_enabled(struct pci_dev *start,
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3fe01b4..f98fbf6 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5775,6 +5775,16 @@
 /* ethtool helper */
 void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info);
 
+/**
+ * cfg80211_is_gratuitous_arp_unsolicited_na - packet is grat. ARP/unsol. NA
+ * @skb: the input packet, must be an ethernet frame already
+ *
+ * Return: %true if the packet is a gratuitous ARP or unsolicited NA packet.
+ * This is used to drop packets that shouldn't occur because the AP implements
+ * a proxy service.
+ */
+bool cfg80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb);
+
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
 
 /* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 8fed1cd..f11ca83 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -970,6 +970,8 @@
 int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
 			   char __user *optval, int __user *optlen);
 
+int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
+			   int addr_len);
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
 				 int addr_len);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5041805..d9d52c0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -100,6 +100,9 @@
 
 	possible_net_t ct_net;
 
+#if IS_ENABLED(CONFIG_NF_NAT)
+	struct rhlist_head nat_bysource;
+#endif
 	/* all members below initialized via memset */
 	u8 __nfct_init_offset[0];
 
@@ -117,9 +120,6 @@
 	/* Extensions */
 	struct nf_ct_ext *ext;
 
-#if IS_ENABLED(CONFIG_NF_NAT)
-	struct rhash_head	nat_bysource;
-#endif
 	/* Storage reserved for other modules, must be the last member */
 	union nf_conntrack_proto proto;
 };
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index d79d1e9..b02af0b 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -313,7 +313,7 @@
  * 	@size: maximum set size
  * 	@nelems: number of elements
  * 	@ndeact: number of deactivated elements queued for removal
- * 	@timeout: default timeout value in msecs
+ *	@timeout: default timeout value in jiffies
  * 	@gc_int: garbage collection interval in msecs
  *	@policy: set parameterization (see enum nft_set_policies)
  *	@udlen: user data length
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 20410f5..f5987da 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -98,6 +98,8 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmfalcon")
 #define early_machine_is_msmskunk()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmskunk")
+#define early_machine_is_sdmbat()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdmbat")
 #else
 #define of_board_is_sim()		0
 #define of_board_is_rumi()		0
@@ -136,6 +138,7 @@
 #define early_machine_is_msmhamster()	0
 #define early_machine_is_msmfalcon()	0
 #define early_machine_is_msmskunk()	0
+#define early_machine_is_sdmbat()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -196,7 +199,7 @@
 	MSM_CPU_HAMSTER,
 	MSM_CPU_FALCON,
 	MSM_CPU_SKUNK,
-
+	MSM_CPU_BAT,
 };
 
 struct msm_soc_info {
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index d6d071f..3af60ee 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -640,7 +640,7 @@
  * Control a data application associated with the currently viewed channel,
  * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
  */
-#define KEY_DATA			0x275
+#define KEY_DATA			0x277
 
 #define BTN_TRIGGER_HAPPY		0x2c0
 #define BTN_TRIGGER_HAPPY1		0x2c0
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index e3969bd..9611c7b 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -11,3 +11,4 @@
 header-y += tc_bpf.h
 header-y += tc_connmark.h
 header-y += tc_ife.h
+header-y += tc_tunnel_key.h
diff --git a/include/uapi/linux/usb/Kbuild b/include/uapi/linux/usb/Kbuild
index 4cc4d6e..ba153d5 100644
--- a/include/uapi/linux/usb/Kbuild
+++ b/include/uapi/linux/usb/Kbuild
@@ -9,4 +9,5 @@
 header-y += gadgetfs.h
 header-y += midi.h
 header-y += tmc.h
+header-y += usb_ctrl_qti.h
 header-y += video.h
diff --git a/include/uapi/linux/usb/usb_ctrl_qti.h b/include/uapi/linux/usb/usb_ctrl_qti.h
new file mode 100644
index 0000000..b02272a
--- /dev/null
+++ b/include/uapi/linux/usb/usb_ctrl_qti.h
@@ -0,0 +1,41 @@
+#ifndef __UAPI_LINUX_USB_CTRL_QTI_H
+#define __UAPI_LINUX_USB_CTRL_QTI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define MAX_QTI_PKT_SIZE 2048
+
+#define QTI_CTRL_IOCTL_MAGIC	'r'
+#define QTI_CTRL_GET_LINE_STATE	_IOR(QTI_CTRL_IOCTL_MAGIC, 2, int)
+#define QTI_CTRL_EP_LOOKUP _IOR(QTI_CTRL_IOCTL_MAGIC, 3, struct ep_info)
+#define QTI_CTRL_MODEM_OFFLINE _IO(QTI_CTRL_IOCTL_MAGIC, 4)
+#define QTI_CTRL_MODEM_ONLINE _IO(QTI_CTRL_IOCTL_MAGIC, 5)
+
+enum peripheral_ep_type {
+	DATA_EP_TYPE_RESERVED	= 0x0,
+	DATA_EP_TYPE_HSIC	= 0x1,
+	DATA_EP_TYPE_HSUSB	= 0x2,
+	DATA_EP_TYPE_PCIE	= 0x3,
+	DATA_EP_TYPE_EMBEDDED	= 0x4,
+	DATA_EP_TYPE_BAM_DMUX	= 0x5,
+};
+
+struct peripheral_ep_info {
+	enum peripheral_ep_type		ep_type;
+	__u32				peripheral_iface_id;
+};
+
+struct ipa_ep_pair {
+	__u32 cons_pipe_num;
+	__u32 prod_pipe_num;
+};
+
+struct ep_info {
+	struct peripheral_ep_info	ph_ep_info;
+	struct ipa_ep_pair		ipa_ep_pair;
+
+};
+
+#endif
+
diff --git a/init/Kconfig b/init/Kconfig
index 2b7b97f..f595c26 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1962,7 +1962,6 @@
 
 config MODVERSIONS
 	bool "Module versioning support"
-	depends on BROKEN
 	help
 	  Usually, you have to use modules compiled with your kernel.
 	  Saying Y here makes it sometimes possible to use modules
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 6a93615..8199821 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2454,6 +2454,7 @@
 			 struct bpf_verifier_state *old,
 			 struct bpf_verifier_state *cur)
 {
+	bool varlen_map_access = env->varlen_map_value_access;
 	struct bpf_reg_state *rold, *rcur;
 	int i;
 
@@ -2467,12 +2468,17 @@
 		/* If the ranges were not the same, but everything else was and
 		 * we didn't do a variable access into a map then we are a-ok.
 		 */
-		if (!env->varlen_map_value_access &&
+		if (!varlen_map_access &&
 		    rold->type == rcur->type && rold->imm == rcur->imm)
 			continue;
 
+		/* If we didn't map access then again we don't care about the
+		 * mismatched range values and it's ok if our old type was
+		 * UNKNOWN and we didn't go to a NOT_INIT'ed reg.
+		 */
 		if (rold->type == NOT_INIT ||
-		    (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT))
+		    (!varlen_map_access && rold->type == UNKNOWN_VALUE &&
+		     rcur->type != NOT_INIT))
 			continue;
 
 		if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
diff --git a/kernel/module.c b/kernel/module.c
index f57dd63..0e54d5b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1301,8 +1301,9 @@
 		goto bad_version;
 	}
 
-	pr_warn("%s: no symbol version for %s\n", mod->name, symname);
-	return 0;
+	/* Broken toolchain. Warn once, then let it go.. */
+	pr_warn_once("%s: no symbol version for %s\n", mod->name, symname);
+	return 1;
 
 bad_version:
 	pr_warn("%s: disagrees about version of symbol %s\n",
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a8e1260..056052dc 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -362,6 +362,7 @@
 
 	__debug_object_init(addr, descr, 0);
 }
+EXPORT_SYMBOL_GPL(debug_object_init);
 
 /**
  * debug_object_init_on_stack - debug checks when an object on stack is
@@ -376,6 +377,7 @@
 
 	__debug_object_init(addr, descr, 1);
 }
+EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
 
 /**
  * debug_object_activate - debug checks when an object is activated
@@ -449,6 +451,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(debug_object_activate);
 
 /**
  * debug_object_deactivate - debug checks when an object is deactivated
@@ -496,6 +499,7 @@
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_deactivate);
 
 /**
  * debug_object_destroy - debug checks when an object is destroyed
@@ -542,6 +546,7 @@
 out_unlock:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_destroy);
 
 /**
  * debug_object_free - debug checks when an object is freed
@@ -582,6 +587,7 @@
 out_unlock:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_free);
 
 /**
  * debug_object_assert_init - debug checks when object should be init-ed
@@ -626,6 +632,7 @@
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_assert_init);
 
 /**
  * debug_object_active_state - debug checks object usage state machine
@@ -673,6 +680,7 @@
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
+EXPORT_SYMBOL_GPL(debug_object_active_state);
 
 #ifdef CONFIG_DEBUG_OBJECTS_FREE
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 5e51872b..fbdf879 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -20,6 +20,11 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 
+/*
+ * Note: test functions are marked noinline so that their names appear in
+ * reports.
+ */
+
 static noinline void __init kmalloc_oob_right(void)
 {
 	char *ptr;
@@ -411,6 +416,29 @@
 	kfree(kmem);
 }
 
+static noinline void __init use_after_scope_test(void)
+{
+	volatile char *volatile p;
+
+	pr_info("use-after-scope on int\n");
+	{
+		int local = 0;
+
+		p = (char *)&local;
+	}
+	p[0] = 1;
+	p[3] = 1;
+
+	pr_info("use-after-scope on array\n");
+	{
+		char local[1024] = {0};
+
+		p = local;
+	}
+	p[0] = 1;
+	p[1023] = 1;
+}
+
 static int __init kmalloc_tests_init(void)
 {
 	kmalloc_oob_right();
@@ -436,6 +464,7 @@
 	kasan_global_oob();
 	ksize_unpoisons_memory();
 	copy_user_test();
+	use_after_scope_test();
 	return -EAGAIN;
 }
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index eff3de3..d4a6e40 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1456,9 +1456,9 @@
 		new_ptl = pmd_lockptr(mm, new_pmd);
 		if (new_ptl != old_ptl)
 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
-		if (pmd_present(*old_pmd) && pmd_dirty(*old_pmd))
-			force_flush = true;
 		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
+		if (pmd_present(pmd) && pmd_dirty(pmd))
+			force_flush = true;
 		VM_BUG_ON(!pmd_none(*new_pmd));
 
 		if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 70c0097..0e9505f 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -764,6 +764,25 @@
 void __asan_handle_no_return(void) {}
 EXPORT_SYMBOL(__asan_handle_no_return);
 
+/* Emitted by compiler to poison large objects when they go out of scope. */
+void __asan_poison_stack_memory(const void *addr, size_t size)
+{
+	/*
+	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
+	 * by redzones, so we simply round up size to simplify logic.
+	 */
+	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
+			    KASAN_USE_AFTER_SCOPE);
+}
+EXPORT_SYMBOL(__asan_poison_stack_memory);
+
+/* Emitted by compiler to unpoison large objects when they go into scope. */
+void __asan_unpoison_stack_memory(const void *addr, size_t size)
+{
+	kasan_unpoison_shadow(addr, size);
+}
+EXPORT_SYMBOL(__asan_unpoison_stack_memory);
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 static int kasan_mem_notifier(struct notifier_block *nb,
 			unsigned long action, void *data)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index e5c2181..1c260e6 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -21,6 +21,7 @@
 #define KASAN_STACK_MID         0xF2
 #define KASAN_STACK_RIGHT       0xF3
 #define KASAN_STACK_PARTIAL     0xF4
+#define KASAN_USE_AFTER_SCOPE   0xF8
 
 /* Don't break randconfig/all*config builds */
 #ifndef KASAN_ABI_VERSION
@@ -53,6 +54,9 @@
 #if KASAN_ABI_VERSION >= 4
 	struct kasan_source_location *location;
 #endif
+#if KASAN_ABI_VERSION >= 5
+	char *odr_indicator;
+#endif
 };
 
 /**
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 24c1211..073325a 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -90,6 +90,9 @@
 	case KASAN_KMALLOC_FREE:
 		bug_type = "use-after-free";
 		break;
+	case KASAN_USE_AFTER_SCOPE:
+		bug_type = "use-after-scope";
+		break;
 	}
 
 	pr_err("BUG: KASAN: %s in %pS at addr %p\n",
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 728d779..87e1a7ca 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -103,6 +103,7 @@
 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
+#ifdef CONFIG_SYSFS
 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 					 struct kobj_attribute *attr,
 					 char *buf)
@@ -295,6 +296,7 @@
 	.attrs = khugepaged_attr,
 	.name = "khugepaged",
 };
+#endif /* CONFIG_SYSFS */
 
 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
 
diff --git a/mm/mlock.c b/mm/mlock.c
index b3ffbdf..facf6e7 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -190,10 +190,13 @@
 	 */
 	spin_lock_irq(zone_lru_lock(zone));
 
-	nr_pages = hpage_nr_pages(page);
-	if (!TestClearPageMlocked(page))
+	if (!TestClearPageMlocked(page)) {
+		/* Potentially, PTE-mapped THP: do not skip the rest PTEs */
+		nr_pages = 1;
 		goto unlock_out;
+	}
 
+	nr_pages = hpage_nr_pages(page);
 	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
 
 	if (__munlock_isolate_lru_page(page, true)) {
diff --git a/mm/mremap.c b/mm/mremap.c
index 6ccecc0..30d7d24 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -149,14 +149,18 @@
 		if (pte_none(*old_pte))
 			continue;
 
-		/*
-		 * We are remapping a dirty PTE, make sure to
-		 * flush TLB before we drop the PTL for the
-		 * old PTE or we may race with page_mkclean().
-		 */
-		if (pte_present(*old_pte) && pte_dirty(*old_pte))
-			force_flush = true;
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
+		/*
+		 * If we are remapping a dirty PTE, make sure
+		 * to flush TLB before we drop the PTL for the
+		 * old PTE or we may race with page_mkclean().
+		 *
+		 * This check has to be done after we removed the
+		 * old PTE from page tables or another thread may
+		 * dirty it after the check and before the removal.
+		 */
+		if (pte_present(pte) && pte_dirty(pte))
+			force_flush = true;
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
 		pte = move_soft_dirty_pte(pte);
 		set_pte_at(mm, new_addr, new_pte, pte);
diff --git a/mm/truncate.c b/mm/truncate.c
index a01cce4..8d8c62d 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -283,7 +283,7 @@
 
 			if (!trylock_page(page))
 				continue;
-			WARN_ON(page_to_pgoff(page) != index);
+			WARN_ON(page_to_index(page) != index);
 			if (PageWriteback(page)) {
 				unlock_page(page);
 				continue;
@@ -371,7 +371,7 @@
 			}
 
 			lock_page(page);
-			WARN_ON(page_to_pgoff(page) != index);
+			WARN_ON(page_to_index(page) != index);
 			wait_on_page_writeback(page);
 			truncate_inode_page(mapping, page);
 			unlock_page(page);
@@ -492,7 +492,7 @@
 			if (!trylock_page(page))
 				continue;
 
-			WARN_ON(page_to_pgoff(page) != index);
+			WARN_ON(page_to_index(page) != index);
 
 			/* Middle of THP: skip */
 			if (PageTransTail(page)) {
@@ -612,7 +612,7 @@
 			}
 
 			lock_page(page);
-			WARN_ON(page_to_pgoff(page) != index);
+			WARN_ON(page_to_index(page) != index);
 			if (page->mapping != mapping) {
 				unlock_page(page);
 				continue;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 76fda22..d75cdf3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2354,6 +2354,8 @@
 			}
 		}
 
+		cond_resched();
+
 		if (nr_reclaimed < nr_to_reclaim || scan_adjusted)
 			continue;
 
diff --git a/mm/workingset.c b/mm/workingset.c
index 617475f..fb1f918 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -348,7 +348,7 @@
 	shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
 	local_irq_enable();
 
-	if (memcg_kmem_enabled()) {
+	if (sc->memcg) {
 		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
 						     LRU_ALL_FILE);
 	} else {
diff --git a/net/core/flow.c b/net/core/flow.c
index 3937b1b..18e8893 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -95,7 +95,6 @@
 	list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
 		flow_entry_kill(fce, xfrm);
 		atomic_dec(&xfrm->flow_cache_gc_count);
-		WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
 	}
 }
 
@@ -236,9 +235,8 @@
 		if (fcp->hash_count > fc->high_watermark)
 			flow_cache_shrink(fc, fcp);
 
-		if (fcp->hash_count > 2 * fc->high_watermark ||
-		    atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
-			atomic_inc(&net->xfrm.flow_cache_genid);
+		if (atomic_read(&net->xfrm.flow_cache_gc_count) >
+		    2 * num_online_cpus() * fc->high_watermark) {
 			flo = ERR_PTR(-ENOBUFS);
 			goto ret_object;
 		}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index deb35ac..a6196cf 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -931,8 +931,8 @@
 	       + nla_total_size(4) /* IFLA_PROMISCUITY */
 	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
 	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
-	       + nla_total_size(4) /* IFLA_MAX_GSO_SEGS */
-	       + nla_total_size(4) /* IFLA_MAX_GSO_SIZE */
+	       + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
+	       + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
diff --git a/net/core/sock.c b/net/core/sock.c
index 5e3ca41..00a074d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -715,7 +715,7 @@
 		val = min_t(u32, val, sysctl_wmem_max);
 set_sndbuf:
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
+		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
 		/* Wake up sending tasks if we upped the value. */
 		sk->sk_write_space(sk);
 		break;
@@ -751,7 +751,7 @@
 		 * returning the value we actually used in getsockopt
 		 * is the most desirable behavior.
 		 */
-		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
+		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
 		break;
 
 	case SO_RCVBUFFORCE:
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index b567c87..edbe59d 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -700,6 +700,7 @@
 {
 	const struct dccp_hdr *dh;
 	unsigned int cscov;
+	u8 dccph_doff;
 
 	if (skb->pkt_type != PACKET_HOST)
 		return 1;
@@ -721,18 +722,19 @@
 	/*
 	 * If P.Data Offset is too small for packet type, drop packet and return
 	 */
-	if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
-		DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
+	dccph_doff = dh->dccph_doff;
+	if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
+		DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
 		return 1;
 	}
 	/*
 	 * If P.Data Offset is too too large for packet, drop packet and return
 	 */
-	if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
-		DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
+	if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
+		DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
 		return 1;
 	}
-
+	dh = dccp_hdr(skb);
 	/*
 	 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
 	 * has short sequence numbers), drop packet and return
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index a6902c1..7899919 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -233,6 +233,8 @@
 		genphy_read_status(phydev);
 		if (ds->ops->adjust_link)
 			ds->ops->adjust_link(ds, port, phydev);
+
+		put_device(&phydev->mdio.dev);
 	}
 
 	return 0;
@@ -504,15 +506,8 @@
 
 void dsa_cpu_dsa_destroy(struct device_node *port_dn)
 {
-	struct phy_device *phydev;
-
-	if (of_phy_is_fixed_link(port_dn)) {
-		phydev = of_phy_find_device(port_dn);
-		if (phydev) {
-			phy_device_free(phydev);
-			fixed_phy_unregister(phydev);
-		}
-	}
+	if (of_phy_is_fixed_link(port_dn))
+		of_phy_deregister_fixed_link(port_dn);
 }
 
 static void dsa_switch_destroy(struct dsa_switch *ds)
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index f8a7d9a..5fff951 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -28,8 +28,10 @@
 	struct dsa_switch_tree *dst;
 
 	list_for_each_entry(dst, &dsa_switch_trees, list)
-		if (dst->tree == tree)
+		if (dst->tree == tree) {
+			kref_get(&dst->refcount);
 			return dst;
+		}
 	return NULL;
 }
 
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6b1282c..30e2e21 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1125,7 +1125,7 @@
 	p->phy_interface = mode;
 
 	phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
-	if (of_phy_is_fixed_link(port_dn)) {
+	if (!phy_dn && of_phy_is_fixed_link(port_dn)) {
 		/* In the case of a fixed PHY, the DT node associated
 		 * to the fixed PHY is the Port DT node
 		 */
@@ -1135,7 +1135,7 @@
 			return ret;
 		}
 		phy_is_fixed = true;
-		phy_dn = port_dn;
+		phy_dn = of_node_get(port_dn);
 	}
 
 	if (ds->ops->get_phy_flags)
@@ -1154,6 +1154,7 @@
 			ret = dsa_slave_phy_connect(p, slave_dev, phy_id);
 			if (ret) {
 				netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret);
+				of_node_put(phy_dn);
 				return ret;
 			}
 		} else {
@@ -1162,6 +1163,8 @@
 						phy_flags,
 						p->phy_interface);
 		}
+
+		of_node_put(phy_dn);
 	}
 
 	if (p->phy && phy_is_fixed)
@@ -1174,6 +1177,8 @@
 		ret = dsa_slave_phy_connect(p, slave_dev, p->port);
 		if (ret) {
 			netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret);
+			if (phy_is_fixed)
+				of_phy_deregister_fixed_link(port_dn);
 			return ret;
 		}
 	}
@@ -1289,10 +1294,18 @@
 void dsa_slave_destroy(struct net_device *slave_dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
+	struct dsa_switch *ds = p->parent;
+	struct device_node *port_dn;
+
+	port_dn = ds->ports[p->port].dn;
 
 	netif_carrier_off(slave_dev);
-	if (p->phy)
+	if (p->phy) {
 		phy_disconnect(p->phy);
+
+		if (of_phy_is_fixed_link(port_dn))
+			of_phy_deregister_fixed_link(port_dn);
+	}
 	unregister_netdev(slave_dev);
 	free_netdev(slave_dev);
 }
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 300b068..b54b3ca 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -715,6 +715,7 @@
 	default "reno" if DEFAULT_RENO
 	default "dctcp" if DEFAULT_DCTCP
 	default "cdg" if DEFAULT_CDG
+	default "bbr" if DEFAULT_BBR
 	default "cubic"
 
 config TCP_MD5SIG
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6d902a0..c836bfe 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1248,7 +1248,7 @@
 		fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
 
 		/* fixed ID is invalid if DF bit is not set */
-		if (fixedid && !(iph->frag_off & htons(IP_DF)))
+		if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
 			goto out;
 	}
 
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d95631d..20fb25e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -476,7 +476,7 @@
 		esph = (void *)skb_push(skb, 4);
 		*seqhi = esph->spi;
 		esph->spi = esph->seq_no;
-		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
+		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
 	}
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index eaf720b..d24fa20 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -107,6 +107,8 @@
 	if (unlikely(!skb))
 		return 0;
 
+	skb->protocol = htons(ETH_P_IP);
+
 	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 		       net, sk, skb, NULL, skb_dst(skb)->dev,
 		       dst_output);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c3776ff..b3cc133 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -24,10 +24,11 @@
 	struct flowi4 fl4 = {};
 	__be32 saddr = iph->saddr;
 	__u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
+	struct net_device *dev = skb_dst(skb)->dev;
 	unsigned int hh_len;
 
 	if (addr_type == RTN_UNSPEC)
-		addr_type = inet_addr_type(net, saddr);
+		addr_type = inet_addr_type_dev_table(net, dev, saddr);
 	if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
 		flags |= FLOWI_FLAG_ANYSRC;
 	else
@@ -40,6 +41,8 @@
 	fl4.saddr = saddr;
 	fl4.flowi4_tos = RT_TOS(iph->tos);
 	fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+	if (!fl4.flowi4_oif)
+		fl4.flowi4_oif = l3mdev_master_ifindex(dev);
 	fl4.flowi4_mark = skb->mark;
 	fl4.flowi4_flags = flags;
 	rt = ip_route_output_key(net, &fl4);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index b31df59..6975384 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1201,8 +1201,8 @@
 
 	newinfo->number = compatr->num_entries;
 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
-		newinfo->hook_entry[i] = info->hook_entry[i];
-		newinfo->underflow[i] = info->underflow[i];
+		newinfo->hook_entry[i] = compatr->hook_entry[i];
+		newinfo->underflow[i] = compatr->underflow[i];
 	}
 	entry1 = newinfo->entries;
 	pos = entry1;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 0fc5c46..50dacc8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -140,7 +140,8 @@
 }
 EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
 
-static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
+			   int addr_len)
 {
 	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock	*inet = inet_sk(sk);
@@ -253,6 +254,7 @@
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(__ip6_datagram_connect);
 
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index f921368..17df6bb 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -418,7 +418,7 @@
 		esph = (void *)skb_push(skb, 4);
 		*seqhi = esph->spi;
 		esph->spi = esph->seq_no;
-		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
+		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
 	}
 
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 77cde2b..15db375 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -447,8 +447,10 @@
 
 	if (__ipv6_addr_needs_scope_id(addr_type))
 		iif = skb->dev->ifindex;
-	else
-		iif = l3mdev_master_ifindex(skb_dst(skb)->dev);
+	else {
+		dst = skb_dst(skb);
+		iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
+	}
 
 	/*
 	 *	Must not send error if the source does not uniquely
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1fcf61f..89c59e6 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -99,7 +99,7 @@
 		segs = ops->callbacks.gso_segment(skb, features);
 	}
 
-	if (IS_ERR(segs))
+	if (IS_ERR_OR_NULL(segs))
 		goto out;
 
 	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0a4759b..d76674e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1181,7 +1181,6 @@
 	if (err)
 		return err;
 
-	skb->protocol = htons(ETH_P_IPV6);
 	skb_push(skb, sizeof(struct ipv6hdr));
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 5465042..d58480a 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -1138,6 +1138,33 @@
 	.priority	=	100,
 };
 
+static bool is_vti6_tunnel(const struct net_device *dev)
+{
+	return dev->netdev_ops == &vti6_netdev_ops;
+}
+
+static int vti6_device_event(struct notifier_block *unused,
+			     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct ip6_tnl *t = netdev_priv(dev);
+
+	if (!is_vti6_tunnel(dev))
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_DOWN:
+		if (!net_eq(t->net, dev_net(dev)))
+			xfrm_garbage_collect(t->net);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vti6_notifier_block __read_mostly = {
+	.notifier_call = vti6_device_event,
+};
+
 /**
  * vti6_tunnel_init - register protocol and reserve needed resources
  *
@@ -1148,6 +1175,8 @@
 	const char *msg;
 	int err;
 
+	register_netdevice_notifier(&vti6_notifier_block);
+
 	msg = "tunnel device";
 	err = register_pernet_device(&vti6_net_ops);
 	if (err < 0)
@@ -1180,6 +1209,7 @@
 xfrm_proto_esp_failed:
 	unregister_pernet_device(&vti6_net_ops);
 pernet_dev_failed:
+	unregister_netdevice_notifier(&vti6_notifier_block);
 	pr_err("vti6 init: failed to register %s\n", msg);
 	return err;
 }
@@ -1194,6 +1224,7 @@
 	xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
 	xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
 	unregister_pernet_device(&vti6_net_ops);
+	unregister_netdevice_notifier(&vti6_notifier_block);
 }
 
 module_init(vti6_tunnel_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index e4347ae..9948b5c 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -576,11 +576,11 @@
 	/* Jumbo payload inhibits frag. header */
 	if (ipv6_hdr(skb)->payload_len == 0) {
 		pr_debug("payload len = 0\n");
-		return -EINVAL;
+		return 0;
 	}
 
 	if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
-		return -EINVAL;
+		return 0;
 
 	if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr)))
 		return -ENOMEM;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index f7aab5a..f06b047 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -69,7 +69,7 @@
 	if (err == -EINPROGRESS)
 		return NF_STOLEN;
 
-	return NF_ACCEPT;
+	return err == 0 ? NF_ACCEPT : NF_DROP;
 }
 
 static struct nf_hook_ops ipv6_defrag_ops[] = {
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index a540022..1009040 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -156,6 +156,7 @@
 	fl6.daddr = oip6h->saddr;
 	fl6.fl6_sport = otcph->dest;
 	fl6.fl6_dport = otcph->source;
+	fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
 	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 7cca8ac..cd42523 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -155,6 +155,8 @@
 	if (unlikely(!skb))
 		return 0;
 
+	skb->protocol = htons(ETH_P_IPV6);
+
 	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		       net, sk, skb, NULL, skb_dst(skb)->dev,
 		       dst_output);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 982f6c4..8938b6b 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -61,7 +61,8 @@
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
 		    !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
-		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+		    (!sk->sk_bound_dev_if || !dif ||
+		     sk->sk_bound_dev_if == dif))
 			goto found;
 	}
 
@@ -182,15 +183,17 @@
 		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
 		read_lock_bh(&l2tp_ip_lock);
-		sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
+		sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
+					   tunnel_id);
+		if (!sk) {
+			read_unlock_bh(&l2tp_ip_lock);
+			goto discard;
+		}
+
+		sock_hold(sk);
 		read_unlock_bh(&l2tp_ip_lock);
 	}
 
-	if (sk == NULL)
-		goto discard;
-
-	sock_hold(sk);
-
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_put;
 
@@ -256,15 +259,9 @@
 	if (addr->l2tp_family != AF_INET)
 		return -EINVAL;
 
-	ret = -EADDRINUSE;
-	read_lock_bh(&l2tp_ip_lock);
-	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
-				  sk->sk_bound_dev_if, addr->l2tp_conn_id))
-		goto out_in_use;
-
-	read_unlock_bh(&l2tp_ip_lock);
-
 	lock_sock(sk);
+
+	ret = -EINVAL;
 	if (!sock_flag(sk, SOCK_ZAPPED))
 		goto out;
 
@@ -281,14 +278,22 @@
 		inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
 		inet->inet_saddr = 0;  /* Use device */
-	sk_dst_reset(sk);
-
-	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
 
 	write_lock_bh(&l2tp_ip_lock);
+	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
+				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
+		write_unlock_bh(&l2tp_ip_lock);
+		ret = -EADDRINUSE;
+		goto out;
+	}
+
+	sk_dst_reset(sk);
+	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
+
 	sk_add_bind_node(sk, &l2tp_ip_bind_table);
 	sk_del_node_init(sk);
 	write_unlock_bh(&l2tp_ip_lock);
+
 	ret = 0;
 	sock_reset_flag(sk, SOCK_ZAPPED);
 
@@ -296,11 +301,6 @@
 	release_sock(sk);
 
 	return ret;
-
-out_in_use:
-	read_unlock_bh(&l2tp_ip_lock);
-
-	return ret;
 }
 
 static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@ -308,21 +308,24 @@
 	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
 	int rc;
 
-	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
-		return -EINVAL;
-
 	if (addr_len < sizeof(*lsa))
 		return -EINVAL;
 
 	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
 		return -EINVAL;
 
-	rc = ip4_datagram_connect(sk, uaddr, addr_len);
-	if (rc < 0)
-		return rc;
-
 	lock_sock(sk);
 
+	/* Must bind first - autobinding does not work */
+	if (sock_flag(sk, SOCK_ZAPPED)) {
+		rc = -EINVAL;
+		goto out_sk;
+	}
+
+	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
+	if (rc < 0)
+		goto out_sk;
+
 	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
 
 	write_lock_bh(&l2tp_ip_lock);
@@ -330,7 +333,9 @@
 	sk_add_bind_node(sk, &l2tp_ip_bind_table);
 	write_unlock_bh(&l2tp_ip_lock);
 
+out_sk:
 	release_sock(sk);
+
 	return rc;
 }
 
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 9978d01..aa821cb 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -72,8 +72,9 @@
 
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
-		    !(addr && ipv6_addr_equal(addr, laddr)) &&
-		    !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+		    (!addr || ipv6_addr_equal(addr, laddr)) &&
+		    (!sk->sk_bound_dev_if || !dif ||
+		     sk->sk_bound_dev_if == dif))
 			goto found;
 	}
 
@@ -196,16 +197,17 @@
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		read_lock_bh(&l2tp_ip6_lock);
-		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
-					    0, tunnel_id);
+		sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
+					    tunnel_id);
+		if (!sk) {
+			read_unlock_bh(&l2tp_ip6_lock);
+			goto discard;
+		}
+
+		sock_hold(sk);
 		read_unlock_bh(&l2tp_ip6_lock);
 	}
 
-	if (sk == NULL)
-		goto discard;
-
-	sock_hold(sk);
-
 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_put;
 
@@ -266,6 +268,7 @@
 	struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
 	struct net *net = sock_net(sk);
 	__be32 v4addr = 0;
+	int bound_dev_if;
 	int addr_type;
 	int err;
 
@@ -284,13 +287,6 @@
 	if (addr_type & IPV6_ADDR_MULTICAST)
 		return -EADDRNOTAVAIL;
 
-	err = -EADDRINUSE;
-	read_lock_bh(&l2tp_ip6_lock);
-	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
-				   sk->sk_bound_dev_if, addr->l2tp_conn_id))
-		goto out_in_use;
-	read_unlock_bh(&l2tp_ip6_lock);
-
 	lock_sock(sk);
 
 	err = -EINVAL;
@@ -300,28 +296,25 @@
 	if (sk->sk_state != TCP_CLOSE)
 		goto out_unlock;
 
+	bound_dev_if = sk->sk_bound_dev_if;
+
 	/* Check if the address belongs to the host. */
 	rcu_read_lock();
 	if (addr_type != IPV6_ADDR_ANY) {
 		struct net_device *dev = NULL;
 
 		if (addr_type & IPV6_ADDR_LINKLOCAL) {
-			if (addr_len >= sizeof(struct sockaddr_in6) &&
-			    addr->l2tp_scope_id) {
-				/* Override any existing binding, if another
-				 * one is supplied by user.
-				 */
-				sk->sk_bound_dev_if = addr->l2tp_scope_id;
-			}
+			if (addr->l2tp_scope_id)
+				bound_dev_if = addr->l2tp_scope_id;
 
 			/* Binding to link-local address requires an
-			   interface */
-			if (!sk->sk_bound_dev_if)
+			 * interface.
+			 */
+			if (!bound_dev_if)
 				goto out_unlock_rcu;
 
 			err = -ENODEV;
-			dev = dev_get_by_index_rcu(sock_net(sk),
-						   sk->sk_bound_dev_if);
+			dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
 			if (!dev)
 				goto out_unlock_rcu;
 		}
@@ -336,13 +329,22 @@
 	}
 	rcu_read_unlock();
 
-	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
+	write_lock_bh(&l2tp_ip6_lock);
+	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+				   addr->l2tp_conn_id)) {
+		write_unlock_bh(&l2tp_ip6_lock);
+		err = -EADDRINUSE;
+		goto out_unlock;
+	}
+
+	inet->inet_saddr = v4addr;
+	inet->inet_rcv_saddr = v4addr;
+	sk->sk_bound_dev_if = bound_dev_if;
 	sk->sk_v6_rcv_saddr = addr->l2tp_addr;
 	np->saddr = addr->l2tp_addr;
 
 	l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
 
-	write_lock_bh(&l2tp_ip6_lock);
 	sk_add_bind_node(sk, &l2tp_ip6_bind_table);
 	sk_del_node_init(sk);
 	write_unlock_bh(&l2tp_ip6_lock);
@@ -355,10 +357,7 @@
 	rcu_read_unlock();
 out_unlock:
 	release_sock(sk);
-	return err;
 
-out_in_use:
-	read_unlock_bh(&l2tp_ip6_lock);
 	return err;
 }
 
@@ -371,9 +370,6 @@
 	int	addr_type;
 	int rc;
 
-	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
-		return -EINVAL;
-
 	if (addr_len < sizeof(*lsa))
 		return -EINVAL;
 
@@ -390,10 +386,18 @@
 			return -EINVAL;
 	}
 
-	rc = ip6_datagram_connect(sk, uaddr, addr_len);
-
 	lock_sock(sk);
 
+	 /* Must bind first - autobinding does not work */
+	if (sock_flag(sk, SOCK_ZAPPED)) {
+		rc = -EINVAL;
+		goto out_sk;
+	}
+
+	rc = __ip6_datagram_connect(sk, uaddr, addr_len);
+	if (rc < 0)
+		goto out_sk;
+
 	l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
 
 	write_lock_bh(&l2tp_ip6_lock);
@@ -401,6 +405,7 @@
 	sk_add_bind_node(sk, &l2tp_ip6_bind_table);
 	write_unlock_bh(&l2tp_ip6_lock);
 
+out_sk:
 	release_sock(sk);
 
 	return rc;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index bbb8f3d..5b9c884 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -42,7 +42,7 @@
 	const struct nf_conntrack_zone *zone;
 };
 
-static struct rhashtable nf_nat_bysource_table;
+static struct rhltable nf_nat_bysource_table;
 
 inline const struct nf_nat_l3proto *
 __nf_nat_l3proto_find(u8 family)
@@ -193,9 +193,12 @@
 	const struct nf_nat_conn_key *key = arg->key;
 	const struct nf_conn *ct = obj;
 
-	return same_src(ct, key->tuple) &&
-	       net_eq(nf_ct_net(ct), key->net) &&
-	       nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL);
+	if (!same_src(ct, key->tuple) ||
+	    !net_eq(nf_ct_net(ct), key->net) ||
+	    !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
+		return 1;
+
+	return 0;
 }
 
 static struct rhashtable_params nf_nat_bysource_params = {
@@ -204,7 +207,6 @@
 	.obj_cmpfn = nf_nat_bysource_cmp,
 	.nelem_hint = 256,
 	.min_size = 1024,
-	.nulls_base = (1U << RHT_BASE_SHIFT),
 };
 
 /* Only called for SRC manip */
@@ -223,12 +225,15 @@
 		.tuple = tuple,
 		.zone = zone
 	};
+	struct rhlist_head *hl;
 
-	ct = rhashtable_lookup_fast(&nf_nat_bysource_table, &key,
-				    nf_nat_bysource_params);
-	if (!ct)
+	hl = rhltable_lookup(&nf_nat_bysource_table, &key,
+			     nf_nat_bysource_params);
+	if (!hl)
 		return 0;
 
+	ct = container_of(hl, typeof(*ct), nat_bysource);
+
 	nf_ct_invert_tuplepr(result,
 			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 	result->dst = tuple->dst;
@@ -446,11 +451,17 @@
 	}
 
 	if (maniptype == NF_NAT_MANIP_SRC) {
+		struct nf_nat_conn_key key = {
+			.net = nf_ct_net(ct),
+			.tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+			.zone = nf_ct_zone(ct),
+		};
 		int err;
 
-		err = rhashtable_insert_fast(&nf_nat_bysource_table,
-					     &ct->nat_bysource,
-					     nf_nat_bysource_params);
+		err = rhltable_insert_key(&nf_nat_bysource_table,
+					  &key,
+					  &ct->nat_bysource,
+					  nf_nat_bysource_params);
 		if (err)
 			return NF_DROP;
 	}
@@ -567,8 +578,8 @@
 	 * will delete entry from already-freed table.
 	 */
 	ct->status &= ~IPS_NAT_DONE_MASK;
-	rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
-			       nf_nat_bysource_params);
+	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+			nf_nat_bysource_params);
 
 	/* don't delete conntrack.  Although that would make things a lot
 	 * simpler, we'd end up flushing all conntracks on nat rmmod.
@@ -698,8 +709,8 @@
 	if (!nat)
 		return;
 
-	rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
-			       nf_nat_bysource_params);
+	rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
+			nf_nat_bysource_params);
 }
 
 static struct nf_ct_ext_type nat_extend __read_mostly = {
@@ -834,13 +845,13 @@
 {
 	int ret;
 
-	ret = rhashtable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
+	ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
 	if (ret)
 		return ret;
 
 	ret = nf_ct_extend_register(&nat_extend);
 	if (ret < 0) {
-		rhashtable_destroy(&nf_nat_bysource_table);
+		rhltable_destroy(&nf_nat_bysource_table);
 		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
 		return ret;
 	}
@@ -864,7 +875,7 @@
 	return 0;
 
  cleanup_extend:
-	rhashtable_destroy(&nf_nat_bysource_table);
+	rhltable_destroy(&nf_nat_bysource_table);
 	nf_ct_extend_unregister(&nat_extend);
 	return ret;
 }
@@ -883,7 +894,7 @@
 	for (i = 0; i < NFPROTO_NUMPROTO; i++)
 		kfree(nf_nat_l4protos[i]);
 
-	rhashtable_destroy(&nf_nat_bysource_table);
+	rhltable_destroy(&nf_nat_bysource_table);
 }
 
 MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 026581b..e5194f6f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2570,7 +2570,8 @@
 	}
 
 	if (set->timeout &&
-	    nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout),
+	    nla_put_be64(skb, NFTA_SET_TIMEOUT,
+			 cpu_to_be64(jiffies_to_msecs(set->timeout)),
 			 NFTA_SET_PAD))
 		goto nla_put_failure;
 	if (set->gc_int &&
@@ -2859,7 +2860,8 @@
 	if (nla[NFTA_SET_TIMEOUT] != NULL) {
 		if (!(flags & NFT_SET_TIMEOUT))
 			return -EINVAL;
-		timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT]));
+		timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+						nla[NFTA_SET_TIMEOUT])));
 	}
 	gc_int = 0;
 	if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
@@ -3178,7 +3180,8 @@
 
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
 	    nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
-			 cpu_to_be64(*nft_set_ext_timeout(ext)),
+			 cpu_to_be64(jiffies_to_msecs(
+						*nft_set_ext_timeout(ext))),
 			 NFTA_SET_ELEM_PAD))
 		goto nla_put_failure;
 
@@ -3447,7 +3450,7 @@
 		memcpy(nft_set_ext_data(ext), data, set->dlen);
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION))
 		*nft_set_ext_expiration(ext) =
-			jiffies + msecs_to_jiffies(timeout);
+			jiffies + timeout;
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT))
 		*nft_set_ext_timeout(ext) = timeout;
 
@@ -3535,7 +3538,8 @@
 	if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
 		if (!(set->flags & NFT_SET_TIMEOUT))
 			return -EINVAL;
-		timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT]));
+		timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+					nla[NFTA_SET_ELEM_TIMEOUT])));
 	} else if (set->flags & NFT_SET_TIMEOUT) {
 		timeout = set->timeout;
 	}
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index baf694d..d5447a2 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -53,6 +53,7 @@
 {
 	struct nft_hash *priv = nft_expr_priv(expr);
 	u32 len;
+	int err;
 
 	if (!tb[NFTA_HASH_SREG] ||
 	    !tb[NFTA_HASH_DREG] ||
@@ -67,8 +68,10 @@
 	priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]);
 	priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
 
-	len = ntohl(nla_get_be32(tb[NFTA_HASH_LEN]));
-	if (len == 0 || len > U8_MAX)
+	err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len);
+	if (err < 0)
+		return err;
+	if (len == 0)
 		return -ERANGE;
 
 	priv->len = len;
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index fbc8800..8f0aaae 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -59,6 +59,12 @@
 	int err;
 	u32 op;
 
+	if (!tb[NFTA_RANGE_SREG]      ||
+	    !tb[NFTA_RANGE_OP]	      ||
+	    !tb[NFTA_RANGE_FROM_DATA] ||
+	    !tb[NFTA_RANGE_TO_DATA])
+		return -EINVAL;
+
 	err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
 			    &desc_from, tb[NFTA_RANGE_FROM_DATA]);
 	if (err < 0)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 62bea45..602e5eb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -322,14 +322,11 @@
 	sk_mem_charge(sk, skb->truesize);
 }
 
-static void netlink_sock_destruct(struct sock *sk)
+static void __netlink_sock_destruct(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (nlk->cb_running) {
-		if (nlk->cb.done)
-			nlk->cb.done(&nlk->cb);
-
 		module_put(nlk->cb.module);
 		kfree_skb(nlk->cb.skb);
 	}
@@ -346,6 +343,28 @@
 	WARN_ON(nlk_sk(sk)->groups);
 }
 
+static void netlink_sock_destruct_work(struct work_struct *work)
+{
+	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
+						work);
+
+	nlk->cb.done(&nlk->cb);
+	__netlink_sock_destruct(&nlk->sk);
+}
+
+static void netlink_sock_destruct(struct sock *sk)
+{
+	struct netlink_sock *nlk = nlk_sk(sk);
+
+	if (nlk->cb_running && nlk->cb.done) {
+		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+		schedule_work(&nlk->work);
+		return;
+	}
+
+	__netlink_sock_destruct(sk);
+}
+
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
  * SMP. Look, when several writers sleep and reader wakes them up, all but one
  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 3cfd6cc..4fdb383 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -3,6 +3,7 @@
 
 #include <linux/rhashtable.h>
 #include <linux/atomic.h>
+#include <linux/workqueue.h>
 #include <net/sock.h>
 
 #define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -33,6 +34,7 @@
 
 	struct rhash_head	node;
 	struct rcu_head		rcu;
+	struct work_struct	work;
 };
 
 static inline struct netlink_sock *nlk_sk(struct sock *sk)
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 31045ef..fecefa2 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -370,8 +370,11 @@
 		skb_orphan(skb);
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
 		err = nf_ct_frag6_gather(net, skb, user);
-		if (err)
+		if (err) {
+			if (err != -EINPROGRESS)
+				kfree_skb(skb);
 			return err;
+		}
 
 		key->ip.proto = ipv6_hdr(skb)->nexthdr;
 		ovs_cb.mru = IP6CB(skb)->frag_max_size;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d2238b2..dd23323 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3648,19 +3648,25 @@
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 		switch (val) {
 		case TPACKET_V1:
 		case TPACKET_V2:
 		case TPACKET_V3:
-			po->tp_version = val;
-			return 0;
+			break;
 		default:
 			return -EINVAL;
 		}
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_version = val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_RESERVE:
 	{
@@ -4164,6 +4170,7 @@
 	/* Added to avoid minimal code churn */
 	struct tpacket_req *req = &req_u->req;
 
+	lock_sock(sk);
 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
 		net_warn_ratelimited("Tx-ring is not supported.\n");
@@ -4245,7 +4252,6 @@
 			goto out;
 	}
 
-	lock_sock(sk);
 
 	/* Detach socket from network */
 	spin_lock(&po->bind_lock);
@@ -4294,11 +4300,11 @@
 		if (!tx_ring)
 			prb_shutdown_retire_blk_timer(po, rb_queue);
 	}
-	release_sock(sk);
 
 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
+	release_sock(sk);
 	return err;
 }
 
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index fcddacc..20e2923 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -659,6 +659,8 @@
 out_pernet:
 	unregister_pernet_subsys(&rds_tcp_net_ops);
 out_slab:
+	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
+		pr_warn("could not unregister rds_tcp_dev_notifier\n");
 	kmem_cache_destroy(rds_tcp_conn_slab);
 out:
 	return ret;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index b54d56d..cf9b2fe 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -108,6 +108,17 @@
 	kfree(keys);
 }
 
+static bool offset_valid(struct sk_buff *skb, int offset)
+{
+	if (offset > 0 && offset > skb->len)
+		return false;
+
+	if  (offset < 0 && -offset > skb_headroom(skb))
+		return false;
+
+	return true;
+}
+
 static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
 		     struct tcf_result *res)
 {
@@ -134,6 +145,11 @@
 			if (tkey->offmask) {
 				char *d, _d;
 
+				if (!offset_valid(skb, off + tkey->at)) {
+					pr_info("tc filter pedit 'at' offset %d out of bounds\n",
+						off + tkey->at);
+					goto bad;
+				}
 				d = skb_header_pointer(skb, off + tkey->at, 1,
 						       &_d);
 				if (!d)
@@ -146,10 +162,10 @@
 					" offset must be on 32 bit boundaries\n");
 				goto bad;
 			}
-			if (offset > 0 && offset > skb->len) {
-				pr_info("tc filter pedit"
-					" offset %d can't exceed pkt length %d\n",
-				       offset, skb->len);
+
+			if (!offset_valid(skb, off + offset)) {
+				pr_info("tc filter pedit offset %d out of bounds\n",
+					offset);
 				goto bad;
 			}
 
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index eb219b7..5877f60 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -62,9 +62,6 @@
 	struct basic_head *head = rtnl_dereference(tp->root);
 	struct basic_filter *f;
 
-	if (head == NULL)
-		return 0UL;
-
 	list_for_each_entry(f, &head->flist, link) {
 		if (f->handle == handle) {
 			l = (unsigned long) f;
@@ -109,7 +106,6 @@
 		tcf_unbind_filter(tp, &f->res);
 		call_rcu(&f->rcu, basic_delete_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index bb1d5a4..0a47ba5 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -292,7 +292,6 @@
 		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
 	}
 
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
@@ -303,9 +302,6 @@
 	struct cls_bpf_prog *prog;
 	unsigned long ret = 0UL;
 
-	if (head == NULL)
-		return 0UL;
-
 	list_for_each_entry(prog, &head->plist, link) {
 		if (prog->handle == handle) {
 			ret = (unsigned long) prog;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 85233c47..c1f2007 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -137,11 +137,10 @@
 
 	if (!force)
 		return false;
-
-	if (head) {
-		RCU_INIT_POINTER(tp->root, NULL);
+	/* Head can still be NULL due to cls_cgroup_init(). */
+	if (head)
 		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
-	}
+
 	return true;
 }
 
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e396723..6575aba 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -596,7 +596,6 @@
 		list_del_rcu(&f->list);
 		call_rcu(&f->rcu, flow_destroy_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index f6f40fb..9044424 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/rhashtable.h>
+#include <linux/workqueue.h>
 
 #include <linux/if_ether.h>
 #include <linux/in6.h>
@@ -64,7 +65,10 @@
 	bool mask_assigned;
 	struct list_head filters;
 	struct rhashtable_params ht_params;
-	struct rcu_head rcu;
+	union {
+		struct work_struct work;
+		struct rcu_head	rcu;
+	};
 };
 
 struct cls_fl_filter {
@@ -269,6 +273,24 @@
 	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
 }
 
+static void fl_destroy_sleepable(struct work_struct *work)
+{
+	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
+						work);
+	if (head->mask_assigned)
+		rhashtable_destroy(&head->ht);
+	kfree(head);
+	module_put(THIS_MODULE);
+}
+
+static void fl_destroy_rcu(struct rcu_head *rcu)
+{
+	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
+
+	INIT_WORK(&head->work, fl_destroy_sleepable);
+	schedule_work(&head->work);
+}
+
 static bool fl_destroy(struct tcf_proto *tp, bool force)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -282,10 +304,9 @@
 		list_del_rcu(&f->list);
 		call_rcu(&f->rcu, fl_destroy_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
-	if (head->mask_assigned)
-		rhashtable_destroy(&head->ht);
-	kfree_rcu(head, rcu);
+
+	__module_get(THIS_MODULE);
+	call_rcu(&head->rcu, fl_destroy_rcu);
 	return true;
 }
 
@@ -711,8 +732,9 @@
 		goto errout;
 
 	if (fold) {
-		rhashtable_remove_fast(&head->ht, &fold->ht_node,
-				       head->ht_params);
+		if (!tc_skip_sw(fold->flags))
+			rhashtable_remove_fast(&head->ht, &fold->ht_node,
+					       head->ht_params);
 		fl_hw_destroy_filter(tp, (unsigned long)fold);
 	}
 
@@ -739,8 +761,9 @@
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
 	struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
 
-	rhashtable_remove_fast(&head->ht, &f->ht_node,
-			       head->ht_params);
+	if (!tc_skip_sw(f->flags))
+		rhashtable_remove_fast(&head->ht, &f->ht_node,
+				       head->ht_params);
 	list_del_rcu(&f->list);
 	fl_hw_destroy_filter(tp, (unsigned long)f);
 	tcf_unbind_filter(tp, &f->res);
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 25927b6..f935429 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -114,7 +114,6 @@
 
 		call_rcu(&f->rcu, mall_destroy_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 4f05a19..322438f 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -152,7 +152,8 @@
 		return -1;
 	nhptr = ip_hdr(skb);
 #endif
-
+	if (unlikely(!head))
+		return -1;
 restart:
 
 #if RSVP_DST_LEN == 4
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 96144bd..0751245 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -543,7 +543,6 @@
 	walker.fn = tcindex_destroy_element;
 	tcindex_walk(tp, &walker);
 
-	RCU_INIT_POINTER(tp->root, NULL);
 	call_rcu(&p->rcu, __tcindex_destroy);
 	return true;
 }
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 975dbeb..52d7476 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -421,6 +421,10 @@
 	dev = dev_get_by_name(net, driver_name);
 	if (!dev)
 		return -ENODEV;
+	if (tipc_mtu_bad(dev, 0)) {
+		dev_put(dev);
+		return -EINVAL;
+	}
 
 	/* Associate TIPC bearer with L2 bearer */
 	rcu_assign_pointer(b->media_ptr, dev);
@@ -610,8 +614,6 @@
 	if (!b)
 		return NOTIFY_DONE;
 
-	b->mtu = dev->mtu;
-
 	switch (evt) {
 	case NETDEV_CHANGE:
 		if (netif_carrier_ok(dev))
@@ -624,6 +626,11 @@
 		tipc_reset_bearer(net, b);
 		break;
 	case NETDEV_CHANGEMTU:
+		if (tipc_mtu_bad(dev, 0)) {
+			bearer_disable(net, b);
+			break;
+		}
+		b->mtu = dev->mtu;
 		tipc_reset_bearer(net, b);
 		break;
 	case NETDEV_CHANGEADDR:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 78892e2f..278ff7f 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,6 +39,7 @@
 
 #include "netlink.h"
 #include "core.h"
+#include "msg.h"
 #include <net/genetlink.h>
 
 #define MAX_MEDIA	3
@@ -59,6 +60,9 @@
 #define TIPC_MEDIA_TYPE_IB	2
 #define TIPC_MEDIA_TYPE_UDP	3
 
+/* minimum bearer MTU */
+#define TIPC_MIN_BEARER_MTU	(MAX_H_SIZE + INT_H_SIZE)
+
 /**
  * struct tipc_media_addr - destination address used by TIPC bearers
  * @value: address info (format defined by media)
@@ -215,4 +219,13 @@
 void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
 			 struct sk_buff_head *xmitq);
 
+/* check if device MTU is too low for tipc headers */
+static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
+{
+	if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
+		return false;
+	netdev_warn(dev, "MTU too low for tipc bearer\n");
+	return true;
+}
+
 #endif	/* _TIPC_BEARER_H */
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ecc12411..bda89bf 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -47,8 +47,8 @@
 #include <linux/pkt_sched.h>
 
 struct tipc_stats {
-	u32 sent_info;		/* used in counting # sent packets */
-	u32 recv_info;		/* used in counting # recv'd packets */
+	u32 sent_pkts;
+	u32 recv_pkts;
 	u32 sent_states;
 	u32 recv_states;
 	u32 sent_probes;
@@ -857,7 +857,6 @@
 	l->acked = 0;
 	l->silent_intv_cnt = 0;
 	l->rst_cnt = 0;
-	l->stats.recv_info = 0;
 	l->stale_count = 0;
 	l->bc_peer_is_up = false;
 	memset(&l->mon_state, 0, sizeof(l->mon_state));
@@ -888,6 +887,7 @@
 	struct sk_buff_head *transmq = &l->transmq;
 	struct sk_buff_head *backlogq = &l->backlogq;
 	struct sk_buff *skb, *_skb, *bskb;
+	int pkt_cnt = skb_queue_len(list);
 
 	/* Match msg importance against this and all higher backlog limits: */
 	if (!skb_queue_empty(backlogq)) {
@@ -901,6 +901,11 @@
 		return -EMSGSIZE;
 	}
 
+	if (pkt_cnt > 1) {
+		l->stats.sent_fragmented++;
+		l->stats.sent_fragments += pkt_cnt;
+	}
+
 	/* Prepare each packet for sending, and add to relevant queue: */
 	while (skb_queue_len(list)) {
 		skb = skb_peek(list);
@@ -920,6 +925,7 @@
 			__skb_queue_tail(xmitq, _skb);
 			TIPC_SKB_CB(skb)->ackers = l->ackers;
 			l->rcv_unacked = 0;
+			l->stats.sent_pkts++;
 			seqno++;
 			continue;
 		}
@@ -968,6 +974,7 @@
 		msg_set_ack(hdr, ack);
 		msg_set_bcast_ack(hdr, bc_ack);
 		l->rcv_unacked = 0;
+		l->stats.sent_pkts++;
 		seqno++;
 	}
 	l->snd_nxt = seqno;
@@ -1260,7 +1267,7 @@
 
 		/* Deliver packet */
 		l->rcv_nxt++;
-		l->stats.recv_info++;
+		l->stats.recv_pkts++;
 		if (!tipc_data_input(l, skb, l->inputq))
 			rc |= tipc_link_input(l, skb, l->inputq);
 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
@@ -1800,10 +1807,6 @@
 void tipc_link_reset_stats(struct tipc_link *l)
 {
 	memset(&l->stats, 0, sizeof(l->stats));
-	if (!link_is_bc_sndlink(l)) {
-		l->stats.sent_info = l->snd_nxt;
-		l->stats.recv_info = l->rcv_nxt;
-	}
 }
 
 static void link_print(struct tipc_link *l, const char *str)
@@ -1867,12 +1870,12 @@
 	};
 
 	struct nla_map map[] = {
-		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
+		{TIPC_NLA_STATS_RX_INFO, 0},
 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
-		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
+		{TIPC_NLA_STATS_TX_INFO, 0},
 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
@@ -1947,9 +1950,9 @@
 		goto attr_msg_full;
 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
 		goto attr_msg_full;
 
 	if (tipc_link_is_up(link))
@@ -2004,12 +2007,12 @@
 	};
 
 	struct nla_map map[] = {
-		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
+		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
-		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
+		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
@@ -2076,9 +2079,9 @@
 		goto attr_msg_full;
 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
 		goto attr_msg_full;
 
 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 78cab9c..b58dc95 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -697,6 +697,11 @@
 		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
 		udp_conf.use_udp_checksums = false;
 		ub->ifindex = dev->ifindex;
+		if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
+				      sizeof(struct udphdr))) {
+			err = -EINVAL;
+			goto err;
+		}
 		b->mtu = dev->mtu - sizeof(struct iphdr)
 			- sizeof(struct udphdr);
 #if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 659b507..60ee74c 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -13,6 +13,8 @@
 #include <net/dsfield.h>
 #include <linux/if_vlan.h>
 #include <linux/mpls.h>
+#include <net/ndisc.h>
+#include <linux/if_arp.h>
 #include "core.h"
 #include "rdev-ops.h"
 
@@ -1791,3 +1793,54 @@
 const unsigned char bridge_tunnel_header[] __aligned(2) =
 	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
 EXPORT_SYMBOL(bridge_tunnel_header);
+
+bool cfg80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
+{
+	const struct ethhdr *eth = (void *)skb->data;
+	const struct {
+		struct arphdr hdr;
+		u8 ar_sha[ETH_ALEN];
+		u8 ar_sip[4];
+		u8 ar_tha[ETH_ALEN];
+		u8 ar_tip[4];
+	} __packed *arp;
+	const struct ipv6hdr *ipv6;
+	const struct icmp6hdr *icmpv6;
+
+	switch (eth->h_proto) {
+	case cpu_to_be16(ETH_P_ARP):
+		/* can't say - but will probably be dropped later anyway */
+		if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*arp)))
+			return false;
+
+		arp = (void *)(eth + 1);
+
+		if ((arp->hdr.ar_op == cpu_to_be16(ARPOP_REPLY) ||
+		     arp->hdr.ar_op == cpu_to_be16(ARPOP_REQUEST)) &&
+		    !memcmp(arp->ar_sip, arp->ar_tip, sizeof(arp->ar_sip)))
+			return true;
+		break;
+	case cpu_to_be16(ETH_P_IPV6):
+		/* can't say - but will probably be dropped later anyway */
+		if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*ipv6) +
+					sizeof(*icmpv6)))
+			return false;
+
+		ipv6 = (void *)(eth + 1);
+		icmpv6 = (void *)(ipv6 + 1);
+
+		if (icmpv6->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT &&
+		    !memcmp(&ipv6->saddr, &ipv6->daddr, sizeof(ipv6->saddr)))
+			return true;
+		break;
+	default:
+		/*
+		 * no need to support other protocols, proxy service isn't
+		 * specified for any others
+		 */
+		break;
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(cfg80211_is_gratuitous_arp_unsolicited_na);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index fd69866..5bf7e1bf 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1268,12 +1268,14 @@
 			err = security_xfrm_policy_lookup(pol->security,
 						      fl->flowi_secid,
 						      policy_to_flow_dir(dir));
-			if (!err && !xfrm_pol_hold_rcu(pol))
-				goto again;
-			else if (err == -ESRCH)
+			if (!err) {
+				if (!xfrm_pol_hold_rcu(pol))
+					goto again;
+			} else if (err == -ESRCH) {
 				pol = NULL;
-			else
+			} else {
 				pol = ERR_PTR(err);
+			}
 		} else
 			pol = NULL;
 	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 0889209..671a1d0 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2450,7 +2450,7 @@
 
 #ifdef CONFIG_COMPAT
 	if (in_compat_syscall())
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 #endif
 
 	type = nlh->nlmsg_type;
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 90f44bd..dadd516 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -113,7 +113,7 @@
 #define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
 #define PT_REGS_RC(x) ((x)->gprs[2])
 #define PT_REGS_SP(x) ((x)->gprs[15])
-#define PT_REGS_IP(x) ((x)->ip)
+#define PT_REGS_IP(x) ((x)->psw.addr)
 
 #elif defined(__aarch64__)
 
diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c
index 774a681..ceabf31 100644
--- a/samples/bpf/sampleip_kern.c
+++ b/samples/bpf/sampleip_kern.c
@@ -25,7 +25,7 @@
 	u64 ip;
 	u32 *value, init_val = 1;
 
-	ip = ctx->regs.ip;
+	ip = PT_REGS_IP(&ctx->regs);
 	value = bpf_map_lookup_elem(&ip_map, &ip);
 	if (value)
 		*value += 1;
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
index 71a8ed3..41b6115 100644
--- a/samples/bpf/trace_event_kern.c
+++ b/samples/bpf/trace_event_kern.c
@@ -50,7 +50,7 @@
 	key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS);
 	if ((int)key.kernstack < 0 && (int)key.userstack < 0) {
 		bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,
-				 ctx->regs.ip);
+				 PT_REGS_IP(&ctx->regs));
 		return 0;
 	}
 
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index ebced77..90a091b 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -35,6 +35,8 @@
 
 silentoldconfig: $(obj)/conf
 	$(Q)mkdir -p include/config include/generated
+	$(Q)test -e include/generated/autoksyms.h || \
+	    touch   include/generated/autoksyms.h
 	$< $(silent) --$@ $(Kconfig)
 
 localyesconfig localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 0190cb6..3fe4468 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -304,7 +304,7 @@
 	spinlock_t lock;
 
 	struct dbri_dma *dma;	/* Pointer to our DMA block */
-	u32 dma_dvma;		/* DBRI visible DMA address */
+	dma_addr_t dma_dvma;	/* DBRI visible DMA address */
 
 	void __iomem *regs;	/* dbri HW regs */
 	int dbri_irqp;		/* intr queue pointer */
@@ -657,12 +657,14 @@
  */
 static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len)
 {
+	u32 dvma_addr = (u32)dbri->dma_dvma;
+
 	/* Space for 2 WAIT cmds (replaced later by 1 JUMP cmd) */
 	len += 2;
 	spin_lock(&dbri->cmdlock);
 	if (dbri->cmdptr - dbri->dma->cmd + len < DBRI_NO_CMDS - 2)
 		return dbri->cmdptr + 2;
-	else if (len < sbus_readl(dbri->regs + REG8) - dbri->dma_dvma)
+	else if (len < sbus_readl(dbri->regs + REG8) - dvma_addr)
 		return dbri->dma->cmd;
 	else
 		printk(KERN_ERR "DBRI: no space for commands.");
@@ -680,6 +682,7 @@
  */
 static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len)
 {
+	u32 dvma_addr = (u32)dbri->dma_dvma;
 	s32 tmp, addr;
 	static int wait_id = 0;
 
@@ -689,7 +692,7 @@
 	*(cmd+1) = DBRI_CMD(D_WAIT, 1, wait_id);
 
 	/* Replace the last command with JUMP */
-	addr = dbri->dma_dvma + (cmd - len - dbri->dma->cmd) * sizeof(s32);
+	addr = dvma_addr + (cmd - len - dbri->dma->cmd) * sizeof(s32);
 	*(dbri->cmdptr+1) = addr;
 	*(dbri->cmdptr) = DBRI_CMD(D_JUMP, 0, 0);
 
@@ -747,6 +750,7 @@
 /* Lock must not be held before calling this */
 static void dbri_initialize(struct snd_dbri *dbri)
 {
+	u32 dvma_addr = (u32)dbri->dma_dvma;
 	s32 *cmd;
 	u32 dma_addr;
 	unsigned long flags;
@@ -764,7 +768,7 @@
 	/*
 	 * Initialize the interrupt ring buffer.
 	 */
-	dma_addr = dbri->dma_dvma + dbri_dma_off(intr, 0);
+	dma_addr = dvma_addr + dbri_dma_off(intr, 0);
 	dbri->dma->intr[0] = dma_addr;
 	dbri->dbri_irqp = 1;
 	/*
@@ -778,7 +782,7 @@
 	dbri->cmdptr = cmd;
 	*(cmd++) = DBRI_CMD(D_WAIT, 1, 0);
 	*(cmd++) = DBRI_CMD(D_WAIT, 1, 0);
-	dma_addr = dbri->dma_dvma + dbri_dma_off(cmd, 0);
+	dma_addr = dvma_addr + dbri_dma_off(cmd, 0);
 	sbus_writel(dma_addr, dbri->regs + REG8);
 	spin_unlock(&dbri->cmdlock);
 
@@ -1077,6 +1081,7 @@
 static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period)
 {
 	struct dbri_streaminfo *info = &dbri->stream_info[streamno];
+	u32 dvma_addr = (u32)dbri->dma_dvma;
 	__u32 dvma_buffer;
 	int desc;
 	int len;
@@ -1177,7 +1182,7 @@
 		else {
 			dbri->next_desc[last_desc] = desc;
 			dbri->dma->desc[last_desc].nda =
-			    dbri->dma_dvma + dbri_dma_off(desc, desc);
+			    dvma_addr + dbri_dma_off(desc, desc);
 		}
 
 		last_desc = desc;
@@ -1192,7 +1197,7 @@
 	}
 
 	dbri->dma->desc[last_desc].nda =
-	    dbri->dma_dvma + dbri_dma_off(desc, first_desc);
+	    dvma_addr + dbri_dma_off(desc, first_desc);
 	dbri->next_desc[last_desc] = first_desc;
 	dbri->pipes[info->pipe].first_desc = first_desc;
 	dbri->pipes[info->pipe].desc = first_desc;
@@ -1697,6 +1702,7 @@
 static void xmit_descs(struct snd_dbri *dbri)
 {
 	struct dbri_streaminfo *info;
+	u32 dvma_addr = (u32)dbri->dma_dvma;
 	s32 *cmd;
 	unsigned long flags;
 	int first_td;
@@ -1718,7 +1724,7 @@
 			*(cmd++) = DBRI_CMD(D_SDP, 0,
 					    dbri->pipes[info->pipe].sdp
 					    | D_SDP_P | D_SDP_EVERY | D_SDP_C);
-			*(cmd++) = dbri->dma_dvma +
+			*(cmd++) = dvma_addr +
 				   dbri_dma_off(desc, first_td);
 			dbri_cmdsend(dbri, cmd, 2);
 
@@ -1740,7 +1746,7 @@
 			*(cmd++) = DBRI_CMD(D_SDP, 0,
 					    dbri->pipes[info->pipe].sdp
 					    | D_SDP_P | D_SDP_EVERY | D_SDP_C);
-			*(cmd++) = dbri->dma_dvma +
+			*(cmd++) = dvma_addr +
 				   dbri_dma_off(desc, first_td);
 			dbri_cmdsend(dbri, cmd, 2);
 
@@ -2539,7 +2545,7 @@
 	if (!dbri->dma)
 		return -ENOMEM;
 
-	dprintk(D_GEN, "DMA Cmd Block 0x%p (0x%08x)\n",
+	dprintk(D_GEN, "DMA Cmd Block 0x%p (%pad)\n",
 		dbri->dma, dbri->dma_dvma);
 
 	/* Map the registers into memory. */
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 0a063af..9bab867 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -50,8 +50,10 @@
 
 			WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
 
-			kvm_notify_acked_irq(vcpu->kvm, 0,
-					     intid - VGIC_NR_PRIVATE_IRQS);
+			/* Only SPIs require notification */
+			if (vgic_valid_spi(vcpu->kvm, intid))
+				kvm_notify_acked_irq(vcpu->kvm, 0,
+						     intid - VGIC_NR_PRIVATE_IRQS);
 		}
 	}
 
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 9f0dae3..5c9f974 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -41,8 +41,10 @@
 
 			WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
 
-			kvm_notify_acked_irq(vcpu->kvm, 0,
-					     intid - VGIC_NR_PRIVATE_IRQS);
+			/* Only SPIs require notification */
+			if (vgic_valid_spi(vcpu->kvm, intid))
+				kvm_notify_acked_irq(vcpu->kvm, 0,
+						     intid - VGIC_NR_PRIVATE_IRQS);
 		}
 
 		/*
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5c36034..7f9ee29 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2889,10 +2889,10 @@
 
 	ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
 	if (ret < 0) {
-		ops->destroy(dev);
 		mutex_lock(&kvm->lock);
 		list_del(&dev->vm_node);
 		mutex_unlock(&kvm->lock);
+		ops->destroy(dev);
 		return ret;
 	}