Merge "UPSTREAM: android: binder: fix type mismatch warning"
diff --git a/Documentation/arm/msm/rpm.txt b/Documentation/arm/msm/rpm.txt
new file mode 100644
index 0000000..d5be6a7
--- /dev/null
+++ b/Documentation/arm/msm/rpm.txt
@@ -0,0 +1,157 @@
+Introduction
+============
+
+Resource Power Manager (RPM)
+
+RPM is a dedicated hardware engine for managing shared SoC resources,
+which includes buses, clocks, power rails, etc.  The goal of RPM is
+to achieve the maximum power savings while satisfying the SoC's
+operational and performance requirements.  RPM accepts resource
+requests from multiple RPM masters.  It arbitrates and aggregates the
+requests, and configures the shared resources.  The RPM masters are
+the application processor, the modem processor, as well as some
+hardware accelerators.
+
+The RPM driver provides an API for interacting with RPM.  Kernel code
+calls the RPM driver to request RPM-managed, shared resources.
+Kernel code can also register with the driver for RPM notifications,
+which are sent when the status of shared resources changes.
+
+Hardware description
+====================
+
+RPM exposes a separate region of registers to each of the RPM masters.
+In general, each register represents some shared resource(s).  At a
+very basic level, a master requests resources by writing to the
+registers, then generating an interrupt to RPM.  RPM processes the
+request, writes acknowledgment to the registers, then generates an
+interrupt to the master.
+
+In addition to the master-specific regions, RPM also exposes a shared
+region that contains the current status of the shared resources.  Only
+RPM can write to the status region, but every master can read from it.
+
+RPM contains internal logic that aggregates and arbitrates among
+requests from the various RPM masters.  It interfaces with the PMIC,
+the bus arbitration block, and the clock controller block in order to
+configure the shared resources.
+
+Software description
+====================
+
+The RPM driver encapsulates the low level RPM interactions, which
+rely on reading/writing registers and generating/processing
+interrupts, and provides a higher level synchronous set/clear/get
+interface.  Most functions take an array of id-value pairs.
+The ids identify the RPM registers which would correspond to some
+RPM resources, the values specify the new resource values.
+
+The RPM driver synchronizes accesses to RPM.  It protects against
+simultaneous accesses from multiple tasks, on SMP cores, in task
+contexts, and in atomic contexts.
+
+Design
+======
+
+Design goals:
+- Encapsulate low level RPM interactions.
+- Provide a synchronous set/clear/get interface.
+- Synchronize simultaneous software accesses to RPM.
+
+Power Management
+================
+
+RPM is part of the power management architecture for MSM 8660.  RPM
+manages shared system resources to lower system power.
+
+SMP/multi-core
+==============
+
+The RPM driver uses a mutex to synchronize client accesses among tasks.
+It uses spinlocks to synchronize accesses from atomic contexts and
+SMP cores.
+
+Security
+========
+
+None.
+
+Performance
+===========
+
+None.
+
+Interface
+=========
+
+msm_rpm_get_status():
+The function reads the shared status region and returns the current
+resource values, which are the arbitrated/aggregated results across
+all RPM masters.
+
+msm_rpm_set():
+The function makes a resource request to RPM.
+
+msm_rpm_set_noirq():
+The function is similar to msm_rpm_set() except that it must be
+called with interrupts masked.  If possible, use msm_rpm_set()
+instead, to maximize CPU throughput.
+
+msm_rpm_clear():
+The function makes a resource request to RPM to clear resource values.
+Once the values are cleared, the resources revert back to their default
+values for this RPM master.  RPM internally uses the default values as
+the requests from this RPM master when arbitrating and aggregating with
+requests from other RPM masters.
+
+msm_rpm_clear_noirq():
+The function is similar to msm_rpm_clear() except that it must be
+called with interrupts masked.  If possible, use msm_rpm_clear()
+instead, to maximize CPU throughput.
+
+msm_rpm_register_notification():
+The function registers for RPM notification.  When the specified
+resources change their status on RPM, RPM sends out notifications
+and the driver will "up" the semaphore in struct
+msm_rpm_notification.
+
+msm_rpm_unregister_notification():
+The function unregisters a notification.
+
+msm_rpm_init():
+The function initializes the RPM driver with platform specific data.
+
+Driver parameters
+=================
+
+None.
+
+Config options
+==============
+
+MSM_RPM
+
+Dependencies
+============
+
+None.
+
+User space utilities
+====================
+
+None.
+
+Other
+=====
+
+None.
+
+Known issues
+============
+
+None.
+
+To do
+=====
+
+None.
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
new file mode 100644
index 0000000..4cba3ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
@@ -0,0 +1,41 @@
+Resource Power Manager(RPM)
+
+RPM is a dedicated hardware engine for managing shared SoC resources,
+which includes buses, clocks, power rails, etc.  The goal of RPM is
+to achieve the maximum power savings while satisfying the SoC's
+operational and performance requirements.  RPM accepts resource
+requests from multiple RPM masters.  It arbitrates and aggregates the
+requests, and configures the shared resources.  The RPM masters are
+the application processor, the modem processor, as well as hardware
+accelerators. The RPM driver communicates with the hardware engine using
+SMD.
+
+The devicetree representation of the RPM block should be:
+
+Required properties
+
+- compatible: "qcom,rpm-smd" or "qcom,rpm-glink"
+- rpm-channel-name: The string corresponding to the channel name of the
+			peripheral subsystem. Required for both smd and
+			glink transports.
+- rpm-channel-type: The internal SMD edge for this subsystem found in
+			<soc/qcom/smd.h>
+- qcom,glink-edge: Logical name of the remote subsystem. This is a required
+			property when the rpm-smd driver uses glink as transport.
+
+Optional properties
+- rpm-standalone: Allow RPM driver to run in standalone mode irrespective of RPM
+			channel presence.
+- reg: Contains the memory address at which rpm messaging format version is
+  stored. If this field is not present, the target only supports v0 format.
+
+Example:
+
+	qcom,rpm-smd@68150 {
+		compatible = "qcom,rpm-smd", "qcom,rpm-glink";
+		reg = <0x68150 0x3200>;
+		qcom,rpm-channel-name = "rpm_requests";
+		qcom,rpm-channel-type = 15; /* SMD_APPS_RPM */
+		qcom,glink-edge = "rpm";
+	};
+
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 4bb75aa..881f9ca 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -65,15 +65,23 @@
 - qcom,use-pdc-interrupts: It present, it configures provided PDC IRQ with required
   configuration for wakeup functionality.
 - extcon: phandles to external connector devices. First phandle should point to
-	  external connector, which provide "USB" cable events, the second
-	  should point to external connector device, which provide "USB-HOST"
-	  cable events. A single phandle may be specified if a single connector
-	  device provides both "USB" and "USB-HOST" events.
+	  external connector, which provides type-C based "USB" cable events, the
+	  second should point to external connector device, which provides type-C
+	  "USB-HOST" cable events. A single phandle may be specified if a single
+	  connector device provides both "USB" and "USB-HOST" events. An optional
+	  third phandle may be specified for EUD based attach/detach events. A
+	  mandatory fourth phandle has to be specified to provide microUSB based
+	  "USB" cable events. An optional fifth phandle may be specified to provide
+	  microUSB based "USB-HOST" cable events. Only the fourth phandle may be
+	  specified if a single connector device provides both "USB" and "USB-HOST"
+	  events.
 - qcom,num-gsi-evt-buffs: If present, specifies number of GSI based hardware accelerated
   event buffers. 1 event buffer is needed per h/w accelerated endpoint.
 - qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
 	which is used as a vote by driver to get max performance in perf mode.
 - qcom,smmu-s1-bypass: If present, configure SMMU to bypass stage 1 translation.
+- qcom,no-vbus-vote-with-type-C: If present, then do not try to get and enable VBUS
+	regulator in type-C host mode from dwc3-msm driver.
 
 Sub nodes:
 - Sub node for "DWC3- USB3 controller".
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 4fb23c9..3d2dc66 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -28,17 +28,29 @@
 		#size-cells = <1>;
 		ranges;
 
-		peripheral2_mem: peripheral2_region@8fd00000 {
+		peripheral2_mem: peripheral2_region@8fe00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x8fd00000 0x300000>;
+			reg = <0x8fe00000 0x200000>;
 			label = "peripheral2_mem";
 		};
 
-		mss_mem: mss_region@87800000 {
+		sbl_region: sbl_region@8fd00000 {
+			no-map;
+			reg = <0x8fd00000 0x100000>;
+			label = "sbl_mem";
+		};
+
+		hyp_region: hyp_region@8fc00000 {
+			no-map;
+			reg = <0x8fc00000 0x80000>;
+			label = "hyp_mem";
+		};
+
+		mss_mem: mss_region@87400000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0x87800000 0x8000000>;
+			reg = <0x87400000 0x8300000>;
 			label = "mss_mem";
 		};
 
@@ -251,10 +263,10 @@
 		status = "disabled";
 	};
 
-	qcom,msm-imem@8600000 {
+	qcom,msm-imem@1468B000 {
 		compatible = "qcom,msm-imem";
-		reg = <0x8600000 0x1000>; /* Address and size of IMEM */
-		ranges = <0x0 0x8600000 0x1000>;
+		reg = <0x1468B000 0x1000>; /* Address and size of IMEM */
+		ranges = <0x0 0x1468B000 0x1000>;
 		#address-cells = <1>;
 		#size-cells = <1>;
 
@@ -272,7 +284,17 @@
 			compatible = "qcom,msm-imem-boot_stats";
 			reg = <0x6b0 32>;
 		};
-	};
+
+		pil@94c {
+			compatible = "qcom,msm-imem-pil";
+			reg = <0x94c 200>;
+		};
+
+		diag_dload@c8 {
+			compatible = "qcom,msm-imem-diag-dload";
+			reg = <0xc8 200>;
+		};
+	};
 
 	restart@4ab000 {
 		compatible = "qcom,pshold";
diff --git a/arch/arm/configs/msm8953-perf_defconfig b/arch/arm/configs/msm8953-perf_defconfig
new file mode 100644
index 0000000..067878b
--- /dev/null
+++ b/arch/arm/configs/msm8953-perf_defconfig
@@ -0,0 +1,473 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_SDM450=y
+# CONFIG_VDSO is not set
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_QSEECOM=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_MSM8953=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_CPR4_APSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_MSM_GFX_LDO=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_FB=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MAILBOX=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_ICNSS=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_CTR=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_NEON=y
+CONFIG_CRYPTO_SHA2_ARM_CE=y
+CONFIG_CRYPTO_AES_ARM_BS=y
+CONFIG_CRYPTO_AES_ARM_CE=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/msm8953_defconfig b/arch/arm/configs/msm8953_defconfig
new file mode 100644
index 0000000..46f3e0c
--- /dev/null
+++ b/arch/arm/configs/msm8953_defconfig
@@ -0,0 +1,533 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+# CONFIG_AIO is not set
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_MMAP_RND_BITS=16
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8953=y
+CONFIG_ARCH_SDM450=y
+# CONFIG_VDSO is not set
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_HDCP_QSEECOM=y
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_REQ_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_MSM8953=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_SMB135X_CHARGER=y
+CONFIG_SMB1351_USB_CHARGER=y
+CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_QPNP=y
+CONFIG_THERMAL_QPNP_ADC_TM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_MSM_BCL_PERIPHERAL_CTL=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_CPR4_APSS=y
+CONFIG_REGULATOR_MEM_ACC=y
+CONFIG_REGULATOR_MSM_GFX_LDO=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP=y
+CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_COINCELL=y
+CONFIG_QPNP_REVID=y
+CONFIG_USB_BAM=y
+CONFIG_MSM_EXT_DISPLAY=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MAILBOX=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
+CONFIG_TRACER_PKT=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_ICNSS=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_PWM=y
+CONFIG_PWM_QPNP=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_FRAME_WARN=2048
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CPU_FREQ_SWITCH_PROFILER=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_DEBUG_USER=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_DEBUG_SET_MODULE_RONX=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_LSM_MMAP_MIN_ADDR=4096
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_CTR=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_NEON=y
+CONFIG_CRYPTO_SHA2_ARM_CE=y
+CONFIG_CRYPTO_AES_ARM_BS=y
+CONFIG_CRYPTO_AES_ARM_CE=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index d922449..456ff5e 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -134,6 +134,9 @@
 CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_PRIO=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_BT=y
 CONFIG_BT_RFCOMM=y
 CONFIG_BT_RFCOMM_TTY=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 968284d..f2b20b0 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -136,6 +136,9 @@
 CONFIG_BRIDGE=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_PRIO=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_FC=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
 CONFIG_CFG80211=y
 CONFIG_CFG80211_DEBUGFS=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
diff --git a/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi b/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi
new file mode 100644
index 0000000..0cbb0f2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8953-pm.dtsi
@@ -0,0 +1,267 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/pm.h>
+
+&soc {
+	qcom,spm@b1d2000 {
+		compatible = "qcom,spm-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb1d2000 0x1000>;
+		reg-names = "saw-base";
+		qcom,name = "system-cci";
+		qcom,saw2-ver-reg = <0xfd0>;
+		qcom,saw2-cfg = <0x14>;
+		qcom,saw2-spm-dly = <0x3C102800>;
+		qcom,saw2-spm-ctl = <0xe>;
+		qcom,saw2-avs-ctl = <0x10>;
+		qcom,cpu-vctl-list = <&CPU0 &CPU1 &CPU2 &CPU3
+					&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,vctl-timeout-us = <500>;
+		qcom,vctl-port = <0x0>;
+		qcom,phase-port = <0x1>;
+		qcom,pfm-port = <0x2>;
+	};
+
+	qcom,lpm-levels {
+		compatible = "qcom,lpm-levels";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,pm-cluster@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			label = "system";
+			qcom,psci-mode-shift = <8>;
+			qcom,psci-mode-mask = <0xf>;
+
+			qcom,pm-cluster-level@0 {
+				reg = <0>;
+				label = "system-active";
+				qcom,psci-mode = <0>;
+				qcom,latency-us = <423>;
+				qcom,ss-power = <349>;
+				qcom,energy-overhead = <299110>;
+				qcom,time-overhead = <719>;
+			};
+
+			qcom,pm-cluster-level@1 {
+				reg = <1>;
+				label = "system-wfi";
+				qcom,psci-mode = <1>;
+				qcom,latency-us = <480>;
+				qcom,ss-power = <348>;
+				qcom,energy-overhead = <313989>;
+				qcom,time-overhead = <753>;
+				qcom,min-child-idx = <3>;
+			};
+
+			qcom,pm-cluster-level@2 {
+				reg = <2>;
+				label = "system-pc";
+				qcom,psci-mode = <3>;
+				qcom,latency-us = <11027>;
+				qcom,ss-power = <340>;
+				qcom,energy-overhead = <616035>;
+				qcom,time-overhead = <1495>;
+				qcom,min-child-idx = <3>;
+				qcom,notify-rpm;
+				qcom,is-reset;
+				qcom,reset-level = <LPM_RESET_LVL_PC>;
+			};
+
+			qcom,pm-cluster@0{
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				label = "pwr";
+				qcom,psci-mode-shift = <4>;
+				qcom,psci-mode-mask = <0xf>;
+
+				qcom,pm-cluster-level@0{
+					reg = <0>;
+					label = "pwr-l2-wfi";
+					qcom,psci-mode = <1>;
+					qcom,latency-us = <180>;
+					qcom,ss-power = <361>;
+					qcom,energy-overhead = <113215>;
+					qcom,time-overhead = <270>;
+				};
+
+				qcom,pm-cluster-level@1{
+					reg = <1>;
+					label = "pwr-l2-retention";
+					qcom,psci-mode = <3>;
+					qcom,latency-us = <188>;
+					qcom,ss-power = <360>;
+					qcom,energy-overhead = <121271>;
+					qcom,time-overhead = <307>;
+					qcom,min-child-idx = <1>;
+					qcom,reset-level =
+						<LPM_RESET_LVL_RET>;
+				};
+
+				qcom,pm-cluster-level@2{
+					reg = <2>;
+					label = "pwr-l2-gdhs";
+					qcom,psci-mode = <4>;
+					qcom,latency-us = <212>;
+					qcom,ss-power = <354>;
+					qcom,energy-overhead = <150748>;
+					qcom,time-overhead = <363>;
+					qcom,min-child-idx = <1>;
+					qcom,reset-level =
+						<LPM_RESET_LVL_GDHS>;
+				};
+
+				qcom,pm-cluster-level@3{
+					reg = <3>;
+					label = "pwr-l2-pc";
+					qcom,psci-mode = <5>;
+					qcom,latency-us = <421>;
+					qcom,ss-power = <350>;
+					qcom,energy-overhead = <277479>;
+					qcom,time-overhead = <690>;
+					qcom,min-child-idx = <1>;
+					qcom,is-reset;
+					qcom,reset-level =
+						<LPM_RESET_LVL_PC>;
+				};
+
+				qcom,pm-cpu {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					qcom,psci-mode-shift = <0>;
+					qcom,psci-mode-mask = <0xf>;
+					qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
+
+					qcom,pm-cpu-level@0 {
+						reg = <0>;
+						label = "wfi";
+						qcom,psci-cpu-mode = <1>;
+						qcom,latency-us = <1>;
+						qcom,ss-power = <395>;
+						qcom,energy-overhead = <30424>;
+						qcom,time-overhead = <61>;
+					};
+
+					qcom,pm-cpu-level@1 {
+						reg = <1>;
+						label = "pc";
+						qcom,psci-cpu-mode = <3>;
+						qcom,latency-us = <180>;
+						qcom,ss-power = <361>;
+						qcom,energy-overhead = <113215>;
+						qcom,time-overhead = <270>;
+						qcom,use-broadcast-timer;
+						qcom,is-reset;
+						qcom,reset-level =
+							<LPM_RESET_LVL_PC>;
+					};
+				};
+			};
+
+			qcom,pm-cluster@1{
+				reg = <1>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				label = "perf";
+				qcom,psci-mode-shift = <4>;
+				qcom,psci-mode-mask = <0xf>;
+
+				qcom,pm-cluster-level@0{
+					reg = <0>;
+					label = "perf-l2-wfi";
+					qcom,psci-mode = <1>;
+					qcom,latency-us = <179>;
+					qcom,ss-power = <362>;
+					qcom,energy-overhead = <113242>;
+					qcom,time-overhead = <268>;
+				};
+
+				qcom,pm-cluster-level@1{
+					reg = <1>;
+					label = "perf-l2-retention";
+					qcom,psci-mode = <3>;
+					qcom,latency-us = <186>;
+					qcom,ss-power = <361>;
+					qcom,energy-overhead = <125982>;
+					qcom,time-overhead = <306>;
+					qcom,min-child-idx = <1>;
+					qcom,reset-level =
+						<LPM_RESET_LVL_RET>;
+				};
+
+				qcom,pm-cluster-level@2{
+					reg = <2>;
+					label = "perf-l2-gdhs";
+					qcom,psci-mode = <4>;
+					qcom,latency-us = <210>;
+					qcom,ss-power = <355>;
+					qcom,energy-overhead = <155728>;
+					qcom,time-overhead = <361>;
+					qcom,min-child-idx = <1>;
+					qcom,reset-level =
+						<LPM_RESET_LVL_GDHS>;
+				};
+
+				qcom,pm-cluster-level@3{
+					reg = <3>;
+					label = "perf-l2-pc";
+					qcom,psci-mode = <5>;
+					qcom,latency-us = <423>;
+					qcom,ss-power = <349>;
+					qcom,energy-overhead = <299110>;
+					qcom,time-overhead = <719>;
+					qcom,min-child-idx = <1>;
+					qcom,is-reset;
+					qcom,reset-level =
+						<LPM_RESET_LVL_PC>;
+				};
+
+				qcom,pm-cpu {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					qcom,psci-mode-shift = <0>;
+					qcom,psci-mode-mask = <0xf>;
+					qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
+
+					qcom,pm-cpu-level@0 {
+						reg = <0>;
+						label = "wfi";
+						qcom,psci-cpu-mode = <1>;
+						qcom,latency-us = <1>;
+						qcom,ss-power = <397>;
+						qcom,energy-overhead = <30424>;
+						qcom,time-overhead = <61>;
+					};
+
+					qcom,pm-cpu-level@1 {
+						reg = <1>;
+						label = "pc";
+						qcom,psci-cpu-mode = <3>;
+						qcom,latency-us = <179>;
+						qcom,ss-power = <362>;
+						qcom,energy-overhead = <113242>;
+						qcom,time-overhead = <268>;
+						qcom,use-broadcast-timer;
+						qcom,is-reset;
+						qcom,reset-level =
+							<LPM_RESET_LVL_PC>;
+					};
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index 8b1c607..12b39a9 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -124,6 +124,7 @@
 
 #include "msm8953-pinctrl.dtsi"
 #include "msm8953-cpu.dtsi"
+#include "msm8953-pm.dtsi"
 
 
 &soc {
@@ -559,6 +560,12 @@
 		};
 	};
 
+	rpm_bus: qcom,rpm-smd {
+		compatible = "qcom,rpm-smd";
+		rpm-channel-name = "rpm_requests";
+		rpm-channel-type = <15>; /* SMD_APPS_RPM */
+	};
+
 	qcom,wdt@b017000 {
 		compatible = "qcom,msm-watchdog";
 		reg = <0xb017000 0x1000>;
diff --git a/arch/arm64/boot/dts/qcom/pmi8950.dtsi b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
index 0ec1f0b..4223cfe 100644
--- a/arch/arm64/boot/dts/qcom/pmi8950.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8950.dtsi
@@ -434,6 +434,7 @@
 			#address-cells = <1>;
 			#size-cells = <1>;
 			qcom,pmic-revid = <&pmi8950_revid>;
+			qcom,qpnp-labibb-mode = "lcd";
 
 			ibb_regulator: qcom,ibb@dc00 {
 				reg = <0xdc00 0x100>;
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 3b0adcd..5b48c14 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -688,6 +688,14 @@
 				qcom,led-mask = <4>;
 				qcom,default-led-trigger = "switch1_trigger";
 			};
+
+			pmi8998_switch2: qcom,led_switch_2 {
+				label = "switch";
+				qcom,led-name = "led:switch_2";
+				qcom,led-mask = <4>;
+				qcom,default-led-trigger = "switch2_trigger";
+			};
+
 		};
 
 		pmi8998_haptics: qcom,haptics@c000 {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
index efd6f4a..6670edd 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-360camera.dtsi
@@ -234,3 +234,52 @@
 		};
 	};
 };
+&pm660_vadc{
+
+	chan@4e {
+		label = "emmc_therm";
+		reg = <0x4e>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+
+	chan@4f {
+		label = "pa_therm0";
+		reg = <0x4f>;
+		qcom,decimation = <2>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,fast-avg-setup = <0>;
+	};
+};
+
+&pm660_adc_tm{
+
+	chan@4e {
+		label = "emmc_therm";
+		reg = <0x4e>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x80>;
+		qcom,thermal-node;
+	};
+
+	chan@4f {
+		label = "pa_therm0";
+		reg = <0x4f>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <2>;
+		qcom,hw-settle-time = <2>;
+		qcom,btm-channel-number = <0x88>;
+		qcom,thermal-node;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
index 7955242..65be275 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
@@ -32,3 +32,47 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
+
+&cam_cci {
+	/delete-node/ qcom,cam-sensor@1;
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-mtp.dts b/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
index dc3c7ce..b0ca9a3 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
@@ -26,3 +26,47 @@
 		       <0x0001001b 0x0102001a 0x0 0x0>,
 		       <0x0001001b 0x0201011a 0x0 0x0>;
 };
+
+&cam_cci {
+	/delete-node/ qcom,cam-sensor@1;
+	qcom,cam-sensor@1 {
+		cell-index = <1>;
+		compatible = "qcom,cam-sensor";
+		reg = <0x1>;
+		csiphy-sd-index = <1>;
+		sensor-position-roll = <90>;
+		sensor-position-pitch = <0>;
+		sensor-position-yaw = <180>;
+		eeprom-src = <&eeprom_rear_aux>;
+		cam_vio-supply = <&camera_vio_ldo>;
+		cam_vana-supply = <&camera_vana_ldo>;
+		cam_vdig-supply = <&camera_ldo>;
+		cam_clk-supply = <&titan_top_gdsc>;
+		regulator-names = "cam_vdig", "cam_vio", "cam_vana",
+			"cam_clk";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1352000 1800000 2850000 0>;
+		rgltr-max-voltage = <1352000 1800000 2850000 0>;
+		rgltr-load-current = <105000 0 80000 0>;
+		gpio-no-mux = <0>;
+		pinctrl-names = "cam_default", "cam_suspend";
+		pinctrl-0 = <&cam_sensor_mclk0_active
+				&cam_sensor_rear2_active>;
+		pinctrl-1 = <&cam_sensor_mclk0_suspend
+				&cam_sensor_rear2_suspend>;
+		gpios = <&tlmm 13 0>,
+			<&tlmm 28 0>;
+		gpio-reset = <1>;
+		gpio-req-tbl-num = <0 1>;
+		gpio-req-tbl-flags = <1 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK0",
+					"CAM_RESET1";
+		sensor-mode = <0>;
+		cci-master = <1>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
index 813c198..6357886 100644
--- a/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sda845-v2-hdk-overlay.dts
@@ -54,3 +54,7 @@
 	status = "okay";
 	qcom,led-strings-list = [01 02];
 };
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb &sde_dp>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
index 34b8740..110e626 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-camera.dtsi
@@ -230,6 +230,33 @@
 		compatible = "qcom,msm-cam-smmu";
 		status = "ok";
 
+		msm_cam_smmu_lrme {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1038 0x0>,
+				<&apps_smmu 0x1058 0x0>,
+				<&apps_smmu 0x1039 0x0>,
+				<&apps_smmu 0x1059 0x0>;
+			label = "lrme";
+			lrme_iova_mem_map: iova-mem-map {
+				iova-mem-region-shared {
+					/* Shared region is 100MB long */
+					iova-region-name = "shared";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0x6400000>;
+					iova-region-id = <0x1>;
+					status = "ok";
+				};
+				/* IO region is approximately 3.3 GB */
+				iova-mem-region-io {
+					iova-region-name = "io";
+					iova-region-start = <0xd800000>;
+					iova-region-len = <0xd2800000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+
 		msm_cam_smmu_ife {
 			compatible = "qcom,msm-cam-smmu-cb";
 			iommus = <&apps_smmu 0x808 0x0>,
@@ -435,13 +462,14 @@
 			"csid0", "csid1", "csid2",
 			"ife0", "ife1", "ife2", "ipe0",
 			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
-			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0";
+			"icp0", "jpeg-dma0", "jpeg-enc0", "fd0", "lrmecpas0";
 		client-axi-port-names =
 			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
 			"cam_hf_1", "cam_hf_2", "cam_hf_2",
 			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
 			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
-			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1";
 		client-bus-camnoc-based;
 		qcom,axi-port-list {
 			qcom,axi-port1 {
@@ -530,7 +558,8 @@
 		cdm-client-names = "vfe",
 			"jpegdma",
 			"jpegenc",
-			"fd";
+			"fd",
+			"lrmecdm";
 		status = "ok";
 	};
 
@@ -1062,4 +1091,43 @@
 			<0 0 0 0 0 600000000 0 0>;
 		status = "ok";
 	};
+
+	qcom,cam-lrme {
+		compatible = "qcom,cam-lrme";
+		arch-compat = "lrme";
+		status = "ok";
+	};
+
+	cam_lrme: qcom,lrme@ac6b000 {
+		cell-index = <0>;
+		compatible = "qcom,lrme";
+		reg-names = "lrme";
+		reg = <0xac6b000 0xa00>;
+		reg-cam-base = <0x6b000>;
+		interrupt-names = "lrme";
+		interrupts = <0 476 0>;
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"lrme_clk_src",
+			"lrme_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_LRME_CLK_SRC>,
+			<&clock_camcc CAM_CC_LRME_CLK>;
+		clock-rates = <0 0 0 0 0 200000000 200000000>,
+			<0 0 0 0 0 269000000 269000000>,
+			<0 0 0 0 0 320000000 320000000>,
+			<0 0 0 0 0 400000000 400000000>;
+		clock-cntl-level = "lowsvs", "svs", "svs_l1", "turbo";
+		src-clock-name = "lrme_clk_src";
+		status = "ok";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
index 95e5585..9e75ee0 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-gpu.dtsi
@@ -117,8 +117,8 @@
 		cache-slices = <&llcc 12>, <&llcc 11>;
 
 		/* CPU latency parameter */
-		qcom,pm-qos-active-latency = <660>;
-		qcom,pm-qos-wakeup-latency = <460>;
+		qcom,pm-qos-active-latency = <914>;
+		qcom,pm-qos-wakeup-latency = <899>;
 
 		/* Enable context aware freq. scaling */
 		qcom,enable-ca-jump;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index ef1fc08..e9924e2 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -363,3 +363,12 @@
 &mdss_mdp {
 	#cooling-cells = <2>;
 };
+
+&thermal_zones {
+	xo-therm-cpu-step {
+		status = "disabled";
+	};
+	xo-therm-mdm-step {
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
index a459a9d..dd35a36 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi
@@ -28,56 +28,45 @@
 				reg = <0>;
 				label = "l3-wfi";
 				qcom,psci-mode = <0x1>;
-				qcom,latency-us = <51>;
-				qcom,ss-power = <452>;
-				qcom,energy-overhead = <69355>;
-				qcom,time-overhead = <99>;
+				qcom,latency-us = <600>;
+				qcom,ss-power = <420>;
+				qcom,energy-overhead = <4254140>;
+				qcom,time-overhead = <1260>;
 			};
 
-			qcom,pm-cluster-level@1 { /* D2 */
+			qcom,pm-cluster-level@1 { /* D4, D3 is not supported */
 				reg = <1>;
-				label = "l3-dyn-ret";
-				qcom,psci-mode = <0x2>;
-				qcom,latency-us = <659>;
-				qcom,ss-power = <434>;
-				qcom,energy-overhead = <465725>;
-				qcom,time-overhead = <976>;
-				qcom,min-child-idx = <1>;
-			};
-
-			qcom,pm-cluster-level@2 { /* D4, D3 is not supported */
-				reg = <2>;
 				label = "l3-pc";
 				qcom,psci-mode = <0x4>;
-				qcom,latency-us = <3201>;
-				qcom,ss-power = <408>;
-				qcom,energy-overhead = <2421840>;
-				qcom,time-overhead = <5376>;
-				qcom,min-child-idx = <2>;
+				qcom,latency-us = <3048>;
+				qcom,ss-power = <329>;
+				qcom,energy-overhead = <6189829>;
+				qcom,time-overhead = <5800>;
+				qcom,min-child-idx = <3>;
 				qcom,is-reset;
 			};
 
-			qcom,pm-cluster-level@3 { /* Cx off */
-				reg = <3>;
+			qcom,pm-cluster-level@2 { /* Cx off */
+				reg = <2>;
 				label = "cx-off";
 				qcom,psci-mode = <0x224>;
-				qcom,latency-us = <5562>;
-				qcom,ss-power = <308>;
-				qcom,energy-overhead = <2521840>;
-				qcom,time-overhead = <6376>;
+				qcom,latency-us = <4562>;
+				qcom,ss-power = <290>;
+				qcom,energy-overhead = <6989829>;
+				qcom,time-overhead = <8200>;
 				qcom,min-child-idx = <3>;
 				qcom,is-reset;
 				qcom,notify-rpm;
 			};
 
-			qcom,pm-cluster-level@4 { /* AOSS sleep */
-				reg = <4>;
+			qcom,pm-cluster-level@3 { /* AOSS sleep */
+				reg = <3>;
 				label = "llcc-off";
 				qcom,psci-mode = <0xC24>;
 				qcom,latency-us = <6562>;
-				qcom,ss-power = <108>;
-				qcom,energy-overhead = <2621840>;
-				qcom,time-overhead = <7376>;
+				qcom,ss-power = <165>;
+				qcom,energy-overhead = <7000029>;
+				qcom,time-overhead = <9825>;
 				qcom,min-child-idx = <3>;
 				qcom,is-reset;
 				qcom,notify-rpm;
@@ -88,6 +77,7 @@
 				#size-cells = <0>;
 				qcom,psci-mode-shift = <0>;
 				qcom,psci-mode-mask = <0xf>;
+				qcom,use-prediction;
 				qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4
 									&CPU5>;
 
@@ -95,30 +85,30 @@
 					reg = <0>;
 					label = "wfi";
 					qcom,psci-cpu-mode = <0x1>;
-					qcom,latency-us = <43>;
-					qcom,ss-power = <454>;
-					qcom,energy-overhead = <38639>;
-					qcom,time-overhead = <83>;
+					qcom,latency-us = <60>;
+					qcom,ss-power = <383>;
+					qcom,energy-overhead = <64140>;
+					qcom,time-overhead = <121>;
 				};
 
 				qcom,pm-cpu-level@1 { /* C2D */
 					reg = <1>;
 					label = "ret";
 					qcom,psci-cpu-mode = <0x2>;
-					qcom,latency-us = <119>;
-					qcom,ss-power = <449>;
-					qcom,energy-overhead = <78456>;
-					qcom,time-overhead = <167>;
+					qcom,latency-us = <282>;
+					qcom,ss-power = <370>;
+					qcom,energy-overhead = <238600>;
+					qcom,time-overhead = <559>;
 				};
 
 				qcom,pm-cpu-level@2 {  /* C3 */
 					reg = <2>;
 					label = "pc";
 					qcom,psci-cpu-mode = <0x3>;
-					qcom,latency-us = <461>;
-					qcom,ss-power = <436>;
-					qcom,energy-overhead = <418225>;
-					qcom,time-overhead = <885>;
+					qcom,latency-us = <901>;
+					qcom,ss-power = <364>;
+					qcom,energy-overhead = <579285>;
+					qcom,time-overhead = <1450>;
 					qcom,is-reset;
 					qcom,use-broadcast-timer;
 				};
@@ -127,10 +117,10 @@
 					reg = <3>;
 					label = "rail-pc";
 					qcom,psci-cpu-mode = <0x4>;
-					qcom,latency-us = <531>;
-					qcom,ss-power = <400>;
-					qcom,energy-overhead = <428225>;
-					qcom,time-overhead = <1000>;
+					qcom,latency-us = <915>;
+					qcom,ss-power = <353>;
+					qcom,energy-overhead = <666292>;
+					qcom,time-overhead = <1617>;
 					qcom,is-reset;
 					qcom,use-broadcast-timer;
 				};
@@ -141,36 +131,37 @@
 				#size-cells = <0>;
 				qcom,psci-mode-shift = <0>;
 				qcom,psci-mode-mask = <0xf>;
+				qcom,use-prediction;
 				qcom,cpu = <&CPU6 &CPU7>;
 
 				qcom,pm-cpu-level@0 { /* C1 */
 					reg = <0>;
 					label = "wfi";
 					qcom,psci-cpu-mode = <0x1>;
-					qcom,latency-us = <43>;
-					qcom,ss-power = <454>;
-					qcom,energy-overhead = <38639>;
-					qcom,time-overhead = <83>;
+					qcom,latency-us = <66>;
+					qcom,ss-power = <427>;
+					qcom,energy-overhead = <68410>;
+					qcom,time-overhead = <121>;
 				};
 
 				qcom,pm-cpu-level@1 { /* C2D */
 					reg = <1>;
 					label = "ret";
 					qcom,psci-cpu-mode = <0x2>;
-					qcom,latency-us = <116>;
-					qcom,ss-power = <449>;
-					qcom,energy-overhead = <78456>;
-					qcom,time-overhead = <167>;
+					qcom,latency-us = <282>;
+					qcom,ss-power = <388>;
+					qcom,energy-overhead = <281755>;
+					qcom,time-overhead = <553>;
 				};
 
 				qcom,pm-cpu-level@2 {  /* C3 */
 					reg = <2>;
 					label = "pc";
 					qcom,psci-cpu-mode = <0x3>;
-					qcom,latency-us = <621>;
-					qcom,ss-power = <436>;
-					qcom,energy-overhead = <418225>;
-					qcom,time-overhead = <885>;
+					qcom,latency-us = <1244>;
+					qcom,ss-power = <373>;
+					qcom,energy-overhead = <795006>;
+					qcom,time-overhead = <1767>;
 					qcom,is-reset;
 					qcom,use-broadcast-timer;
 				};
@@ -179,10 +170,10 @@
 					reg = <3>;
 					label = "rail-pc";
 					qcom,psci-cpu-mode = <0x4>;
-					qcom,latency-us = <1061>;
-					qcom,ss-power = <400>;
-					qcom,energy-overhead = <428225>;
-					qcom,time-overhead = <1000>;
+					qcom,latency-us = <1854>;
+					qcom,ss-power = <359>;
+					qcom,energy-overhead = <1068095>;
+					qcom,time-overhead = <2380>;
 					qcom,is-reset;
 					qcom,use-broadcast-timer;
 				};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
index 220487a..0a7e25d 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
@@ -380,5 +380,8 @@
 };
 
 &usb0 {
-	extcon = <&pm660_pdphy>, <&pm660_pdphy>, <&eud>;
+	extcon = <&pm660_pdphy>, <&pm660_pdphy>, <&eud>,
+					<&pm660_charger>, <&pm660_charger>;
+	vbus_dwc3-supply = <&smb2_vbus>;
+	qcom,no-vbus-vote-with-type-C;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
index 0e87314..f3c6b00 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-qrd.dtsi
@@ -178,6 +178,7 @@
 
 &int_codec {
 	qcom,model = "sdm670-skuw-snd-card";
+	qcom,msm-micbias1-ext-cap;
 	qcom,audio-routing =
 		"RX_BIAS", "INT_MCLK0",
 		"SPK_RX_BIAS", "INT_MCLK0",
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index e8a62be..de125e2 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -751,6 +751,11 @@
 &dsi_nt35695b_truly_fhd_video {
 	qcom,mdss-dsi-t-clk-post = <0x07>;
 	qcom,mdss-dsi-t-clk-pre = <0x1c>;
+	qcom,mdss-dsi-min-refresh-rate = <48>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 	qcom,mdss-dsi-display-timings {
 		timing@0 {
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 05 06 0b 0c
diff --git a/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi b/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi
index 1ce8dba..6324b64 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-thermal.dtsi
@@ -147,7 +147,7 @@
 		};
 	};
 
-	cpu4-silver-usr {
+	cpuss-0-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-sensors = <&tsens0 5>;
@@ -161,7 +161,7 @@
 		};
 	};
 
-	cpu5-silver-usr {
+	cpuss-1-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-sensors = <&tsens0 6>;
@@ -175,7 +175,7 @@
 		};
 	};
 
-	kryo-l3-0-usr {
+	cpu4-silver-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-sensors = <&tsens0 7>;
@@ -189,7 +189,7 @@
 		};
 	};
 
-	kryo-l3-1-usr {
+	cpu5-silver-usr {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-sensors = <&tsens0 8>;
@@ -462,15 +462,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&aoss0_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&aoss0_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&aoss0_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&aoss0_trip>;
@@ -511,15 +512,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&cpu0_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&cpu0_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&cpu0_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&cpu0_trip>;
@@ -560,15 +562,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&cpu1_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&cpu1_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&cpu1_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&cpu1_trip>;
@@ -609,15 +612,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&cpu2_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&cpu2_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&cpu2_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&cpu2_trip>;
@@ -658,15 +662,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&cpu3_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&cpu3_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&cpu3_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&cpu3_trip>;
@@ -691,13 +696,113 @@
 		};
 	};
 
-	cpu4-silver-lowf {
+	cpuss-0-lowf {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-governor = "low_limits_floor";
 		thermal-sensors = <&tsens0 5>;
 		tracks-low;
 		trips {
+			l3_0_trip: l3-0-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&CPU0 2 2>;
+			};
+			cpu6_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&msm_gpu 4 4>;
+			};
+			cx_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&l3_0_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+		};
+	};
+
+	cpuss-1-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 6>;
+		tracks-low;
+		trips {
+			l3_1_trip: l3-1-trip {
+				temperature = <5000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+		cooling-maps {
+			cpu0_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&CPU0 2 2>;
+			};
+			cpu6_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
+			};
+			gpu_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&msm_gpu 4 4>;
+			};
+			cx_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&cx_cdev 0 0>;
+			};
+			mx_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&mx_cdev 0 0>;
+			};
+			modem_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&modem_vdd 0 0>;
+			};
+			adsp_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&adsp_vdd 0 0>;
+			};
+			cdsp_vdd_cdev {
+				trip = <&l3_1_trip>;
+				cooling-device = <&cdsp_vdd 0 0>;
+			};
+		};
+	};
+
+	cpu4-silver-lowf {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-governor = "low_limits_floor";
+		thermal-sensors = <&tsens0 7>;
+		tracks-low;
+		trips {
 			cpu4_trip: cpu4-trip {
 				temperature = <5000>;
 				hysteresis = <5000>;
@@ -707,15 +812,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&cpu4_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&cpu4_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&cpu4_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&cpu4_trip>;
@@ -744,7 +850,7 @@
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
 		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 6>;
+		thermal-sensors = <&tsens0 8>;
 		tracks-low;
 		trips {
 			cpu5_trip: cpu5-trip {
@@ -756,15 +862,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&cpu5_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&cpu5_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&cpu5_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&cpu5_trip>;
@@ -789,104 +896,6 @@
 		};
 	};
 
-	kryo-l3-0-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 7>;
-		tracks-low;
-		trips {
-			l3_0_trip: l3-0-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&CPU0 4 4>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&CPU6 9 9>;
-			};
-			gpu_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&msm_gpu 1 1>;
-			};
-			cx_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&l3_0_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
-	kryo-l3-1-lowf {
-		polling-delay-passive = <0>;
-		polling-delay = <0>;
-		thermal-governor = "low_limits_floor";
-		thermal-sensors = <&tsens0 8>;
-		tracks-low;
-		trips {
-			l3_1_trip: l3-1-trip {
-				temperature = <5000>;
-				hysteresis = <5000>;
-				type = "passive";
-			};
-		};
-		cooling-maps {
-			cpu0_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&CPU0 4 4>;
-			};
-			cpu6_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&CPU6 9 9>;
-			};
-			gpu_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&msm_gpu 1 1>;
-			};
-			cx_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&cx_cdev 0 0>;
-			};
-			mx_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&mx_cdev 0 0>;
-			};
-			modem_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&modem_vdd 0 0>;
-			};
-			adsp_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&adsp_vdd 0 0>;
-			};
-			cdsp_vdd_cdev {
-				trip = <&l3_1_trip>;
-				cooling-device = <&cdsp_vdd 0 0>;
-			};
-		};
-	};
-
 	cpu0-gold-lowf {
 		polling-delay-passive = <0>;
 		polling-delay = <0>;
@@ -903,15 +912,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&cpug0_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&cpug0_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&cpug0_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&cpug0_trip>;
@@ -952,15 +962,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&cpug1_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&cpug1_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&cpug1_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&cpug1_trip>;
@@ -1001,15 +1012,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&gpu0_trip_l>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&gpu0_trip_l>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&gpu0_trip_l>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&gpu0_trip_l>;
@@ -1050,15 +1062,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&gpu1_trip_l>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&gpu1_trip_l>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&gpu1_trip_l>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&gpu1_trip_l>;
@@ -1099,15 +1112,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&aoss1_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&aoss1_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&aoss1_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&aoss1_trip>;
@@ -1148,15 +1162,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&dsp_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&dsp_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&dsp_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&dsp_trip>;
@@ -1197,15 +1212,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&ddr_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&ddr_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&ddr_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&ddr_trip>;
@@ -1246,15 +1262,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&wlan_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&wlan_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&wlan_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&wlan_trip>;
@@ -1295,15 +1312,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&hvx_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&hvx_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&hvx_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&hvx_trip>;
@@ -1344,15 +1362,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&camera_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&camera_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&camera_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&camera_trip>;
@@ -1393,15 +1412,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&mmss_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&mmss_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&mmss_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&mmss_trip>;
@@ -1442,15 +1462,16 @@
 		cooling-maps {
 			cpu0_vdd_cdev {
 				trip = <&mdm_trip>;
-				cooling-device = <&CPU0 4 4>;
+				cooling-device = <&CPU0 2 2>;
 			};
 			cpu6_vdd_cdev {
 				trip = <&mdm_trip>;
-				cooling-device = <&CPU6 9 9>;
+				cooling-device = <&CPU6 (THERMAL_MAX_LIMIT-8)
+							(THERMAL_MAX_LIMIT-8)>;
 			};
 			gpu_vdd_cdev {
 				trip = <&mdm_trip>;
-				cooling-device = <&msm_gpu 1 1>;
+				cooling-device = <&msm_gpu 4 4>;
 			};
 			cx_vdd_cdev {
 				trip = <&mdm_trip>;
@@ -1504,4 +1525,118 @@
 			};
 		};
 	};
+
+	xo-therm-cpu-step {
+		polling-delay-passive = <2000>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm660_adc_tm 0x4c>;
+		thermal-governor = "step_wise";
+
+		trips {
+			gold_trip0: gold-trip0 {
+				temperature = <45000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+			silver_trip1: silver-trip1 {
+				temperature = <48000>;
+				hysteresis = <0>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			skin_cpu6 {
+				trip = <&gold_trip0>;
+				cooling-device =
+					/* throttle from fmax to 1747200KHz */
+					<&CPU6 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-8)>;
+			};
+			skin_cpu7 {
+				trip = <&gold_trip0>;
+				cooling-device =
+					<&CPU7 THERMAL_NO_LIMIT
+						(THERMAL_MAX_LIMIT-8)>;
+			};
+			skin_cpu0 {
+				trip = <&silver_trip1>;
+				/* throttle from fmax to 1516800KHz */
+				cooling-device = <&CPU0 THERMAL_NO_LIMIT 2>;
+			};
+			skin_cpu1 {
+				trip = <&silver_trip1>;
+				cooling-device = <&CPU1 THERMAL_NO_LIMIT 2>;
+			};
+			skin_cpu2 {
+				trip = <&silver_trip1>;
+				cooling-device = <&CPU2 THERMAL_NO_LIMIT 2>;
+			};
+			skin_cpu3 {
+				trip = <&silver_trip1>;
+				cooling-device = <&CPU3 THERMAL_NO_LIMIT 2>;
+			};
+			skin_cpu4 {
+				trip = <&silver_trip1>;
+				cooling-device = <&CPU4 THERMAL_NO_LIMIT 2>;
+			};
+			skin_cpu5 {
+				trip = <&silver_trip1>;
+				cooling-device = <&CPU5 THERMAL_NO_LIMIT 2>;
+			};
+		};
+	};
+
+	xo-therm-mdm-step {
+		polling-delay-passive = <0>;
+		polling-delay = <0>;
+		thermal-sensors = <&pm660_adc_tm 0x4c>;
+		thermal-governor = "step_wise";
+
+		trips {
+			modem_trip0: modem-trip0 {
+				temperature = <44000>;
+				hysteresis = <4000>;
+				type = "passive";
+			};
+			modem_trip1: modem-trip1 {
+				temperature = <46000>;
+				hysteresis = <3000>;
+				type = "passive";
+			};
+			modem_trip2: modem-trip2 {
+				temperature = <48000>;
+				hysteresis = <2000>;
+				type = "passive";
+			};
+			modem_trip3: modem-trip3 {
+				temperature = <55000>;
+				hysteresis = <5000>;
+				type = "passive";
+			};
+		};
+
+		cooling-maps {
+			modem_lvl1 {
+				trip = <&modem_trip1>;
+				cooling-device = <&modem_pa 1 1>;
+			};
+			modem_lvl2 {
+				trip = <&modem_trip2>;
+				cooling-device = <&modem_pa 2 2>;
+			};
+			modem_lvl3 {
+				trip = <&modem_trip3>;
+				cooling-device = <&modem_pa 3 3>;
+			};
+			modem_proc_lvl1 {
+				trip = <&modem_trip0>;
+				cooling-device = <&modem_proc 1 1>;
+			};
+			modem_proc_lvl3 {
+				trip = <&modem_trip3>;
+				cooling-device = <&modem_proc 3 3>;
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
index 6a69e29..84c7459 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-usb.dtsi
@@ -11,7 +11,7 @@
  * GNU General Public License for more details.
  */
 
-#include "sdm845-usb.dtsi"
+#include "sdm845-670-usb-common.dtsi"
 
 &soc {
 	/delete-node/ ssusb@a800000;
@@ -29,7 +29,7 @@
 &usb0 {
 	/delete-property/ iommus;
 	/delete-property/ qcom,smmu-s1-bypass;
-	extcon = <0>, <0>, <0> /* <&eud> */;
+	extcon = <0>, <0>, <&eud>, <0>, <0>;
 };
 
 &qusb_phy0 {
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index a3b8c78..771e599 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -2220,6 +2220,8 @@
 
 		qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
 						192000000 384000000>;
+		qcom,bus-aggr-clk-rates = <50000000 50000000 50000000 50000000
+				100000000 200000000 200000000>;
 		qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
 
 		qcom,devfreq,freq-table = <50000000 200000000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi
similarity index 99%
rename from arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
rename to arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi
index b9eabcf..f6fa948 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-670-usb-common.dtsi
@@ -36,7 +36,7 @@
 		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
 		qcom,num-gsi-evt-buffs = <0x3>;
 		qcom,use-pdc-interrupts;
-		extcon = <0>, <0>, <&eud>;
+		extcon = <0>, <0>, <&eud>, <0>, <0>;
 
 		clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>,
 			 <&clock_gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index d8a6dc3..31cfdd6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -27,7 +27,7 @@
 		reg = <0x01 0x00>;
 		compatible = "qcom,camera-flash";
 		flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
-		torch-source = <&pmi8998_torch0 &pmi8998_torch1 >;
+		torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
 		switch-source = <&pmi8998_switch0>;
 		status = "ok";
 	};
@@ -42,6 +42,16 @@
 		status = "ok";
 	};
 
+	led_flash_iris: qcom,camera-flash@3 {
+		cell-index = <3>;
+		reg = <0x03 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash2>;
+		torch-source = <&pmi8998_torch2>;
+		switch-source = <&pmi8998_switch2>;
+		status = "ok";
+	};
+
 	actuator_regulator: gpio-regulator@0 {
 		compatible = "regulator-fixed";
 		reg = <0x00 0x00>;
@@ -412,6 +422,7 @@
 		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <0>;
+		led-flash-src = <&led_flash_iris>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index 952ba29..d7f25977 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -42,6 +42,16 @@
 		status = "ok";
 	};
 
+	led_flash_iris: qcom,camera-flash@3 {
+		cell-index = <3>;
+		reg = <0x03 0x00>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash2>;
+		torch-source = <&pmi8998_torch2>;
+		switch-source = <&pmi8998_switch2>;
+		status = "ok";
+	};
+
 	actuator_regulator: gpio-regulator@0 {
 		compatible = "regulator-fixed";
 		reg = <0x00 0x00>;
@@ -403,6 +413,7 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
+
 	qcom,cam-sensor@3 {
 		cell-index = <3>;
 		compatible = "qcom,cam-sensor";
@@ -411,6 +422,7 @@
 		sensor-position-roll = <270>;
 		sensor-position-pitch = <0>;
 		sensor-position-yaw = <0>;
+		led-flash-src = <&led_flash_iris>;
 		cam_vio-supply = <&pm8998_lvs1>;
 		cam_vana-supply = <&pmi8998_bob>;
 		cam_vdig-supply = <&camera_ldo>;
@@ -445,5 +457,4 @@
 		clock-cntl-level = "turbo";
 		clock-rates = <24000000>;
 	};
-
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index 10efa20..c16e1d8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -172,6 +172,12 @@
 	/delete-property/ switch-source;
 };
 
+&led_flash_iris {
+	/delete-property/ flash-source;
+	/delete-property/ torch-source;
+	/delete-property/ switch-source;
+};
+
 &actuator_regulator {
 	/delete-property/ vin-supply;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index d01149b..825f121 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -263,6 +263,12 @@
 	pinctrl-1 = <&flash_led3_front_dis>;
 };
 
+&pmi8998_switch2 {
+	pinctrl-names = "led_enable", "led_disable";
+	pinctrl-0 = <&flash_led3_iris_en>;
+	pinctrl-1 = <&flash_led3_iris_dis>;
+};
+
 &vendor {
 	mtp_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 5035c9f..244ac1d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -97,6 +97,37 @@
 			};
 		};
 
+		flash_led3_iris {
+			flash_led3_iris_en: flash_led3_iris_en {
+				mux {
+					pins = "gpio23";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio23";
+					drive_strength = <2>;
+					output-high;
+					bias-disable;
+				};
+			};
+
+			flash_led3_iris_dis: flash_led3_iris_dis {
+				mux {
+					pins = "gpio23";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio23";
+					drive_strength = <2>;
+					output-low;
+					bias-disable;
+				};
+			};
+		};
+
+
 		wcd9xxx_intr {
 			wcd_intr_default: wcd_intr_default{
 				mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 97904e3..5b050c5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -4052,7 +4052,7 @@
 #include "sdm845-pcie.dtsi"
 #include "sdm845-audio.dtsi"
 #include "sdm845-gpu.dtsi"
-#include "sdm845-usb.dtsi"
+#include "sdm845-670-usb-common.dtsi"
 
 &pm8998_temp_alarm {
 	cooling-maps {
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 75702c5..db96773 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -284,8 +284,10 @@
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_SMD=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_SMD_PKT=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_SPI=y
@@ -351,6 +353,10 @@
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
 CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_PARANOID_SD_INIT=y
@@ -397,10 +403,14 @@
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_RPM_SMD=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
 CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 789d403..d893fd0 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -294,8 +294,10 @@
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_MSM_SMD=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_MSM_SMD_PKT=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_SPI=y
@@ -362,6 +364,10 @@
 CONFIG_USB_GADGET_DEBUG_FILES=y
 CONFIG_USB_GADGET_DEBUG_FS=y
 CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
 CONFIG_MMC=y
 CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_RING_BUFFER=y
@@ -413,11 +419,15 @@
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_RPM_SMD=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
+CONFIG_MSM_SMD=y
+CONFIG_MSM_SMD_DEBUG=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SMP2P=y
+CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index f3a142e..8fe5ffc 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -25,6 +25,7 @@
 #include <linux/const.h>
 #include <linux/types.h>
 #include <asm/bug.h>
+#include <asm/page.h>
 #include <asm/sizes.h>
 
 /*
@@ -92,15 +93,26 @@
 #define KERNEL_END        _end
 
 /*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
+ * KASAN requires 1/8th of the kernel virtual address space for the shadow
+ * region. KASAN can bloat the stack significantly, so double the (minimum)
+ * stack size when KASAN is in use.
  */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
+#define KASAN_THREAD_SHIFT	1
 #else
 #define KASAN_SHADOW_SIZE	(0)
+#define KASAN_THREAD_SHIFT	0
 #endif
 
+#define THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
+
+#if THREAD_SHIFT >= PAGE_SHIFT
+#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
+#endif
+
+#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)
+
 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index ebd18b7..ba3a69a 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -23,19 +23,13 @@
 
 #include <linux/compiler.h>
 
-#ifdef CONFIG_ARM64_4K_PAGES
-#define THREAD_SIZE_ORDER	2
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define THREAD_SIZE_ORDER	0
-#endif
-
-#define THREAD_SIZE		16384
 #define THREAD_START_SP		(THREAD_SIZE - 16)
 
 #ifndef __ASSEMBLY__
 
 struct task_struct;
 
+#include <asm/memory.h>
 #include <asm/stack_pointer.h>
 #include <asm/types.h>
 
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 8e5d836..f510c14 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -2133,6 +2133,14 @@
 				driver->real_time_mode[DIAG_LOCAL_PROC]);
 		diag_send_peripheral_buffering_mode(
 					&driver->buffering_mode[peripheral]);
+
+		/*
+		 * Clear mask_update variable afer updating
+		 * logging masks to peripheral.
+		 */
+		mutex_lock(&driver->cntl_lock);
+		driver->mask_update ^= PERIPHERAL_MASK(peripheral);
+		mutex_unlock(&driver->cntl_lock);
 	}
 }
 
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index eaca17a..162d53f 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -39,9 +39,6 @@
 	for (peripheral = 0; peripheral <= NUM_PERIPHERALS; peripheral++) {
 		if (!(driver->mask_update & PERIPHERAL_MASK(peripheral)))
 			continue;
-		mutex_lock(&driver->cntl_lock);
-		driver->mask_update ^= PERIPHERAL_MASK(peripheral);
-		mutex_unlock(&driver->cntl_lock);
 		diag_send_updates_peripheral(peripheral);
 	}
 }
@@ -834,7 +831,7 @@
 		 */
 		if (root_str) {
 			driver->diag_id_sent[peripheral] = 1;
-			diag_send_updates_peripheral(peripheral);
+			queue_work(driver->cntl_wq, &driver->mask_update_work);
 		}
 		fwd_info = &peripheral_info[TYPE_DATA][peripheral];
 		diagfwd_buffers_init(fwd_info);
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 03fce64..0bff951 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -642,7 +642,7 @@
 				next_wakeup_us = next_event_us - lvl_latency_us;
 		}
 
-		if (!i) {
+		if (!i && !cpu_isolated(dev->cpu)) {
 			/*
 			 * If the next_wake_us itself is not sufficient for
 			 * deeper low power modes than clock gating do not
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 884dd68..51cc57b 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -83,6 +83,7 @@
 	struct dp_usbpd_cb usbpd_cb;
 	struct dp_display_mode mode;
 	struct dp_display dp_display;
+	struct msm_drm_private *priv;
 
 	struct workqueue_struct *wq;
 	struct delayed_work hdcp_cb_work;
@@ -300,8 +301,13 @@
 	pr_debug("HDCP 1.3 initialized\n");
 
 	dp->hdcp.hdcp2 = sde_dp_hdcp2p2_init(&hdcp_init_data);
-	if (!IS_ERR_OR_NULL(dp->hdcp.hdcp2))
-		pr_debug("HDCP 2.2 initialized\n");
+	if (IS_ERR_OR_NULL(dp->hdcp.hdcp2)) {
+		pr_err("Error initializing HDCP 2.x\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	pr_debug("HDCP 2.2 initialized\n");
 
 	dp->hdcp.feature_enabled = true;
 
@@ -317,7 +323,6 @@
 	int rc = 0;
 	struct dp_display_private *dp;
 	struct drm_device *drm;
-	struct msm_drm_private *priv;
 	struct platform_device *pdev = to_platform_device(dev);
 
 	if (!dev || !pdev || !master) {
@@ -337,25 +342,7 @@
 	}
 
 	dp->dp_display.drm_dev = drm;
-	priv = drm->dev_private;
-
-	rc = dp->aux->drm_aux_register(dp->aux);
-	if (rc) {
-		pr_err("DRM DP AUX register failed\n");
-		goto end;
-	}
-
-	rc = dp->power->power_client_init(dp->power, &priv->phandle);
-	if (rc) {
-		pr_err("Power client create failed\n");
-		goto end;
-	}
-
-	rc = dp_display_initialize_hdcp(dp);
-	if (rc) {
-		pr_err("HDCP initialization failed\n");
-		goto end;
-	}
+	dp->priv = drm->dev_private;
 end:
 	return rc;
 }
@@ -399,32 +386,25 @@
 		(dp->link->sink_count.count == 0);
 }
 
-static void dp_display_send_hpd_event(struct dp_display *dp_display)
+static void dp_display_send_hpd_event(struct dp_display_private *dp)
 {
 	struct drm_device *dev = NULL;
-	struct dp_display_private *dp;
 	struct drm_connector *connector;
 	char name[HPD_STRING_SIZE], status[HPD_STRING_SIZE],
 		bpp[HPD_STRING_SIZE], pattern[HPD_STRING_SIZE];
 	char *envp[5];
 
-	if (!dp_display) {
-		pr_err("invalid input\n");
-		return;
-	}
-
-	dp = container_of(dp_display, struct dp_display_private, dp_display);
-	if (!dp) {
-		pr_err("invalid params\n");
-		return;
-	}
 	connector = dp->dp_display.connector;
-	dev = dp_display->connector->dev;
+
+	if (!connector) {
+		pr_err("connector not set\n");
+		return;
+	}
 
 	connector->status = connector->funcs->detect(connector, false);
-	pr_debug("[%s] status updated to %s\n",
-			      connector->name,
-			      drm_get_connector_status_name(connector->status));
+
+	dev = dp->dp_display.connector->dev;
+
 	snprintf(name, HPD_STRING_SIZE, "name=%s", connector->name);
 	snprintf(status, HPD_STRING_SIZE, "status=%s",
 		drm_get_connector_status_name(connector->status));
@@ -434,8 +414,7 @@
 	snprintf(pattern, HPD_STRING_SIZE, "pattern=%d",
 		dp->link->test_video.test_video_pattern);
 
-	pr_debug("generating hotplug event [%s]:[%s] [%s] [%s]\n",
-		name, status, bpp, pattern);
+	pr_debug("[%s]:[%s] [%s] [%s]\n", name, status, bpp, pattern);
 	envp[0] = name;
 	envp[1] = status;
 	envp[2] = bpp;
@@ -445,12 +424,48 @@
 			envp);
 }
 
+static void dp_display_post_open(struct dp_display *dp_display)
+{
+	struct drm_connector *connector;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+	if (IS_ERR_OR_NULL(dp)) {
+		pr_err("invalid params\n");
+		return;
+	}
+
+	connector = dp->dp_display.connector;
+
+	if (!connector) {
+		pr_err("connector not set\n");
+		return;
+	}
+
+	/* if cable is already connected, send notification */
+	if (dp_display->is_connected)
+		dp_display_send_hpd_event(dp);
+	else
+		dp_display->post_open = NULL;
+
+}
+
 static int dp_display_send_hpd_notification(struct dp_display_private *dp,
 		bool hpd)
 {
 	dp->dp_display.is_connected = hpd;
+
+	/* in case the framework is not yet up, don't notify hpd */
+	if (dp->dp_display.post_open)
+		return 0;
+
 	reinit_completion(&dp->notification_comp);
-	dp_display_send_hpd_event(&dp->dp_display);
+	dp_display_send_hpd_event(dp);
 
 	if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 5)) {
 		pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
@@ -476,6 +491,9 @@
 		dp->debug->psm_enabled = false;
 	}
 
+	if (!dp->dp_display.connector)
+		return 0;
+
 	rc = dp->panel->read_sink_caps(dp->panel, dp->dp_display.connector);
 	if (rc) {
 		if (rc == -ETIMEDOUT) {
@@ -793,18 +811,6 @@
 		.dev = dev,
 	};
 
-	cb->configure  = dp_display_usbpd_configure_cb;
-	cb->disconnect = dp_display_usbpd_disconnect_cb;
-	cb->attention  = dp_display_usbpd_attention_cb;
-
-	dp->usbpd = dp_usbpd_get(dev, cb);
-	if (IS_ERR(dp->usbpd)) {
-		rc = PTR_ERR(dp->usbpd);
-		pr_err("failed to initialize usbpd, rc = %d\n", rc);
-		dp->usbpd = NULL;
-		goto error;
-	}
-
 	mutex_init(&dp->session_lock);
 
 	dp->parser = dp_parser_get(dp->pdev);
@@ -812,7 +818,7 @@
 		rc = PTR_ERR(dp->parser);
 		pr_err("failed to initialize parser, rc = %d\n", rc);
 		dp->parser = NULL;
-		goto error_parser;
+		goto error;
 	}
 
 	rc = dp->parser->parse(dp->parser);
@@ -837,6 +843,12 @@
 		goto error_power;
 	}
 
+	rc = dp->power->power_client_init(dp->power, &dp->priv->phandle);
+	if (rc) {
+		pr_err("Power client create failed\n");
+		goto error_aux;
+	}
+
 	dp->aux = dp_aux_get(dev, &dp->catalog->aux, dp->parser->aux_cfg);
 	if (IS_ERR(dp->aux)) {
 		rc = PTR_ERR(dp->aux);
@@ -845,6 +857,12 @@
 		goto error_aux;
 	}
 
+	rc = dp->aux->drm_aux_register(dp->aux);
+	if (rc) {
+		pr_err("DRM DP AUX register failed\n");
+		goto error_link;
+	}
+
 	dp->link = dp_link_get(dev, dp->aux);
 	if (IS_ERR(dp->link)) {
 		rc = PTR_ERR(dp->link);
@@ -888,6 +906,18 @@
 		goto error_audio;
 	}
 
+	cb->configure  = dp_display_usbpd_configure_cb;
+	cb->disconnect = dp_display_usbpd_disconnect_cb;
+	cb->attention  = dp_display_usbpd_attention_cb;
+
+	dp->usbpd = dp_usbpd_get(dev, cb);
+	if (IS_ERR(dp->usbpd)) {
+		rc = PTR_ERR(dp->usbpd);
+		pr_err("failed to initialize usbpd, rc = %d\n", rc);
+		dp->usbpd = NULL;
+		goto error_usbpd;
+	}
+
 	dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
 				dp->link, &dp->dp_display.connector);
 	if (IS_ERR(dp->debug)) {
@@ -899,6 +929,8 @@
 
 	return rc;
 error_debug:
+	dp_usbpd_put(dp->usbpd);
+error_usbpd:
 	dp_audio_put(dp->audio);
 error_audio:
 	dp_ctrl_put(dp->ctrl);
@@ -914,13 +946,40 @@
 	dp_catalog_put(dp->catalog);
 error_catalog:
 	dp_parser_put(dp->parser);
-error_parser:
-	dp_usbpd_put(dp->usbpd);
-	mutex_destroy(&dp->session_lock);
 error:
+	mutex_destroy(&dp->session_lock);
 	return rc;
 }
 
+static void dp_display_post_init(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+	if (IS_ERR_OR_NULL(dp)) {
+		pr_err("invalid params\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = dp_init_sub_modules(dp);
+	if (rc)
+		goto end;
+
+	dp_display_initialize_hdcp(dp);
+
+	dp_display->post_init = NULL;
+end:
+	pr_debug("%s\n", rc ? "failed" : "success");
+}
+
 static int dp_display_set_mode(struct dp_display *dp_display,
 		struct dp_display_mode *mode)
 {
@@ -1023,7 +1082,7 @@
 	}
 end:
 	/* clear framework event notifier */
-	dp_display->send_hpd_event = NULL;
+	dp_display->post_open = NULL;
 
 	complete_all(&dp->notification_comp);
 	mutex_unlock(&dp->session_lock);
@@ -1148,7 +1207,7 @@
 	struct drm_dp_link *link_info;
 	u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
 
-	if (!dp || !mode_pclk_khz) {
+	if (!dp || !mode_pclk_khz || !dp->connector) {
 		pr_err("invalid params\n");
 		return -EINVAL;
 	}
@@ -1178,7 +1237,7 @@
 	struct dp_display_private *dp_display;
 	int ret = 0;
 
-	if (!dp) {
+	if (!dp || !dp->connector) {
 		pr_err("invalid params\n");
 		return 0;
 	}
@@ -1206,7 +1265,7 @@
 
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
-	if (hdr->hdr_supported)
+	if (hdr->hdr_supported && dp->panel->hdr_supported(dp->panel))
 		rc = dp->panel->setup_hdr(dp->panel, hdr);
 
 	return rc;
@@ -1249,16 +1308,10 @@
 	dp->pdev = pdev;
 	dp->name = "drm_dp";
 
-	rc = dp_init_sub_modules(dp);
-	if (rc) {
-		rc = -EPROBE_DEFER;
-		goto err_dev;
-	}
-
 	rc = dp_display_create_workqueue(dp);
 	if (rc) {
 		pr_err("Failed to create workqueue\n");
-		goto err_sub_mod;
+		goto error;
 	}
 
 	platform_set_drvdata(pdev, dp);
@@ -1276,20 +1329,18 @@
 	g_dp_display->unprepare     = dp_display_unprepare;
 	g_dp_display->request_irq   = dp_request_irq;
 	g_dp_display->get_debug     = dp_get_debug;
-	g_dp_display->send_hpd_event    = dp_display_send_hpd_event;
+	g_dp_display->post_open     = dp_display_post_open;
+	g_dp_display->post_init     = dp_display_post_init;
 	g_dp_display->pre_kickoff   = dp_display_pre_kickoff;
 
 	rc = component_add(&pdev->dev, &dp_display_comp_ops);
 	if (rc) {
 		pr_err("component add failed, rc=%d\n", rc);
-		goto err_sub_mod;
+		goto error;
 	}
 
 	return 0;
-
-err_sub_mod:
-	dp_display_deinit_sub_modules(dp);
-err_dev:
+error:
 	devm_kfree(&pdev->dev, dp);
 bail:
 	return rc;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 2d314c7..c55e6c8 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -42,9 +42,10 @@
 	int (*unprepare)(struct dp_display *dp_display);
 	int (*request_irq)(struct dp_display *dp_display);
 	struct dp_debug *(*get_debug)(struct dp_display *dp_display);
-	void (*send_hpd_event)(struct dp_display *dp_display);
+	void (*post_open)(struct dp_display *dp_display);
 	int (*pre_kickoff)(struct dp_display *dp_display,
 				struct drm_msm_ext_hdr_metadata *hdr_meta);
+	void (*post_init)(struct dp_display *dp_display);
 };
 
 int dp_display_get_num_of_displays(void);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 2c29ad2..7746b8e 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -184,6 +184,9 @@
 	bridge = to_dp_bridge(drm_bridge);
 	dp = bridge->display;
 
+	if (dp && dp->connector)
+		sde_connector_helper_bridge_disable(dp->connector);
+
 	rc = dp->pre_disable(dp);
 	if (rc) {
 		pr_err("[%d] DP display pre disable failed, rc=%d\n",
@@ -295,6 +298,10 @@
 		return -EINVAL;
 
 	dp_display->connector = connector;
+
+	if (dp_display->post_init)
+		dp_display->post_init(dp_display);
+
 	return 0;
 }
 
@@ -376,7 +383,7 @@
 	return status;
 }
 
-void dp_connector_send_hpd_event(void *display)
+void dp_connector_post_open(void *display)
 {
 	struct dp_display *dp;
 
@@ -387,8 +394,8 @@
 
 	dp = display;
 
-	if (dp->send_hpd_event)
-		dp->send_hpd_event(dp);
+	if (dp->post_open)
+		dp->post_open(dp);
 }
 
 int dp_connector_get_modes(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 1673212..89b0a7e 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -95,7 +95,11 @@
 
 int dp_connector_get_info(struct msm_display_info *info, void *display);
 
-void dp_connector_send_hpd_event(void *display);
+/**
+ * dp_connector_post_open - handle the post open functionalities
+ * @display: Pointer to private display structure
+ */
+void dp_connector_post_open(void *display);
 
 int dp_drm_bridge_init(void *display,
 	struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 02edafb..b5dd9bc 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -19,6 +19,11 @@
 #define DP_PANEL_DEFAULT_BPP 24
 #define DP_MAX_DS_PORT_COUNT 1
 
+#define DPRX_FEATURE_ENUMERATION_LIST 0x2210
+#define VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED BIT(3)
+#define VSC_EXT_VESA_SDP_SUPPORTED BIT(4)
+#define VSC_EXT_VESA_SDP_CHAINING_SUPPORTED BIT(5)
+
 enum dp_panel_hdr_pixel_encoding {
 	RGB,
 	YCbCr444,
@@ -65,9 +70,14 @@
 	bool custom_edid;
 	bool custom_dpcd;
 	bool panel_on;
+	bool vsc_supported;
+	bool vscext_supported;
+	bool vscext_chaining_supported;
 	enum dp_panel_hdr_state hdr_state;
 	u8 spd_vendor_name[8];
 	u8 spd_product_description[16];
+	u8 major;
+	u8 minor;
 };
 
 static const struct dp_panel_info fail_safe = {
@@ -99,7 +109,7 @@
 	int rlen, rc = 0;
 	struct dp_panel_private *panel;
 	struct drm_dp_link *link_info;
-	u8 *dpcd, major = 0, minor = 0;
+	u8 *dpcd, rx_feature;
 	u32 dfp_count = 0;
 	unsigned long caps = DP_LINK_CAP_ENHANCED_FRAMING;
 
@@ -131,11 +141,33 @@
 			DUMP_PREFIX_NONE, 8, 1, dp_panel->dpcd, rlen, false);
 	}
 
+	rlen = drm_dp_dpcd_read(panel->aux->drm_aux,
+		DPRX_FEATURE_ENUMERATION_LIST, &rx_feature, 1);
+	if (rlen != 1) {
+		pr_debug("failed to read DPRX_FEATURE_ENUMERATION_LIST\n");
+		panel->vsc_supported = false;
+		panel->vscext_supported = false;
+		panel->vscext_chaining_supported = false;
+	} else {
+		panel->vsc_supported = !!(rx_feature &
+			VSC_SDP_EXTENSION_FOR_COLORIMETRY_SUPPORTED);
+
+		panel->vscext_supported = !!(rx_feature &
+			VSC_EXT_VESA_SDP_SUPPORTED);
+
+		panel->vscext_chaining_supported = !!(rx_feature &
+			VSC_EXT_VESA_SDP_CHAINING_SUPPORTED);
+	}
+
+	pr_debug("vsc=%d, vscext=%d, vscext_chaining=%d\n",
+		panel->vsc_supported, panel->vscext_supported,
+		panel->vscext_chaining_supported);
+
 	link_info->revision = dp_panel->dpcd[DP_DPCD_REV];
 
-	major = (link_info->revision >> 4) & 0x0f;
-	minor = link_info->revision & 0x0f;
-	pr_debug("version: %d.%d\n", major, minor);
+	panel->major = (link_info->revision >> 4) & 0x0f;
+	panel->minor = link_info->revision & 0x0f;
+	pr_debug("version: %d.%d\n", panel->major, panel->minor);
 
 	link_info->rate =
 		drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
@@ -655,6 +687,21 @@
 	return min_link_rate_khz;
 }
 
+static bool dp_panel_hdr_supported(struct dp_panel *dp_panel)
+{
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return false;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	return panel->major >= 1 && panel->vsc_supported &&
+		(panel->minor >= 4 || panel->vscext_supported);
+}
+
 static bool dp_panel_is_validate_hdr_state(struct dp_panel_private *panel,
 		struct drm_msm_ext_hdr_metadata *hdr_meta)
 {
@@ -731,7 +778,10 @@
 	hdr->version = 0x01;
 	hdr->length = 0x1A;
 
-	memcpy(&hdr->hdr_meta, hdr_meta, sizeof(hdr->hdr_meta));
+	if (panel->hdr_state)
+		memcpy(&hdr->hdr_meta, hdr_meta, sizeof(hdr->hdr_meta));
+	else
+		memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
 
 	panel->catalog->config_hdr(panel->catalog, panel->hdr_state);
 end:
@@ -806,6 +856,7 @@
 	dp_panel->tpg_config = dp_panel_tpg_config;
 	dp_panel->spd_config = dp_panel_spd_config;
 	dp_panel->setup_hdr = dp_panel_setup_hdr;
+	dp_panel->hdr_supported = dp_panel_hdr_supported;
 
 	dp_panel_edid_register(panel);
 
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 128f694..2583f61 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -93,6 +93,7 @@
 		struct drm_msm_ext_hdr_metadata *hdr_meta);
 	void (*tpg_config)(struct dp_panel *dp_panel, bool enable);
 	int (*spd_config)(struct dp_panel *dp_panel);
+	bool (*hdr_supported)(struct dp_panel *dp_panel);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 2286603..2c758a2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -790,6 +790,20 @@
 		}
 	}
 
+	if (!debugfs_create_bool("ulps_enable", 0600, dir,
+			&display->panel->ulps_enabled)) {
+		pr_err("[%s] debugfs create ulps enable file failed\n",
+		       display->name);
+		goto error_remove_dir;
+	}
+
+	if (!debugfs_create_bool("ulps_suspend_enable", 0600, dir,
+			&display->panel->ulps_suspend_enabled)) {
+		pr_err("[%s] debugfs create ulps-suspend enable file failed\n",
+		       display->name);
+		goto error_remove_dir;
+	}
+
 	display->root = dir;
 	return rc;
 error_remove_dir:
@@ -826,12 +840,17 @@
 
 	pr_debug("checking ulps req validity\n");
 
-	if (!dsi_panel_ulps_feature_enabled(display->panel))
+	if (!dsi_panel_ulps_feature_enabled(display->panel) &&
+			!display->panel->ulps_suspend_enabled) {
+		pr_debug("%s: ULPS feature is not enabled\n", __func__);
 		return false;
+	}
 
-	/* TODO: ULPS during suspend */
-	if (!dsi_panel_initialized(display->panel))
+	if (!dsi_panel_initialized(display->panel) &&
+			!display->panel->ulps_suspend_enabled) {
+		pr_debug("%s: panel not yet initialized\n", __func__);
 		return false;
+	}
 
 	if (enable && display->ulps_enabled) {
 		pr_debug("ULPS already enabled\n");
@@ -2337,22 +2356,27 @@
 	if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF)) {
 		/*
 		 * If ULPS feature is enabled, enter ULPS first.
+		 * However, when blanking the panel, we should enter ULPS
+		 * only if ULPS during suspend feature is enabled.
 		 */
-		if (dsi_panel_initialized(display->panel) &&
-			dsi_panel_ulps_feature_enabled(display->panel)) {
+		if (!dsi_panel_initialized(display->panel)) {
+			if (display->panel->ulps_suspend_enabled)
+				rc = dsi_display_set_ulps(display, true);
+		} else if (dsi_panel_ulps_feature_enabled(display->panel)) {
 			rc = dsi_display_set_ulps(display, true);
-			if (rc) {
-				pr_err("%s: failed enable ulps, rc = %d\n",
-			       __func__, rc);
-			}
 		}
+		if (rc)
+			pr_err("%s: failed enable ulps, rc = %d\n",
+			       __func__, rc);
 	}
 
 	if ((clk & DSI_CORE_CLK) && (new_state == DSI_CLK_OFF)) {
 		/*
-		 * Enable DSI clamps only if entering idle power collapse.
+		 * Enable DSI clamps only if entering idle power collapse or
+		 * when ULPS during suspend is enabled.
 		 */
-		if (dsi_panel_initialized(display->panel)) {
+		if (dsi_panel_initialized(display->panel) ||
+			display->panel->ulps_suspend_enabled) {
 			dsi_display_phy_idle_off(display);
 			rc = dsi_display_set_clamp(display, true);
 			if (rc)
@@ -3958,7 +3982,7 @@
 	return rc;
 }
 
-int dsi_display_get_mode_count(struct dsi_display *display,
+static int dsi_display_get_mode_count_no_lock(struct dsi_display *display,
 			u32 *count)
 {
 	struct dsi_dfps_capabilities dfps_caps;
@@ -3970,15 +3994,13 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&display->display_lock);
-
 	*count = display->panel->num_timing_nodes;
 
 	rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
 	if (rc) {
 		pr_err("[%s] failed to get dfps caps from panel\n",
 				display->name);
-		goto done;
+		return rc;
 	}
 
 	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
@@ -3988,7 +4010,22 @@
 	/* Inflate num_of_modes by fps in dfps */
 	*count = display->panel->num_timing_nodes * num_dfps_rates;
 
-done:
+	return 0;
+}
+
+int dsi_display_get_mode_count(struct dsi_display *display,
+			u32 *count)
+{
+	int rc;
+
+	if (!display || !display->panel) {
+		pr_err("invalid display:%d panel:%d\n", display != NULL,
+				display ? display->panel != NULL : 0);
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+	rc = dsi_display_get_mode_count_no_lock(display, count);
 	mutex_unlock(&display->display_lock);
 
 	return 0;
@@ -4001,20 +4038,36 @@
 }
 
 int dsi_display_get_modes(struct dsi_display *display,
-			  struct dsi_display_mode *modes)
+			  struct dsi_display_mode **out_modes)
 {
 	struct dsi_dfps_capabilities dfps_caps;
-	u32 num_dfps_rates, panel_mode_count;
+	u32 num_dfps_rates, panel_mode_count, total_mode_count;
 	u32 mode_idx, array_idx = 0;
-	int i, rc = 0;
+	int i, rc = -EINVAL;
 
-	if (!display || !modes) {
+	if (!display || !out_modes) {
 		pr_err("Invalid params\n");
 		return -EINVAL;
 	}
 
+	*out_modes = NULL;
+
 	mutex_lock(&display->display_lock);
 
+	rc = dsi_display_get_mode_count_no_lock(display, &total_mode_count);
+	if (rc)
+		goto error;
+
+	/* free any previously probed modes */
+	kfree(display->modes);
+
+	display->modes = kcalloc(total_mode_count, sizeof(*display->modes),
+			GFP_KERNEL);
+	if (!display->modes) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
 	rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
 	if (rc) {
 		pr_err("[%s] failed to get dfps caps from panel\n",
@@ -4055,12 +4108,14 @@
 		}
 
 		for (i = 0; i < num_dfps_rates; i++) {
-			struct dsi_display_mode *sub_mode = &modes[array_idx];
+			struct dsi_display_mode *sub_mode =
+					&display->modes[array_idx];
 			u32 curr_refresh_rate;
 
 			if (!sub_mode) {
 				pr_err("invalid mode data\n");
-				return -EFAULT;
+				rc = -EFAULT;
+				goto error;
 			}
 
 			memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
@@ -4084,11 +4139,57 @@
 		}
 	}
 
+	*out_modes = display->modes;
+	rc = 0;
+
 error:
+	if (rc)
+		kfree(display->modes);
+
 	mutex_unlock(&display->display_lock);
 	return rc;
 }
 
+int dsi_display_find_mode(struct dsi_display *display,
+		const struct dsi_display_mode *cmp,
+		struct dsi_display_mode **out_mode)
+{
+	u32 count, i;
+	int rc;
+
+	if (!display || !out_mode)
+		return -EINVAL;
+
+	*out_mode = NULL;
+
+	rc = dsi_display_get_mode_count(display, &count);
+	if (rc)
+		return rc;
+
+	mutex_lock(&display->display_lock);
+	for (i = 0; i < count; i++) {
+		struct dsi_display_mode *m = &display->modes[i];
+
+		if (cmp->timing.v_active == m->timing.v_active &&
+			cmp->timing.h_active == m->timing.h_active &&
+			cmp->timing.refresh_rate == m->timing.refresh_rate) {
+			*out_mode = m;
+			rc = 0;
+			break;
+		}
+	}
+	mutex_unlock(&display->display_lock);
+
+	if (!*out_mode) {
+		pr_err("[%s] failed to find mode for v_active %u h_active %u rate %u\n",
+				display->name, cmp->timing.v_active,
+				cmp->timing.h_active, cmp->timing.refresh_rate);
+		rc = -ENOENT;
+	}
+
+	return rc;
+}
+
 /**
  * dsi_display_validate_mode_vrr() - Validate if varaible refresh case.
  * @display:     DSI display handle.
@@ -4636,17 +4737,27 @@
 		goto error_panel_post_unprep;
 	}
 
-	rc = dsi_display_phy_sw_reset(display);
-	if (rc) {
-		pr_err("[%s] failed to reset phy, rc=%d\n", display->name, rc);
-		goto error_ctrl_clk_off;
-	}
+	/*
+	 * If ULPS during suspend feature is enabled, then DSI PHY was
+	 * left on during suspend. In this case, we do not need to reset/init
+	 * PHY. This would have already been done when the CORE clocks are
+	 * turned on. However, if cont splash is disabled, the first time DSI
+	 * is powered on, phy init needs to be done unconditionally.
+	 */
+	if (!display->panel->ulps_suspend_enabled || !display->ulps_enabled) {
+		rc = dsi_display_phy_sw_reset(display);
+		if (rc) {
+			pr_err("[%s] failed to reset phy, rc=%d\n",
+				display->name, rc);
+			goto error_ctrl_clk_off;
+		}
 
-	rc = dsi_display_phy_enable(display);
-	if (rc) {
-		pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
-		       display->name, rc);
-		goto error_ctrl_clk_off;
+		rc = dsi_display_phy_enable(display);
+		if (rc) {
+			pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+			       display->name, rc);
+			goto error_ctrl_clk_off;
+		}
 	}
 
 	rc = dsi_display_set_clk_src(display);
@@ -5133,10 +5244,12 @@
 		pr_err("[%s] failed to deinit controller, rc=%d\n",
 		       display->name, rc);
 
-	rc = dsi_display_phy_disable(display);
-	if (rc)
-		pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
-		       display->name, rc);
+	if (!display->panel->ulps_suspend_enabled) {
+		rc = dsi_display_phy_disable(display);
+		if (rc)
+			pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+			       display->name, rc);
+	}
 
 	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
 			DSI_CORE_CLK, DSI_CLK_OFF);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 886641b..87b9fd5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -124,6 +124,7 @@
  * struct dsi_display - dsi display information
  * @pdev:             Pointer to platform device.
  * @drm_dev:          DRM device associated with the display.
+ * @drm_conn:         Pointer to DRM connector associated with the display
  * @name:             Name of the display.
  * @display_type:     Display type as defined in device tree.
  * @list:             List pointer.
@@ -134,6 +135,7 @@
  * @ctrl:             Controller information for DSI display.
  * @panel:            Handle to DSI panel.
  * @panel_of:         pHandle to DSI panel.
+ * @modes:            Array of probed DSI modes
  * @type:             DSI display type.
  * @clk_master_idx:   The master controller for controlling clocks. This is an
  *		      index into the ctrl[MAX_DSI_CTRLS_PER_DISPLAY] array.
@@ -161,6 +163,7 @@
 struct dsi_display {
 	struct platform_device *pdev;
 	struct drm_device *drm_dev;
+	struct drm_connector *drm_conn;
 
 	const char *name;
 	const char *display_type;
@@ -176,6 +179,8 @@
 	struct dsi_panel *panel;
 	struct device_node *panel_of;
 
+	struct dsi_display_mode *modes;
+
 	enum dsi_display_type type;
 	u32 clk_master_idx;
 	u32 cmd_master_idx;
@@ -302,15 +307,13 @@
 /**
  * dsi_display_get_modes() - get modes supported by display
  * @display:            Handle to display.
- * @modes;              Pointer to array of modes. Memory allocated should be
- *			big enough to store (count * struct dsi_display_mode)
- *			elements. If modes pointer is NULL, number of modes will
- *			be stored in the memory pointed to by count.
+ * @modes:              Output param, list of DSI modes. Number of modes matches
+ *                      count returned by dsi_display_get_mode_count
  *
  * Return: error code.
  */
 int dsi_display_get_modes(struct dsi_display *display,
-			  struct dsi_display_mode *modes);
+			  struct dsi_display_mode **modes);
 
 /**
  * dsi_display_put_mode() - free up mode created for the display
@@ -323,6 +326,17 @@
 	struct dsi_display_mode *mode);
 
 /**
+ * dsi_display_find_mode() - retrieve cached DSI mode given relevant params
+ * @display:            Handle to display.
+ * @cmp:                Mode to use as comparison to find original
+ * @out_mode:           Output parameter, pointer to retrieved mode
+ *
+ * Return: error code.
+ */
+int dsi_display_find_mode(struct dsi_display *display,
+		const struct dsi_display_mode *cmp,
+		struct dsi_display_mode **out_mode);
+/**
  * dsi_display_validate_mode() - validates if mode is supported by display
  * @display:             Handle to display.
  * @mode:                Mode to be validated.
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
index 6e41f36..1cec9e1 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
@@ -28,7 +28,6 @@
 	struct dsi_display *display;
 	struct dsi_display_mode *modes;
 	u32 count = 0;
-	u32 size = 0;
 	int rc = 0;
 
 	test = container_of(work, struct dsi_display_test, test_work);
@@ -40,14 +39,7 @@
 		goto test_fail;
 	}
 
-	size = count * sizeof(*modes);
-	modes = kzalloc(size, GFP_KERNEL);
-	if (!modes) {
-		rc = -ENOMEM;
-		goto test_fail;
-	}
-
-	rc = dsi_display_get_modes(display, modes);
+	rc = dsi_display_get_modes(display, &modes);
 	if (rc) {
 		pr_err("failed to get modes, rc=%d\n", rc);
 		goto test_fail_free_modes;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 6391a22..fd50256 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -197,12 +197,17 @@
 static void dsi_bridge_disable(struct drm_bridge *bridge)
 {
 	int rc = 0;
+	struct dsi_display *display;
 	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
 
 	if (!bridge) {
 		pr_err("Invalid params\n");
 		return;
 	}
+	display = c_bridge->display;
+
+	if (display && display->drm_conn)
+		sde_connector_helper_bridge_disable(display->drm_conn);
 
 	rc = dsi_display_pre_disable(c_bridge->display);
 	if (rc) {
@@ -263,7 +268,8 @@
 {
 	int rc = 0;
 	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
-	struct dsi_display_mode dsi_mode, cur_dsi_mode;
+	struct dsi_display *display;
+	struct dsi_display_mode dsi_mode, cur_dsi_mode, *panel_dsi_mode;
 	struct drm_display_mode cur_mode;
 	struct drm_crtc_state *crtc_state;
 
@@ -274,8 +280,26 @@
 		return false;
 	}
 
+	display = c_bridge->display;
+	if (!display) {
+		pr_err("Invalid params\n");
+		return false;
+	}
+
 	convert_to_dsi_mode(mode, &dsi_mode);
 
+	/*
+	 * retrieve dsi mode from dsi driver's cache since not safe to take
+	 * the drm mode config mutex in all paths
+	 */
+	rc = dsi_display_find_mode(display, &dsi_mode, &panel_dsi_mode);
+	if (rc)
+		return rc;
+
+	/* propagate the private info to the adjusted_mode derived dsi mode */
+	dsi_mode.priv_info = panel_dsi_mode->priv_info;
+	dsi_mode.dsi_mode_flags = panel_dsi_mode->dsi_mode_flags;
+
 	rc = dsi_display_validate_mode(c_bridge->display, &dsi_mode,
 			DSI_VALIDATE_FLAG_ALLOW_ADJUST);
 	if (rc) {
@@ -302,6 +326,7 @@
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
 	}
 
+	/* convert back to drm mode, propagating the private info & flags */
 	dsi_convert_to_drm_mode(&dsi_mode, adjusted_mode);
 
 	return true;
@@ -369,6 +394,8 @@
 	if (!info || !dsi_display)
 		return -EINVAL;
 
+	dsi_display->drm_conn = connector;
+
 	sde_kms_info_add_keystr(info,
 		"display type", dsi_display->display_type);
 
@@ -533,7 +560,6 @@
 		void *display)
 {
 	u32 count = 0;
-	u32 size = 0;
 	struct dsi_display_mode *modes = NULL;
 	struct drm_display_mode drm_mode;
 	int rc, i;
@@ -549,21 +575,14 @@
 	rc = dsi_display_get_mode_count(display, &count);
 	if (rc) {
 		pr_err("failed to get num of modes, rc=%d\n", rc);
-		goto error;
-	}
-
-	size = count * sizeof(*modes);
-	modes = kzalloc(size,  GFP_KERNEL);
-	if (!modes) {
-		count = 0;
 		goto end;
 	}
 
-	rc = dsi_display_get_modes(display, modes);
+	rc = dsi_display_get_modes(display, &modes);
 	if (rc) {
 		pr_err("failed to get modes, rc=%d\n", rc);
 		count = 0;
-		goto error;
+		goto end;
 	}
 
 	for (i = 0; i < count; i++) {
@@ -577,14 +596,12 @@
 			       drm_mode.hdisplay,
 			       drm_mode.vdisplay);
 			count = -ENOMEM;
-			goto error;
+			goto end;
 		}
 		m->width_mm = connector->display_info.width_mm;
 		m->height_mm = connector->display_info.height_mm;
 		drm_mode_probed_add(connector, m);
 	}
-error:
-	kfree(modes);
 end:
 	pr_debug("MODE COUNT =%d\n\n", count);
 	return count;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index d0cb51b..133b4d5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1669,8 +1669,14 @@
 	panel->ulps_enabled =
 		of_property_read_bool(of_node, "qcom,ulps-enabled");
 
-	if (panel->ulps_enabled)
-		pr_debug("ulps_enabled:%d\n", panel->ulps_enabled);
+	pr_info("%s: ulps feature %s\n", __func__,
+		(panel->ulps_enabled ? "enabled" : "disabled"));
+
+	panel->ulps_suspend_enabled =
+		of_property_read_bool(of_node, "qcom,suspend-ulps-enabled");
+
+	pr_info("%s: ulps during suspend feature %s\n", __func__,
+		(panel->ulps_suspend_enabled ? "enabled" : "disabled"));
 
 	panel->te_using_watchdog_timer = of_property_read_bool(of_node,
 					"qcom,mdss-dsi-te-using-wd");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index ea67f45..06199f4 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -170,6 +170,7 @@
 
 	bool lp11_init;
 	bool ulps_enabled;
+	bool ulps_suspend_enabled;
 	bool allow_phy_power_off;
 
 	bool panel_initialized;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index 197d448..2567f04 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -663,7 +663,7 @@
 			}
 		}
 	} else {
-		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_OFF &&
+		if (dsi_phy->dsi_phy_state == DSI_PHY_ENGINE_ON &&
 				dsi_phy->regulator_required) {
 			rc = dsi_pwr_enable_regulator(
 				&dsi_phy->pwr_info.phy_pwr, false);
@@ -955,6 +955,14 @@
 	phy->regulator_required = clk_freq->byte_clk_rate >
 		(phy->regulator_min_datarate_bps / BITS_PER_BYTE);
 
+	/*
+	 * DSI PLL needs 0p9 LDO1A for powering DSI PLL block.
+	 * PLL driver can vote for this regulator in PLL driver file, but for
+	 * the usecase where we come out of idle(static screen), if PLL and
+	 * PHY vote for regulator, there will be performance delays as both
+	 * votes go through RPM to enable regulators.
+	 */
+	phy->regulator_required = true;
 	pr_debug("[%s] lane_datarate=%u min_datarate=%u required=%d\n",
 			phy->name,
 			clk_freq->byte_clk_rate * BITS_PER_BYTE,
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index ce84b7a..8e85c81 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -128,6 +128,20 @@
 			&property_state->dirty_list);
 }
 
+bool msm_property_is_dirty(
+		struct msm_property_info *info,
+		struct msm_property_state *property_state,
+		uint32_t property_idx)
+{
+	if (!info || !property_state || !property_state->values ||
+			property_idx >= info->property_count) {
+		DRM_ERROR("invalid argument(s), idx %u\n", property_idx);
+		return false;
+	}
+
+	return !list_empty(&property_state->values[property_idx].dirty_node);
+}
+
 /**
  * _msm_property_install_integer - install standard drm range property
  * @info: Pointer to property info container struct
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
index d257a8c..ecc1d0b 100644
--- a/drivers/gpu/drm/msm/msm_prop.h
+++ b/drivers/gpu/drm/msm/msm_prop.h
@@ -316,6 +316,19 @@
 		int property_idx);
 
 /**
+ * msm_property_is_dirty - check whether a property is dirty
+ *	Note: Intended for use during atomic_check before pop_dirty usage
+ * @info: Pointer to property info container struct
+ * @property_state: Pointer to property state container struct
+ * @property_idx: Property index
+ * Returns: true if dirty, false otherwise
+ */
+bool msm_property_is_dirty(
+		struct msm_property_info *info,
+		struct msm_property_state *property_state,
+		uint32_t property_idx);
+
+/**
  * msm_property_atomic_set - helper function for atomic property set callback
  * @info: Pointer to property info container struct
  * @property_state: Pointer to local state structure
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index fb1f578..0baba62 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -425,6 +425,20 @@
 	}
 }
 
+void sde_connector_helper_bridge_disable(struct drm_connector *connector)
+{
+	int rc;
+
+	if (!connector)
+		return;
+
+	/* trigger a final connector pre-kickoff for power mode updates */
+	rc = sde_connector_pre_kickoff(connector);
+	if (rc)
+		SDE_ERROR("conn %d final pre kickoff failed %d\n",
+				connector->base.id, rc);
+}
+
 static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
 {
 	struct drm_connector *connector;
@@ -1086,12 +1100,14 @@
 	c_state = to_sde_connector_state(state);
 
 	idx = msm_property_index(&c_conn->property_info, property);
-	if (idx == CONNECTOR_PROP_RETIRE_FENCE)
-		rc = sde_fence_create(&c_conn->retire_fence, val, 0);
-	else
+	if (idx == CONNECTOR_PROP_RETIRE_FENCE) {
+		*val = ~0;
+		rc = 0;
+	} else {
 		/* get cached property value */
 		rc = msm_property_atomic_get(&c_conn->property_info,
 				&c_state->property_state, property, val);
+	}
 
 	/* allow for custom override */
 	if (c_conn->ops.get_property)
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index a7bad7c..f6dee6b 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -220,10 +220,10 @@
 	int (*post_kickoff)(struct drm_connector *connector);
 
 	/**
-	 * send_hpd_event - send HPD uevent notification to userspace
+	 * post_open - calls connector to process post open functionalities
 	 * @display: Pointer to private display structure
 	 */
-	void (*send_hpd_event)(void *display);
+	void (*post_open)(void *display);
 
 	/**
 	 * check_status - check status of connected display panel
@@ -705,4 +705,11 @@
  * conn: Pointer to drm_connector struct
  */
 void sde_conn_timeline_status(struct drm_connector *conn);
+
+/**
+ * sde_connector_helper_bridge_disable - helper function for drm bridge disable
+ * @connector: Pointer to DRM connector object
+ */
+void sde_connector_helper_bridge_disable(struct drm_connector *connector);
+
 #endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index a7073cb..94c7f40 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -806,6 +806,21 @@
 	*crtc_roi = &crtc_state->crtc_roi;
 }
 
+bool sde_crtc_is_crtc_roi_dirty(struct drm_crtc_state *state)
+{
+	struct sde_crtc_state *cstate;
+	struct sde_crtc *sde_crtc;
+
+	if (!state || !state->crtc)
+		return false;
+
+	sde_crtc = to_sde_crtc(state->crtc);
+	cstate = to_sde_crtc_state(state);
+
+	return msm_property_is_dirty(&sde_crtc->property_info,
+			&cstate->property_state, CRTC_PROP_ROI_V1);
+}
+
 static int _sde_crtc_set_roi_v1(struct drm_crtc_state *state,
 		void __user *usr_ptr)
 {
@@ -894,6 +909,8 @@
 	struct sde_crtc_state *crtc_state;
 	struct sde_rect *crtc_roi;
 	int i, num_attached_conns = 0;
+	bool is_crtc_roi_dirty;
+	bool is_any_conn_roi_dirty;
 
 	if (!crtc || !state)
 		return -EINVAL;
@@ -902,7 +919,11 @@
 	crtc_state = to_sde_crtc_state(state);
 	crtc_roi = &crtc_state->crtc_roi;
 
+	is_crtc_roi_dirty = sde_crtc_is_crtc_roi_dirty(state);
+	is_any_conn_roi_dirty = false;
+
 	for_each_connector_in_state(state->state, conn, conn_state, i) {
+		struct sde_connector *sde_conn;
 		struct sde_connector_state *sde_conn_state;
 		struct sde_rect conn_roi;
 
@@ -917,8 +938,15 @@
 		}
 		++num_attached_conns;
 
+		sde_conn = to_sde_connector(conn_state->connector);
 		sde_conn_state = to_sde_connector_state(conn_state);
 
+		is_any_conn_roi_dirty = is_any_conn_roi_dirty ||
+				msm_property_is_dirty(
+						&sde_conn->property_info,
+						&sde_conn_state->property_state,
+						CONNECTOR_PROP_ROI_V1);
+
 		/*
 		 * current driver only supports same connector and crtc size,
 		 * but if support for different sizes is added, driver needs
@@ -938,8 +966,24 @@
 				conn_roi.w, conn_roi.h);
 	}
 
+	/*
+	 * Check against CRTC ROI and Connector ROI not being updated together.
+	 * This restriction should be relaxed when Connector ROI scaling is
+	 * supported.
+	 */
+	if (is_any_conn_roi_dirty != is_crtc_roi_dirty) {
+		SDE_ERROR("connector/crtc rois not updated together\n");
+		return -EINVAL;
+	}
+
 	sde_kms_rect_merge_rectangles(&crtc_state->user_roi_list, crtc_roi);
 
+	/* clear the ROI to null if it matches full screen anyways */
+	if (crtc_roi->x == 0 && crtc_roi->y == 0 &&
+			crtc_roi->w == state->adjusted_mode.hdisplay &&
+			crtc_roi->h == state->adjusted_mode.vdisplay)
+		memset(crtc_roi, 0, sizeof(*crtc_roi));
+
 	SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
 			crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
 	SDE_EVT32_VERBOSE(DRMID(crtc), crtc_roi->x, crtc_roi->y, crtc_roi->w,
@@ -1203,8 +1247,6 @@
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *sde_crtc_state;
 	struct msm_mode_info mode_info;
-	struct drm_connector *conn;
-	struct drm_connector_state *conn_state;
 	int rc, lm_idx, i;
 
 	if (!crtc || !state)
@@ -1213,6 +1255,7 @@
 	memset(&mode_info, 0, sizeof(mode_info));
 
 	sde_crtc = to_sde_crtc(crtc);
+	sde_crtc_state = to_sde_crtc_state(state);
 
 	if (hweight_long(state->connector_mask) != 1) {
 		SDE_ERROR("invalid connector count(%d) for crtc: %d\n",
@@ -1221,8 +1264,17 @@
 		return -EINVAL;
 	}
 
-	for_each_connector_in_state(state->state, conn, conn_state, i) {
-		rc = sde_connector_get_mode_info(conn_state, &mode_info);
+	/*
+	 * check connector array cached at modeset time since incoming atomic
+	 * state may not include any connectors if they aren't modified
+	 */
+	for (i = 0; i < ARRAY_SIZE(sde_crtc_state->connectors); i++) {
+		struct drm_connector *conn = sde_crtc_state->connectors[i];
+
+		if (!conn || !conn->state)
+			continue;
+
+		rc = sde_connector_get_mode_info(conn->state, &mode_info);
 		if (rc) {
 			SDE_ERROR("failed to get mode info\n");
 			return -EINVAL;
@@ -1233,7 +1285,6 @@
 	if (!mode_info.roi_caps.enabled)
 		return 0;
 
-	sde_crtc_state = to_sde_crtc_state(state);
 	if (sde_crtc_state->user_roi_list.num_rects >
 					mode_info.roi_caps.num_roi) {
 		SDE_ERROR("roi count is more than supported limit, %d > %d\n",
@@ -3370,12 +3421,13 @@
 
 		if (!master_ctl || master_ctl->idx > ctl->idx)
 			master_ctl = ctl;
+
+		if (ctl->ops.setup_sbuf_cfg)
+			ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
 	}
 
 	/* only update sbuf_cfg and flush for master ctl */
-	if (master_ctl && master_ctl->ops.setup_sbuf_cfg &&
-			master_ctl->ops.update_pending_flush) {
-		master_ctl->ops.setup_sbuf_cfg(master_ctl, &cstate->sbuf_cfg);
+	if (master_ctl && master_ctl->ops.update_pending_flush) {
 		master_ctl->ops.update_pending_flush(master_ctl, flush_mask);
 
 		/* explicitly trigger rotator for async modes */
@@ -3501,7 +3553,7 @@
 	}
 
 	/* reset both previous... */
-	for_each_plane_in_state(old_state->state, plane, pstate, i) {
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, old_state) {
 		if (pstate->crtc != crtc)
 			continue;
 
@@ -4506,6 +4558,17 @@
 					sde_crtc->name, plane->base.id, rc);
 			goto end;
 		}
+
+		/* identify attached planes that are not in the delta state */
+		if (!drm_atomic_get_existing_plane_state(state->state, plane)) {
+			rc = sde_plane_confirm_hw_rsvps(plane, pstate);
+			if (rc) {
+				SDE_ERROR("crtc%d confirmation hw failed %d\n",
+						crtc->base.id, rc);
+				goto end;
+			}
+		}
+
 		if (cnt >= SDE_PSTATES_MAX)
 			continue;
 
@@ -5214,7 +5277,8 @@
 
 	i = msm_property_index(&sde_crtc->property_info, property);
 	if (i == CRTC_PROP_OUTPUT_FENCE) {
-		ret = _sde_crtc_get_output_fence(crtc, state, val);
+		*val = ~0;
+		ret = 0;
 	} else {
 		ret = msm_property_atomic_get(&sde_crtc->property_info,
 			&cstate->property_state, property, val);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index c6b4afa..9501d0f 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -684,6 +684,14 @@
 void sde_crtc_get_crtc_roi(struct drm_crtc_state *state,
 		const struct sde_rect **crtc_roi);
 
+/**
+ * sde_crtc_is_crtc_roi_dirty - retrieve whether crtc_roi was updated this frame
+ *	Note: Only use during atomic_check since dirty properties may be popped
+ * @crtc_state: Pointer to crtc state
+ * Return: true if roi is dirty, false otherwise
+ */
+bool sde_crtc_is_crtc_roi_dirty(struct drm_crtc_state *state);
+
 /** sde_crt_get_secure_level - retrieve the secure level from the give state
  *	object, this is used to determine the secure state of the crtc
  * @crtc : Pointer to drm crtc structure
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index a7dffba..4d09642 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -780,24 +780,6 @@
 	}
 }
 
-static void _sde_encoder_adjust_mode(struct drm_connector *connector,
-		struct drm_display_mode *adj_mode)
-{
-	struct drm_display_mode *cur_mode;
-
-	if (!connector || !adj_mode)
-		return;
-
-	list_for_each_entry(cur_mode, &connector->modes, head) {
-		if (cur_mode->vdisplay == adj_mode->vdisplay &&
-			cur_mode->hdisplay == adj_mode->hdisplay &&
-			cur_mode->vrefresh == adj_mode->vrefresh) {
-			adj_mode->private = cur_mode->private;
-			adj_mode->private_flags |= cur_mode->private_flags;
-		}
-	}
-}
-
 static int sde_encoder_virt_atomic_check(
 		struct drm_encoder *drm_enc,
 		struct drm_crtc_state *crtc_state,
@@ -833,15 +815,6 @@
 
 	SDE_EVT32(DRMID(drm_enc), drm_atomic_crtc_needs_modeset(crtc_state));
 
-	/*
-	 * display drivers may populate private fields of the drm display mode
-	 * structure while registering possible modes of a connector with DRM.
-	 * These private fields are not populated back while DRM invokes
-	 * the mode_set callbacks. This module retrieves and populates the
-	 * private fields of the given mode.
-	 */
-	_sde_encoder_adjust_mode(conn_state->connector, adj_mode);
-
 	/* perform atomic check on the first physical encoder (master) */
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
@@ -2626,6 +2599,13 @@
 	SDE_ATRACE_BEGIN("encoder_underrun_callback");
 	atomic_inc(&phy_enc->underrun_cnt);
 	SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
+
+	trace_sde_encoder_underrun(DRMID(drm_enc),
+		atomic_read(&phy_enc->underrun_cnt));
+
+	SDE_DBG_CTRL("stop_ftrace");
+	SDE_DBG_CTRL("panic_underrun");
+
 	SDE_ATRACE_END("encoder_underrun_callback");
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 5d359be..a54e39a9 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -893,7 +893,7 @@
 		.mode_valid = dp_connector_mode_valid,
 		.get_info   = dp_connector_get_info,
 		.get_mode_info  = dp_connector_get_mode_info,
-		.send_hpd_event = dp_connector_send_hpd_event,
+		.post_open  = dp_connector_post_open,
 		.check_status = NULL,
 		.pre_kickoff  = dp_connector_pre_kickoff,
 	};
@@ -2100,8 +2100,8 @@
 
 		sde_conn = to_sde_connector(connector);
 
-		if (sde_conn->ops.send_hpd_event)
-			sde_conn->ops.send_hpd_event(sde_conn->display);
+		if (sde_conn->ops.post_open)
+			sde_conn->ops.post_open(sde_conn->display);
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 
@@ -2113,7 +2113,6 @@
 {
 	struct dsi_display_mode *modes = NULL;
 	u32 count = 0;
-	u32 size = 0;
 	int rc = 0;
 
 	rc = dsi_display_get_mode_count(display, &count);
@@ -2123,18 +2122,12 @@
 	}
 
 	SDE_DEBUG("num of modes = %d\n", count);
-	size = count * sizeof(*modes);
-	modes = kzalloc(size,  GFP_KERNEL);
-	if (!modes) {
-		count = 0;
-		goto end;
-	}
 
-	rc = dsi_display_get_modes(display, modes);
+	rc = dsi_display_get_modes(display, &modes);
 	if (rc) {
 		SDE_ERROR("failed to get modes, rc=%d\n", rc);
 		count = 0;
-		goto error;
+		return rc;
 	}
 
 	/* TODO; currently consider modes[0] as the preferred mode */
@@ -2144,9 +2137,6 @@
 		drm_mode->hdisplay, drm_mode->vdisplay);
 	drm_mode_set_name(drm_mode);
 	drm_mode_set_crtcinfo(drm_mode, 0);
-error:
-	kfree(modes);
-end:
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index ab48c4a..9f27286 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -2958,6 +2958,50 @@
 	return 0;
 }
 
+int sde_plane_confirm_hw_rsvps(struct drm_plane *plane,
+		const struct drm_plane_state *state)
+{
+	struct drm_crtc_state *cstate;
+	struct sde_plane_state *pstate;
+	struct sde_plane_rot_state *rstate;
+	struct sde_hw_blk *hw_blk;
+
+	if (!plane || !state) {
+		SDE_ERROR("invalid plane/state\n");
+		return -EINVAL;
+	}
+
+	pstate = to_sde_plane_state(state);
+	rstate = &pstate->rot;
+
+	/* cstate will be null if crtc is disconnected from plane */
+	cstate = _sde_plane_get_crtc_state((struct drm_plane_state *)state);
+	if (IS_ERR_OR_NULL(cstate)) {
+		SDE_ERROR("invalid crtc state\n");
+		return -EINVAL;
+	}
+
+	if (sde_plane_enabled((struct drm_plane_state *)state) &&
+			rstate->out_sbuf) {
+		SDE_DEBUG("plane%d.%d acquire rotator, fb %d\n",
+				plane->base.id, rstate->sequence_id,
+				state->fb ? state->fb->base.id : -1);
+
+		hw_blk = sde_crtc_res_get(cstate, SDE_HW_BLK_ROT,
+				(u64) state->fb);
+		if (!hw_blk) {
+			SDE_ERROR("plane%d.%d no available rotator, fb %d\n",
+					plane->base.id, rstate->sequence_id,
+					state->fb ? state->fb->base.id : -1);
+			SDE_EVT32(DRMID(plane), rstate->sequence_id,
+					state->fb ? state->fb->base.id : -1,
+					SDE_EVTLOG_ERROR);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
 /**
  * sde_plane_get_ctl_flush - get control flush for the given plane
  * @plane: Pointer to drm plane structure
@@ -3593,7 +3637,6 @@
 	struct drm_crtc *crtc;
 	struct drm_framebuffer *fb;
 	struct sde_rect src, dst;
-	const struct sde_rect *crtc_roi;
 	bool q16_data = true;
 	bool blend_enabled = true;
 	int idx;
@@ -3712,9 +3755,8 @@
 	_sde_plane_sspp_atomic_check_mode_changed(psde, state,
 								old_state);
 
-	/* re-program the output rects always in the case of partial update */
-	sde_crtc_get_crtc_roi(crtc->state, &crtc_roi);
-	if (!sde_kms_rect_is_null(crtc_roi))
+	/* re-program the output rects always if partial update roi changed */
+	if (sde_crtc_is_crtc_roi_dirty(crtc->state))
 		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
 
 	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
@@ -3747,6 +3789,8 @@
 
 	/* update roi config */
 	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
+		const struct sde_rect *crtc_roi;
+
 		POPULATE_RECT(&src, rstate->out_src_x, rstate->out_src_y,
 			rstate->out_src_w, rstate->out_src_h, q16_data);
 		POPULATE_RECT(&dst, state->crtc_x, state->crtc_y,
@@ -3773,6 +3817,7 @@
 		 * adjust layer mixer position of the sspp in the presence
 		 * of a partial update to the active lm origin
 		 */
+		sde_crtc_get_crtc_roi(crtc->state, &crtc_roi);
 		dst.x -= crtc_roi->x;
 		dst.y -= crtc_roi->y;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 5c1fff1..d1eb399 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -199,6 +199,15 @@
 bool is_sde_plane_virtual(struct drm_plane *plane);
 
 /**
+ * sde_plane_confirm_hw_rsvps - reserve an sbuf resource, if needed
+ * @plane: Pointer to DRM plane object
+ * @state: Pointer to plane state
+ * Returns: Zero on success
+ */
+int sde_plane_confirm_hw_rsvps(struct drm_plane *plane,
+		const struct drm_plane_state *state);
+
+/**
  * sde_plane_get_ctl_flush - get control flush mask
  * @plane:   Pointer to DRM plane object
  * @ctl: Pointer to control hardware
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index 19cda3e..ef4cdeb 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -125,6 +125,22 @@
 	TP_printk("crtc:%d", __entry->crtc_id)
 );
 
+TRACE_EVENT(sde_encoder_underrun,
+	TP_PROTO(u32 enc_id, u32 underrun_cnt),
+	TP_ARGS(enc_id, underrun_cnt),
+	TP_STRUCT__entry(
+			__field(u32, enc_id)
+			__field(u32, underrun_cnt)
+	),
+	TP_fast_assign(
+			__entry->enc_id = enc_id;
+			__entry->underrun_cnt = underrun_cnt;
+
+	),
+	TP_printk("enc:%d underrun_cnt:%d", __entry->enc_id,
+		__entry->underrun_cnt)
+);
+
 TRACE_EVENT(tracing_mark_write,
 	TP_PROTO(int pid, const char *name, bool trace_begin),
 	TP_ARGS(pid, name, trace_begin),
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 6b5be3b..c34b198 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -68,6 +68,11 @@
 #define REG_DUMP_ALIGN		16
 
 #define RSC_DEBUG_MUX_SEL_SDM845 9
+
+#define DBG_CTRL_STOP_FTRACE	BIT(0)
+#define DBG_CTRL_PANIC_UNDERRUN	BIT(1)
+#define DBG_CTRL_MAX			BIT(2)
+
 /**
  * struct sde_dbg_reg_offset - tracking for start and end of region
  * @start: start offset
@@ -198,6 +203,7 @@
 	struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
 	bool dump_all;
 	bool dsi_dbg_bus;
+	u32 debugfs_ctrl;
 } sde_dbg_base;
 
 /* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
@@ -2697,6 +2703,46 @@
 	}
 }
 
+void sde_dbg_ctrl(const char *name, ...)
+{
+	int i = 0;
+	va_list args;
+	char *blk_name = NULL;
+
+
+	/* no debugfs controlled events are enabled, just return */
+	if (!sde_dbg_base.debugfs_ctrl)
+		return;
+
+	va_start(args, name);
+
+	while ((blk_name = va_arg(args, char*))) {
+		if (i++ >= SDE_EVTLOG_MAX_DATA) {
+			pr_err("could not parse all dbg arguments\n");
+			break;
+		}
+
+		if (IS_ERR_OR_NULL(blk_name))
+			break;
+
+		if (!strcmp(blk_name, "stop_ftrace") &&
+				sde_dbg_base.debugfs_ctrl &
+				DBG_CTRL_STOP_FTRACE) {
+			pr_debug("tracing off\n");
+			tracing_off();
+		}
+
+		if (!strcmp(blk_name, "panic_underrun") &&
+				sde_dbg_base.debugfs_ctrl &
+				DBG_CTRL_PANIC_UNDERRUN) {
+			pr_debug("panic underrun\n");
+			panic("underrun");
+		}
+	}
+
+}
+
+
 /*
  * sde_dbg_debugfs_open - debugfs open handler for evtlog dump
  * @inode: debugfs inode
@@ -2760,6 +2806,82 @@
 	.write = sde_evtlog_dump_write,
 };
 
+/**
+ * sde_dbg_ctrl_read - debugfs read handler for debug ctrl read
+ * @file: file handler
+ * @buff: user buffer content for debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_ctrl_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	ssize_t len = 0;
+	char buf[24] = {'\0'};
+
+	if (!buff || !ppos)
+		return -EINVAL;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(buf, sizeof(buf), "0x%x\n", sde_dbg_base.debugfs_ctrl);
+	pr_debug("%s: ctrl:0x%x len:0x%zx\n",
+		__func__, sde_dbg_base.debugfs_ctrl, len);
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+		pr_err("error copying the buffer! count:0x%zx\n", count);
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+	return len;
+}
+
+/**
+ * sde_dbg_ctrl_write - debugfs read handler for debug ctrl write
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_ctrl_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	u32 dbg_ctrl = 0;
+	char buf[24];
+
+	if (!file) {
+		pr_err("DbgDbg: %s: error no file --\n", __func__);
+		return -EINVAL;
+	}
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	if (kstrtouint(buf, 0, &dbg_ctrl)) {
+		pr_err("%s: error in the number of bytes\n", __func__);
+		return -EFAULT;
+	}
+
+	pr_debug("dbg_ctrl_read:0x%x\n", dbg_ctrl);
+	sde_dbg_base.debugfs_ctrl = dbg_ctrl;
+
+	return count;
+}
+
+static const struct file_operations sde_dbg_ctrl_fops = {
+	.open = sde_dbg_debugfs_open,
+	.read = sde_dbg_ctrl_read,
+	.write = sde_dbg_ctrl_write,
+};
+
 /*
  * sde_evtlog_filter_show - read callback for evtlog filter
  * @s: pointer to seq_file object
@@ -3154,6 +3276,8 @@
 	if (!debugfs_root)
 		return -EINVAL;
 
+	debugfs_create_file("dbg_ctrl", 0600, debugfs_root, NULL,
+			&sde_dbg_ctrl_fops);
 	debugfs_create_file("dump", 0600, debugfs_root, NULL,
 			&sde_evtlog_fops);
 	debugfs_create_u32("enable", 0600, debugfs_root,
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 7b1b4c6..9efb893 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -151,6 +151,13 @@
 #define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(true, __func__, ##__VA_ARGS__, \
 		SDE_DBG_DUMP_DATA_LIMITER)
 
+/**
+ * SDE_DBG_EVT_CTRL - trigger a different driver events
+ *  event: event that trigger different behavior in the driver
+ */
+#define SDE_DBG_CTRL(...) sde_dbg_ctrl(__func__, ##__VA_ARGS__, \
+		SDE_DBG_DUMP_DATA_LIMITER)
+
 #if defined(CONFIG_DEBUG_FS)
 
 /**
@@ -249,6 +256,15 @@
 void sde_dbg_dump(bool queue_work, const char *name, ...);
 
 /**
+ * sde_dbg_ctrl - trigger specific actions for the driver with debugging
+ *		purposes. Those actions need to be enabled by the debugfs entry
+ *		so the driver executes those actions in the corresponding calls.
+ * @va_args:	list of actions to trigger
+ * Returns:	none
+ */
+void sde_dbg_ctrl(const char *name, ...);
+
+/**
  * sde_dbg_reg_register_base - register a hw register address section for later
  *	dumping. call this before calling sde_dbg_reg_register_dump_range
  *	to be able to specify sub-ranges within the base hw range.
@@ -386,6 +402,10 @@
 {
 }
 
+static inline void sde_dbg_ctrl(const char *name, ...)
+{
+}
+
 static inline int sde_dbg_reg_register_base(const char *name,
 		void __iomem *base, size_t max_offset)
 {
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 0c883fa..83dd3fb 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -1592,8 +1592,8 @@
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
 			OOB_SLUMBER_OPTION);
-	kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, bus_level);
-	kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, perf_idx);
+	kgsl_gmu_regwrite(device, A6XX_GMU_GX_VOTE_IDX, perf_idx);
+	kgsl_gmu_regwrite(device, A6XX_GMU_MX_VOTE_IDX, bus_level);
 
 	ret = a6xx_oob_set(adreno_dev, OOB_BOOT_SLUMBER_SET_MASK,
 			OOB_BOOT_SLUMBER_CHECK_MASK,
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 45fbd09..86438a9 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1100,8 +1100,10 @@
 	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
 	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
 				      !(val & TLBSTATUS_SACTIVE),
-				      0, TLB_LOOP_TIMEOUT))
+				      0, TLB_LOOP_TIMEOUT)) {
+		trace_tlbsync_timeout(smmu->dev, 0);
 		dev_err(smmu->dev, "TLBSYNC timeout!\n");
+	}
 }
 
 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
@@ -1132,11 +1134,15 @@
 static void arm_smmu_tlb_inv_context(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
+	struct device *dev = smmu_domain->dev;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 	void __iomem *base;
 	bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
+	ktime_t cur = ktime_get();
+
+	trace_tlbi_start(dev, 0);
 
 	if (stage1 && !use_tlbiall) {
 		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
@@ -1153,6 +1159,8 @@
 			       base + ARM_SMMU_GR0_TLBIVMID);
 		__arm_smmu_tlb_sync(smmu);
 	}
+
+	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
 }
 
 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
@@ -4453,18 +4461,18 @@
 	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
 				      !(val & TLBSTATUS_SACTIVE), 0, 100)) {
 		cur = ktime_get();
-		trace_errata_throttle_start(dev, 0);
+		trace_tlbi_throttle_start(dev, 0);
 
 		msm_bus_noc_throttle_wa(true);
 		if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
 				      !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
 			dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout");
-			trace_errata_failed(dev, 0);
+			trace_tlbsync_timeout(dev, 0);
 		}
 
 		msm_bus_noc_throttle_wa(false);
 
-		trace_errata_throttle_end(
+		trace_tlbi_throttle_end(
 				dev, ktime_us_delta(ktime_get(), cur));
 	}
 }
@@ -4481,7 +4489,7 @@
 	bool errata;
 
 	cur = ktime_get();
-	trace_errata_tlbi_start(dev, 0);
+	trace_tlbi_start(dev, 0);
 
 	errata = qsmmuv500_errata1_required(smmu_domain, data);
 	remote_spin_lock_irqsave(&data->errata1_lock, flags);
@@ -4500,7 +4508,7 @@
 	}
 	remote_spin_unlock_irqrestore(&data->errata1_lock, flags);
 
-	trace_errata_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
+	trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
 }
 
 static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c
index 07e5236..3f739a2 100644
--- a/drivers/iommu/msm_dma_iommu_mapping.c
+++ b/drivers/iommu/msm_dma_iommu_mapping.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,9 +27,11 @@
  * @dev - Device this is mapped to. Used as key
  * @sgl - The scatterlist for this mapping
  * @nents - Number of entries in sgl
- * @dir - The direction for the unmap.
+ * @dir - The direction for the map.
  * @meta - Backpointer to the meta this guy belongs to.
  * @ref - for reference counting this mapping
+ * @map_attrs - dma mapping attributes
+ * @buf_start_addr - address of start of buffer
  *
  * Represents a mapping of one dma_buf buffer to a particular device
  * and address range. There may exist other mappings of this buffer in
@@ -44,6 +46,8 @@
 	enum dma_data_direction dir;
 	struct msm_iommu_meta *meta;
 	struct kref ref;
+	unsigned long map_attrs;
+	dma_addr_t buf_start_addr;
 };
 
 struct msm_iommu_meta {
@@ -199,21 +203,43 @@
 		iommu_map->sgl.dma_address = sg->dma_address;
 		iommu_map->sgl.dma_length = sg->dma_length;
 		iommu_map->dev = dev;
+		iommu_map->dir = dir;
+		iommu_map->nents = nents;
+		iommu_map->map_attrs = attrs;
+		iommu_map->buf_start_addr = sg_phys(sg);
 		msm_iommu_add(iommu_meta, iommu_map);
 
 	} else {
-		sg->dma_address = iommu_map->sgl.dma_address;
-		sg->dma_length = iommu_map->sgl.dma_length;
+		if (nents == iommu_map->nents &&
+		    dir == iommu_map->dir &&
+		    attrs == iommu_map->map_attrs &&
+		    sg_phys(sg) == iommu_map->buf_start_addr) {
+			sg->dma_address = iommu_map->sgl.dma_address;
+			sg->dma_length = iommu_map->sgl.dma_length;
 
-		kref_get(&iommu_map->ref);
-		if (is_device_dma_coherent(dev))
-			/*
-			 * Ensure all outstanding changes for coherent
-			 * buffers are applied to the cache before any
-			 * DMA occurs.
-			 */
-			dmb(ish);
-		ret = nents;
+			kref_get(&iommu_map->ref);
+			if (is_device_dma_coherent(dev))
+				/*
+				 * Ensure all outstanding changes for coherent
+				 * buffers are applied to the cache before any
+				 * DMA occurs.
+				 */
+				dmb(ish);
+			ret = nents;
+		} else {
+			bool start_diff = (sg_phys(sg) !=
+					   iommu_map->buf_start_addr);
+
+			dev_err(dev, "lazy map request differs:\n"
+				"req dir:%d, original dir:%d\n"
+				"req nents:%d, original nents:%d\n"
+				"req map attrs:%lu, original map attrs:%lu\n"
+				"req buffer start address differs:%d\n",
+				dir, iommu_map->dir, nents,
+				iommu_map->nents, attrs, iommu_map->map_attrs,
+				start_diff);
+			ret = -EINVAL;
+		}
 	}
 	mutex_unlock(&iommu_meta->lock);
 	return ret;
@@ -321,13 +347,9 @@
 		goto out;
 	}
 
-	/*
-	 * Save direction for later use when we actually unmap.
-	 * Not used right now but in the future if we go to coherent mapping
-	 * API we might want to call the appropriate API when client asks
-	 * to unmap
-	 */
-	iommu_map->dir = dir;
+	if (dir != iommu_map->dir)
+		WARN(1, "%s: (%pK) dir:%d differs from original dir:%d\n",
+		     __func__, dma_buf, dir, iommu_map->dir);
 
 	kref_put(&iommu_map->ref, msm_iommu_map_release);
 	mutex_unlock(&meta->lock);
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 0a1c2cf..6b872b9 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -501,11 +501,10 @@
 	mutex_unlock(&ctx->sync_mutex);
 
 	/* stop hw first */
-	if (ctx->ctxt_to_hw_map) {
+	if (ctx->hw_mgr_intf->hw_stop) {
 		stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
-		if (ctx->hw_mgr_intf->hw_stop)
-			ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
-				&stop);
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop);
 	}
 
 	/*
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index fc84d9d..f1dfc7c 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -596,19 +596,29 @@
 }
 
 static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
-	uint32_t client_handle, struct cam_axi_vote *axi_vote)
+	uint32_t client_handle, struct cam_axi_vote *client_axi_vote)
 {
+	struct cam_axi_vote axi_vote;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
 
-	if (!axi_vote || ((axi_vote->compressed_bw == 0) &&
-		(axi_vote->uncompressed_bw == 0))) {
-		CAM_ERR(CAM_CPAS, "Invalid vote, client_handle=%d",
+	if (!client_axi_vote) {
+		CAM_ERR(CAM_CPAS, "Invalid arg client_handle=%d",
 			client_handle);
 		return -EINVAL;
 	}
 
+	axi_vote = *client_axi_vote;
+
+	if ((axi_vote.compressed_bw == 0) &&
+		(axi_vote.uncompressed_bw == 0)) {
+		CAM_DBG(CAM_CPAS, "0 vote from client_handle=%d",
+			client_handle);
+		axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	}
+
 	if (!CAM_CPAS_CLIENT_VALID(client_indx))
 		return -EINVAL;
 
@@ -622,12 +632,12 @@
 
 	CAM_DBG(CAM_CPAS,
 		"Client[%d] Requested compressed[%llu], uncompressed[%llu]",
-		client_indx, axi_vote->compressed_bw,
-		axi_vote->uncompressed_bw);
+		client_indx, axi_vote.compressed_bw,
+		axi_vote.uncompressed_bw);
 
 	rc = cam_cpas_util_apply_client_axi_vote(cpas_core,
 		cpas_hw->soc_info.soc_private,
-		cpas_core->cpas_client[client_indx], axi_vote);
+		cpas_core->cpas_client[client_indx], &axi_vote);
 
 unlock_client:
 	mutex_unlock(&cpas_core->client_mutex[client_indx]);
@@ -742,17 +752,27 @@
 }
 
 static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw,
-	uint32_t client_handle, struct cam_ahb_vote *ahb_vote)
+	uint32_t client_handle, struct cam_ahb_vote *client_ahb_vote)
 {
+	struct cam_ahb_vote ahb_vote;
 	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
 	int rc = 0;
 
-	if (!ahb_vote || (ahb_vote->vote.level == 0)) {
-		CAM_ERR(CAM_CPAS, "Invalid AHB vote, %pK", ahb_vote);
+	if (!client_ahb_vote) {
+		CAM_ERR(CAM_CPAS, "Invalid input arg");
 		return -EINVAL;
 	}
 
+	ahb_vote = *client_ahb_vote;
+
+	if (ahb_vote.vote.level == 0) {
+		CAM_DBG(CAM_CPAS, "0 ahb vote from client %d",
+			client_handle);
+		ahb_vote.type = CAM_VOTE_ABSOLUTE;
+		ahb_vote.vote.level = CAM_SVS_VOTE;
+	}
+
 	if (!CAM_CPAS_CLIENT_VALID(client_indx))
 		return -EINVAL;
 
@@ -766,12 +786,12 @@
 
 	CAM_DBG(CAM_CPAS,
 		"client[%d] : type[%d], level[%d], freq[%ld], applied[%d]",
-		client_indx, ahb_vote->type, ahb_vote->vote.level,
-		ahb_vote->vote.freq,
+		client_indx, ahb_vote.type, ahb_vote.vote.level,
+		ahb_vote.vote.freq,
 		cpas_core->cpas_client[client_indx]->ahb_level);
 
 	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
-		cpas_core->cpas_client[client_indx], ahb_vote, NULL);
+		cpas_core->cpas_client[client_indx], &ahb_vote, NULL);
 
 unlock_client:
 	mutex_unlock(&cpas_core->client_mutex[client_indx]);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 340a1e2..6f997a2 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -300,7 +300,7 @@
 	hw_mgr_clk_info->base_clk = 0;
 	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
 		ctx_data = &hw_mgr->ctx_data[i];
-		if (ctx_data->in_use &&
+		if (ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED &&
 			ctx_data->icp_dev_acquire_info->dev_type == dev_type)
 			hw_mgr_clk_info->base_clk +=
 				ctx_data->clk_info.base_clk;
@@ -527,7 +527,7 @@
 	hw_mgr_clk_info->compressed_bw = 0;
 	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
 		ctx = &hw_mgr->ctx_data[i];
-		if (ctx->in_use &&
+		if (ctx->state == CAM_ICP_CTX_STATE_ACQUIRED &&
 			ctx->icp_dev_acquire_info->dev_type ==
 			ctx_data->icp_dev_acquire_info->dev_type) {
 			mutex_lock(&hw_mgr->hw_mgr_mutex);
@@ -905,6 +905,34 @@
 	return rc;
 }
 
+static int cam_icp_mgr_cleanup_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	struct hfi_frame_process_info *hfi_frame_process;
+	struct cam_hw_done_event_data buf_data;
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (!hfi_frame_process->request_id[i])
+			continue;
+		buf_data.request_id = hfi_frame_process->request_id[i];
+		ctx_data->ctxt_event_cb(ctx_data->context_priv,
+			false, &buf_data);
+		hfi_frame_process->request_id[i] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[i] > 0) {
+			CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+				ctx_data->hfi_frame_process.in_resource[i]);
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[i]);
+			ctx_data->hfi_frame_process.in_resource[i] = 0;
+		}
+		hfi_frame_process->fw_process_flag[i] = false;
+		clear_bit(i, ctx_data->hfi_frame_process.bitmap);
+	}
+
+	return 0;
+}
+
 static int cam_icp_mgr_handle_frame_process(uint32_t *msg_ptr, int flag)
 {
 	int i;
@@ -926,6 +954,15 @@
 		(void *)ctx_data->context_priv, request_id);
 
 	mutex_lock(&ctx_data->ctx_mutex);
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_WARN(CAM_ICP,
+			"ctx with id: %u not in the right state : %x",
+			ctx_data->ctx_id,
+			ctx_data->state);
+		return 0;
+	}
+
 	hfi_frame_process = &ctx_data->hfi_frame_process;
 	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
 		if (hfi_frame_process->request_id[i] == request_id)
@@ -1056,9 +1093,12 @@
 		return -EINVAL;
 	}
 
-	ctx_data->fw_handle = create_handle_ack->fw_handle;
-	CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
-	complete(&ctx_data->wait_complete);
+	if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE) {
+		ctx_data->fw_handle = create_handle_ack->fw_handle;
+		CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
+		complete(&ctx_data->wait_complete);
+	} else
+		CAM_WARN(CAM_ICP, "Timeout failed to create fw handle");
 
 	return 0;
 }
@@ -1080,7 +1120,8 @@
 		return -EINVAL;
 	}
 
-	complete(&ctx_data->wait_complete);
+	if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)
+		complete(&ctx_data->wait_complete);
 
 	return 0;
 }
@@ -1134,7 +1175,10 @@
 		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
 		ctx_data =
 			(struct cam_icp_hw_ctx_data *)ioconfig_ack->user_data1;
-		complete(&ctx_data->wait_complete);
+		if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
+			(ctx_data->state == CAM_ICP_CTX_STATE_IN_USE))
+			complete(&ctx_data->wait_complete);
+
 		break;
 	default:
 		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
@@ -1474,8 +1518,8 @@
 
 	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
 		mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
-		if (hw_mgr->ctx_data[i].in_use == false) {
-			hw_mgr->ctx_data[i].in_use = true;
+		if (hw_mgr->ctx_data[i].state == CAM_ICP_CTX_STATE_FREE) {
+			hw_mgr->ctx_data[i].state = CAM_ICP_CTX_STATE_IN_USE;
 			mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
 			break;
 		}
@@ -1487,7 +1531,7 @@
 
 static void cam_icp_mgr_put_ctx(struct cam_icp_hw_ctx_data *ctx_data)
 {
-	ctx_data->in_use = false;
+	ctx_data->state = CAM_ICP_CTX_STATE_FREE;
 }
 
 static int cam_icp_mgr_abort_handle(
@@ -1631,14 +1675,21 @@
 
 	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
-	if (!hw_mgr->ctx_data[ctx_id].in_use) {
+	if (hw_mgr->ctx_data[ctx_id].state !=
+		CAM_ICP_CTX_STATE_ACQUIRED) {
 		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_WARN(CAM_ICP,
+			"ctx with id: %d not in right state to release: %d",
+			ctx_id, hw_mgr->ctx_data[ctx_id].state);
 		return 0;
 	}
+	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr,
+		&hw_mgr->ctx_data[ctx_id], 0);
+	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_RELEASE;
 	cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
+	cam_icp_mgr_cleanup_ctx(&hw_mgr->ctx_data[ctx_id]);
 
-	hw_mgr->ctx_data[ctx_id].in_use = false;
 	hw_mgr->ctx_data[ctx_id].fw_handle = 0;
 	hw_mgr->ctx_data[ctx_id].scratch_mem_size = 0;
 	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
@@ -1651,6 +1702,7 @@
 	hw_mgr->ctxt_cnt--;
 	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
 	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_FREE;
 	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
@@ -2005,7 +2057,7 @@
 	struct cam_hw_done_event_data buf_data;
 
 	buf_data.request_id = *(uint64_t *)config_args->priv;
-	ctx_data->ctxt_event_cb(ctx_data->context_priv, true, &buf_data);
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
 
 	ctx_data->hfi_frame_process.request_id[idx] = 0;
 	ctx_data->hfi_frame_process.fw_process_flag[idx] = false;
@@ -2068,9 +2120,10 @@
 
 	ctx_data = config_args->ctxt_to_hw_map;
 	mutex_lock(&ctx_data->ctx_mutex);
-	if (!ctx_data->in_use) {
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
 		mutex_unlock(&ctx_data->ctx_mutex);
-		CAM_ERR(CAM_ICP, "ctx is not in use");
+		CAM_ERR(CAM_ICP, "ctx id :%u is not in use",
+			ctx_data->ctx_id);
 		return -EINVAL;
 	}
 
@@ -2343,9 +2396,10 @@
 
 	ctx_data = prepare_args->ctxt_to_hw_map;
 	mutex_lock(&ctx_data->ctx_mutex);
-	if (!ctx_data->in_use) {
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
 		mutex_unlock(&ctx_data->ctx_mutex);
-		CAM_ERR(CAM_ICP, "ctx is not in use");
+		CAM_ERR(CAM_ICP, "ctx id: %u is not in use",
+			ctx_data->ctx_id);
 		return -EINVAL;
 	}
 
@@ -2461,7 +2515,7 @@
 	}
 
 	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
-	if (!hw_mgr->ctx_data[ctx_id].in_use) {
+	if (hw_mgr->ctx_data[ctx_id].state != CAM_ICP_CTX_STATE_ACQUIRED) {
 		CAM_DBG(CAM_ICP, "ctx is not in use: %d", ctx_id);
 		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
 		return -EINVAL;
@@ -2804,6 +2858,7 @@
 		goto copy_to_user_failed;
 
 	cam_icp_ctx_clk_info_init(ctx_data);
+	ctx_data->state = CAM_ICP_CTX_STATE_ACQUIRED;
 	mutex_unlock(&ctx_data->ctx_mutex);
 	CAM_DBG(CAM_ICP, "scratch size = %x fw_handle = %x",
 			(unsigned int)icp_dev_acquire_info->scratch_mem_size,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index 321f10c..ab19f45 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -58,6 +58,15 @@
 #define CPAS_IPE1_BIT           0x2000
 #define CPAS_BPS_BIT            0x400
 
+#define ICP_PWR_CLP_BPS         0x00000001
+#define ICP_PWR_CLP_IPE0        0x00010000
+#define ICP_PWR_CLP_IPE1        0x00020000
+
+#define CAM_ICP_CTX_STATE_FREE      0x0
+#define CAM_ICP_CTX_STATE_IN_USE    0x1
+#define CAM_ICP_CTX_STATE_ACQUIRED  0x2
+#define CAM_ICP_CTX_STATE_RELEASE   0x3
+
 /**
  * struct icp_hfi_mem_info
  * @qtbl: Memory info of queue table
@@ -154,7 +163,7 @@
  * @acquire_dev_cmd: Acquire command
  * @icp_dev_acquire_info: Acquire device info
  * @ctxt_event_cb: Context callback function
- * @in_use: Flag for context usage
+ * @state: context state
  * @role: Role of a context in case of chaining
  * @chain_ctx: Peer context
  * @hfi_frame_process: Frame process command
@@ -171,7 +180,7 @@
 	struct cam_acquire_dev_cmd acquire_dev_cmd;
 	struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
 	cam_hw_event_cb_func ctxt_event_cb;
-	bool in_use;
+	uint32_t state;
 	uint32_t role;
 	struct cam_icp_hw_ctx_data *chain_ctx;
 	struct hfi_frame_process_info hfi_frame_process;
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index cfe5071..01c0a02 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -66,6 +66,73 @@
 	return 0;
 }
 
+static int __cam_isp_ctx_enqueue_init_request(
+	struct cam_context *ctx, struct cam_ctx_request *req)
+{
+	int rc = 0;
+	struct cam_ctx_request           *req_old;
+	struct cam_isp_ctx_req           *req_isp_old;
+	struct cam_isp_ctx_req           *req_isp_new;
+
+	spin_lock_bh(&ctx->lock);
+	if (list_empty(&ctx->pending_req_list)) {
+		list_add_tail(&req->list, &ctx->pending_req_list);
+		CAM_DBG(CAM_ISP, "INIT packet added req id= %d",
+			req->request_id);
+		goto end;
+	}
+
+	req_old = list_first_entry(&ctx->pending_req_list,
+		struct cam_ctx_request, list);
+	req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
+	req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
+	if (req_isp_old->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) {
+		if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
+			CAM_ISP_CTX_CFG_MAX) {
+			CAM_WARN(CAM_ISP, "Can not merge INIT pkt");
+			rc = -ENOMEM;
+		}
+
+		if (req_isp_old->num_fence_map_out != 0 ||
+			req_isp_old->num_fence_map_in != 0) {
+			CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence");
+			rc = -EINVAL;
+		}
+
+		if (!rc) {
+			memcpy(req_isp_old->fence_map_out,
+				req_isp_new->fence_map_out,
+				sizeof(req_isp_new->fence_map_out[0])*
+				req_isp_new->num_fence_map_out);
+			req_isp_old->num_fence_map_out =
+				req_isp_new->num_fence_map_out;
+
+			memcpy(req_isp_old->fence_map_in,
+				req_isp_new->fence_map_in,
+				sizeof(req_isp_new->fence_map_in[0])*
+				req_isp_new->num_fence_map_in);
+			req_isp_old->num_fence_map_in =
+				req_isp_new->num_fence_map_in;
+
+			memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
+				req_isp_new->cfg,
+				sizeof(req_isp_new->cfg[0])*
+				req_isp_new->num_cfg);
+			req_isp_old->num_cfg += req_isp_new->num_cfg;
+
+			list_add_tail(&req->list, &ctx->free_req_list);
+		}
+	} else {
+		CAM_WARN(CAM_ISP,
+			"Received Update pkt before INIT pkt. req_id= %lld",
+			req->request_id);
+		rc = -EINVAL;
+	}
+end:
+	spin_unlock_bh(&ctx->lock);
+	return rc;
+}
+
 static const char *__cam_isp_resource_handle_id_to_type
 	(uint32_t resource_handle)
 {
@@ -1028,9 +1095,9 @@
 	struct cam_ctx_request           *req_temp;
 	struct cam_isp_ctx_req           *req_isp;
 
-	spin_lock(&ctx->lock);
+	spin_lock_bh(&ctx->lock);
 	if (list_empty(req_list)) {
-		spin_unlock(&ctx->lock);
+		spin_unlock_bh(&ctx->lock);
 		CAM_DBG(CAM_ISP, "request list is empty");
 		return 0;
 	}
@@ -1064,7 +1131,7 @@
 			break;
 		}
 	}
-	spin_unlock(&ctx->lock);
+	spin_unlock_bh(&ctx->lock);
 
 	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
 		!cancel_req_id_found)
@@ -1098,10 +1165,10 @@
 	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
 
 	/* if nothing is in pending req list, change state to acquire*/
-	spin_lock(&ctx->lock);
+	spin_lock_bh(&ctx->lock);
 	if (list_empty(&ctx->pending_req_list))
 		ctx->state = CAM_CTX_ACQUIRED;
-	spin_unlock(&ctx->lock);
+	spin_unlock_bh(&ctx->lock);
 
 	trace_cam_context_state("ISP", ctx);
 
@@ -1627,6 +1694,7 @@
 	struct cam_req_mgr_add_request    add_req;
 	struct cam_isp_context           *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_isp_prepare_hw_update_data    hw_update_data;
 
 	CAM_DBG(CAM_ISP, "get free request object......");
 
@@ -1677,6 +1745,7 @@
 	cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
 	cfg.out_map_entries = req_isp->fence_map_out;
 	cfg.in_map_entries = req_isp->fence_map_in;
+	cfg.priv  = &hw_update_data;
 
 	CAM_DBG(CAM_ISP, "try to prepare config packet......");
 
@@ -1691,31 +1760,48 @@
 	req_isp->num_fence_map_out = cfg.num_out_map_entries;
 	req_isp->num_fence_map_in = cfg.num_in_map_entries;
 	req_isp->num_acked = 0;
+	req_isp->packet_opcode_type = hw_update_data.packet_opcode_type;
 
 	CAM_DBG(CAM_ISP, "num_entry: %d, num fence out: %d, num fence in: %d",
-		 req_isp->num_cfg, req_isp->num_fence_map_out,
+		req_isp->num_cfg, req_isp->num_fence_map_out,
 		req_isp->num_fence_map_in);
 
 	req->request_id = packet->header.request_id;
 	req->status = 1;
 
-	if (ctx->state >= CAM_CTX_READY && ctx->ctx_crm_intf->add_req) {
-		add_req.link_hdl = ctx->link_hdl;
-		add_req.dev_hdl  = ctx->dev_hdl;
-		add_req.req_id   = req->request_id;
-		add_req.skip_before_applying = 0;
-		rc = ctx->ctx_crm_intf->add_req(&add_req);
-		if (rc) {
-			CAM_ERR(CAM_ISP, "Error: Adding request id=%llu",
-				req->request_id);
-				goto free_req;
+	CAM_DBG(CAM_ISP, "Packet request id 0x%llx packet opcode:%d",
+		packet->header.request_id, req_isp->packet_opcode_type);
+
+	if (req_isp->packet_opcode_type == CAM_ISP_PACKET_INIT_DEV) {
+		if (ctx->state < CAM_CTX_ACTIVATED) {
+			rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
+			if (rc)
+				CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed");
+		} else {
+			rc = -EINVAL;
+			CAM_ERR(CAM_ISP, "Recevied INIT pkt in wrong state");
+		}
+	} else {
+		if (ctx->state >= CAM_CTX_READY && ctx->ctx_crm_intf->add_req) {
+			add_req.link_hdl = ctx->link_hdl;
+			add_req.dev_hdl  = ctx->dev_hdl;
+			add_req.req_id   = req->request_id;
+			add_req.skip_before_applying = 0;
+			rc = ctx->ctx_crm_intf->add_req(&add_req);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "Add req failed: req id=%llu",
+					req->request_id);
+			} else {
+				__cam_isp_ctx_enqueue_request_in_order(
+					ctx, req);
+			}
+		} else {
+			rc = -EINVAL;
+			CAM_ERR(CAM_ISP, "Recevied Update in wrong state");
 		}
 	}
-
-	CAM_DBG(CAM_ISP, "Packet request id 0x%llx",
-		packet->header.request_id);
-
-	__cam_isp_ctx_enqueue_request_in_order(ctx, req);
+	if (rc)
+		goto free_req;
 
 	CAM_DBG(CAM_ISP, "Preprocessing Config %lld successful",
 		req->request_id);
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index 347290c..88ebc03 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -81,6 +81,8 @@
  *                         the request has been completed.
  * @bubble_report:         Flag to track if bubble report is active on
  *                         current request
+ * @packet_opcode_type:    Request packet opcode type,
+ *                         i.e. INIT packet or update packet
  *
  */
 struct cam_isp_ctx_req {
@@ -94,6 +96,7 @@
 	uint32_t                         num_fence_map_in;
 	uint32_t                         num_acked;
 	int32_t                          bubble_report;
+	uint32_t                         packet_opcode_type;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 0362758..d407771 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1139,7 +1139,8 @@
 static int cam_ife_mgr_acquire_cid_res(
 	struct cam_ife_hw_mgr_ctx          *ife_ctx,
 	struct cam_isp_in_port_info        *in_port,
-	uint32_t                           *cid_res_id)
+	uint32_t                           *cid_res_id,
+	int                                 pixel_count)
 {
 	int rc = -1;
 	int i, j;
@@ -1159,6 +1160,7 @@
 
 	csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
 	csid_acquire.in_port = in_port;
+	csid_acquire.pixel_count = pixel_count;
 
 	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
 		if (!ife_hw_mgr->csid_devices[i])
@@ -1239,13 +1241,6 @@
 		goto err;
 	}
 
-	/* get cid resource */
-	rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id);
-	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
-		goto err;
-	}
-
 	cam_ife_hw_mgr_preprocess_out_port(ife_ctx, in_port,
 		&pixel_count, &rdi_count);
 
@@ -1254,6 +1249,14 @@
 		return -EINVAL;
 	}
 
+	/* get cid resource */
+	rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id,
+		pixel_count);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
+		goto err;
+	}
+
 	if (pixel_count) {
 		/* get ife csid IPP resrouce */
 		rc = cam_ife_hw_mgr_acquire_res_ife_csid_ipp(ife_ctx, in_port,
@@ -2349,11 +2352,12 @@
 	int rc = 0;
 	struct cam_hw_prepare_update_args *prepare =
 		(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
-	struct cam_ife_hw_mgr_ctx        *ctx;
-	struct cam_ife_hw_mgr            *hw_mgr;
-	struct cam_kmd_buf_info           kmd_buf;
-	uint32_t                          i;
-	bool                              fill_fence = true;
+	struct cam_ife_hw_mgr_ctx               *ctx;
+	struct cam_ife_hw_mgr                   *hw_mgr;
+	struct cam_kmd_buf_info                  kmd_buf;
+	uint32_t                                 i;
+	bool                                     fill_fence = true;
+	struct cam_isp_prepare_hw_update_data   *prepare_hw_data;
 
 	if (!hw_mgr_priv || !prepare_hw_update_args) {
 		CAM_ERR(CAM_ISP, "Invalid args");
@@ -2439,9 +2443,14 @@
 	 * bits to get the type of operation since UMD definition
 	 * of op_code has some difference from KMD.
 	 */
+	prepare_hw_data = (struct cam_isp_prepare_hw_update_data  *)
+		prepare->priv;
 	if (((prepare->packet->header.op_code + 1) & 0xF) ==
-		CAM_ISP_PACKET_INIT_DEV)
+		CAM_ISP_PACKET_INIT_DEV) {
+		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_INIT_DEV;
 		goto end;
+	} else
+		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_UPDATE_DEV;
 
 	/* add reg update commands */
 	for (i = 0; i < ctx->num_base; i++) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
index 0480cd3..cf044eb 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -46,6 +46,18 @@
 	CAM_ISP_HW_ERROR_MAX,
 };
 
+/**
+ * struct cam_isp_prepare_hw_update_data - hw prepare data
+ *
+ * @packet_opcode_type:     Opcode taken from the packet header;
+ *                   identifies whether the packet is an init
+ *                   packet or an update packet
+ *
+ */
+struct cam_isp_prepare_hw_update_data {
+	uint32_t      packet_opcode_type;
+};
+
 
 /**
  * struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 9a368cf..daf515a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -287,7 +287,7 @@
 
 static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
 	struct cam_isp_resource_node **res, int32_t vc, uint32_t dt,
-	uint32_t res_type)
+	uint32_t res_type, int pixel_count)
 {
 	int  rc = 0;
 	struct cam_ife_csid_cid_data    *cid_data;
@@ -305,7 +305,8 @@
 					break;
 				}
 			} else {
-				if (cid_data->vc == vc && cid_data->dt == dt) {
+				if (cid_data->vc == vc && cid_data->dt == dt &&
+					cid_data->pixel_count == pixel_count) {
 					cid_data->cnt++;
 					*res = &csid_hw->cid_res[i];
 					break;
@@ -329,6 +330,7 @@
 				cid_data->vc  = vc;
 				cid_data->dt  = dt;
 				cid_data->cnt = 1;
+				cid_data->pixel_count = pixel_count;
 				csid_hw->cid_res[j].res_state =
 					CAM_ISP_RESOURCE_STATE_RESERVED;
 				*res = &csid_hw->cid_res[j];
@@ -568,6 +570,7 @@
 	struct cam_csid_hw_reserve_resource_args  *cid_reserv)
 {
 	int rc = 0;
+	uint32_t i;
 	struct cam_ife_csid_cid_data       *cid_data;
 
 	CAM_DBG(CAM_ISP,
@@ -725,6 +728,7 @@
 		cid_data->vc = cid_reserv->in_port->vc;
 		cid_data->dt = cid_reserv->in_port->dt;
 		cid_data->cnt = 1;
+		cid_data->pixel_count = cid_reserv->pixel_count;
 		cid_reserv->node_res = &csid_hw->cid_res[0];
 		csid_hw->csi2_reserve_cnt++;
 
@@ -733,9 +737,27 @@
 			csid_hw->hw_intf->hw_idx,
 			cid_reserv->node_res->res_id);
 	} else {
-		rc = cam_ife_csid_cid_get(csid_hw, &cid_reserv->node_res,
-			cid_reserv->in_port->vc, cid_reserv->in_port->dt,
-			cid_reserv->in_port->res_type);
+		if (cid_reserv->pixel_count > 0) {
+			for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+				cid_data = (struct cam_ife_csid_cid_data *)
+					csid_hw->cid_res[i].res_priv;
+				if ((csid_hw->cid_res[i].res_state >=
+					CAM_ISP_RESOURCE_STATE_RESERVED) &&
+					cid_data->pixel_count > 0) {
+					CAM_DBG(CAM_ISP,
+						"CSID:%d IPP resource is full");
+					rc = -EINVAL;
+					goto end;
+				}
+			}
+		}
+
+		rc = cam_ife_csid_cid_get(csid_hw,
+			&cid_reserv->node_res,
+			cid_reserv->in_port->vc,
+			cid_reserv->in_port->dt,
+			cid_reserv->in_port->res_type,
+			cid_reserv->pixel_count);
 		/* if success then increment the reserve count */
 		if (!rc) {
 			if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
index 07217f5..b400d14 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -356,10 +356,11 @@
 /**
  * struct cam_ife_csid_cid_data- cid configuration private data
  *
- * @vc:      virtual channel
- * @dt:      Data type
- * @cnt:     cid resource reference count.
- * @tpg_set: tpg used for this cid resource
+ * @vc:          Virtual channel
+ * @dt:          Data type
+ * @cnt:         Cid resource reference count.
+ * @tpg_set:     Tpg used for this cid resource
+ * @pixel_count: Pixel resource connected
  *
  */
 struct cam_ife_csid_cid_data {
@@ -367,6 +368,7 @@
 	uint32_t                     dt;
 	uint32_t                     cnt;
 	uint32_t                     tpg_set;
+	int                          pixel_count;
 };
 
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
index 37e0ce3..df97bd6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -61,20 +61,21 @@
 
 /**
  * struct cam_csid_hw_reserve_resource- hw reserve
- * @res_type :  reource type CID or PATH
- *              if type is CID, then res_id is not required,
- *              if type is path then res id need to be filled
- * @res_id  :  res id to be reserved
- * @in_port : input port resource info
- * @out_port: output port resource info, used for RDI path only
- * @sync_mode : Sync mode
- *              Sync mode could be master, slave or none
- * @master_idx: master device index to be configured in the slave path
- *              for master path, this value is not required.
- *              only slave need to configure the master index value
- * @cid:        cid (DT_ID) value for path, this is applicable for CSID path
- *              reserve
- * @node_res :  reserved resource structure pointer
+ * @res_type :    Resource type CID or PATH
+ *                if type is CID, then res_id is not required,
+ *                if type is path then res id need to be filled
+ * @res_id  :     Resource id to be reserved
+ * @in_port :     Input port resource info
+ * @out_port:     Output port resource info, used for RDI path only
+ * @sync_mode:    Sync mode
+ *                Sync mode could be master, slave or none
+ * @master_idx:   Master device index to be configured in the slave path
+ *                for master path, this value is not required.
+ *                only slave need to configure the master index value
+ * @cid:          cid (DT_ID) value for path, this is applicable for CSID path
+ *                reserve
+ * @node_res :    Reserved resource structure pointer
+ * @pixel_count:  Number of pixel resources
  *
  */
 struct cam_csid_hw_reserve_resource_args {
@@ -86,6 +87,7 @@
 	uint32_t                                  master_idx;
 	uint32_t                                  cid;
 	struct cam_isp_resource_node             *node_res;
+	int                                       pixel_count;
 };
 
 /**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index e94bb62..a2fbbd7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -2389,49 +2389,6 @@
 				wm_data->index, reg_val_pair[j-1]);
 		}
 
-		if (wm_data->framedrop_pattern != io_cfg->framedrop_pattern ||
-			!wm_data->hfr_cfg_done) {
-			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
-				wm_data->hw_regs->framedrop_pattern,
-				io_cfg->framedrop_pattern);
-			wm_data->framedrop_pattern = io_cfg->framedrop_pattern;
-			CAM_DBG(CAM_ISP, "WM %d framedrop pattern 0x%x",
-				wm_data->index, reg_val_pair[j-1]);
-		}
-
-
-		if (wm_data->framedrop_period != io_cfg->framedrop_period ||
-			!wm_data->hfr_cfg_done) {
-			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
-				wm_data->hw_regs->framedrop_period,
-				io_cfg->framedrop_period);
-			wm_data->framedrop_period = io_cfg->framedrop_period;
-			CAM_DBG(CAM_ISP, "WM %d framedrop period 0x%x",
-				wm_data->index, reg_val_pair[j-1]);
-		}
-
-		if (wm_data->irq_subsample_period != io_cfg->subsample_period
-			|| !wm_data->hfr_cfg_done) {
-			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
-				wm_data->hw_regs->irq_subsample_period,
-				io_cfg->subsample_period);
-			wm_data->irq_subsample_period =
-				io_cfg->subsample_period;
-			CAM_DBG(CAM_ISP, "WM %d irq subsample period 0x%x",
-				wm_data->index, reg_val_pair[j-1]);
-		}
-
-		if (wm_data->irq_subsample_pattern != io_cfg->subsample_pattern
-			|| !wm_data->hfr_cfg_done) {
-			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
-				wm_data->hw_regs->irq_subsample_pattern,
-				io_cfg->subsample_pattern);
-			wm_data->irq_subsample_pattern =
-				io_cfg->subsample_pattern;
-			CAM_DBG(CAM_ISP, "WM %d irq subsample pattern 0x%x",
-				wm_data->index, reg_val_pair[j-1]);
-		}
-
 		if (wm_data->en_ubwc) {
 			if (!wm_data->hw_regs->ubwc_regs) {
 				CAM_ERR(CAM_ISP,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 1b8cdf3..f166025 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -414,7 +414,7 @@
 
 	rc = cam_vfe_top_set_axi_bw_vote(top_priv);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "set_hw_clk_rate failed, rc=%d", rc);
+		CAM_ERR(CAM_ISP, "set_axi_bw_vote failed, rc=%d", rc);
 		return rc;
 	}
 
@@ -449,15 +449,29 @@
 		rc = mux_res->stop(mux_res);
 	} else {
 		CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
-		rc = -EINVAL;
+		return -EINVAL;
 	}
 
 	if (!rc) {
 		for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
-			if (top_priv->mux_rsrc[i].res_id == mux_res->res_id)
+			if (top_priv->mux_rsrc[i].res_id == mux_res->res_id) {
 				top_priv->req_clk_rate[i] = 0;
-			top_priv->req_axi_vote[i].compressed_bw = 0;
-			top_priv->req_axi_vote[i].uncompressed_bw = 0;
+				top_priv->req_axi_vote[i].compressed_bw = 0;
+				top_priv->req_axi_vote[i].uncompressed_bw = 0;
+				break;
+			}
+		}
+
+		rc = cam_vfe_top_set_hw_clk_rate(top_priv);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "set_hw_clk_rate failed, rc=%d", rc);
+			return rc;
+		}
+
+		rc = cam_vfe_top_set_axi_bw_vote(top_priv);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "set_axi_bw_vote failed, rc=%d", rc);
+			return rc;
 		}
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
index 6fcd7f6..4589a22 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -63,6 +63,20 @@
 	return cam_context_buf_done_from_hw(ctx, done, evt_id);
 }
 
+static int __cam_jpeg_ctx_stop_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
 /* top state machine */
 static struct cam_ctx_ops
 	cam_jpeg_ctx_state_machine[CAM_CTX_STATE_MAX] = {
@@ -85,6 +99,7 @@
 		.ioctl_ops = {
 			.release_dev = __cam_jpeg_ctx_release_dev_in_acquired,
 			.config_dev = __cam_jpeg_ctx_config_dev_in_acquired,
+			.stop_dev = __cam_jpeg_ctx_stop_dev_in_acquired,
 		},
 		.crm_ops = { },
 		.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index df95100..e401549 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -80,11 +80,23 @@
 
 	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
 
+	mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+
+	p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+
+	if (hw_mgr->device_in_use[dev_type][0] == false ||
+		p_cfg_req == NULL) {
+		CAM_ERR(CAM_JPEG, "irq for old request %d", rc);
+		mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+		return -EINVAL;
+	}
+
 	irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
 	irq_cb.data = NULL;
 	irq_cb.b_set_cb = false;
 	if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
 		CAM_ERR(CAM_JPEG, "process_cmd null ");
+		mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
 		return -EINVAL;
 	}
 	rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
@@ -93,6 +105,7 @@
 		&irq_cb, sizeof(irq_cb));
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "CMD_SET_IRQ_CB failed %d", rc);
+		mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
 		return rc;
 	}
 
@@ -103,9 +116,7 @@
 			CAM_ERR(CAM_JPEG, "Failed to Deinit %d HW", dev_type);
 	}
 
-	mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex);
 	hw_mgr->device_in_use[dev_type][0] = false;
-	p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
 	hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL;
 	mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
 
@@ -167,7 +178,6 @@
 
 	list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
 
-
 	return rc;
 }
 
@@ -319,8 +329,11 @@
 		return -EINVAL;
 	}
 
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
 	if (list_empty(&hw_mgr->hw_config_req_list)) {
 		CAM_DBG(CAM_JPEG, "no available request");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		rc = -EFAULT;
 		goto end;
 	}
@@ -329,11 +342,11 @@
 		struct cam_jpeg_hw_cfg_req, list);
 	if (!p_cfg_req) {
 		CAM_ERR(CAM_JPEG, "no request");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		rc = -EFAULT;
 		goto end;
 	}
 
-	mutex_lock(&hw_mgr->hw_mgr_mutex);
 	if (false == hw_mgr->device_in_use[p_cfg_req->dev_type][0]) {
 		hw_mgr->device_in_use[p_cfg_req->dev_type][0] = true;
 		hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = p_cfg_req;
@@ -344,6 +357,7 @@
 		rc = -EFAULT;
 		goto end;
 	}
+
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args;
@@ -464,7 +478,7 @@
 		hw_mgr->devices[dev_type][0]->hw_priv,
 		NULL, 0);
 	if (rc) {
-		CAM_ERR(CAM_JPEG, "Failed to apply the configs %d",
+		CAM_ERR(CAM_JPEG, "Failed to start hw %d",
 			rc);
 		goto end_callcb;
 	}
@@ -553,12 +567,12 @@
 		goto err_after_dq_free_list;
 	}
 
-	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	task_data = (struct cam_jpeg_process_frame_work_data_t *)
 		task->payload;
 	if (!task_data) {
 		CAM_ERR(CAM_JPEG, "task_data is NULL");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		rc = -EINVAL;
 		goto err_after_dq_free_list;
 	}
@@ -567,6 +581,7 @@
 		p_cfg_req->hw_cfg_args.num_hw_update_entries);
 
 	list_add_tail(&p_cfg_req->list, &hw_mgr->hw_config_req_list);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	task_data->data = (void *)(int64_t)p_cfg_req->dev_type;
 	task_data->request_id = request_id;
@@ -719,6 +734,88 @@
 	return rc;
 }
 
+static int cam_jpeg_mgr_flush(void *hw_mgr_priv,
+	struct cam_jpeg_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	uint32_t dev_type;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+	struct cam_jpeg_hw_cfg_req *cfg_req, *req_temp;
+
+	if (!hw_mgr || !ctx_data) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+	if (hw_mgr->device_in_use[dev_type][0] == true &&
+		p_cfg_req != NULL) {
+		if ((struct cam_jpeg_hw_ctx_data *)p_cfg_req->
+		hw_cfg_args.ctxt_to_hw_map == ctx_data) {
+			/* stop reset Unregister CB and deinit */
+			if (hw_mgr->devices[dev_type][0]->hw_ops.stop) {
+				rc = hw_mgr->devices[dev_type][0]->hw_ops.stop(
+					hw_mgr->devices[dev_type][0]->hw_priv,
+					NULL, 0);
+				if (rc)
+					CAM_ERR(CAM_JPEG, "stop fail %d", rc);
+			} else {
+				CAM_ERR(CAM_JPEG, "op stop null ");
+				rc = -EINVAL;
+			}
+		}
+
+		hw_mgr->device_in_use[dev_type][0] = false;
+		p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+		hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL;
+	}
+
+	list_for_each_entry_safe(cfg_req, req_temp,
+		&hw_mgr->hw_config_req_list, list) {
+		if ((struct cam_jpeg_hw_ctx_data *)cfg_req->
+			hw_cfg_args.ctxt_to_hw_map != ctx_data)
+			continue;
+
+		CAM_INFO(CAM_JPEG, "deleting req %pK", cfg_req);
+		list_del_init(&cfg_req->list);
+	}
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_hw_stop(void *hw_mgr_priv, void *stop_hw_args)
+{
+	int rc;
+	struct cam_hw_stop_args *stop_args =
+		(struct cam_hw_stop_args *)stop_hw_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+
+	if (!hw_mgr || !stop_args || !stop_args->ctxt_to_hw_map) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)stop_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data);
+	if ((rc))
+		CAM_ERR(CAM_JPEG, "flush failed %d", rc);
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
 static int cam_jpeg_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
 {
 	int rc;
@@ -1184,6 +1281,7 @@
 	hw_mgr_intf->hw_release = cam_jpeg_mgr_release_hw;
 	hw_mgr_intf->hw_prepare_update = cam_jpeg_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
+	hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop;
 
 	mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
 	spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
index 725af47..2ac4db6 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
@@ -19,6 +19,9 @@
 #define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000
 #define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a
 
+#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK 0x8000000
+#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_SHIFT 0x0000001b
+
 #define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800
 #define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b
 
@@ -63,11 +66,13 @@
 		.int_mask_enable_all = 0xFFFFFFFF,
 		.hw_cmd_start = 0x00000001,
 		.reset_cmd = 0x00032093,
+		.hw_cmd_stop = 0x00000002,
 	},
 	.int_status = {
 		.framedone = CAM_JPEG_HW_MASK_COMP_FRAMEDONE,
 		.resetdone = CAM_JPEG_HW_MASK_COMP_RESET_ACK,
 		.iserror = CAM_JPEG_HW_MASK_COMP_ERR,
+		.stopdone = CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK,
 	}
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
index a7c4e06..934b911 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
@@ -37,6 +37,8 @@
 	((jpeg_irq_status) & (hi)->int_status.resetdone)
 #define CAM_JPEG_HW_IRQ_IS_ERR(jpeg_irq_status, hi) \
 	((jpeg_irq_status) & (hi)->int_status.iserror)
+#define CAM_JPEG_HW_IRQ_IS_STOP_DONE(jpeg_irq_status, hi) \
+	((jpeg_irq_status) & (hi)->int_status.stopdone)
 
 #define CAM_JPEG_ENC_RESET_TIMEOUT msecs_to_jiffies(500)
 
@@ -181,7 +183,9 @@
 
 	CAM_DBG(CAM_JPEG, "irq_num %d  irq_status = %x , core_state %d",
 		irq_num, irq_status, core_info->core_state);
+
 	if (CAM_JPEG_HW_IRQ_IS_FRAME_DONE(irq_status, hw_info)) {
+		spin_lock(&jpeg_enc_dev->hw_lock);
 		if (core_info->core_state == CAM_JPEG_ENC_CORE_READY) {
 			encoded_size = cam_io_r_mb(mem_base +
 				core_info->jpeg_enc_hw_info->reg_offset.
@@ -191,22 +195,42 @@
 					encoded_size,
 					core_info->irq_cb.data);
 			} else {
-				CAM_ERR(CAM_JPEG, "unexpected done");
+				CAM_ERR(CAM_JPEG, "unexpected done, no cb");
 			}
+		} else {
+			CAM_ERR(CAM_JPEG, "unexpected done irq");
 		}
-
 		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+		spin_unlock(&jpeg_enc_dev->hw_lock);
 	}
 	if (CAM_JPEG_HW_IRQ_IS_RESET_ACK(irq_status, hw_info)) {
+		spin_lock(&jpeg_enc_dev->hw_lock);
 		if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
 			core_info->core_state = CAM_JPEG_ENC_CORE_READY;
 			complete(&jpeg_enc_dev->hw_complete);
 		} else {
 			CAM_ERR(CAM_JPEG, "unexpected reset irq");
 		}
+		spin_unlock(&jpeg_enc_dev->hw_lock);
+	}
+	if (CAM_JPEG_HW_IRQ_IS_STOP_DONE(irq_status, hw_info)) {
+		spin_lock(&jpeg_enc_dev->hw_lock);
+		if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) {
+			core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+			complete(&jpeg_enc_dev->hw_complete);
+			if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+				core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+					-1,
+					core_info->irq_cb.data);
+			}
+		} else {
+			CAM_ERR(CAM_JPEG, "unexpected abort irq");
+		}
+		spin_unlock(&jpeg_enc_dev->hw_lock);
 	}
 	/* Unexpected/unintended HW interrupt */
 	if (CAM_JPEG_HW_IRQ_IS_ERR(irq_status, hw_info)) {
+		spin_lock(&jpeg_enc_dev->hw_lock);
 		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
 		CAM_ERR_RATE_LIMIT(CAM_JPEG,
 			"error irq_num %d  irq_status = %x , core_state %d",
@@ -217,6 +241,7 @@
 				-1,
 				core_info->irq_cb.data);
 		}
+		spin_unlock(&jpeg_enc_dev->hw_lock);
 	}
 
 	return IRQ_HANDLED;
@@ -244,14 +269,18 @@
 	hw_info = core_info->jpeg_enc_hw_info;
 	mem_base = soc_info->reg_map[0].mem_base;
 
+	mutex_lock(&core_info->core_mutex);
+	spin_lock(&jpeg_enc_dev->hw_lock);
 	if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
 		CAM_ERR(CAM_JPEG, "alrady resetting");
+		spin_unlock(&jpeg_enc_dev->hw_lock);
+		mutex_unlock(&core_info->core_mutex);
 		return 0;
 	}
 
 	reinit_completion(&jpeg_enc_dev->hw_complete);
-
 	core_info->core_state = CAM_JPEG_ENC_CORE_RESETTING;
+	spin_unlock(&jpeg_enc_dev->hw_lock);
 
 	cam_io_w_mb(hw_info->reg_val.int_mask_disable_all,
 		mem_base + hw_info->reg_offset.int_mask);
@@ -269,6 +298,7 @@
 		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
 	}
 
+	mutex_unlock(&core_info->core_mutex);
 	return 0;
 }
 
@@ -303,6 +333,54 @@
 	return 0;
 }
 
+int cam_jpeg_enc_stop_hw(void *data,
+	void *stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+	unsigned long rem_jiffies;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info =
+		(struct cam_jpeg_enc_device_core_info *)jpeg_enc_dev->
+		core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	mutex_lock(&core_info->core_mutex);
+	spin_lock(&jpeg_enc_dev->hw_lock);
+	if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) {
+		CAM_ERR(CAM_JPEG, "already stopping");
+		spin_unlock(&jpeg_enc_dev->hw_lock);
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
+	reinit_completion(&jpeg_enc_dev->hw_complete);
+	core_info->core_state = CAM_JPEG_ENC_CORE_ABORTING;
+	spin_unlock(&jpeg_enc_dev->hw_lock);
+
+	cam_io_w_mb(hw_info->reg_val.hw_cmd_stop,
+		mem_base + hw_info->reg_offset.hw_cmd);
+
+	rem_jiffies = wait_for_completion_timeout(&jpeg_enc_dev->hw_complete,
+		CAM_JPEG_ENC_RESET_TIMEOUT);
+	if (!rem_jiffies) {
+		CAM_ERR(CAM_JPEG, "error Stop Timeout");
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+	}
+
+	mutex_unlock(&core_info->core_mutex);
+	return 0;
+}
+
 int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
 	void *cmd_args, uint32_t arg_size)
 {
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
index 4f5d625..5fa8f21 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
@@ -37,12 +37,14 @@
 	uint32_t int_mask_enable_all;
 	uint32_t hw_cmd_start;
 	uint32_t reset_cmd;
+	uint32_t hw_cmd_stop;
 };
 
 struct cam_jpeg_enc_int_status {
 	uint32_t framedone;
 	uint32_t resetdone;
 	uint32_t iserror;
+	uint32_t stopdone;
 };
 
 struct cam_jpeg_enc_device_hw_info {
@@ -55,6 +57,7 @@
 	CAM_JPEG_ENC_CORE_NOT_READY,
 	CAM_JPEG_ENC_CORE_READY,
 	CAM_JPEG_ENC_CORE_RESETTING,
+	CAM_JPEG_ENC_CORE_ABORTING,
 	CAM_JPEG_ENC_CORE_STATE_MAX,
 };
 
@@ -73,6 +76,8 @@
 	void *init_hw_args, uint32_t arg_size);
 int cam_jpeg_enc_start_hw(void *device_priv,
 	void *start_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_stop_hw(void *device_priv,
+	void *stop_hw_args, uint32_t arg_size);
 int cam_jpeg_enc_reset_hw(void *device_priv,
 	void *reset_hw_args, uint32_t arg_size);
 int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
index 735bd21..2180448 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
@@ -139,6 +139,7 @@
 	jpeg_enc_dev_intf->hw_ops.init = cam_jpeg_enc_init_hw;
 	jpeg_enc_dev_intf->hw_ops.deinit = cam_jpeg_enc_deinit_hw;
 	jpeg_enc_dev_intf->hw_ops.start = cam_jpeg_enc_start_hw;
+	jpeg_enc_dev_intf->hw_ops.stop = cam_jpeg_enc_stop_hw;
 	jpeg_enc_dev_intf->hw_ops.reset = cam_jpeg_enc_reset_hw;
 	jpeg_enc_dev_intf->hw_ops.process_cmd = cam_jpeg_enc_process_cmd;
 	jpeg_enc_dev_intf->hw_type = CAM_JPEG_DEV_ENC;
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
index 0318739..dbd969c 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -634,6 +634,7 @@
 	mutex_lock(&lrme_hw->hw_mutex);
 
 	if (lrme_hw->open_count > 0) {
+		lrme_hw->open_count++;
 		CAM_DBG(CAM_LRME, "This device is activated before");
 		goto unlock;
 	}
@@ -665,6 +666,7 @@
 	lrme_hw->open_count++;
 	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
 
+	CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count);
 	mutex_unlock(&lrme_hw->hw_mutex);
 	return rc;
 
@@ -672,6 +674,7 @@
 	if (cam_lrme_soc_disable_resources(lrme_hw))
 		CAM_ERR(CAM_LRME, "Error in disable soc resources");
 unlock:
+	CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count);
 	mutex_unlock(&lrme_hw->hw_mutex);
 	return rc;
 }
@@ -698,6 +701,8 @@
 	}
 	lrme_hw->open_count--;
 
+	CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count);
+
 	if (lrme_hw->open_count)
 		goto unlock;
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 244746b..d7a382f 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -194,11 +194,13 @@
 
 	tbl = traverse_data->tbl;
 	apply_data = traverse_data->apply_data;
-	CAM_DBG(CAM_CRM, "Enter pd %d idx %d state %d skip %d status %d",
+	CAM_DBG(CAM_CRM,
+		"Enter pd %d idx %d state %d skip %d status %d skip_idx %d",
 		tbl->pd, curr_idx, tbl->slot[curr_idx].state,
-		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status);
+		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status,
+		traverse_data->in_q->slot[curr_idx].skip_idx);
 
-	if (tbl->inject_delay > 0) {
+	if (tbl->inject_delay > 0 && (traverse_data->validate_only == false)) {
 		CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
 		apply_data[tbl->pd].req_id = -1;
 		tbl->inject_delay--;
@@ -220,15 +222,18 @@
 		}
 		if (rc >= 0) {
 			SET_SUCCESS_BIT(traverse_data->result, tbl->pd);
-			apply_data[tbl->pd].pd = tbl->pd;
-			apply_data[tbl->pd].req_id =
-				CRM_GET_REQ_ID(traverse_data->in_q, curr_idx);
-			apply_data[tbl->pd].idx = curr_idx;
+			if (traverse_data->validate_only == false) {
+				apply_data[tbl->pd].pd = tbl->pd;
+				apply_data[tbl->pd].req_id =
+					CRM_GET_REQ_ID(traverse_data->in_q,
+					curr_idx);
+				apply_data[tbl->pd].idx = curr_idx;
 
-			/* If traverse is sucessful decrement traverse skip */
-			if (tbl->skip_traverse > 0) {
-				apply_data[tbl->pd].req_id = -1;
-				tbl->skip_traverse--;
+				/* If traverse is success dec traverse skip */
+				if (tbl->skip_traverse > 0) {
+					apply_data[tbl->pd].req_id = -1;
+					tbl->skip_traverse--;
+				}
 			}
 		} else {
 			/* linked pd table is not ready for this traverse yet */
@@ -338,6 +343,7 @@
 	slot->req_id = -1;
 	slot->skip_idx = 0;
 	slot->recover = 0;
+	slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
 	slot->status = CRM_SLOT_STATUS_NO_REQ;
 
 	/* Reset all pd table slot */
@@ -460,17 +466,18 @@
 /**
  * __cam_req_mgr_check_link_is_ready()
  *
- * @brief    : traverse through all request tables and see if all devices are
- *             ready to apply request settings.
- * @link     : pointer to link whose input queue and req tbl are
- *             traversed through
- * @idx      : index within input request queue
+ * @brief         : traverse through all request tables and see if
+ *                  all devices are ready to apply request settings
+ * @link          : pointer to link whose input queue and req tbl are
+ *                  traversed through
+ * @idx           : index within input request queue
+ * @validate_only : Whether to validate only and/or update settings
  *
  * @return   : 0 for success, negative for failure
  *
  */
 static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
-	int32_t idx)
+	int32_t idx, bool validate_only)
 {
 	int                            rc;
 	struct cam_req_mgr_traverse    traverse_data;
@@ -480,14 +487,18 @@
 	in_q = link->req.in_q;
 
 	apply_data = link->req.apply_data;
-	memset(apply_data, 0,
-		sizeof(struct cam_req_mgr_apply) * CAM_PIPELINE_DELAY_MAX);
+	if (validate_only == false) {
+		memset(apply_data, 0,
+			sizeof(struct cam_req_mgr_apply) *
+			CAM_PIPELINE_DELAY_MAX);
+	}
 
 	traverse_data.apply_data = apply_data;
 	traverse_data.idx = idx;
 	traverse_data.tbl = link->req.l_tbl;
 	traverse_data.in_q = in_q;
 	traverse_data.result = 0;
+	traverse_data.validate_only = validate_only;
 	/*
 	 *  Traverse through all pd tables, if result is success,
 	 *  apply the settings
@@ -510,6 +521,209 @@
 }
 
 /**
+ * __cam_req_mgr_find_slot_for_req()
+ *
+ * @brief    : Find idx from input queue at which req id is enqueued
+ * @in_q     : input request queue pointer
+ * @req_id   : request id which needs to be searched in input queue
+ *
+ * @return   : slot index where passed request id is stored, -1 for failure
+ *
+ */
+static int32_t __cam_req_mgr_find_slot_for_req(
+	struct cam_req_mgr_req_queue *in_q, int64_t req_id)
+{
+	int32_t                   idx, i;
+	struct cam_req_mgr_slot  *slot;
+
+	idx = in_q->rd_idx;
+	for (i = 0; i < in_q->num_slots; i++) {
+		slot = &in_q->slot[idx];
+		if (slot->req_id == req_id) {
+			CAM_DBG(CAM_CRM,
+				"req: %lld found at idx: %d status: %d sync_mode: %d",
+				req_id, idx, slot->status, slot->sync_mode);
+			break;
+		}
+		__cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
+	}
+	if (i >= in_q->num_slots)
+		idx = -1;
+
+	return idx;
+}
+
+/**
+ * __cam_req_mgr_reset_sof_cnt()
+ *
+ * @brief    : the sof_count for both the links are reset
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ */
+static void __cam_req_mgr_reset_sof_cnt(
+	struct cam_req_mgr_core_link *link)
+{
+	link->sof_counter = -1;
+	link->sync_link->sof_counter = -1;
+	link->frame_skip_flag = false;
+}
+
+/**
+ * __cam_req_mgr_sof_cnt_initialize()
+ *
+ * @brief    : when the sof count is intially -1 it increments count
+ *             and computes the sync_self_ref for this link
+ *             the count needs to be wrapped back starting from 0
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ */
+static void __cam_req_mgr_sof_cnt_initialize(
+	struct cam_req_mgr_core_link *link)
+{
+	link->sof_counter++;
+	link->sync_self_ref = link->sof_counter -
+		link->sync_link->sof_counter;
+}
+
+/**
+ * __cam_req_mgr_wrap_sof_cnt()
+ *
+ * @brief    : once the sof count reaches a predefined maximum
+ *             the count needs to be wrapped back starting from 0
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ */
+static void __cam_req_mgr_wrap_sof_cnt(
+	struct cam_req_mgr_core_link *link)
+{
+	link->sof_counter = (MAX_SYNC_COUNT -
+		(link->sync_link->sof_counter));
+	link->sync_link->sof_counter = 0;
+}
+
+/**
+ * __cam_req_mgr_validate_sof_cnt()
+ *
+ * @brief     : validates sof count difference for a given link
+ * @link      : pointer to link whose input queue and req tbl are
+ *              traversed through
+ * @sync_link : pointer to the sync link
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_validate_sof_cnt(
+	struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_core_link *sync_link)
+{
+	int64_t sync_diff = 0;
+	int rc = 0;
+
+	if (link->sof_counter == MAX_SYNC_COUNT)
+		__cam_req_mgr_wrap_sof_cnt(link);
+
+	sync_diff = link->sof_counter - sync_link->sof_counter;
+	if (sync_diff != link->sync_self_ref) {
+		link->sync_link->frame_skip_flag = true;
+		CAM_WARN(CAM_CRM,
+			"Detected anomaly, skip link:%d, self=%lld, other=%lld",
+			link->link_hdl, link->sof_counter,
+			sync_link->sof_counter);
+		rc = -EPERM;
+	}
+
+	return rc;
+}
+
+
+/**
+ * __cam_req_mgr_process_sync_req()
+ *
+ * @brief    : processes requests during sync mode
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @slot     : pointer to the current slot being processed
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_process_sync_req(
+	struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_slot *slot)
+{
+	struct cam_req_mgr_core_link *sync_link = NULL;
+	int64_t req_id = 0;
+	int sync_slot_idx = 0, rc = 0;
+
+	if (!link->sync_link) {
+		CAM_ERR(CAM_CRM, "Sync link null");
+		return -EINVAL;
+	}
+
+	sync_link = link->sync_link;
+	req_id = slot->req_id;
+	if (link->sof_counter == -1) {
+		__cam_req_mgr_sof_cnt_initialize(link);
+	} else if (link->frame_skip_flag &&
+		(sync_link->sync_self_ref != -1)) {
+		CAM_DBG(CAM_CRM, "Link[%x] Req[%lld] Resetting values",
+			link->link_hdl, req_id);
+		__cam_req_mgr_reset_sof_cnt(link);
+		__cam_req_mgr_sof_cnt_initialize(link);
+	} else {
+		link->sof_counter++;
+	}
+
+	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true);
+	if (rc) {
+		CAM_DBG(CAM_CRM,
+			"Req: %lld [My link]not available link: %x, rc=%d",
+			req_id, link->link_hdl, rc);
+		goto failure;
+	}
+
+	sync_slot_idx = __cam_req_mgr_find_slot_for_req(
+		sync_link->req.in_q, req_id);
+	if (sync_slot_idx != -1) {
+		rc = __cam_req_mgr_check_link_is_ready(
+			sync_link, sync_slot_idx, true);
+		CAM_DBG(CAM_CRM, "sync_slot_idx=%d, status=%d, rc=%d",
+			sync_slot_idx,
+			sync_link->req.in_q->slot[sync_slot_idx].status,
+			rc);
+	} else {
+		CAM_DBG(CAM_CRM, "sync_slot_idx=%d, rc=%d",
+			sync_slot_idx, rc);
+	}
+
+	if ((sync_slot_idx != -1) &&
+	((sync_link->req.in_q->slot[sync_slot_idx].status ==
+	CRM_SLOT_STATUS_REQ_APPLIED) || (rc == 0))) {
+		rc = __cam_req_mgr_validate_sof_cnt(link, sync_link);
+		if (rc) {
+			CAM_DBG(CAM_CRM,
+				"Req: %lld validate failed: %x",
+				req_id, sync_link->link_hdl);
+			goto failure;
+		}
+		__cam_req_mgr_check_link_is_ready(link, slot->idx, false);
+	} else {
+		CAM_DBG(CAM_CRM,
+			"Req: %lld [Other link] not ready to apply on link: %x",
+			req_id, sync_link->link_hdl);
+		rc = -EPERM;
+		goto failure;
+	}
+
+	return rc;
+
+failure:
+	link->sof_counter--;
+	return rc;
+}
+
+/**
  * __cam_req_mgr_process_req()
  *
  * @brief    : processes read index in request queue and traverse through table
@@ -529,7 +743,7 @@
 
 	in_q = link->req.in_q;
 	session = (struct cam_req_mgr_core_session *)link->parent;
-
+	mutex_lock(&session->lock);
 	/*
 	 * Check if new read index,
 	 * - if in pending  state, traverse again to complete
@@ -537,32 +751,48 @@
 	 * - if in applied_state, somthign wrong.
 	 * - if in no_req state, no new req
 	 */
-	CAM_DBG(CAM_CRM, "idx %d req_status %d",
-		in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+	CAM_DBG(CAM_CRM, "SOF Req[%lld] idx %d req_status %d",
+		in_q->slot[in_q->rd_idx].req_id, in_q->rd_idx,
+		in_q->slot[in_q->rd_idx].status);
 
 	slot = &in_q->slot[in_q->rd_idx];
 	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
 		CAM_DBG(CAM_CRM, "No Pending req");
-		return 0;
+		rc = 0;
+		goto error;
 	}
 
 	if (trigger != CAM_TRIGGER_POINT_SOF &&
 			trigger != CAM_TRIGGER_POINT_EOF)
-		return rc;
+		goto error;
+
+	if ((trigger == CAM_TRIGGER_POINT_EOF) &&
+		(!(link->trigger_mask & CAM_TRIGGER_POINT_SOF))) {
+		CAM_DBG(CAM_CRM, "Applying for last SOF fails");
+		rc = -EINVAL;
+		goto error;
+	}
 
 	if (trigger == CAM_TRIGGER_POINT_SOF) {
 		if (link->trigger_mask) {
 			CAM_ERR_RATE_LIMIT(CAM_CRM,
 				"Applying for last EOF fails");
-			return -EINVAL;
+			rc = -EINVAL;
+			goto error;
 		}
-		rc = __cam_req_mgr_check_link_is_ready(link, slot->idx);
-		if (rc < 0) {
 
-			 /* If traverse result is not success, then some devices
-			  * are not ready with packet for the asked request id,
-			  * hence try again in next sof
-			  */
+		if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC)
+			rc = __cam_req_mgr_process_sync_req(link, slot);
+		else
+			rc = __cam_req_mgr_check_link_is_ready(link,
+				slot->idx, false);
+
+		if (rc < 0) {
+			/*
+			 * If traverse result is not success, then some devices
+			 * are not ready with packet for the asked request id,
+			 * hence try again in next sof
+			 */
 			slot->status = CRM_SLOT_STATUS_REQ_PENDING;
 			spin_lock_bh(&link->link_state_spin_lock);
 			if (link->state == CAM_CRM_LINK_STATE_ERR) {
@@ -578,20 +808,17 @@
 				rc = -EPERM;
 			}
 			spin_unlock_bh(&link->link_state_spin_lock);
-			return rc;
+			goto error;
 		}
 	}
-	if (trigger == CAM_TRIGGER_POINT_EOF &&
-			(!(link->trigger_mask & CAM_TRIGGER_POINT_SOF))) {
-		CAM_DBG(CAM_CRM, "Applying for last SOF fails");
-		return -EINVAL;
-	}
 
 	rc = __cam_req_mgr_send_req(link, link->req.in_q, trigger);
 	if (rc < 0) {
 		/* Apply req failed retry at next sof */
 		slot->status = CRM_SLOT_STATUS_REQ_PENDING;
 	} else {
+		CAM_DBG(CAM_CRM, "Applied req[%lld] on link[%x] success",
+			slot->req_id, link->link_hdl);
 		link->trigger_mask |= trigger;
 
 		spin_lock_bh(&link->link_state_spin_lock);
@@ -605,8 +832,9 @@
 		if (link->trigger_mask == link->subscribe_event) {
 			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
 			link->trigger_mask = 0;
-			CAM_DBG(CAM_CRM, "req is applied\n");
-
+			CAM_DBG(CAM_CRM, "req %lld is applied on link %x",
+				slot->req_id,
+				link->link_hdl);
 			idx = in_q->rd_idx;
 			__cam_req_mgr_dec_idx(
 				&idx, link->max_delay + 1,
@@ -614,7 +842,11 @@
 			__cam_req_mgr_reset_req_slot(link, idx);
 		}
 	}
+	mutex_unlock(&session->lock);
+	return rc;
 
+error:
+	mutex_unlock(&session->lock);
 	return rc;
 }
 
@@ -703,39 +935,6 @@
 }
 
 /**
- * __cam_req_mgr_find_slot_for_req()
- *
- * @brief    : Find idx from input queue at which req id is enqueued
- * @in_q     : input request queue pointer
- * @req_id   : request id which needs to be searched in input queue
- *
- * @return   : slot index where passed request id is stored, -1 for failure
- *
- */
-static int32_t __cam_req_mgr_find_slot_for_req(
-	struct cam_req_mgr_req_queue *in_q, int64_t req_id)
-{
-	int32_t                   idx, i;
-	struct cam_req_mgr_slot  *slot;
-
-	idx = in_q->wr_idx;
-	for (i = 0; i < in_q->num_slots; i++) {
-		slot = &in_q->slot[idx];
-		if (slot->req_id == req_id) {
-			CAM_DBG(CAM_CRM, "req %lld found at %d %d status %d",
-				req_id, idx, slot->idx,
-				slot->status);
-			break;
-		}
-		__cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
-	}
-	if (i >= in_q->num_slots)
-		idx = -1;
-
-	return idx;
-}
-
-/**
  * __cam_req_mgr_setup_in_q()
  *
  * @brief : Initialize req table data
@@ -938,6 +1137,7 @@
 {
 	struct cam_req_mgr_core_link *link;
 	struct cam_req_mgr_req_queue *in_q;
+	int i;
 
 	if (!session || !g_crm_core_dev) {
 		CAM_ERR(CAM_CRM, "NULL session/core_dev ptr");
@@ -976,16 +1176,34 @@
 	in_q->num_slots = 0;
 	link->state = CAM_CRM_LINK_STATE_IDLE;
 	link->parent = (void *)session;
+	link->sync_link = NULL;
 	mutex_unlock(&link->lock);
 
 	mutex_lock(&session->lock);
-	session->links[session->num_links] = link;
+	/*  Loop through and find a free index */
+	for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+		if (!session->links[i]) {
+			session->links[i] = link;
+			break;
+		}
+	}
+
+	if (i == MAX_LINKS_PER_SESSION) {
+		CAM_ERR(CAM_CRM, "Free link index not found");
+		goto error;
+	}
+
 	session->num_links++;
 	CAM_DBG(CAM_CRM, "Active session links (%d)",
 		session->num_links);
 	mutex_unlock(&session->lock);
 
 	return link;
+error:
+	mutex_unlock(&session->lock);
+	kfree(link);
+	kfree(in_q);
+	return NULL;
 }
 
 /**
@@ -1013,7 +1231,7 @@
 		CAM_WARN(CAM_CRM, "No active link or invalid state %d",
 			session->num_links);
 	else {
-		for (i = 0; i < session->num_links; i++) {
+		for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
 			if (session->links[i] == *link)
 				session->links[i] = NULL;
 		}
@@ -1105,6 +1323,7 @@
 		for (i = 0; i < in_q->num_slots; i++) {
 			slot = &in_q->slot[i];
 			slot->req_id = -1;
+			slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
 			slot->skip_idx = 1;
 			slot->status = CRM_SLOT_STATUS_NO_REQ;
 		}
@@ -1176,9 +1395,10 @@
 	link = (struct cam_req_mgr_core_link *)priv;
 	task_data = (struct crm_task_payload *)data;
 	sched_req  = (struct cam_req_mgr_sched_request *)&task_data->u;
-	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld",
+	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld sync_mode %d",
 		sched_req->link_hdl,
-		sched_req->req_id);
+		sched_req->req_id,
+		sched_req->sync_mode);
 
 	in_q = link->req.in_q;
 
@@ -1189,11 +1409,12 @@
 		slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
 		CAM_WARN(CAM_CRM, "in_q overwrite %d", slot->status);
 
-	CAM_DBG(CAM_CRM, "sched_req %lld at slot %d",
-		sched_req->req_id, in_q->wr_idx);
+	CAM_DBG(CAM_CRM, "sched_req %lld at slot %d sync_mode %d",
+		sched_req->req_id, in_q->wr_idx, sched_req->sync_mode);
 
 	slot->status = CRM_SLOT_STATUS_REQ_ADDED;
 	slot->req_id = sched_req->req_id;
+	slot->sync_mode = sched_req->sync_mode;
 	slot->skip_idx = 0;
 	slot->recover = sched_req->bubble_enable;
 	__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
@@ -1444,6 +1665,8 @@
 		 * Check if any new req is pending in slot, if not finish the
 		 * lower pipeline delay device with available req ids.
 		 */
+		 CAM_DBG(CAM_CRM, "link[%x] Req[%lld] invalidating slot",
+			link->link_hdl, in_q->slot[in_q->rd_idx].req_id);
 		__cam_req_mgr_check_next_req_slot(in_q);
 		__cam_req_mgr_inc_idx(&in_q->rd_idx, 1, in_q->num_slots);
 	}
@@ -1841,6 +2064,7 @@
 	mutex_lock(&cam_session->lock);
 	cam_session->session_hdl = session_hdl;
 	cam_session->num_links = 0;
+	cam_session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
 	list_add(&cam_session->entry, &g_crm_core_dev->session_head);
 	mutex_unlock(&cam_session->lock);
 end:
@@ -1979,16 +2203,6 @@
 		goto setup_failed;
 	}
 
-	/* Start watchdong timer to detect if camera hw goes into bad state */
-	rc = crm_timer_init(&link->watchdog, CAM_REQ_MGR_WATCHDOG_TIMEOUT,
-		link, &__cam_req_mgr_sof_freeze);
-	if (rc < 0) {
-		kfree(link->workq->task.pool[0].payload);
-		__cam_req_mgr_destroy_link_info(link);
-		cam_req_mgr_workq_destroy(&link->workq);
-		goto setup_failed;
-	}
-
 	mutex_unlock(&link->lock);
 	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
@@ -2009,6 +2223,7 @@
 	int                              rc = 0;
 	struct cam_req_mgr_core_session *cam_session;
 	struct cam_req_mgr_core_link    *link;
+	int                              i;
 
 	if (!unlink_info) {
 		CAM_ERR(CAM_CRM, "NULL pointer");
@@ -2041,6 +2256,19 @@
 	spin_unlock_bh(&link->link_state_spin_lock);
 	__cam_req_mgr_print_req_tbl(&link->req);
 
+	if ((cam_session->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC) &&
+		(link->sync_link)) {
+		/*
+		 * make sure to unlink sync setup under the assumption
+		 * of only having 2 links in a given session
+		 */
+		cam_session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+		for (i = 0; i < MAX_LINKS_PER_SESSION; i++) {
+			if (cam_session->links[i])
+				cam_session->links[i]->sync_link = NULL;
+		}
+	}
+
 	/* Destroy workq payload data */
 	kfree(link->workq->task.pool[0].payload);
 	link->workq->task.pool[0].payload = NULL;
@@ -2096,17 +2324,20 @@
 		CAM_DBG(CAM_CRM, "link ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
+
 	session = (struct cam_req_mgr_core_session *)link->parent;
 	if (!session) {
 		CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
-	CAM_DBG(CAM_CRM, "link %x req %lld",
-		sched_req->link_hdl, sched_req->req_id);
+
+	CAM_DBG(CAM_CRM, "link %x req %lld, sync_mode %d",
+		sched_req->link_hdl, sched_req->req_id, sched_req->sync_mode);
 
 	task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
 	sched = (struct cam_req_mgr_sched_request *)&task_data.u;
 	sched->req_id = sched_req->req_id;
+	sched->sync_mode = sched_req->sync_mode;
 	sched->link_hdl = sched_req->link_hdl;
 	if (session->force_err_recovery == AUTO_RECOVERY) {
 		sched->bubble_enable = sched_req->bubble_enable;
@@ -2117,22 +2348,73 @@
 
 	rc = cam_req_mgr_process_sched_req(link, &task_data);
 
-	CAM_DBG(CAM_CRM, "DONE dev %x req %lld",
-		sched_req->link_hdl, sched_req->req_id);
+	CAM_DBG(CAM_CRM, "DONE dev %x req %lld sync_mode %d",
+		sched_req->link_hdl, sched_req->req_id, sched_req->sync_mode);
 end:
 	return rc;
 }
 
-int cam_req_mgr_sync_link(
-	struct cam_req_mgr_sync_mode *sync_links)
+int cam_req_mgr_sync_config(
+	struct cam_req_mgr_sync_mode *sync_info)
 {
-	if (!sync_links) {
+	int                              rc = 0;
+	struct cam_req_mgr_core_session *cam_session;
+	struct cam_req_mgr_core_link    *link1 = NULL;
+	struct cam_req_mgr_core_link    *link2 = NULL;
+
+	if (!sync_info) {
 		CAM_ERR(CAM_CRM, "NULL pointer");
 		return -EINVAL;
 	}
 
-	/* This function handles ioctl, implementation pending */
-	return 0;
+	if ((sync_info->num_links < 0) ||
+		(sync_info->num_links > MAX_LINKS_PER_SESSION)) {
+		CAM_ERR(CAM_CRM, "Invalid num links %d", sync_info->num_links);
+		return -EINVAL;
+	}
+
+	/* session hdl's priv data is cam session struct */
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(sync_info->session_hdl);
+	if (!cam_session) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+
+	mutex_lock(&cam_session->lock);
+	CAM_DBG(CAM_CRM, "link handles %x %x",
+		sync_info->link_hdls[0], sync_info->link_hdls[1]);
+
+	/* only two links exist per session in dual cam use case */
+	link1 = cam_get_device_priv(sync_info->link_hdls[0]);
+	if (!link1) {
+		CAM_ERR(CAM_CRM, "link1 NULL pointer");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	link2 = cam_get_device_priv(sync_info->link_hdls[1]);
+	if (!link2) {
+		CAM_ERR(CAM_CRM, "link2 NULL pointer");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	link1->sof_counter = -1;
+	link1->sync_self_ref = -1;
+	link1->frame_skip_flag = false;
+	link1->sync_link = link2;
+
+	link2->sof_counter = -1;
+	link2->sync_self_ref = -1;
+	link2->frame_skip_flag = false;
+	link2->sync_link = link1;
+
+	cam_session->sync_mode = sync_info->sync_mode;
+
+done:
+	mutex_unlock(&cam_session->lock);
+	return rc;
 }
 
 int cam_req_mgr_flush_requests(
@@ -2203,6 +2485,55 @@
 	return rc;
 }
 
+int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
+{
+	int                               rc = 0;
+	int                               i;
+	struct cam_req_mgr_core_link     *link = NULL;
+
+	if (!control) {
+		CAM_ERR(CAM_CRM, "Control command is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	for (i = 0; i < control->num_links; i++) {
+		link = (struct cam_req_mgr_core_link *)
+			cam_get_device_priv(control->link_hdls[i]);
+		if (!link) {
+			CAM_ERR(CAM_CRM, "Link(%d) is NULL on session 0x%x",
+				i, control->session_hdl);
+			rc = -EINVAL;
+			break;
+		}
+
+		mutex_lock(&link->lock);
+		if (control->ops == CAM_REQ_MGR_LINK_ACTIVATE) {
+			/* Start SOF watchdog timer */
+			rc = crm_timer_init(&link->watchdog,
+				CAM_REQ_MGR_WATCHDOG_TIMEOUT, link,
+				&__cam_req_mgr_sof_freeze);
+			if (rc < 0) {
+				CAM_ERR(CAM_CRM,
+					"SOF timer start fails: link=0x%x",
+					link->link_hdl);
+				rc = -EFAULT;
+			}
+		} else if (control->ops == CAM_REQ_MGR_LINK_DEACTIVATE) {
+			/* Destroy SOF watchdog timer */
+			crm_timer_exit(&link->watchdog);
+		} else {
+			CAM_ERR(CAM_CRM, "Invalid link control command");
+			rc = -EINVAL;
+		}
+		mutex_unlock(&link->lock);
+	}
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+end:
+	return rc;
+}
+
 
 int cam_req_mgr_core_device_init(void)
 {
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index e17047d6..42f8c77 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -30,6 +30,8 @@
 
 #define CRM_WORKQ_NUM_TASKS 60
 
+#define MAX_SYNC_COUNT 65535
+
 /**
  * enum crm_workq_task_type
  * @codes: to identify which type of task is present
@@ -122,11 +124,12 @@
 
 /**
  * struct cam_req_mgr_traverse
- * @idx        : slot index
- * @result     : contains which all tables were able to apply successfully
- * @tbl        : pointer of pipeline delay based request table
- * @apply_data : pointer which various tables will update during traverse
- * @in_q       : input request queue pointer
+ * @idx           : slot index
+ * @result        : contains which all tables were able to apply successfully
+ * @tbl           : pointer of pipeline delay based request table
+ * @apply_data    : pointer which various tables will update during traverse
+ * @in_q          : input request queue pointer
+ * @validate_only : Whether to validate only and/or update settings
  */
 struct cam_req_mgr_traverse {
 	int32_t                       idx;
@@ -134,6 +137,7 @@
 	struct cam_req_mgr_req_tbl   *tbl;
 	struct cam_req_mgr_apply     *apply_data;
 	struct cam_req_mgr_req_queue *in_q;
+	bool                          validate_only;
 };
 
 /**
@@ -198,6 +202,7 @@
  * - members updated due to external events
  * @recover      : if user enabled recovery for this request.
  * @req_id       : mask tracking which all devices have request ready
+ * @sync_mode    : Sync mode in which req id in this slot has to applied
  */
 struct cam_req_mgr_slot {
 	int32_t               idx;
@@ -205,6 +210,7 @@
 	enum crm_slot_status  status;
 	int32_t               recover;
 	int64_t               req_id;
+	int32_t               sync_mode;
 };
 
 /**
@@ -282,6 +288,12 @@
  * @subscribe_event      : irqs that link subscribes, IFE should send
  *                         notification to CRM at those hw events.
  * @trigger_mask         : mask on which irq the req is already applied
+ * @sync_link            : pointer to the sync link for synchronization
+ * @sof_counter          : sof counter during sync_mode
+ * @sync_self_ref        : reference sync count against which the difference
+ *                         between sync_counts for a given link is checked
+ * @frame_skip_flag      : flag that determines if a frame needs to be skipped
+ *
  */
 struct cam_req_mgr_core_link {
 	int32_t                              link_hdl;
@@ -299,6 +311,10 @@
 	spinlock_t                           link_state_spin_lock;
 	uint32_t                             subscribe_event;
 	uint32_t                             trigger_mask;
+	struct cam_req_mgr_core_link        *sync_link;
+	int64_t                              sof_counter;
+	int64_t                              sync_self_ref;
+	bool                                 frame_skip_flag;
 };
 
 /**
@@ -315,6 +331,7 @@
  * - Debug data
  * @force_err_recovery : For debugging, we can force bubble recovery
  *                       to be always ON or always OFF using debugfs.
+ * @sync_mode          : Sync mode for this session links
  */
 struct cam_req_mgr_core_session {
 	int32_t                       session_hdl;
@@ -323,6 +340,7 @@
 	struct list_head              entry;
 	struct mutex                  lock;
 	int32_t                       force_err_recovery;
+	int32_t                       sync_mode;
 };
 
 /**
@@ -384,11 +402,11 @@
 	struct cam_req_mgr_sched_request *sched_req);
 
 /**
- * cam_req_mgr_sync_link()
+ * cam_req_mgr_sync_config()
  * @brief: sync for links in a session
- * @sync_links: session, links info and master link info
+ * @sync_info: session, links info and master link info
  */
-int cam_req_mgr_sync_link(struct cam_req_mgr_sync_mode *sync_links);
+int cam_req_mgr_sync_config(struct cam_req_mgr_sync_mode *sync_info);
 
 /**
  * cam_req_mgr_flush_requests()
@@ -415,5 +433,13 @@
  * @brief: Handles camera close
  */
 void cam_req_mgr_handle_core_shutdown(void);
+
+/**
+ * cam_req_mgr_link_control()
+ * @brief:   Handles link control operations
+ * @control: Link control command
+ */
+int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control);
+
 #endif
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 49c3c56e..e4944d0 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -316,18 +316,18 @@
 		break;
 
 	case CAM_REQ_MGR_SYNC_MODE: {
-		struct cam_req_mgr_sync_mode sync_mode;
+		struct cam_req_mgr_sync_mode sync_info;
 
-		if (k_ioctl->size != sizeof(sync_mode))
+		if (k_ioctl->size != sizeof(sync_info))
 			return -EINVAL;
 
-		if (copy_from_user(&sync_mode,
+		if (copy_from_user(&sync_info,
 			(void *)k_ioctl->handle,
 			k_ioctl->size)) {
 			return -EFAULT;
 		}
 
-		rc = cam_req_mgr_sync_link(&sync_mode);
+		rc = cam_req_mgr_sync_config(&sync_info);
 		}
 		break;
 	case CAM_REQ_MGR_ALLOC_BUF: {
@@ -408,6 +408,24 @@
 			rc = -EINVAL;
 		}
 		break;
+	case CAM_REQ_MGR_LINK_CONTROL: {
+		struct cam_req_mgr_link_control cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			(void __user *)k_ioctl->handle,
+			k_ioctl->size)) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_req_mgr_link_control(&cmd);
+		if (rc)
+			rc = -EINVAL;
+		}
+		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index abfc190..febf922 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -44,9 +44,9 @@
 		goto free_power_settings;
 	}
 
-	power_info->power_setting[0].seq_type = SENSOR_VAF;
-	power_info->power_setting[0].seq_val = CAM_VAF;
-	power_info->power_setting[0].config_val = 0;
+	power_info->power_down_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_down_setting[0].seq_val = CAM_VAF;
+	power_info->power_down_setting[0].config_val = 0;
 
 	return rc;
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index cb44cb8..2adca66 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -224,14 +224,13 @@
 {
 	int32_t      rc = 0;
 	uint32_t     lane_enable = 0, mask = 1, size = 0;
-	uint16_t     lane_mask = 0, i = 0, cfg_size = 0;
+	uint16_t     lane_mask = 0, i = 0, cfg_size = 0, temp = 0;
 	uint8_t      lane_cnt, lane_pos = 0;
 	uint16_t     settle_cnt = 0;
 	void __iomem *csiphybase;
 	struct csiphy_reg_t (*reg_array)[MAX_SETTINGS_PER_LANE];
 
 	lane_cnt = csiphy_dev->csiphy_info.lane_cnt;
-	lane_mask = csiphy_dev->csiphy_info.lane_mask & 0x1f;
 	csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
 
 	if (!csiphybase) {
@@ -239,17 +238,6 @@
 		return -EINVAL;
 	}
 
-	for (i = 0; i < MAX_DPHY_DATA_LN; i++) {
-		if (mask == 0x2) {
-			if (lane_mask & mask)
-				lane_enable |= 0x80;
-			i--;
-		} else if (lane_mask & mask) {
-			lane_enable |= 0x1 << (i<<1);
-		}
-		mask <<= 1;
-	}
-
 	if (!csiphy_dev->csiphy_info.csiphy_3phase) {
 		if (csiphy_dev->csiphy_info.combo_mode == 1)
 			reg_array =
@@ -260,6 +248,18 @@
 		csiphy_dev->num_irq_registers = 11;
 		cfg_size = csiphy_dev->ctrl_reg->csiphy_reg.
 			csiphy_2ph_config_array_size;
+
+		lane_mask = csiphy_dev->csiphy_info.lane_mask & 0x1f;
+		for (i = 0; i < MAX_DPHY_DATA_LN; i++) {
+			if (mask == 0x2) {
+				if (lane_mask & mask)
+					lane_enable |= 0x80;
+				i--;
+			} else if (lane_mask & mask) {
+				lane_enable |= 0x1 << (i<<1);
+			}
+			mask <<= 1;
+		}
 	} else {
 		if (csiphy_dev->csiphy_info.combo_mode == 1)
 			reg_array =
@@ -267,9 +267,18 @@
 		else
 			reg_array =
 				csiphy_dev->ctrl_reg->csiphy_3ph_reg;
-		csiphy_dev->num_irq_registers = 20;
+		csiphy_dev->num_irq_registers = 11;
 		cfg_size = csiphy_dev->ctrl_reg->csiphy_reg.
 			csiphy_3ph_config_array_size;
+
+		lane_mask = csiphy_dev->csiphy_info.lane_mask & 0x7;
+		mask = lane_mask;
+		while (mask != 0) {
+			temp = (i << 1)+1;
+			lane_enable |= ((mask & 0x1) << temp);
+			mask >>= 1;
+			i++;
+		}
 	}
 
 	size = csiphy_dev->ctrl_reg->csiphy_reg.csiphy_common_array_size;
@@ -295,7 +304,7 @@
 		}
 	}
 
-	while (lane_mask & 0x1f) {
+	while (lane_mask) {
 		if (!(lane_mask & 0x1)) {
 			lane_pos++;
 			lane_mask >>= 1;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
index 5eb29c3..f3c4811 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -231,9 +231,11 @@
 
 static int cam_eeprom_i2c_driver_remove(struct i2c_client *client)
 {
+	int                             i;
 	struct v4l2_subdev             *sd = i2c_get_clientdata(client);
 	struct cam_eeprom_ctrl_t       *e_ctrl;
 	struct cam_eeprom_soc_private  *soc_private;
+	struct cam_hw_soc_info         *soc_info;
 
 	if (!sd) {
 		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
@@ -253,6 +255,10 @@
 		return -EINVAL;
 	}
 
+	soc_info = &e_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
 	if (soc_private)
 		kfree(soc_private);
 
@@ -358,9 +364,11 @@
 
 static int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
 {
+	int                             i;
 	struct v4l2_subdev             *sd = spi_get_drvdata(sdev);
 	struct cam_eeprom_ctrl_t       *e_ctrl;
 	struct cam_eeprom_soc_private  *soc_private;
+	struct cam_hw_soc_info         *soc_info;
 
 	if (!sd) {
 		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
@@ -373,6 +381,10 @@
 		return -EINVAL;
 	}
 
+	soc_info = &e_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
 	kfree(e_ctrl->io_master_info.spi_client);
 	soc_private =
 		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
@@ -458,7 +470,9 @@
 
 static int cam_eeprom_platform_driver_remove(struct platform_device *pdev)
 {
+	int                        i;
 	struct cam_eeprom_ctrl_t  *e_ctrl;
+	struct cam_hw_soc_info    *soc_info;
 
 	e_ctrl = platform_get_drvdata(pdev);
 	if (!e_ctrl) {
@@ -466,7 +480,12 @@
 		return -EINVAL;
 	}
 
-	kfree(e_ctrl->soc_info.soc_private);
+	soc_info = &e_ctrl->soc_info;
+
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	kfree(soc_info->soc_private);
 	kfree(e_ctrl->io_master_info.cci_client);
 	kfree(e_ctrl);
 	return 0;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
index 70c40fd..c250045 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
@@ -288,7 +288,7 @@
  */
 int cam_eeprom_parse_dt(struct cam_eeprom_ctrl_t *e_ctrl)
 {
-	int                             rc = 0;
+	int                             i, rc = 0;
 	struct cam_hw_soc_info         *soc_info = &e_ctrl->soc_info;
 	struct device_node             *of_node = NULL;
 	struct cam_eeprom_soc_private  *soc_private =
@@ -358,5 +358,16 @@
 			soc_private->i2c_info.slave_addr);
 	}
 
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = devm_clk_get(soc_info->dev,
+			soc_info->clk_name[i]);
+		if (!soc_info->clk[i]) {
+			CAM_ERR(CAM_SENSOR, "get failed for %s",
+				soc_info->clk_name[i]);
+			rc = -ENOENT;
+			return rc;
+		}
+	}
+
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
index d825f5e..4e4b112 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -46,9 +46,9 @@
 		goto free_power_settings;
 	}
 
-	power_info->power_setting[0].seq_type = SENSOR_VAF;
-	power_info->power_setting[0].seq_val = CAM_VAF;
-	power_info->power_setting[0].config_val = 0;
+	power_info->power_down_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_down_setting[0].seq_val = CAM_VAF;
+	power_info->power_down_setting[0].config_val = 0;
 
 	return rc;
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
index d9b43a4..5e1d719 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
@@ -226,7 +226,9 @@
 
 static int cam_ois_i2c_driver_remove(struct i2c_client *client)
 {
+	int                             i;
 	struct cam_ois_ctrl_t          *o_ctrl = i2c_get_clientdata(client);
+	struct cam_hw_soc_info         *soc_info;
 	struct cam_ois_soc_private     *soc_private;
 	struct cam_sensor_power_ctrl_t *power_info;
 
@@ -235,8 +237,13 @@
 		return -EINVAL;
 	}
 
+	soc_info = &o_ctrl->soc_info;
+
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
 	soc_private =
-		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+		(struct cam_ois_soc_private *)soc_info->soc_private;
 	power_info = &soc_private->power_info;
 
 	kfree(power_info->power_setting);
@@ -327,9 +334,11 @@
 
 static int cam_ois_platform_driver_remove(struct platform_device *pdev)
 {
+	int                             i;
 	struct cam_ois_ctrl_t          *o_ctrl;
 	struct cam_ois_soc_private     *soc_private;
 	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info         *soc_info;
 
 	o_ctrl = platform_get_drvdata(pdev);
 	if (!o_ctrl) {
@@ -337,6 +346,10 @@
 		return -EINVAL;
 	}
 
+	soc_info = &o_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
 	soc_private =
 		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
 	power_info = &soc_private->power_info;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c
index 5886413..c8b3448 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c
@@ -27,7 +27,7 @@
  */
 static int cam_ois_get_dt_data(struct cam_ois_ctrl_t *o_ctrl)
 {
-	int                             rc = 0;
+	int                             i, rc = 0;
 	struct cam_hw_soc_info         *soc_info = &o_ctrl->soc_info;
 	struct cam_ois_soc_private     *soc_private =
 		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
@@ -65,6 +65,17 @@
 		return -EINVAL;
 	}
 
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = devm_clk_get(soc_info->dev,
+			soc_info->clk_name[i]);
+		if (!soc_info->clk[i]) {
+			CAM_ERR(CAM_SENSOR, "get failed for %s",
+				soc_info->clk_name[i]);
+			rc = -ENOENT;
+			return rc;
+		}
+	}
+
 	return rc;
 }
 /**
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
index 8ea767f..b60111a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -208,7 +208,9 @@
 
 static int cam_sensor_platform_remove(struct platform_device *pdev)
 {
+	int                        i;
 	struct cam_sensor_ctrl_t  *s_ctrl;
+	struct cam_hw_soc_info    *soc_info;
 
 	s_ctrl = platform_get_drvdata(pdev);
 	if (!s_ctrl) {
@@ -216,6 +218,10 @@
 		return 0;
 	}
 
+	soc_info = &s_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
 	kfree(s_ctrl->i2c_data.per_frame);
 	devm_kfree(&pdev->dev, s_ctrl);
 
@@ -224,13 +230,19 @@
 
 static int cam_sensor_driver_i2c_remove(struct i2c_client *client)
 {
+	int                        i;
 	struct cam_sensor_ctrl_t  *s_ctrl = i2c_get_clientdata(client);
+	struct cam_hw_soc_info    *soc_info;
 
 	if (!s_ctrl) {
 		CAM_ERR(CAM_SENSOR, "sensor device is NULL");
 		return 0;
 	}
 
+	soc_info = &s_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
 	kfree(s_ctrl->i2c_data.per_frame);
 	kfree(s_ctrl);
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 10b07ec..7cddcf9 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -163,4 +163,18 @@
 int32_t cam_qup_i2c_write_table(
 	struct camera_io_master *client,
 	struct cam_sensor_i2c_reg_setting *write_setting);
+
+/**
+ * cam_qup_i2c_write_continuous_table: QUP based I2C continuous write (Burst/Seq)
+ * @client: QUP I2C client structure
+ * @write_setting: I2C register setting
+ * @cam_sensor_i2c_write_flag: burst or seq write
+ *
+ * This API handles QUP continuous write
+ */
+int32_t cam_qup_i2c_write_continuous_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting,
+	uint8_t cam_sensor_i2c_write_flag);
+
 #endif /*_CAM_SENSOR_I2C_H*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index 8eb04ec..d69ff47 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -129,8 +129,8 @@
 		return cam_cci_i2c_write_continuous_table(io_master_info,
 			write_setting, cam_sensor_i2c_write_flag);
 	} else if (io_master_info->master_type == I2C_MASTER) {
-		return cam_qup_i2c_write_table(io_master_info,
-			write_setting);
+		return cam_qup_i2c_write_continuous_table(io_master_info,
+			write_setting, cam_sensor_i2c_write_flag);
 	} else if (io_master_info->master_type == SPI_MASTER) {
 		return cam_spi_write_table(io_master_info,
 			write_setting);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
index 72e51ee..1c6ab0b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
@@ -353,3 +353,174 @@
 
 	return rc;
 }
+
+static int32_t cam_qup_i2c_write_seq(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = 0;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+
+	reg_setting = write_setting->reg_setting;
+
+	for (i = 0; i < write_setting->size; i++) {
+		reg_setting->reg_addr += i;
+		rc = cam_qup_i2c_write(client, reg_setting,
+			write_setting->addr_type, write_setting->data_type);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Sequential i2c write failed: rc: %d", rc);
+			break;
+		}
+		reg_setting++;
+	}
+
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000, (write_setting->delay
+			* 1000) + 1000);
+
+	return rc;
+}
+
+static int32_t cam_qup_i2c_write_burst(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = 0;
+	uint32_t len = 0;
+	unsigned char *buf = NULL;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+	enum camera_sensor_i2c_type addr_type;
+	enum camera_sensor_i2c_type data_type;
+
+	buf = kzalloc((write_setting->addr_type +
+			(write_setting->size * write_setting->data_type)),
+			GFP_KERNEL);
+
+	if (!buf) {
+		CAM_ERR(CAM_SENSOR, "BUF is NULL");
+		return -ENOMEM;
+	}
+
+	reg_setting = write_setting->reg_setting;
+	addr_type = write_setting->addr_type;
+	data_type = write_setting->data_type;
+
+	CAM_DBG(CAM_SENSOR, "reg addr = 0x%x data type: %d",
+			reg_setting->reg_addr, data_type);
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		len = 1;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = reg_setting->reg_addr >> 8;
+		buf[1] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len+1, buf[len+1]);
+		len = 2;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = reg_setting->reg_addr >> 16;
+		buf[1] = reg_setting->reg_addr >> 8;
+		buf[2] = reg_setting->reg_addr;
+		len = 3;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[0] = reg_setting->reg_addr >> 24;
+		buf[1] = reg_setting->reg_addr >> 16;
+		buf[2] = reg_setting->reg_addr >> 8;
+		buf[3] = reg_setting->reg_addr;
+		len = 4;
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid I2C addr type");
+		rc = -EINVAL;
+		goto free_res;
+	}
+
+	for (i = 0; i < write_setting->size; i++) {
+		if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+			buf[len] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			len += 1;
+		} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+			buf[len] = reg_setting->reg_data >> 8;
+			buf[len+1] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+1, buf[len+1]);
+			len += 2;
+		} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+			buf[len] = reg_setting->reg_data >> 16;
+			buf[len + 1] = reg_setting->reg_data >> 8;
+			buf[len + 2] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+1, buf[len+1]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+2, buf[len+2]);
+			len += 3;
+		} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+			buf[len] = reg_setting->reg_data >> 24;
+			buf[len + 1] = reg_setting->reg_data >> 16;
+			buf[len + 2] = reg_setting->reg_data >> 8;
+			buf[len + 3] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+1, buf[len+1]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+2, buf[len+2]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+3, buf[len+3]);
+			len += 4;
+		} else {
+			CAM_ERR(CAM_SENSOR, "Invalid Data Type");
+			rc = -EINVAL;
+			goto free_res;
+		}
+		reg_setting++;
+	}
+
+	if (len > (write_setting->addr_type +
+		(write_setting->size * write_setting->data_type))) {
+		CAM_ERR(CAM_SENSOR, "Invalid Length: %u | Expected length: %u",
+			len, (write_setting->addr_type +
+			(write_setting->size * write_setting->data_type)));
+		rc = -EINVAL;
+		goto free_res;
+	}
+
+	rc = cam_qup_i2c_txdata(client, buf, len);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
+
+free_res:
+	kfree(buf);
+	return rc;
+}
+
+int32_t cam_qup_i2c_write_continuous_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_settings,
+	uint8_t cam_sensor_i2c_write_flag)
+{
+	int32_t rc = 0;
+
+	if (!client || !write_settings)
+		return -EINVAL;
+
+	if ((write_settings->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_settings->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| (write_settings->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_settings->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)))
+		return -EINVAL;
+
+	if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_BURST)
+		rc = cam_qup_i2c_write_burst(client, write_settings);
+	else if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_SEQ)
+		rc = cam_qup_i2c_write_seq(client, write_settings);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index 0a3878e..37784b4 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -231,6 +231,8 @@
 		cam_cmd_i2c_continuous_wr->header.addr_type;
 	i2c_list->i2c_settings.data_type =
 		cam_cmd_i2c_continuous_wr->header.data_type;
+	i2c_list->i2c_settings.size =
+		cam_cmd_i2c_continuous_wr->header.count;
 
 	for (cnt = 0; cnt < (cam_cmd_i2c_continuous_wr->header.count);
 		cnt++) {
@@ -1179,12 +1181,6 @@
 		return -EINVAL;
 	}
 
-	if (cam_res_mgr_shared_pinctrl_init()) {
-		CAM_ERR(CAM_SENSOR,
-			"Failed to init shared pinctrl");
-		return -EINVAL;
-	}
-
 	return 0;
 }
 
@@ -1247,6 +1243,12 @@
 		ctrl->cam_pinctrl_status = 1;
 	}
 
+	if (cam_res_mgr_shared_pinctrl_init()) {
+		CAM_ERR(CAM_SENSOR,
+			"Failed to init shared pinctrl");
+		return -EINVAL;
+	}
+
 	rc = cam_sensor_util_request_gpio_table(soc_info, 1);
 	if (rc < 0)
 		no_gpio = rc;
@@ -1257,18 +1259,13 @@
 			ctrl->pinctrl_info.gpio_state_active);
 		if (ret)
 			CAM_ERR(CAM_SENSOR, "cannot set pin to active state");
-
-		ret = cam_res_mgr_shared_pinctrl_select_state(true);
-		if (ret)
-			CAM_ERR(CAM_SENSOR,
-				"Cannot set shared pin to active state");
-
-		ret = cam_res_mgr_shared_pinctrl_post_init();
-		if (ret)
-			CAM_ERR(CAM_SENSOR,
-				"Failed to post init shared pinctrl");
 	}
 
+	ret = cam_res_mgr_shared_pinctrl_select_state(true);
+	if (ret)
+		CAM_ERR(CAM_SENSOR,
+			"Cannot set shared pin to active state");
+
 	for (index = 0; index < ctrl->power_setting_size; index++) {
 		CAM_DBG(CAM_SENSOR, "index: %d", index);
 		power_setting = &ctrl->power_setting[index];
@@ -1430,6 +1427,11 @@
 				(power_setting->delay * 1000) + 1000);
 	}
 
+	ret = cam_res_mgr_shared_pinctrl_post_init();
+	if (ret)
+		CAM_ERR(CAM_SENSOR,
+			"Failed to post init shared pinctrl");
+
 	return 0;
 power_up_failed:
 	CAM_ERR(CAM_SENSOR, "failed");
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index e7dcbe7..ae9f74c 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -55,7 +55,6 @@
 {
 	struct sync_callback_info *sync_cb;
 	struct sync_callback_info *cb_info;
-	struct sync_callback_info *temp_cb;
 	struct sync_table_row *row = NULL;
 
 	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
@@ -72,6 +71,17 @@
 		return -EINVAL;
 	}
 
+	/* Don't register if callback was registered earlier */
+	list_for_each_entry(cb_info, &row->callback_list, list) {
+		if (cb_info->callback_func == cb_func &&
+			cb_info->cb_data == userdata) {
+			CAM_ERR(CAM_SYNC, "Duplicate register for sync_obj %d",
+				sync_obj);
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+			return -EALREADY;
+		}
+	}
+
 	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
 	if (!sync_cb) {
 		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
@@ -86,7 +96,6 @@
 		sync_cb->sync_obj = sync_obj;
 		INIT_WORK(&sync_cb->cb_dispatch_work,
 			cam_sync_util_cb_dispatch);
-		list_add_tail(&sync_cb->list, &row->callback_list);
 		sync_cb->status = row->state;
 		queue_work(sync_dev->work_queue,
 			&sync_cb->cb_dispatch_work);
@@ -95,16 +104,6 @@
 		return 0;
 	}
 
-	/* Don't register if callback was registered earlier */
-	list_for_each_entry_safe(cb_info, temp_cb, &row->callback_list, list) {
-		if (cb_info->callback_func == cb_func &&
-			cb_info->cb_data == userdata) {
-			kfree(sync_cb);
-			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-			return -EALREADY;
-		}
-	}
-
 	sync_cb->callback_func = cb_func;
 	sync_cb->cb_data = userdata;
 	sync_cb->sync_obj = sync_obj;
@@ -238,6 +237,8 @@
 		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
 	}
 
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
 	/*
 	 * Now dispatch the various sync objects collected so far, in our
 	 * list
@@ -251,17 +252,20 @@
 		struct sync_user_payload *temp_payload_info;
 
 		signalable_row = sync_dev->sync_table + list_info->sync_obj;
+
+		spin_lock_bh(&sync_dev->row_spinlocks[list_info->sync_obj]);
 		/* Dispatch kernel callbacks if any were registered earlier */
 		list_for_each_entry_safe(sync_cb,
-		temp_sync_cb, &signalable_row->callback_list, list) {
+			temp_sync_cb, &signalable_row->callback_list, list) {
 			sync_cb->status = list_info->status;
+			list_del_init(&sync_cb->list);
 			queue_work(sync_dev->work_queue,
 				&sync_cb->cb_dispatch_work);
 		}
 
 		/* Dispatch user payloads if any were registered earlier */
 		list_for_each_entry_safe(payload_info, temp_payload_info,
-		&signalable_row->user_payload_list, list) {
+			&signalable_row->user_payload_list, list) {
 			spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
 			if (!sync_dev->cam_sync_eventq) {
 				spin_unlock_bh(
@@ -291,12 +295,12 @@
 		 */
 		complete_all(&signalable_row->signaled);
 
+		spin_unlock_bh(&sync_dev->row_spinlocks[list_info->sync_obj]);
+
 		list_del_init(&list_info->list);
 		kfree(list_info);
 	}
 
-	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
-
 	return rc;
 }
 
@@ -346,9 +350,7 @@
 
 int cam_sync_destroy(int32_t sync_obj)
 {
-
-	cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
-	return 0;
+	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
 }
 
 int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
index e2a7fcb..5ae707a 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
@@ -58,11 +58,11 @@
  * enum sync_list_clean_type - Enum to indicate the type of list clean action
  * to be peformed, i.e. specific sync ID or all list sync ids.
  *
- * @SYNC_CLEAN_ID  : Specific object to be cleaned in the list
+ * @SYNC_CLEAN_ONE : Specific object to be cleaned in the list
  * @SYNC_CLEAN_ALL : Clean all objects in the list
  */
 enum sync_list_clean_type {
-	SYNC_LIST_CLEAN_ID,
+	SYNC_LIST_CLEAN_ONE,
 	SYNC_LIST_CLEAN_ALL
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
index 6aa7c23..826253c 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -144,8 +144,8 @@
 		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
 
 		if (!child_info) {
-			cam_sync_util_cleanup_children_list(
-				&row->children_list, SYNC_LIST_CLEAN_ALL, 0);
+			cam_sync_util_cleanup_children_list(row,
+				SYNC_LIST_CLEAN_ALL, 0);
 			return -ENOMEM;
 		}
 
@@ -159,11 +159,10 @@
 		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
 		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
 		if (!parent_info) {
-			cam_sync_util_cleanup_parents_list(
-				&child_row->parents_list,
+			cam_sync_util_cleanup_parents_list(child_row,
 				SYNC_LIST_CLEAN_ALL, 0);
-			cam_sync_util_cleanup_children_list(
-				&row->children_list, SYNC_LIST_CLEAN_ALL, 0);
+			cam_sync_util_cleanup_children_list(row,
+				SYNC_LIST_CLEAN_ALL, 0);
 			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
 			return -ENOMEM;
 		}
@@ -197,13 +196,13 @@
 
 int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
 {
-	struct sync_table_row *row = table + idx;
-	struct sync_child_info *child_info, *temp_child, *child_copy_info;
-	struct sync_callback_info *sync_cb, *temp_cb;
-	struct sync_parent_info *parent_info, *temp_parent, *parent_copy_info;
-	struct sync_user_payload *upayload_info, *temp_upayload;
-	struct sync_table_row   *child_row = NULL, *parent_row = NULL;
-	struct list_head child_copy_list, parent_copy_list;
+	struct sync_table_row      *row = table + idx;
+	struct sync_child_info     *child_info, *temp_child;
+	struct sync_callback_info  *sync_cb, *temp_cb;
+	struct sync_parent_info    *parent_info, *temp_parent;
+	struct sync_user_payload   *upayload_info, *temp_upayload;
+	struct sync_table_row      *child_row = NULL, *parent_row = NULL;
+	struct list_head            temp_child_list, temp_parent_list;
 
 	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
 		return -EINVAL;
@@ -217,67 +216,38 @@
 		return -EINVAL;
 	}
 
-	/* Objects child and parent objects will be added into this list */
-	INIT_LIST_HEAD(&child_copy_list);
-	INIT_LIST_HEAD(&parent_copy_list);
+	/* Object's child and parent objects will be added into this list */
+	INIT_LIST_HEAD(&temp_child_list);
+	INIT_LIST_HEAD(&temp_parent_list);
 
 	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
 		list) {
 		if (child_info->sync_id <= 0)
 			continue;
 
-		child_copy_info = kzalloc(sizeof(*child_copy_info), GFP_ATOMIC);
-		if (!child_copy_info) {
-			/* No free memory, clean up the child_copy_list */
-			while (!list_empty(&child_copy_list)) {
-				child_info = list_first_entry(&child_copy_list,
-					struct sync_child_info, list);
-				list_del_init(&child_info->list);
-				kfree(child_info);
-			}
-			spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
-			goto deinit;
-		}
-		child_copy_info->sync_id = child_info->sync_id;
-		list_add_tail(&child_copy_info->list, &child_copy_list);
+		list_del_init(&child_info->list);
+		list_add_tail(&child_info->list, &temp_child_list);
 	}
 
 	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
 		list) {
 		if (parent_info->sync_id <= 0)
 			continue;
-		parent_copy_info = kzalloc(sizeof(*parent_copy_info),
-			GFP_ATOMIC);
-		if (!parent_copy_info) {
-			/* No free memory, clean up the parent_copy_list */
-			while (!list_empty(&parent_copy_list)) {
-				parent_info = list_first_entry(
-					&parent_copy_list,
-					struct sync_parent_info, list);
-				list_del_init(&parent_info->list);
-				kfree(parent_info);
-			}
-			/* No free memory, clean up the child_copy_list */
-			while (!list_empty(&child_copy_list)) {
-				child_info = list_first_entry(&child_copy_list,
-					struct sync_child_info, list);
-				list_del_init(&child_info->list);
-				kfree(child_info);
-			}
-			spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
-			goto deinit;
-		}
-		parent_copy_info->sync_id = parent_info->sync_id;
-		list_add_tail(&parent_copy_info->list, &parent_copy_list);
+
+		list_del_init(&parent_info->list);
+		list_add_tail(&parent_info->list, &temp_parent_list);
 	}
 
 	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
-	/* Cleanup the child to parent link from child list*/
-	while (!list_empty(&child_copy_list)) {
-		child_info = list_first_entry(&child_copy_list,
+
+	/* Cleanup the child to parent link from child list */
+	while (!list_empty(&temp_child_list)) {
+		child_info = list_first_entry(&temp_child_list,
 			struct sync_child_info, list);
 		child_row = sync_dev->sync_table + child_info->sync_id;
+
 		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
+
 		if (child_row->state == CAM_SYNC_STATE_INVALID) {
 			spin_unlock_bh(&sync_dev->row_spinlocks[
 				child_info->sync_id]);
@@ -286,20 +256,23 @@
 			continue;
 		}
 
-		cam_sync_util_cleanup_parents_list(&child_row->parents_list,
-			SYNC_LIST_CLEAN_ID, idx);
+		cam_sync_util_cleanup_parents_list(child_row,
+			SYNC_LIST_CLEAN_ONE, idx);
 
 		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
+
 		list_del_init(&child_info->list);
 		kfree(child_info);
 	}
 
 	/* Cleanup the parent to child link */
-	while (!list_empty(&parent_copy_list)) {
-		parent_info = list_first_entry(&parent_copy_list,
+	while (!list_empty(&temp_parent_list)) {
+		parent_info = list_first_entry(&temp_parent_list,
 			struct sync_parent_info, list);
 		parent_row = sync_dev->sync_table + parent_info->sync_id;
+
 		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+
 		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
 			spin_unlock_bh(&sync_dev->row_spinlocks[
 				parent_info->sync_id]);
@@ -308,29 +281,24 @@
 			continue;
 		}
 
-		cam_sync_util_cleanup_children_list(&parent_row->children_list,
-			SYNC_LIST_CLEAN_ID, idx);
+		cam_sync_util_cleanup_children_list(parent_row,
+			SYNC_LIST_CLEAN_ONE, idx);
 
 		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+
 		list_del_init(&parent_info->list);
 		kfree(parent_info);
 	}
 
-deinit:
 	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
-	cam_sync_util_cleanup_children_list(&row->children_list,
-		SYNC_LIST_CLEAN_ALL, 0);
-	cam_sync_util_cleanup_parents_list(&row->parents_list,
-		SYNC_LIST_CLEAN_ALL, 0);
-
 	list_for_each_entry_safe(upayload_info, temp_upayload,
-				&row->user_payload_list, list) {
+			&row->user_payload_list, list) {
 		list_del_init(&upayload_info->list);
 		kfree(upayload_info);
 	}
 
 	list_for_each_entry_safe(sync_cb, temp_cb,
-				&row->callback_list, list) {
+			&row->callback_list, list) {
 		list_del_init(&sync_cb->list);
 		kfree(sync_cb);
 	}
@@ -349,10 +317,6 @@
 		struct sync_callback_info,
 		cb_dispatch_work);
 
-	spin_lock_bh(&sync_dev->row_spinlocks[cb_info->sync_obj]);
-	list_del_init(&cb_info->list);
-	spin_unlock_bh(&sync_dev->row_spinlocks[cb_info->sync_obj]);
-
 	cb_info->callback_func(cb_info->sync_obj,
 		cb_info->status,
 		cb_info->cb_data);
@@ -457,7 +421,7 @@
 	return result;
 }
 
-void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean,
+void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
 	uint32_t list_clean_type, uint32_t sync_obj)
 {
 	struct sync_child_info *child_info = NULL;
@@ -465,8 +429,8 @@
 	uint32_t                curr_sync_obj;
 
 	list_for_each_entry_safe(child_info,
-			temp_child_info, list_to_clean, list) {
-		if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+			temp_child_info, &row->children_list, list) {
+		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
 			(child_info->sync_id != sync_obj))
 			continue;
 
@@ -474,13 +438,13 @@
 		list_del_init(&child_info->list);
 		kfree(child_info);
 
-		if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
 			(curr_sync_obj == sync_obj))
 			break;
 	}
 }
 
-void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean,
+void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
 	uint32_t list_clean_type, uint32_t sync_obj)
 {
 	struct sync_parent_info *parent_info = NULL;
@@ -488,8 +452,8 @@
 	uint32_t                 curr_sync_obj;
 
 	list_for_each_entry_safe(parent_info,
-			temp_parent_info, list_to_clean, list) {
-		if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+			temp_parent_info, &row->parents_list, list) {
+		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
 			(parent_info->sync_id != sync_obj))
 			continue;
 
@@ -497,7 +461,7 @@
 		list_del_init(&parent_info->list);
 		kfree(parent_info);
 
-		if ((list_clean_type == SYNC_LIST_CLEAN_ID) &&
+		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
 			(curr_sync_obj == sync_obj))
 			break;
 	}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
index 1c5c4bf..ae7d542 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -140,26 +140,26 @@
 
 /**
  * @brief: Function to clean up the children of a sync object
- * @param list_to_clean : List to clean up
+ * @row                 : Row whose child list to clean
  * @list_clean_type     : Clean specific object or clean all objects
  * @sync_obj            : Sync object to be clean if list clean type is
- *                         SYNC_LIST_CLEAN_ID
+ *                          SYNC_LIST_CLEAN_ONE
  *
  * @return None
  */
-void cam_sync_util_cleanup_children_list(struct list_head *list_to_clean,
+void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
 	uint32_t list_clean_type, uint32_t sync_obj);
 
 /**
  * @brief: Function to clean up the parents of a sync object
- * @param list_to_clean : List to clean up
+ * @row                 : Row whose parent list to clean
  * @list_clean_type     : Clean specific object or clean all objects
  * @sync_obj            : Sync object to be clean if list clean type is
- *                         SYNC_LIST_CLEAN_ID
+ *                          SYNC_LIST_CLEAN_ONE
  *
  * @return None
  */
-void cam_sync_util_cleanup_parents_list(struct list_head *list_to_clean,
+void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
 	uint32_t list_clean_type, uint32_t sync_obj);
 
 #endif /* __CAM_SYNC_UTIL_H__ */
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index c821cca..d6ca73b 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -58,6 +58,10 @@
 	{"TC58NYG2S0HBAI4 4G 1.8V 8-bit",
 		{ .id = {0x98, 0xac, 0x90, 0x26, 0x76, 0x00, 0x00, 0x00} },
 		SZ_4K, SZ_512, SZ_256K, 0, 5, 256, NAND_ECC_INFO(8, SZ_512) },
+	{"MT29F4G08ABBFA3W 4G 1.8V 8-bit",
+		{ .id = {0x2c, 0xac, 0x80, 0x26, 0x00, 0x00, 0x00, 0x00} },
+		SZ_4K, SZ_512, SZ_256K, 0, 4, 256, NAND_ECC_INFO(8, SZ_512) },
+
 
 	LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
 	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
index 7553a24..c02aabf 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_com.h
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
@@ -340,10 +340,13 @@
 	bool                         l23_ready;
 	bool                         l1ss_enabled;
 	struct ep_pcie_msi_config    msi_cfg;
+	bool                         no_notify;
+	bool                         client_ready;
 
 	struct ep_pcie_register_event *event_reg;
 	struct work_struct	     handle_perst_work;
 	struct work_struct           handle_bme_work;
+	struct work_struct           handle_d3cold_work;
 };
 
 extern struct ep_pcie_dev_t ep_pcie_dev;
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
index 88c03fc..e48409b 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
@@ -1060,6 +1060,10 @@
 
 static void ep_pcie_enumeration_complete(struct ep_pcie_dev_t *dev)
 {
+	unsigned long irqsave_flags;
+
+	spin_lock_irqsave(&dev->isr_lock, irqsave_flags);
+
 	dev->enumerated = true;
 	dev->link_status = EP_PCIE_LINK_ENABLED;
 
@@ -1086,7 +1090,14 @@
 		"PCIe V%d: register driver for device 0x%x.\n",
 		ep_pcie_dev.rev, hw_drv.device_id);
 	ep_pcie_register_drv(&hw_drv);
-	ep_pcie_notify_event(dev, EP_PCIE_EVENT_LINKUP);
+	if (!dev->no_notify)
+		ep_pcie_notify_event(dev, EP_PCIE_EVENT_LINKUP);
+	else
+		EP_PCIE_DBG(dev,
+			"PCIe V%d: do not notify client about linkup.\n",
+			dev->rev);
+
+	spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags);
 }
 
 int ep_pcie_core_enable_endpoint(enum ep_pcie_options opt)
@@ -1584,7 +1595,13 @@
 			"PCIe V%d: No. %ld change to D3 state.\n",
 			dev->rev, dev->d3_counter);
 		ep_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(1));
-		ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D3_HOT);
+
+		if (dev->enumerated)
+			ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D3_HOT);
+		else
+			EP_PCIE_DBG(dev,
+				"PCIe V%d: do not notify client about this D3 hot event since enumeration by HLOS is not done yet.\n",
+				dev->rev);
 	} else if (dstate == 0) {
 		dev->l23_ready = false;
 		dev->d0_counter++;
@@ -1648,9 +1665,25 @@
 	struct ep_pcie_dev_t *dev = container_of(work, struct ep_pcie_dev_t,
 					handle_perst_work);
 
+	EP_PCIE_DBG(dev,
+		"PCIe V%d: Start enumeration due to PERST deassertion.\n",
+		dev->rev);
+
 	ep_pcie_enumeration(dev);
 }
 
+static void handle_d3cold_func(struct work_struct *work)
+{
+	struct ep_pcie_dev_t *dev = container_of(work, struct ep_pcie_dev_t,
+					handle_d3cold_work);
+
+	EP_PCIE_DBG(dev,
+		"PCIe V%d: shutdown PCIe link due to PERST assertion before BME is set.\n",
+		dev->rev);
+	ep_pcie_core_disable_endpoint();
+	dev->no_notify = false;
+}
+
 static void handle_bme_func(struct work_struct *work)
 {
 	struct ep_pcie_dev_t *dev = container_of(work,
@@ -1673,10 +1706,14 @@
 		EP_PCIE_DBG(dev,
 			"PCIe V%d: PCIe is not enumerated yet; PERST is %sasserted.\n",
 			dev->rev, perst ? "de" : "");
-		if ((!dev->perst_enum) || !perst)
-			goto out;
-		/* start work for link enumeration with the host side */
-		schedule_work(&dev->handle_perst_work);
+		if (perst) {
+			/* start work for link enumeration with the host side */
+			schedule_work(&dev->handle_perst_work);
+		} else {
+			dev->no_notify = true;
+			/* shutdown the link if the link is already on */
+			schedule_work(&dev->handle_d3cold_work);
+		}
 
 		goto out;
 	}
@@ -1694,7 +1731,16 @@
 		EP_PCIE_DBG(dev,
 			"PCIe V%d: No. %ld PERST assertion.\n",
 			dev->rev, dev->perst_ast_counter);
-		ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D3_COLD);
+
+		if (dev->client_ready) {
+			ep_pcie_notify_event(dev, EP_PCIE_EVENT_PM_D3_COLD);
+		} else {
+			dev->no_notify = true;
+			EP_PCIE_DBG(dev,
+				"PCIe V%d: Client driver is not ready when this PERST assertion happens; shutdown link now.\n",
+				dev->rev);
+			schedule_work(&dev->handle_d3cold_work);
+		}
 	}
 
 out:
@@ -1779,6 +1825,7 @@
 	/* Initialize all works to be performed before registering for IRQs*/
 	INIT_WORK(&dev->handle_perst_work, handle_perst_func);
 	INIT_WORK(&dev->handle_bme_work, handle_bme_func);
+	INIT_WORK(&dev->handle_d3cold_work, handle_d3cold_func);
 
 	if (dev->aggregated_irq) {
 		ret = devm_request_irq(pdev,
@@ -1932,6 +1979,8 @@
 		"PCIe V%d: Event 0x%x is registered\n",
 		ep_pcie_dev.rev, reg->events);
 
+	ep_pcie_dev.client_ready = true;
+
 	return 0;
 }
 
@@ -1970,6 +2019,12 @@
 			"PCIe V%d: PCIe link is up and BME is enabled; current SW link status:%d.\n",
 			dev->rev, dev->link_status);
 		dev->link_status = EP_PCIE_LINK_ENABLED;
+		if (dev->no_notify) {
+			EP_PCIE_DBG(dev,
+				"PCIe V%d: BME is set now, but do not tell client about BME enable.\n",
+				dev->rev);
+			return EP_PCIE_LINK_UP;
+		}
 	} else {
 		EP_PCIE_DBG(dev,
 			"PCIe V%d: PCIe link is up but BME is disabled; current SW link status:%d.\n",
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 745e429..90920d9 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -2225,6 +2225,7 @@
 	}
 
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		ipa3_xdci_ep_delay_rm(ul_clnt_hdl); /* Remove ep_delay if set */
 		/* Reset UL channel */
 		result = ipa3_reset_gsi_channel(ul_clnt_hdl);
 		if (result) {
diff --git a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
index 8e33d71..613bed3 100644
--- a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
+++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,8 @@
 #include <linux/ipa.h>
 #include "ipa_rm_i.h"
 
+#define MAX_WS_NAME 20
+
 /**
  * struct ipa_rm_it_private - IPA RM Inactivity Timer private
  *	data
@@ -45,6 +47,8 @@
 	bool reschedule_work;
 	bool work_in_progress;
 	unsigned long jiffies;
+	struct wakeup_source w_lock;
+	char w_lock_name[MAX_WS_NAME];
 };
 
 static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX];
@@ -87,6 +91,7 @@
 	} else {
 		IPA_RM_DBG_LOW("%s: calling release_resource on resource %d!\n",
 			__func__, me->resource_name);
+		__pm_relax(&ipa_rm_it_handles[me->resource_name].w_lock);
 		ipa_rm_release_resource(me->resource_name);
 		ipa_rm_it_handles[me->resource_name].work_in_progress = false;
 	}
@@ -110,6 +115,9 @@
 int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
 				 unsigned long msecs)
 {
+	struct wakeup_source *pwlock;
+	char *name;
+
 	IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
 
 	if (resource_name < 0 ||
@@ -130,7 +138,10 @@
 	ipa_rm_it_handles[resource_name].resource_requested = false;
 	ipa_rm_it_handles[resource_name].reschedule_work = false;
 	ipa_rm_it_handles[resource_name].work_in_progress = false;
-
+	pwlock = &(ipa_rm_it_handles[resource_name].w_lock);
+	name = ipa_rm_it_handles[resource_name].w_lock_name;
+	snprintf(name, MAX_WS_NAME, "IPA_RM%d\n", resource_name);
+	wakeup_source_init(pwlock, name);
 	INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work,
 			  ipa_rm_inactivity_timer_func);
 	ipa_rm_it_handles[resource_name].initied = 1;
@@ -151,6 +162,8 @@
 */
 int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
 {
+	struct wakeup_source *pwlock;
+
 	IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
 
 	if (resource_name < 0 ||
@@ -166,6 +179,8 @@
 	}
 
 	cancel_delayed_work_sync(&ipa_rm_it_handles[resource_name].work);
+	pwlock = &(ipa_rm_it_handles[resource_name].w_lock);
+	wakeup_source_trash(pwlock);
 
 	memset(&ipa_rm_it_handles[resource_name], 0,
 	       sizeof(struct ipa_rm_it_private));
@@ -261,6 +276,7 @@
 	}
 	ipa_rm_it_handles[resource_name].work_in_progress = true;
 	ipa_rm_it_handles[resource_name].reschedule_work = false;
+	__pm_stay_awake(&ipa_rm_it_handles[resource_name].w_lock);
 	IPA_RM_DBG_LOW("%s: setting delayed work\n", __func__);
 	queue_delayed_work(system_unbound_wq,
 			      &ipa_rm_it_handles[resource_name].work,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 1c1057c..07dc7b0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -3726,6 +3726,11 @@
 					atomic_set(
 						&ipa_ctx->sps_pm.dec_clients,
 						1);
+					/*
+					 * acquire wake lock as long as suspend
+					 * vote is held
+					 */
+					ipa_inc_acquire_wakelock();
 					ipa_sps_process_irq_schedule_rel();
 				}
 				mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock);
@@ -3798,6 +3803,7 @@
 			ipa_sps_process_irq_schedule_rel();
 		} else {
 			atomic_set(&ipa_ctx->sps_pm.dec_clients, 0);
+			ipa_dec_release_wakelock();
 			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
 		}
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index b615ec8..af4d448 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4030,6 +4030,11 @@
 					atomic_set(
 					&ipa3_ctx->transport_pm.dec_clients,
 					1);
+					/*
+					 * acquire wake lock as long as suspend
+					 * vote is held
+					 */
+					ipa3_inc_acquire_wakelock();
 					ipa3_process_irq_schedule_rel();
 				}
 				mutex_unlock(&ipa3_ctx->transport_pm.
@@ -4110,6 +4115,7 @@
 			ipa3_process_irq_schedule_rel();
 		} else {
 			atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
+			ipa3_dec_release_wakelock();
 			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TRANSPORT_RESOURCE");
 		}
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 59fe07f..35b6dff 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -601,6 +601,22 @@
 	ep->priv = params->priv;
 	ep->keep_ipa_awake = params->keep_ipa_awake;
 
+
+	/* Config QMB for USB_CONS ep */
+	if (!IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Configuring QMB on USB CONS pipe\n");
+		if (ipa_ep_idx >= ipa3_ctx->ipa_num_pipes ||
+			ipa3_ctx->ep[ipa_ep_idx].valid == 0) {
+			IPAERR("bad parm.\n");
+			return -EINVAL;
+		}
+		result = ipa3_cfg_ep_cfg(ipa_ep_idx, &params->ipa_ep_cfg.cfg);
+		if (result) {
+			IPAERR("fail to configure QMB.\n");
+			return result;
+		}
+	}
+
 	if (!ep->skip_ep_cfg) {
 		if (ipa3_cfg_ep(ipa_ep_idx, &params->ipa_ep_cfg)) {
 			IPAERR("fail to configure EP.\n");
@@ -1163,6 +1179,48 @@
 	return result;
 }
 
+void ipa3_xdci_ep_delay_rm(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	int result;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->ep_delay_set == true) {
+
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = false;
+
+		if (!ep->keep_ipa_awake)
+			IPA_ACTIVE_CLIENTS_INC_EP
+				(ipa3_get_client_mapping(clnt_hdl));
+
+		result = ipa3_cfg_ep_ctrl(clnt_hdl,
+			&ep_cfg_ctrl);
+
+		if (!ep->keep_ipa_awake)
+			IPA_ACTIVE_CLIENTS_DEC_EP
+				(ipa3_get_client_mapping(clnt_hdl));
+
+		if (result) {
+			IPAERR
+			("client (ep: %d) failed to remove delay result=%d\n",
+				clnt_hdl, result);
+		} else {
+			IPADBG("client (ep: %d) delay removed\n",
+				clnt_hdl);
+			ep->ep_delay_set = false;
+		}
+	}
+}
+
 int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
 {
 	struct ipa3_ep_context *ep;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 3754aa8..adbd7b8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1682,6 +1682,8 @@
 
 int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
 
+void ipa3_xdci_ep_delay_rm(u32 clnt_hdl);
+
 int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	bool should_force_clear, u32 qmi_req_id, bool is_dpl);
 
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index 24b8e2c..bc4df04 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -237,7 +237,10 @@
 
 		pr_debug("codec ops set for %s\n", msm_ext_disp_name(type));
 	} else if (state == EXT_DISPLAY_CABLE_DISCONNECT) {
-		*ext_disp->ops = (struct msm_ext_disp_audio_codec_ops){NULL};
+		if (ext_disp->ops)
+			*ext_disp->ops =
+				(struct msm_ext_disp_audio_codec_ops){NULL};
+
 		ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
 
 		pr_debug("codec ops cleared for %s\n", msm_ext_disp_name(type));
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 4b900e2..aa5b1b0 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -150,22 +150,52 @@
 		total_current_ua = pval.intval;
 	}
 
-	pval.intval = total_current_ua - slave_ua;
-	/* Set ICL on main charger */
-	rc = power_supply_set_property(chip->main_psy,
+	/*
+	 * If there is an increase in slave share
+	 * (Also handles parallel enable case)
+	 *	Set Main ICL then slave ICL
+	 * else
+	 * (Also handles parallel disable case)
+	 *	Set slave ICL then main ICL.
+	 */
+	if (slave_ua > chip->pl_settled_ua) {
+		pval.intval = total_current_ua - slave_ua;
+		/* Set ICL on main charger */
+		rc = power_supply_set_property(chip->main_psy,
 				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
-	if (rc < 0) {
-		pr_err("Couldn't change slave suspend state rc=%d\n", rc);
-		return;
-	}
+		if (rc < 0) {
+			pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+			return;
+		}
 
-	/* set parallel's ICL  could be 0mA when pl is disabled */
-	pval.intval = slave_ua;
-	rc = power_supply_set_property(chip->pl_psy,
-			POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
-	if (rc < 0) {
-		pr_err("Couldn't set parallel icl, rc=%d\n", rc);
-		return;
+		/* set parallel's ICL  could be 0mA when pl is disabled */
+		pval.intval = slave_ua;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set parallel icl, rc=%d\n", rc);
+			return;
+		}
+	} else {
+		/* set parallel's ICL  could be 0mA when pl is disabled */
+		pval.intval = slave_ua;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set parallel icl, rc=%d\n", rc);
+			return;
+		}
+
+		pval.intval = total_current_ua - slave_ua;
+		/* Set ICL on main charger */
+		rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+			return;
+		}
 	}
 
 	chip->total_settled_ua = total_settled_ua;
@@ -626,24 +656,56 @@
 		get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
 				&slave_fcc_ua);
 
-		chip->slave_fcc_ua = slave_fcc_ua;
-
-		pval.intval = master_fcc_ua;
-		rc = power_supply_set_property(chip->main_psy,
+		/*
+		 * If there is an increase in slave share
+		 * (Also handles parallel enable case)
+		 *	Set Main ICL then slave FCC
+		 * else
+		 * (Also handles parallel disable case)
+		 *	Set slave ICL then main FCC.
+		 */
+		if (slave_fcc_ua > chip->slave_fcc_ua) {
+			pval.intval = master_fcc_ua;
+			rc = power_supply_set_property(chip->main_psy,
 				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 				&pval);
-		if (rc < 0) {
-			pr_err("Could not set main fcc, rc=%d\n", rc);
-			return rc;
-		}
+			if (rc < 0) {
+				pr_err("Could not set main fcc, rc=%d\n", rc);
+				return rc;
+			}
 
-		pval.intval = slave_fcc_ua;
-		rc = power_supply_set_property(chip->pl_psy,
+			pval.intval = slave_fcc_ua;
+			rc = power_supply_set_property(chip->pl_psy,
 				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 				&pval);
-		if (rc < 0) {
-			pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
-			return rc;
+			if (rc < 0) {
+				pr_err("Couldn't set parallel fcc, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			chip->slave_fcc_ua = slave_fcc_ua;
+		} else {
+			pval.intval = slave_fcc_ua;
+			rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+			if (rc < 0) {
+				pr_err("Couldn't set parallel fcc, rc=%d\n",
+						rc);
+				return rc;
+			}
+
+			chip->slave_fcc_ua = slave_fcc_ua;
+
+			pval.intval = master_fcc_ua;
+			rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+			if (rc < 0) {
+				pr_err("Could not set main fcc, rc=%d\n", rc);
+				return rc;
+			}
 		}
 
 		/*
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 59f2466..a279e98 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -26,10 +26,9 @@
 #include <linux/regulator/machine.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/power_supply.h>
+#include <linux/workqueue.h>
 #include <linux/pmic-voter.h>
 
-#define SMB1355_DEFAULT_FCC_UA 1000000
-
 /* SMB1355 registers, different than mentioned in smb-reg.h */
 
 #define CHGR_BASE	0x1000
@@ -72,8 +71,12 @@
 #define BATIF_CFG_SMISC_BATID_REG		(BATIF_BASE + 0x73)
 #define CFG_SMISC_RBIAS_EXT_CTRL_BIT		BIT(2)
 
+#define SMB2CHGS_BATIF_ENG_SMISC_DIETEMP	(BATIF_BASE + 0xC0)
+#define TDIE_COMPARATOR_THRESHOLD		GENMASK(5, 0)
+
 #define BATIF_ENG_SCMISC_SPARE1_REG		(BATIF_BASE + 0xC2)
 #define EXT_BIAS_PIN_BIT			BIT(2)
+#define DIE_TEMP_COMP_HYST_BIT			BIT(1)
 
 #define TEMP_COMP_STATUS_REG			(MISC_BASE + 0x07)
 #define SKIN_TEMP_RST_HOT_BIT			BIT(6)
@@ -147,7 +150,6 @@
 };
 
 struct smb_iio {
-	struct iio_channel	*temp_chan;
 	struct iio_channel	*temp_max_chan;
 };
 
@@ -170,6 +172,10 @@
 	struct pmic_revid_data	*pmic_rev_id;
 
 	int			c_health;
+	int			die_temp_deciDegC;
+	bool			exit_die_temp;
+	struct delayed_work	die_temp_work;
+	bool			disabled;
 };
 
 static bool is_secure(struct smb1355 *chip, int addr)
@@ -269,6 +275,48 @@
 	return rc;
 }
 
+#define UB_COMP_OFFSET_DEGC		34
+#define DIE_TEMP_MEAS_PERIOD_MS		10000
+static void die_temp_work(struct work_struct *work)
+{
+	struct smb1355 *chip = container_of(work, struct smb1355,
+							die_temp_work.work);
+	int rc, i;
+	u8 temp_stat;
+
+	for (i = 0; i < BIT(5); i++) {
+		rc = smb1355_masked_write(chip,
+				SMB2CHGS_BATIF_ENG_SMISC_DIETEMP,
+				TDIE_COMPARATOR_THRESHOLD, i);
+		if (rc < 0) {
+			pr_err("Couldn't set temp comp threshold rc=%d\n", rc);
+			continue;
+		}
+
+		if (chip->exit_die_temp)
+			return;
+
+		/* wait for the comparator output to deglitch */
+		msleep(100);
+
+		rc = smb1355_read(chip, TEMP_COMP_STATUS_REG, &temp_stat);
+		if (rc < 0) {
+			pr_err("Couldn't read temp comp status rc=%d\n", rc);
+			continue;
+		}
+
+		if (!(temp_stat & DIE_TEMP_UB_HOT_BIT)) {
+			/* found the temp */
+			break;
+		}
+	}
+
+	chip->die_temp_deciDegC = 10 * (i + UB_COMP_OFFSET_DEGC);
+
+	schedule_delayed_work(&chip->die_temp_work,
+			msecs_to_jiffies(DIE_TEMP_MEAS_PERIOD_MS));
+}
+
 static irqreturn_t smb1355_handle_chg_state_change(int irq, void *data)
 {
 	struct smb1355 *chip = data;
@@ -366,25 +414,6 @@
 	return rc;
 }
 
-static int smb1355_get_parallel_charging(struct smb1355 *chip, int *disabled)
-{
-	int rc;
-	u8 cfg2;
-
-	rc = smb1355_read(chip, CHGR_CFG2_REG, &cfg2);
-	if (rc < 0) {
-		pr_err("Couldn't read en_cmg_reg rc=%d\n", rc);
-		return rc;
-	}
-
-	if (cfg2 & CHG_EN_SRC_BIT)
-		*disabled = 0;
-	else
-		*disabled = 1;
-
-	return 0;
-}
-
 static int smb1355_get_prop_connector_health(struct smb1355 *chip)
 {
 	u8 temp;
@@ -409,24 +438,6 @@
 }
 
 
-static int smb1355_get_prop_charger_temp(struct smb1355 *chip,
-				union power_supply_propval *val)
-{
-	int rc;
-
-	if (!chip->iio.temp_chan ||
-		PTR_ERR(chip->iio.temp_chan) == -EPROBE_DEFER)
-		chip->iio.temp_chan = devm_iio_channel_get(chip->dev,
-						"charger_temp");
-
-	if (IS_ERR(chip->iio.temp_chan))
-		return PTR_ERR(chip->iio.temp_chan);
-
-	rc = iio_read_channel_processed(chip->iio.temp_chan, &val->intval);
-	val->intval /= 100;
-	return rc;
-}
-
 static int smb1355_get_prop_charger_temp_max(struct smb1355 *chip,
 				union power_supply_propval *val)
 {
@@ -467,13 +478,13 @@
 			val->intval = !(stat & DISABLE_CHARGING_BIT);
 		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP:
-		rc = smb1355_get_prop_charger_temp(chip, val);
+		val->intval = chip->die_temp_deciDegC;
 		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
 		rc = smb1355_get_prop_charger_temp_max(chip, val);
 		break;
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
-		rc = smb1355_get_parallel_charging(chip, &val->intval);
+		val->intval = chip->disabled;
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smb1355_get_charge_param(chip, &chip->param.ov,
@@ -513,6 +524,9 @@
 {
 	int rc;
 
+	if (chip->disabled == disable)
+		return 0;
+
 	rc = smb1355_masked_write(chip, WD_CFG_REG, WDOG_TIMER_EN_BIT,
 				 disable ? 0 : WDOG_TIMER_EN_BIT);
 	if (rc < 0) {
@@ -531,9 +545,21 @@
 			disable ? 0 : CHG_EN_SRC_BIT);
 	if (rc < 0) {
 		pr_err("Couldn't configure charge enable source rc=%d\n", rc);
-		return rc;
+		disable = true;
 	}
 
+	chip->die_temp_deciDegC = -EINVAL;
+	if (disable) {
+		chip->exit_die_temp = true;
+		cancel_delayed_work_sync(&chip->die_temp_work);
+	} else {
+		/* start the work to measure temperature */
+		chip->exit_die_temp = false;
+		schedule_delayed_work(&chip->die_temp_work, 0);
+	}
+
+	chip->disabled = disable;
+
 	return 0;
 }
 
@@ -769,18 +795,29 @@
 	}
 
 	/*
-	 * Disable thermal Die temperature comparator source and hw mitigation
-	 * for skin/die
+	 * Enable thermal Die temperature comparator source and disable hw
+	 * mitigation for skin/die
 	 */
 	rc = smb1355_masked_write(chip, MISC_THERMREG_SRC_CFG_REG,
 		THERMREG_DIE_CMP_SRC_EN_BIT | BYP_THERM_CHG_CURR_ADJUST_BIT,
-		BYP_THERM_CHG_CURR_ADJUST_BIT);
+		THERMREG_DIE_CMP_SRC_EN_BIT | BYP_THERM_CHG_CURR_ADJUST_BIT);
 	if (rc < 0) {
 		pr_err("Couldn't set Skin temperature comparator src rc=%d\n",
 			rc);
 		return rc;
 	}
 
+	/*
+	 * Disable hysteresis for die temperature. This is so that sw can run
+	 * stepping scheme quickly
+	 */
+	rc = smb1355_masked_write(chip, BATIF_ENG_SCMISC_SPARE1_REG,
+				DIE_TEMP_COMP_HYST_BIT, 0);
+	if (rc < 0) {
+		pr_err("Couldn't disable hyst. for die rc=%d\n", rc);
+		return rc;
+	}
+
 	rc = smb1355_tskin_sensor_config(chip);
 	if (rc < 0) {
 		pr_err("Couldn't configure tskin regs rc=%d\n", rc);
@@ -905,6 +942,9 @@
 	chip->c_health = -EINVAL;
 	chip->name = "smb1355";
 	mutex_init(&chip->write_lock);
+	INIT_DELAYED_WORK(&chip->die_temp_work, die_temp_work);
+	chip->disabled = true;
+	chip->die_temp_deciDegC = -EINVAL;
 
 	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
 	if (!chip->regmap) {
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index bd90802..22b7236 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -141,6 +141,22 @@
 	  Say M here if you want to include support for the Qualcomm RPM as a
 	  module. This will build a module called "qcom-smd-rpm".
 
+config MSM_SPM
+        bool "Driver support for SPM and AVS wrapper hardware"
+        help
+          Enables the support for SPM and AVS wrapper hardware on MSMs. SPM
+          hardware is used to manage the processor power during sleep. The
+          driver allows configuring SPM to allow different low power modes for
+          both core and L2.
+
+config MSM_L2_SPM
+        bool "SPM support for L2 cache"
+        help
+          Enable SPM driver support for L2 cache. Some MSM chipsets allow
+          control of L2 cache low power mode with a Subsystem Power manager.
+          Enabling this driver allows configuring L2 SPM for low power modes
+          on supported chipsets.
+
 config QCOM_SCM
 	bool "Secure Channel Manager (SCM) support"
 	default n
@@ -268,6 +284,15 @@
 	  This defines maximum number of entries to be allocated for application
 	  subsytem in Minidump table.
 
+config MSM_RPM_SMD
+	bool "RPM driver using SMD protocol"
+	help
+	  RPM is the dedicated hardware engine for managing shared SoC
+	  resources. This config adds driver support for using SMD as a
+	  transport layer communication with RPM hardware. It also selects
+	  the MSM_MPM config that programs the MPM module to monitor interrupts
+	  during sleep modes.
+
 config QCOM_BUS_SCALING
 	bool "Bus scaling driver"
 	help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 4454512..6deadc0 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_QCOM_SMD) +=	smd.o
 obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
 obj-$(CONFIG_QCOM_SMEM) +=	smem.o
+obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o
 obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
 obj-$(CONFIG_QCOM_SMP2P)	+= smp2p.o
 obj-$(CONFIG_QCOM_SMSM)	+= smsm.o
@@ -42,6 +43,10 @@
 obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o
 obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
 obj-$(CONFIG_QTI_RPMH_API) += rpmh.o
+obj-$(CONFIG_MSM_RPM_SMD)   +=  rpm-smd.o
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_MSM_RPM_SMD)   +=  rpm-smd-debug.o
+endif
 obj-$(CONFIG_QTI_SYSTEM_PM) += system_pm.o
 obj-$(CONFIG_MSM_SERVICE_NOTIFIER) += service-notifier.o
 obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
diff --git a/drivers/soc/qcom/glink_loopback_server.c b/drivers/soc/qcom/glink_loopback_server.c
index 94a3d8c..abd140e 100644
--- a/drivers/soc/qcom/glink_loopback_server.c
+++ b/drivers/soc/qcom/glink_loopback_server.c
@@ -140,7 +140,7 @@
 	{"LOOPBACK_CTL_APSS", "mpss", "smem"},
 	{"LOOPBACK_CTL_APSS", "lpass", "smem"},
 	{"LOOPBACK_CTL_APSS", "dsps", "smem"},
-	{"LOOPBACK_CTL_APPS", "cdsp", "smem"},
+	{"LOOPBACK_CTL_APSS", "cdsp", "smem"},
 	{"LOOPBACK_CTL_APSS", "spss", "mailbox"},
 	{"LOOPBACK_CTL_APSS", "wdsp", "spi"},
 };
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
index 3bcf56e..31c1721 100644
--- a/drivers/soc/qcom/glink_private.h
+++ b/drivers/soc/qcom/glink_private.h
@@ -750,7 +750,6 @@
  * ssr_name:	Name of the subsystem recognized by the SSR framework
  * edge:	Name of the G-Link edge
  * xprt:	Name of the G-Link transport
- * restarted:	Indicates whether a restart has been triggered for this edge
  * cb_data:	Private callback data structure for notification functions
  * notify_list_node:	used to chain this structure in the notify list
  */
@@ -758,7 +757,6 @@
 	const char *ssr_name;
 	const char *edge;
 	const char *xprt;
-	bool restarted;
 	struct ssr_notify_data *cb_data;
 	struct list_head notify_list_node;
 };
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index 4a3293d..dd436da 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -22,7 +22,6 @@
 #include <linux/random.h>
 #include <soc/qcom/glink.h>
 #include <soc/qcom/subsystem_notif.h>
-#include <soc/qcom/subsystem_restart.h>
 #include "glink_private.h"
 
 #define GLINK_SSR_REPLY_TIMEOUT	HZ
@@ -607,13 +606,9 @@
 				atomic_read(&responses_remaining));
 			kfree(do_cleanup_data);
 
-			if (strcmp(ss_leaf_entry->ssr_name, "rpm")) {
-				subsystem_restart(ss_leaf_entry->ssr_name);
-				ss_leaf_entry->restarted = true;
-			} else {
+			if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
 				panic("%s: Could not queue intent for RPM!\n",
 						__func__);
-			}
 			atomic_dec(&responses_remaining);
 			kref_put(&ss_leaf_entry->cb_data->cb_kref,
 							cb_data_release);
@@ -637,13 +632,9 @@
 					atomic_read(&responses_remaining));
 			kfree(do_cleanup_data);
 
-			if (strcmp(ss_leaf_entry->ssr_name, "rpm")) {
-				subsystem_restart(ss_leaf_entry->ssr_name);
-				ss_leaf_entry->restarted = true;
-			} else {
+			if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
 				panic("%s: glink_tx() to RPM failed!\n",
 						__func__);
-			}
 			atomic_dec(&responses_remaining);
 			kref_put(&ss_leaf_entry->cb_data->cb_kref,
 							cb_data_release);
@@ -685,11 +676,7 @@
 			/* Check for RPM, as it can't be restarted */
 			if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
 				panic("%s: RPM failed to respond!\n", __func__);
-			else if (!ss_leaf_entry->restarted)
-				subsystem_restart(ss_leaf_entry->ssr_name);
 		}
-		ss_leaf_entry->restarted = false;
-
 		if (!IS_ERR_OR_NULL(ss_leaf_entry->cb_data))
 			ss_leaf_entry->cb_data->responded = false;
 		kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
@@ -1009,7 +996,6 @@
 		ss_info_leaf->ssr_name = subsys_name;
 		ss_info_leaf->edge = edge;
 		ss_info_leaf->xprt = xprt;
-		ss_info_leaf->restarted = false;
 		list_add_tail(&ss_info_leaf->notify_list_node,
 				&ss_info->notify_list);
 		ss_info->notify_list_len++;
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index e391cd1..6f0c38d 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2228,6 +2228,12 @@
 	if (test_bit(SKIP_QMI, &quirks))
 		set_bit(ICNSS_FW_READY, &penv->state);
 
+	if (test_bit(ICNSS_FW_DOWN, &penv->state)) {
+		icnss_pr_err("FW is in bad state, state: 0x%lx\n",
+			     penv->state);
+		return -ENODEV;
+	}
+
 	if (!test_bit(ICNSS_FW_READY, &penv->state)) {
 		icnss_pr_dbg("FW is not ready yet, state: 0x%lx\n",
 			     penv->state);
@@ -2801,7 +2807,8 @@
 	return 0;
 }
 
-int icnss_register_driver(struct icnss_driver_ops *ops)
+int __icnss_register_driver(struct icnss_driver_ops *ops,
+			    struct module *owner, const char *mod_name)
 {
 	int ret = 0;
 
@@ -2832,7 +2839,7 @@
 out:
 	return ret;
 }
-EXPORT_SYMBOL(icnss_register_driver);
+EXPORT_SYMBOL(__icnss_register_driver);
 
 int icnss_unregister_driver(struct icnss_driver_ops *ops)
 {
@@ -2858,7 +2865,7 @@
 }
 EXPORT_SYMBOL(icnss_unregister_driver);
 
-int icnss_ce_request_irq(unsigned int ce_id,
+int icnss_ce_request_irq(struct device *dev, unsigned int ce_id,
 	irqreturn_t (*handler)(int, void *),
 		unsigned long flags, const char *name, void *ctx)
 {
@@ -2866,7 +2873,7 @@
 	unsigned int irq;
 	struct ce_irq_list *irq_entry;
 
-	if (!penv || !penv->pdev) {
+	if (!penv || !penv->pdev || !dev) {
 		ret = -ENODEV;
 		goto out;
 	}
@@ -2905,13 +2912,13 @@
 }
 EXPORT_SYMBOL(icnss_ce_request_irq);
 
-int icnss_ce_free_irq(unsigned int ce_id, void *ctx)
+int icnss_ce_free_irq(struct device *dev, unsigned int ce_id, void *ctx)
 {
 	int ret = 0;
 	unsigned int irq;
 	struct ce_irq_list *irq_entry;
 
-	if (!penv || !penv->pdev) {
+	if (!penv || !penv->pdev || !dev) {
 		ret = -ENODEV;
 		goto out;
 	}
@@ -2941,11 +2948,11 @@
 }
 EXPORT_SYMBOL(icnss_ce_free_irq);
 
-void icnss_enable_irq(unsigned int ce_id)
+void icnss_enable_irq(struct device *dev, unsigned int ce_id)
 {
 	unsigned int irq;
 
-	if (!penv || !penv->pdev) {
+	if (!penv || !penv->pdev || !dev) {
 		icnss_pr_err("Platform driver not initialized\n");
 		return;
 	}
@@ -2965,11 +2972,11 @@
 }
 EXPORT_SYMBOL(icnss_enable_irq);
 
-void icnss_disable_irq(unsigned int ce_id)
+void icnss_disable_irq(struct device *dev, unsigned int ce_id)
 {
 	unsigned int irq;
 
-	if (!penv || !penv->pdev) {
+	if (!penv || !penv->pdev || !dev) {
 		icnss_pr_err("Platform driver not initialized\n");
 		return;
 	}
@@ -2990,9 +2997,9 @@
 }
 EXPORT_SYMBOL(icnss_disable_irq);
 
-int icnss_get_soc_info(struct icnss_soc_info *info)
+int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info)
 {
-	if (!penv) {
+	if (!penv || !dev) {
 		icnss_pr_err("Platform driver not initialized\n");
 		return -EINVAL;
 	}
@@ -3012,10 +3019,13 @@
 }
 EXPORT_SYMBOL(icnss_get_soc_info);
 
-int icnss_set_fw_log_mode(uint8_t fw_log_mode)
+int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode)
 {
 	int ret;
 
+	if (!dev)
+		return -ENODEV;
+
 	icnss_pr_dbg("FW log mode: %u\n", fw_log_mode);
 
 	ret = wlfw_ini_send_sync_msg(fw_log_mode);
@@ -3098,7 +3108,7 @@
 }
 EXPORT_SYMBOL(icnss_athdiag_write);
 
-int icnss_wlan_enable(struct icnss_wlan_enable_cfg *config,
+int icnss_wlan_enable(struct device *dev, struct icnss_wlan_enable_cfg *config,
 		      enum icnss_driver_mode mode,
 		      const char *host_version)
 {
@@ -3106,6 +3116,9 @@
 	u32 i;
 	int ret;
 
+	if (!dev)
+		return -ENODEV;
+
 	icnss_pr_dbg("Mode: %d, config: %p, host_version: %s\n",
 		     mode, config, host_version);
 
@@ -3172,23 +3185,26 @@
 }
 EXPORT_SYMBOL(icnss_wlan_enable);
 
-int icnss_wlan_disable(enum icnss_driver_mode mode)
+int icnss_wlan_disable(struct device *dev, enum icnss_driver_mode mode)
 {
+	if (!dev)
+		return -ENODEV;
+
 	return wlfw_wlan_mode_send_sync_msg(QMI_WLFW_OFF_V01);
 }
 EXPORT_SYMBOL(icnss_wlan_disable);
 
-bool icnss_is_qmi_disable(void)
+bool icnss_is_qmi_disable(struct device *dev)
 {
 	return test_bit(SKIP_QMI, &quirks) ? true : false;
 }
 EXPORT_SYMBOL(icnss_is_qmi_disable);
 
-int icnss_get_ce_id(int irq)
+int icnss_get_ce_id(struct device *dev, int irq)
 {
 	int i;
 
-	if (!penv || !penv->pdev)
+	if (!penv || !penv->pdev || !dev)
 		return -ENODEV;
 
 	for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
@@ -3202,11 +3218,11 @@
 }
 EXPORT_SYMBOL(icnss_get_ce_id);
 
-int icnss_get_irq(int ce_id)
+int icnss_get_irq(struct device *dev, int ce_id)
 {
 	int irq;
 
-	if (!penv || !penv->pdev)
+	if (!penv || !penv->pdev || !dev)
 		return -ENODEV;
 
 	if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS)
@@ -3582,7 +3598,7 @@
 		goto out;
 	}
 
-	icnss_wlan_disable(ICNSS_OFF);
+	icnss_wlan_disable(&priv->pdev->dev, ICNSS_OFF);
 
 	ret = icnss_hw_power_off(priv);
 
@@ -3623,7 +3639,7 @@
 
 	set_bit(ICNSS_FW_TEST_MODE, &priv->state);
 
-	ret = icnss_wlan_enable(NULL, mode, NULL);
+	ret = icnss_wlan_enable(&priv->pdev->dev, NULL, mode, NULL);
 	if (ret)
 		goto power_off;
 
diff --git a/drivers/soc/qcom/msm-spm.c b/drivers/soc/qcom/msm-spm.c
index bffe3f3..3033a4a 100644
--- a/drivers/soc/qcom/msm-spm.c
+++ b/drivers/soc/qcom/msm-spm.c
@@ -170,7 +170,7 @@
 		struct msm_spm_driver_data *dev)
 {
 	if (!dev)
-		return;
+		return -ENODEV;
 
 	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
 	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 24) & 0xFF;
@@ -198,7 +198,7 @@
 	 * Ensure that vctl_port is always set to 0.
 	 */
 	if (dev->vctl_port) {
-		WARN();
+		__WARN();
 		return;
 	}
 
diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c
new file mode 100644
index 0000000..6ae9f08
--- /dev/null
+++ b/drivers/soc/qcom/rpm-smd-debug.c
@@ -0,0 +1,151 @@
+/* Copyright (c) 2013-2014, 2017,  The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "rpm-smd-debug: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <soc/qcom/rpm-smd.h>
+
+#define MAX_MSG_BUFFER 350
+#define MAX_KEY_VALUE_PAIRS 20
+
+static struct dentry *rpm_debugfs_dir;
+
+static u32 string_to_uint(const u8 *str)
+{
+	int i, len;
+	u32 output = 0;
+
+	len = strnlen(str, sizeof(u32));
+	for (i = 0; i < len; i++)
+		output |= str[i] << (i * 8);
+
+	return output;
+}
+
+static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer,
+						size_t count, loff_t *position)
+{
+	char buf[MAX_MSG_BUFFER], rsc_type_str[6] = {}, rpm_set[8] = {},
+						key_str[6] = {};
+	int i, pos = -1, set = -1, nelems = -1;
+	char *cmp;
+	uint32_t rsc_type = 0, rsc_id = 0, key = 0, data = 0;
+	struct msm_rpm_request *req;
+
+	count = min(count, sizeof(buf) - 1);
+	if (copy_from_user(&buf, user_buffer, count))
+		return -EFAULT;
+	buf[count] = '\0';
+	cmp = strstrip(buf);
+
+	if (sscanf(cmp, "%7s %5s %u %d %n", rpm_set, rsc_type_str,
+				&rsc_id, &nelems, &pos) != 4) {
+		pr_err("Invalid number of arguments passed\n");
+		goto err;
+	}
+
+	if (strlen(rpm_set) > 6 || strlen(rsc_type_str) > 4) {
+		pr_err("Invalid value of set or resource type\n");
+		goto err;
+	}
+
+	if (!strcmp(rpm_set, "active"))
+		set = 0;
+	else if (!strcmp(rpm_set, "sleep"))
+		set = 1;
+
+	rsc_type = string_to_uint(rsc_type_str);
+
+	if (set < 0 || nelems < 0) {
+		pr_err("Invalid value of set or nelems\n");
+		goto err;
+	}
+	if (nelems > MAX_KEY_VALUE_PAIRS) {
+		pr_err("Exceeded max no of key-value entries\n");
+		goto err;
+	}
+
+	req = msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+	if (!req)
+		return -ENOMEM;
+
+	for (i = 0; i < nelems; i++) {
+		cmp += pos;
+		if (sscanf(cmp, "%5s %n", key_str, &pos) != 1) {
+			pr_err("Invalid number of arguments passed\n");
+			goto err;
+		}
+
+		if (strlen(key_str) > 4) {
+			pr_err("Key value cannot be more than 4 characters\n");
+			goto err;
+		}
+		key = string_to_uint(key_str);
+		if (!key) {
+			pr_err("Key values entered incorrectly\n");
+			goto err;
+		}
+
+		cmp += pos;
+		if (sscanf(cmp, "%u %n", &data, &pos) != 1) {
+			pr_err("Invalid number of arguments passed\n");
+			goto err;
+		}
+
+		if (msm_rpm_add_kvp_data(req, key,
+				(void *)&data, sizeof(data)))
+			goto err_request;
+	}
+
+	if (msm_rpm_wait_for_ack(msm_rpm_send_request(req)))
+		pr_err("Sending the RPM message failed\n");
+
+err_request:
+	msm_rpm_free_request(req);
+err:
+	return count;
+}
+
+static const struct file_operations rsc_ops = {
+	.write = rsc_ops_write,
+};
+
+static int __init rpm_smd_debugfs_init(void)
+{
+	rpm_debugfs_dir = debugfs_create_dir("rpm_send_msg", NULL);
+	if (!rpm_debugfs_dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("message", 0200, rpm_debugfs_dir, NULL,
+								&rsc_ops))
+		return -ENOMEM;
+
+	return 0;
+}
+late_initcall(rpm_smd_debugfs_init);
+
+static void __exit rpm_smd_debugfs_exit(void)
+{
+	debugfs_remove_recursive(rpm_debugfs_dir);
+}
+module_exit(rpm_smd_debugfs_exit);
+
+MODULE_DESCRIPTION("RPM SMD Debug Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
new file mode 100644
index 0000000..3fc7fbf
--- /dev/null
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -0,0 +1,2168 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/rbtree.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/glink_rpm_xprt.h>
+#include <soc/qcom/glink.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_rpm_smd.h>
+
+/* Debug Definitions */
+enum {
+	MSM_RPM_LOG_REQUEST_PRETTY	= BIT(0),
+	MSM_RPM_LOG_REQUEST_RAW		= BIT(1),
+	MSM_RPM_LOG_REQUEST_SHOW_MSG_ID	= BIT(2),
+};
+
+static int msm_rpm_debug_mask;
+module_param_named(
+	debug_mask, msm_rpm_debug_mask, int, 0644
+);
+
+struct msm_rpm_driver_data {
+	const char *ch_name;
+	uint32_t ch_type;
+	smd_channel_t *ch_info;
+	struct work_struct work;
+	spinlock_t smd_lock_write;
+	spinlock_t smd_lock_read;
+	struct completion smd_open;
+};
+
+struct glink_apps_rpm_data {
+	const char *name;
+	const char *edge;
+	const char *xprt;
+	void *glink_handle;
+	struct glink_link_info *link_info;
+	struct glink_open_config *open_cfg;
+	struct work_struct work;
+};
+
+static bool glink_enabled;
+static struct glink_apps_rpm_data *glink_data;
+
+#define DEFAULT_BUFFER_SIZE 256
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define MAX_SLEEP_BUFFER 128
+#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_NOIO)
+#define INV_RSC "resource does not exist"
+#define ERR "err\0"
+#define MAX_ERR_BUFFER_SIZE 128
+#define MAX_WAIT_ON_ACK 24
+#define INIT_ERROR 1
+#define V1_PROTOCOL_VERSION 0x31726576 /* rev1 */
+#define V0_PROTOCOL_VERSION 0 /* rev0 */
+#define RPM_MSG_TYPE_OFFSET 16
+#define RPM_MSG_TYPE_SIZE 8
+#define RPM_SET_TYPE_OFFSET 28
+#define RPM_SET_TYPE_SIZE 4
+#define RPM_REQ_LEN_OFFSET 0
+#define RPM_REQ_LEN_SIZE 16
+#define RPM_MSG_VERSION_OFFSET 24
+#define RPM_MSG_VERSION_SIZE 8
+#define RPM_MSG_VERSION 1
+#define RPM_MSG_SET_OFFSET 28
+#define RPM_MSG_SET_SIZE 4
+#define RPM_RSC_ID_OFFSET 16
+#define RPM_RSC_ID_SIZE 12
+#define RPM_DATA_LEN_OFFSET 0
+#define RPM_DATA_LEN_SIZE 16
+#define RPM_HDR_SIZE ((rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ?\
+		sizeof(struct rpm_v0_hdr) : sizeof(struct rpm_v1_hdr))
+#define CLEAR_FIELD(offset, size) (~GENMASK(offset + size - 1, offset))
+
+static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
+static bool standalone;
+static int probe_status = -EPROBE_DEFER;
+static int msm_rpm_read_smd_data(char *buf);
+static void msm_rpm_process_ack(uint32_t msg_id, int errno);
+
+int msm_rpm_register_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
+}
+
+int msm_rpm_unregister_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
+}
+
+static struct workqueue_struct *msm_rpm_smd_wq;
+
+enum {
+	MSM_RPM_MSG_REQUEST_TYPE = 0,
+	MSM_RPM_MSG_TYPE_NR,
+};
+
+static const uint32_t msm_rpm_request_service_v1[MSM_RPM_MSG_TYPE_NR] = {
+	0x716572, /* 'req\0' */
+};
+
+enum {
+	RPM_V1_REQUEST_SERVICE,
+	RPM_V1_SYSTEMDB_SERVICE,
+	RPM_V1_COMMAND_SERVICE,
+	RPM_V1_ACK_SERVICE,
+	RPM_V1_NACK_SERVICE,
+} msm_rpm_request_service_v2;
+
+struct rpm_v0_hdr {
+	uint32_t service_type;
+	uint32_t request_len;
+};
+
+struct rpm_v1_hdr {
+	uint32_t request_hdr;
+};
+
+struct rpm_message_header_v0 {
+	struct rpm_v0_hdr hdr;
+	uint32_t msg_id;
+	enum msm_rpm_set set;
+	uint32_t resource_type;
+	uint32_t resource_id;
+	uint32_t data_len;
+};
+
+struct rpm_message_header_v1 {
+	struct rpm_v1_hdr hdr;
+	uint32_t msg_id;
+	uint32_t resource_type;
+	uint32_t request_details;
+};
+
+struct msm_rpm_ack_msg_v0 {
+	uint32_t req;
+	uint32_t req_len;
+	uint32_t rsc_id;
+	uint32_t msg_len;
+	uint32_t id_ack;
+};
+
+struct msm_rpm_ack_msg_v1 {
+	uint32_t request_hdr;
+	uint32_t id_ack;
+};
+
+struct kvp {
+	unsigned int k;
+	unsigned int s;
+};
+
+struct msm_rpm_kvp_data {
+	uint32_t key;
+	uint32_t nbytes; /* number of bytes */
+	uint8_t *value;
+	bool valid;
+};
+
+struct slp_buf {
+	struct rb_node node;
+	char ubuf[MAX_SLEEP_BUFFER];
+	char *buf;
+	bool valid;
+};
+
+enum rpm_msg_fmts {
+	RPM_MSG_V0_FMT,
+	RPM_MSG_V1_FMT
+};
+
+static uint32_t rpm_msg_fmt_ver;
+module_param_named(
+	rpm_msg_fmt_ver, rpm_msg_fmt_ver, uint, 0444
+);
+
+static struct rb_root tr_root = RB_ROOT;
+static int (*msm_rpm_send_buffer)(char *buf, uint32_t size, bool noirq);
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq);
+static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq);
+static uint32_t msm_rpm_get_next_msg_id(void);
+
+static inline uint32_t get_offset_value(uint32_t val, uint32_t offset,
+		uint32_t size)
+{
+	return (((val) & GENMASK(offset + size - 1, offset))
+		>> offset);
+}
+
+static inline void change_offset_value(uint32_t *val, uint32_t offset,
+		uint32_t size, int32_t val1)
+{
+	uint32_t member = *val;
+	uint32_t offset_val = get_offset_value(member, offset, size);
+	uint32_t mask = (1 << size) - 1;
+
+	offset_val += val1;
+	*val &= CLEAR_FIELD(offset, size);
+	*val |= ((offset_val & mask) << offset);
+}
+
+static inline void set_offset_value(uint32_t *val, uint32_t offset,
+		uint32_t size, uint32_t val1)
+{
+	uint32_t mask = (1 << size) - 1;
+
+	*val &= CLEAR_FIELD(offset, size);
+	*val |= ((val1 & mask) << offset);
+}
+static uint32_t get_msg_id(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->msg_id;
+
+	return ((struct rpm_message_header_v1 *)buf)->msg_id;
+
+}
+
+static uint32_t get_ack_msg_id(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct msm_rpm_ack_msg_v0 *)buf)->id_ack;
+
+	return ((struct msm_rpm_ack_msg_v1 *)buf)->id_ack;
+
+}
+
+static uint32_t get_rsc_type(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->resource_type;
+
+	return ((struct rpm_message_header_v1 *)buf)->resource_type;
+
+}
+
+static uint32_t get_set_type(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->set;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_SET_TYPE_OFFSET,
+			RPM_SET_TYPE_SIZE);
+}
+
+static uint32_t get_data_len(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->data_len;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_DATA_LEN_OFFSET,
+			RPM_DATA_LEN_SIZE);
+}
+
+static uint32_t get_rsc_id(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->resource_id;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_RSC_ID_OFFSET,
+			RPM_RSC_ID_SIZE);
+}
+
+static uint32_t get_ack_req_len(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct msm_rpm_ack_msg_v0 *)buf)->req_len;
+
+	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
+			request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE);
+}
+
+static uint32_t get_ack_msg_type(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct msm_rpm_ack_msg_v0 *)buf)->req;
+
+	return get_offset_value(((struct msm_rpm_ack_msg_v1 *)buf)->
+			request_hdr, RPM_MSG_TYPE_OFFSET,
+			RPM_MSG_TYPE_SIZE);
+}
+
+static uint32_t get_req_len(char *buf)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		return ((struct rpm_message_header_v0 *)buf)->hdr.request_len;
+
+	return get_offset_value(((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE);
+}
+
+static void set_msg_ver(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver) {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
+			RPM_MSG_VERSION_SIZE, val);
+	} else {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_MSG_VERSION_OFFSET,
+			RPM_MSG_VERSION_SIZE, 0);
+	}
+}
+
+static void set_req_len(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
+		((struct rpm_message_header_v0 *)buf)->hdr.request_len = val;
+	} else {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE, val);
+	}
+}
+
+static void change_req_len(char *buf, int32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
+		((struct rpm_message_header_v0 *)buf)->hdr.request_len += val;
+	} else {
+		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_REQ_LEN_OFFSET,
+			RPM_REQ_LEN_SIZE, val);
+	}
+}
+
+static void set_msg_type(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT) {
+		((struct rpm_message_header_v0 *)buf)->hdr.service_type =
+			msm_rpm_request_service_v1[val];
+	} else {
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			hdr.request_hdr, RPM_MSG_TYPE_OFFSET,
+			RPM_MSG_TYPE_SIZE, RPM_V1_REQUEST_SERVICE);
+	}
+}
+
+static void set_rsc_id(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->resource_id = val;
+	else
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_RSC_ID_OFFSET,
+			RPM_RSC_ID_SIZE, val);
+}
+
+static void set_data_len(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->data_len = val;
+	else
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_DATA_LEN_OFFSET,
+			RPM_DATA_LEN_SIZE, val);
+}
+static void change_data_len(char *buf, int32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->data_len += val;
+	else
+		change_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_DATA_LEN_OFFSET,
+			RPM_DATA_LEN_SIZE, val);
+}
+
+static void set_set_type(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->set = val;
+	else
+		set_offset_value(&((struct rpm_message_header_v1 *)buf)->
+			request_details, RPM_SET_TYPE_OFFSET,
+			RPM_SET_TYPE_SIZE, val);
+}
+static void set_msg_id(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->msg_id = val;
+	else
+		((struct rpm_message_header_v1 *)buf)->msg_id = val;
+
+}
+
+static void set_rsc_type(char *buf, uint32_t val)
+{
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		((struct rpm_message_header_v0 *)buf)->resource_type = val;
+	else
+		((struct rpm_message_header_v1 *)buf)->resource_type = val;
+}
+
+/* Total wire size of the message: request length plus the RPM header. */
+static inline int get_buf_len(char *buf)
+{
+	return get_req_len(buf) + RPM_HDR_SIZE;
+}
+
+/* Return a pointer to the first key/value pair, just past the header. */
+static inline struct kvp *get_first_kvp(char *buf)
+{
+	size_t hdr_size;
+
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		hdr_size = sizeof(struct rpm_message_header_v0);
+	else
+		hdr_size = sizeof(struct rpm_message_header_v1);
+
+	return (struct kvp *)(buf + hdr_size);
+}
+
+/* Advance past @k's fixed header and its variable payload (k->s bytes). */
+static inline struct kvp *get_next_kvp(struct kvp *k)
+{
+	return (struct kvp *)((void *)k + sizeof(*k) + k->s);
+}
+
+/* A pair's payload immediately follows its fixed header. */
+static inline void *get_data(struct kvp *k)
+{
+	return (void *)k + sizeof(*k);
+}
+
+
+/*
+ * Remove pair @d from the message in @buf by copying the remaining
+ * pairs down over it, then shrink both header length fields.
+ */
+static void delete_kvp(char *buf, struct kvp *d)
+{
+	struct kvp *n;
+	int dec;
+	uint32_t size;
+
+	n = get_next_kvp(d);
+	/* dec = bytes occupied by @d (header + payload) */
+	dec = (void *)n - (void *)d;
+	/* size = bytes of kvp data that follow @d */
+	size = get_data_len(buf) -
+		((void *)n - (void *)get_first_kvp(buf));
+
+	memcpy((void *)d, (void *)n, size);
+
+	change_data_len(buf, -dec);
+	change_req_len(buf, -dec);
+}
+
+/* Overwrite dest's payload with src's; callers ensure the sizes match. */
+static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
+{
+	memcpy(get_data(dest), get_data(src), src->s);
+}
+
+/*
+ * Append pair @n to the message in @buf and grow both header length
+ * fields.  The request is dropped (with a warning) when it would
+ * overflow the fixed-size sleep buffer.
+ */
+static void add_kvp(char *buf, struct kvp *n)
+{
+	int32_t inc = sizeof(*n) + n->s;
+
+	/* WARN_ON() returns its condition, so test and warn in one step
+	 * instead of evaluating the overflow check twice */
+	if (WARN_ON(get_req_len(buf) + inc > MAX_SLEEP_BUFFER))
+		return;
+
+	memcpy(buf + get_buf_len(buf), n, inc);
+
+	change_data_len(buf, inc);
+	change_req_len(buf, inc);
+}
+
+/*
+ * Look up the cached sleep-set buffer whose (resource type, resource
+ * id) key matches the request in @slp.  Returns NULL when absent.
+ */
+static struct slp_buf *tr_search(struct rb_root *root, char *slp)
+{
+	unsigned int want_type = get_rsc_type(slp);
+	unsigned int want_id = get_rsc_id(slp);
+	struct rb_node *n;
+
+	for (n = root->rb_node; n; ) {
+		struct slp_buf *entry = rb_entry(n, struct slp_buf, node);
+		unsigned int have_type = get_rsc_type(entry->buf);
+		unsigned int have_id = get_rsc_id(entry->buf);
+
+		if (want_type == have_type && want_id == have_id)
+			return entry;
+
+		/* ordered by type first, then by id */
+		if (want_type < have_type ||
+		    (want_type == have_type && want_id < have_id))
+			n = n->rb_left;
+		else
+			n = n->rb_right;
+	}
+
+	return NULL;
+}
+
+/*
+ * Insert @slp into the sleep-set cache, keyed on (resource type,
+ * resource id).  Returns -EINVAL when an entry with the same key
+ * already exists, 0 otherwise; the new entry starts out valid.
+ */
+static int tr_insert(struct rb_root *root, struct slp_buf *slp)
+{
+	unsigned int new_type = get_rsc_type(slp->buf);
+	unsigned int new_id = get_rsc_id(slp->buf);
+	struct rb_node **link = &root->rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*link) {
+		struct slp_buf *entry = rb_entry(*link, struct slp_buf, node);
+		unsigned int cur_type = get_rsc_type(entry->buf);
+		unsigned int cur_id = get_rsc_id(entry->buf);
+
+		parent = *link;
+
+		if (new_type == cur_type && new_id == cur_id)
+			return -EINVAL;
+
+		/* same ordering as tr_search(): type first, then id */
+		if (new_type < cur_type ||
+		    (new_type == cur_type && new_id < cur_id))
+			link = &(*link)->rb_left;
+		else
+			link = &(*link)->rb_right;
+	}
+
+	rb_link_node(&slp->node, parent, link);
+	rb_insert_color(&slp->node, root);
+	slp->valid = true;
+	return 0;
+}
+
+/*
+ * Iterate @k over every key/value pair in message @buf, stopping once
+ * the cursor passes data_len bytes of kvp data.
+ */
+#define for_each_kvp(buf, k) \
+	for (k = (struct kvp *)get_first_kvp(buf); \
+		((void *)k - (void *)get_first_kvp(buf)) < \
+		 get_data_len(buf);\
+		k = get_next_kvp(k))
+
+
+/*
+ * Merge the new sleep request in @buf into cached entry @s.  Matching
+ * keys are updated in place (same size) or deleted and re-appended
+ * (different size); unseen keys are appended.  s->valid is set only
+ * when the cached data actually changed, marking it for flush.
+ */
+static void tr_update(struct slp_buf *s, char *buf)
+{
+	struct kvp *e, *n;
+
+	for_each_kvp(buf, n) {
+		bool found = false;
+
+		for_each_kvp(s->buf, e) {
+			if (n->k == e->k) {
+				found = true;
+				if (n->s == e->s) {
+					void *e_data = get_data(e);
+					void *n_data = get_data(n);
+
+					/* only dirty the entry on change */
+					if (memcmp(e_data, n_data, n->s)) {
+						update_kvp_data(e, n);
+						s->valid = true;
+					}
+				} else {
+					/* size changed: drop and re-append;
+					 * break below keeps the iterator
+					 * from walking the moved tail */
+					delete_kvp(s->buf, e);
+					add_kvp(s->buf, n);
+					s->valid = true;
+				}
+				break;
+			}
+
+		}
+		if (!found) {
+			add_kvp(s->buf, n);
+			s->valid = true;
+		}
+	}
+}
+/* Monotonic source of message ids (0 and 1 are reserved, see below). */
+static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
+
+struct msm_rpm_request {
+	uint8_t *client_buf;	/* staged message header */
+	struct msm_rpm_kvp_data *kvp;	/* per-key staging slots */
+	uint32_t num_elements;	/* capacity of kvp[] */
+	uint32_t write_idx;	/* kvp[] slots in use */
+	uint8_t *buf;		/* wire-format buffer sent to RPM */
+	uint32_t numbytes;	/* allocated size of buf */
+};
+
+/*
+ * Data related to message acknowledgment
+ */
+
+LIST_HEAD(msm_rpm_wait_list);
+
+struct msm_rpm_wait_data {
+	struct list_head list;
+	uint32_t msg_id;	/* request id this entry waits on */
+	bool ack_recd;		/* set once the ack arrived */
+	int errno;		/* error carried by the ack */
+	struct completion ack;
+	bool delete_on_ack;	/* free from the ack path itself */
+};
+DEFINE_SPINLOCK(msm_rpm_list_lock);
+
+
+
+LIST_HEAD(msm_rpm_ack_list);
+
+static struct tasklet_struct data_tasklet;
+
+/* Extract the message id echoed back in an RPM ack packet. */
+static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
+{
+	return get_ack_msg_id(buf);
+}
+
+/*
+ * Decode the error status carried in an RPM ack packet.
+ *
+ * Returns 0 for a plain ack, the RPM-supplied error word for a v1
+ * NACK, -EINVAL for a NACK naming an unsupported resource and
+ * -ENODEV for any other NACK.
+ */
+static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
+{
+	uint8_t *tmp;
+	uint32_t req_len = get_ack_req_len(buf);
+	uint32_t msg_type = get_ack_msg_type(buf);
+	int rc = -ENODEV;
+	uint32_t err;
+	uint32_t ack_msg_size = rpm_msg_fmt_ver ?
+			sizeof(struct msm_rpm_ack_msg_v1) :
+			sizeof(struct msm_rpm_ack_msg_v0);
+
+	/*
+	 * V1-format ack/nack services are terminal: a plain ack is
+	 * success, a nack carries the error word right after the v1
+	 * ack header.  The previous check required the V0 format for
+	 * RPM_V1_ACK_SERVICE, so v1 ACKs fell through to the legacy
+	 * parser below; match the NACK branch's format test instead.
+	 */
+	if (rpm_msg_fmt_ver &&
+			msg_type == RPM_V1_ACK_SERVICE) {
+		return 0;
+	} else if (rpm_msg_fmt_ver && msg_type == RPM_V1_NACK_SERVICE) {
+		err = *(uint32_t *)(buf + sizeof(struct msm_rpm_ack_msg_v1));
+		return err;
+	}
+
+	/* Legacy path: any payload beyond the bare ack is an error blob. */
+	req_len -= ack_msg_size;
+	req_len += 2 * sizeof(uint32_t);
+	if (!req_len)
+		return 0;
+
+	pr_err("%s:rpm returned error or nack req_len: %d id_ack: %d\n",
+				__func__, req_len, get_ack_msg_id(buf));
+
+	tmp = buf + ack_msg_size;
+
+	if (memcmp(tmp, ERR, sizeof(uint32_t))) {
+		pr_err("%s rpm returned error\n", __func__);
+		WARN_ON(1);
+	}
+
+	tmp += 2 * sizeof(uint32_t);
+
+	if (!(memcmp(tmp, INV_RSC, min_t(uint32_t, req_len,
+						sizeof(INV_RSC))-1))) {
+		pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
+		rc = -EINVAL;
+	} else {
+		pr_err("%s(): RPM NACK Invalid header\n", __func__);
+	}
+
+	return rc;
+}
+
+/*
+ * Cache a sleep-set request instead of sending it now.  The request
+ * is merged into (or inserted as) an rb-tree entry keyed on resource
+ * type/id; the tree is flushed at suspend by msm_rpm_flush_requests().
+ * Returns 0 on success or -ENOMEM.
+ * NOTE(review): @flag is unused — GFP_ATOMIC is forced because the
+ * spinlock is held; confirm whether it was meant to be passed through.
+ */
+int msm_rpm_smd_buffer_request(struct msm_rpm_request *cdata,
+		uint32_t size, gfp_t flag)
+{
+	struct slp_buf *slp;
+	static DEFINE_SPINLOCK(slp_buffer_lock);
+	unsigned long flags;
+	char *buf;
+
+	buf = cdata->buf;
+
+	if (size > MAX_SLEEP_BUFFER)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&slp_buffer_lock, flags);
+	slp = tr_search(&tr_root, buf);
+
+	if (!slp) {
+		slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
+		if (!slp) {
+			spin_unlock_irqrestore(&slp_buffer_lock, flags);
+			return -ENOMEM;
+		}
+		/* align the embedded buffer for the u32 header fields */
+		slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
+		memcpy(slp->buf, buf, size);
+		if (tr_insert(&tr_root, slp))
+			pr_err("Error updating sleep request\n");
+	} else {
+		/* handle unsent requests */
+		tr_update(slp, buf);
+	}
+	trace_rpm_smd_sleep_set(get_msg_id(cdata->client_buf),
+			get_rsc_type(cdata->client_buf),
+			get_req_len(cdata->client_buf));
+
+	spin_unlock_irqrestore(&slp_buffer_lock, flags);
+
+	return 0;
+}
+
+/* Shared SMD channel state; smd_open completes once the channel is up. */
+static struct msm_rpm_driver_data msm_rpm_data = {
+	.smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open),
+};
+
+/*
+ * Poll the glink channel for an RPM ack.  On success delay 50us
+ * before the caller polls again; on failure log the error.  Returns
+ * the glink_rpm_rx_poll() result.
+ */
+static int msm_rpm_glink_rx_poll(void *glink_handle)
+{
+	int ret;
+
+	ret = glink_rpm_rx_poll(glink_handle);
+	if (ret >= 0)
+		/*
+		 * Sleep for 50us at a time before checking
+		 * for packet availability. The 50us is based
+		 * on the time rpm could take to process
+		 * and send an ack for the sleep set request.
+		 */
+		udelay(50);
+	else
+		/* fixed garbled log text ("Not receieve an ACK") */
+		pr_err("Did not receive an ACK from RPM. ret = %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * Drain one pending sleep-set ack from the active transport.
+ *
+ * Returns
+ *	= 0 on successful reads
+ *	> 0 on successful reads with no further data
+ *	standard Linux error codes on failure.
+ */
+static int msm_rpm_read_sleep_ack(void)
+{
+	int ret;
+	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+
+	if (glink_enabled)
+		ret = msm_rpm_glink_rx_poll(glink_data->glink_handle);
+	else {
+		/* consume one SMD packet, then report whether more remain */
+		ret = msm_rpm_read_smd_data(buf);
+		if (!ret)
+			ret = smd_is_pkt_avail(msm_rpm_data.ch_info);
+	}
+	return ret;
+}
+
+/*
+ * Walk the sleep-set cache and send every dirty entry to RPM.  Acks
+ * are drained once MAX_WAIT_ON_ACK sends are outstanding so the
+ * transport FIFO cannot overrun.  Returns 0 or a transport error.
+ */
+static int msm_rpm_flush_requests(bool print)
+{
+	struct rb_node *t;
+	int ret;
+	int count = 0;
+
+	for (t = rb_first(&tr_root); t; t = rb_next(t)) {
+
+		struct slp_buf *s = rb_entry(t, struct slp_buf, node);
+		unsigned int type = get_rsc_type(s->buf);
+		unsigned int id = get_rsc_id(s->buf);
+
+		/* only entries touched since the last flush are sent */
+		if (!s->valid)
+			continue;
+
+		set_msg_id(s->buf, msm_rpm_get_next_msg_id());
+
+		if (!glink_enabled)
+			ret = msm_rpm_send_smd_buffer(s->buf,
+					get_buf_len(s->buf), true);
+		else
+			ret = msm_rpm_glink_send_buffer(s->buf,
+					get_buf_len(s->buf), true);
+
+		WARN_ON(ret != get_buf_len(s->buf));
+		trace_rpm_smd_send_sleep_set(get_msg_id(s->buf), type, id);
+
+		s->valid = false;
+		count++;
+
+		/*
+		 * RPM acks need to be handled here if we have sent 24
+		 * messages such that we do not overrun SMD buffer. Since
+		 * we expect only sleep sets at this point (RPM PC would be
+		 * disallowed if we had pending active requests), we need not
+		 * process these sleep set acks.
+		 */
+		if (count >= MAX_WAIT_ON_ACK) {
+			int ret = msm_rpm_read_sleep_ack();
+
+			if (ret >= 0)
+				count--;
+			else
+				return ret;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Invoke registered sleep-set notifiers with the resource and the
+ * key/value pair about to be written to the sleep set.
+ */
+static void msm_rpm_notify_sleep_chain(char *buf,
+		struct msm_rpm_kvp_data *kvp)
+{
+	struct msm_rpm_notifier_data notif;
+
+	notif.rsc_type = get_rsc_type(buf);
+	/* was get_req_len(buf), which handed notifiers the request
+	 * length in place of the resource id */
+	notif.rsc_id = get_rsc_id(buf);
+	notif.key = kvp->key;
+	notif.size = kvp->nbytes;
+	notif.value = kvp->value;
+	atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
+}
+
+/*
+ * Stage one key/value pair on @handle.  An existing entry for @key is
+ * updated in place (no-op when the bytes are unchanged); otherwise the
+ * next free slot is used.  Returns 0 on success, a negative errno on
+ * bad arguments, exhausted slots or allocation failure.
+ */
+static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size, bool noirq)
+{
+	uint32_t i;
+	uint32_t data_size, msg_size;
+
+	if (probe_status)
+		return probe_status;
+
+	if (!handle || !data) {
+		pr_err("%s(): Invalid handle/data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (size < 0)
+		return  -EINVAL;
+
+	/* payloads are 4-byte aligned; +8 covers the key and length words */
+	data_size = ALIGN(size, SZ_4);
+	msg_size = data_size + 8;
+
+	/* reuse the slot if this key was staged before */
+	for (i = 0; i < handle->write_idx; i++) {
+		if (handle->kvp[i].key != key)
+			continue;
+		if (handle->kvp[i].nbytes != data_size) {
+			kfree(handle->kvp[i].value);
+			handle->kvp[i].value = NULL;
+		} else {
+			/* identical bytes already staged: nothing to do */
+			if (!memcmp(handle->kvp[i].value, data, data_size))
+				return 0;
+		}
+		break;
+	}
+
+	if (i >= handle->num_elements) {
+		pr_err("Number of resources exceeds max allocated\n");
+		return -ENOMEM;
+	}
+
+	if (i == handle->write_idx)
+		handle->write_idx++;
+
+	if (!handle->kvp[i].value) {
+		handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
+
+		if (!handle->kvp[i].value)
+			return -ENOMEM;
+	} else {
+		/* We enter the else case, if a key already exists but the
+		 * data doesn't match. In which case, we should zero the data
+		 * out.
+		 */
+		memset(handle->kvp[i].value, 0, data_size);
+	}
+
+	/* first use of the slot adds the full element; resize adjusts */
+	if (!handle->kvp[i].valid)
+		change_data_len(handle->client_buf, msg_size);
+	else
+		change_data_len(handle->client_buf,
+			(data_size - handle->kvp[i].nbytes));
+
+	handle->kvp[i].nbytes = data_size;
+	handle->kvp[i].key = key;
+	memcpy(handle->kvp[i].value, data, size);
+	handle->kvp[i].valid = true;
+
+	return 0;
+
+}
+
+/*
+ * Allocate and initialize a request handle for @num_elements pairs on
+ * resource (@rsc_type, @rsc_id) in @set.  Returns the handle, an
+ * ERR_PTR when the driver failed to probe, or NULL on allocation
+ * failure.  Callers release the handle with msm_rpm_free_request().
+ */
+static struct msm_rpm_request *msm_rpm_create_request_common(
+		enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
+		int num_elements, bool noirq)
+{
+	struct msm_rpm_request *cdata;
+	uint32_t buf_size;
+
+	if (probe_status)
+		return ERR_PTR(probe_status);
+
+	cdata = kzalloc(sizeof(struct msm_rpm_request),
+			GFP_FLAG(noirq));
+
+	if (!cdata) {
+		pr_err("Cannot allocate memory for client data\n");
+		goto cdata_alloc_fail;
+	}
+
+	/* header size depends on the negotiated message format */
+	if (rpm_msg_fmt_ver == RPM_MSG_V0_FMT)
+		buf_size = sizeof(struct rpm_message_header_v0);
+	else
+		buf_size = sizeof(struct rpm_message_header_v1);
+
+	cdata->client_buf = kzalloc(buf_size, GFP_FLAG(noirq));
+
+	if (!cdata->client_buf)
+		goto client_buf_alloc_fail;
+
+	set_set_type(cdata->client_buf, set);
+	set_rsc_type(cdata->client_buf, rsc_type);
+	set_rsc_id(cdata->client_buf, rsc_id);
+
+	cdata->num_elements = num_elements;
+	cdata->write_idx = 0;
+
+	cdata->kvp = kcalloc(num_elements, sizeof(struct msm_rpm_kvp_data),
+			GFP_FLAG(noirq));
+
+	if (!cdata->kvp) {
+		pr_warn("%s(): Cannot allocate memory for key value data\n",
+				__func__);
+		goto kvp_alloc_fail;
+	}
+
+	cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
+
+	if (!cdata->buf)
+		goto buf_alloc_fail;
+
+	cdata->numbytes = DEFAULT_BUFFER_SIZE;
+	return cdata;
+
+/* unwind partial allocations in reverse order */
+buf_alloc_fail:
+	kfree(cdata->kvp);
+kvp_alloc_fail:
+	kfree(cdata->client_buf);
+client_buf_alloc_fail:
+	kfree(cdata);
+cdata_alloc_fail:
+	return NULL;
+
+}
+
+/*
+ * Release a request handle together with every buffer it owns.
+ * Safe to call with NULL.
+ */
+void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+	int idx;
+
+	if (!handle)
+		return;
+
+	for (idx = 0; idx < handle->num_elements; idx++)
+		kfree(handle->kvp[idx].value);
+
+	kfree(handle->kvp);
+	kfree(handle->client_buf);
+	kfree(handle->buf);
+	kfree(handle);
+}
+EXPORT_SYMBOL(msm_rpm_free_request);
+
+/* Allocate a request handle; allocations may sleep. */
+struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+			num_elements, false);
+}
+EXPORT_SYMBOL(msm_rpm_create_request);
+
+/* Allocate a request handle from atomic context (no sleeping). */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+			num_elements, true);
+}
+EXPORT_SYMBOL(msm_rpm_create_request_noirq);
+
+/* Stage one key/value pair on @handle; allocations may sleep. */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data);
+
+/* Stage one key/value pair from atomic context (no sleeping). */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size)
+{
+	return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
+
+/* SMD event callback. Runs in interrupt context */
+static void msm_rpm_notify(void *data, unsigned int event)
+{
+	struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
+
+	WARN_ON(!pdata);
+
+	if (!(pdata->ch_info))
+		return;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		/* defer ack processing out of irq context */
+		tasklet_schedule(&data_tasklet);
+		trace_rpm_smd_interrupt_notify("interrupt notification");
+		break;
+	case SMD_EVENT_OPEN:
+		complete(&pdata->smd_open);
+		break;
+	case SMD_EVENT_CLOSE:
+	case SMD_EVENT_STATUS:
+	case SMD_EVENT_REOPEN_READY:
+		/* intentionally ignored */
+		break;
+	default:
+		pr_info("Unknown SMD event\n");
+
+	}
+}
+
+/* True while at least one request is still waiting for an RPM ack. */
+bool msm_rpm_waiting_for_ack(void)
+{
+	unsigned long flags;
+	bool pending;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	pending = !list_empty(&msm_rpm_wait_list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	return pending;
+}
+
+/* Find the wait-list entry for @msg_id, or NULL when none is queued. */
+static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *pos, *found = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each_entry(pos, &msm_rpm_wait_list, list) {
+		if (pos->msg_id == msg_id) {
+			found = pos;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+	return found;
+}
+
+/*
+ * Produce a unique message id.
+ *
+ * A message id of 0 is used by the driver to indicate an error
+ * condition and an id of 1 indicates unsent (unchanged) data, so both
+ * are reserved; ids that are still on the wait list are skipped too.
+ */
+static uint32_t msm_rpm_get_next_msg_id(void)
+{
+	uint32_t id;
+
+	for (;;) {
+		id = atomic_inc_return(&msm_rpm_msg_id);
+		if (id > 1 && !msm_rpm_get_entry_from_msg_id(id))
+			return id;
+	}
+}
+
+/*
+ * Queue a wait entry for @msg_id before its request is sent.  Returns
+ * 0 or -ENOMEM.  NOTE(review): noack entries (delete_on_ack) go to
+ * the tail while waited-on entries go to the head — presumably so
+ * waiters are matched first; confirm against the ack path.
+ */
+static int msm_rpm_add_wait_list(uint32_t msg_id, bool delete_on_ack)
+{
+	unsigned long flags;
+	struct msm_rpm_wait_data *data =
+		kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
+
+	if (!data)
+		return -ENOMEM;
+
+	init_completion(&data->ack);
+	data->ack_recd = false;
+	data->msg_id = msg_id;
+	data->errno = INIT_ERROR;
+	data->delete_on_ack = delete_on_ack;
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	if (delete_on_ack)
+		list_add_tail(&data->list, &msm_rpm_wait_list);
+	else
+		list_add(&data->list, &msm_rpm_wait_list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	return 0;
+}
+
+/* Unlink @elem from the wait list under the list lock, then free it. */
+static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	list_del(&elem->list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	kfree(elem);
+}
+
+/*
+ * Match an incoming ack to its wait-list entry: record the error,
+ * complete the waiter and, for noack requests, free the entry here.
+ */
+static void msm_rpm_process_ack(uint32_t msg_id, int errno)
+{
+	struct list_head *ptr, *next;
+	struct msm_rpm_wait_data *elem = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each_safe(ptr, next, &msm_rpm_wait_list) {
+		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+		if (elem->msg_id == msg_id) {
+			elem->errno = errno;
+			elem->ack_recd = true;
+			complete(&elem->ack);
+			if (elem->delete_on_ack) {
+				list_del(&elem->list);
+				kfree(elem);
+			}
+			break;
+		}
+	}
+	/* Special case where the sleep driver doesn't
+	 * wait for ACKs. This would decrease the latency involved with
+	 * entering RPM assisted power collapse.
+	 */
+	if (!elem)
+		trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADBEEF);
+
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+}
+
+/* Wire layout of a single key/value element in an RPM message. */
+struct msm_rpm_kvp_packet {
+	uint32_t id;
+	uint32_t len;
+	uint32_t val;
+};
+
+/*
+ * Read one complete packet from the SMD channel into @buf (caller
+ * supplies at least MAX_ERR_BUFFER_SIZE bytes).  Returns 0 once a
+ * packet was consumed, -EAGAIN when none is fully available yet.
+ */
+static int msm_rpm_read_smd_data(char *buf)
+{
+	int pkt_sz;
+	int bytes_read = 0;
+
+	pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
+
+	if (!pkt_sz)
+		return -EAGAIN;
+
+	if (pkt_sz > MAX_ERR_BUFFER_SIZE) {
+		pr_err("rpm_smd pkt_sz is greater than max size\n");
+		goto error;
+	}
+
+	/* wait until the whole packet has landed in the FIFO */
+	if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
+		return -EAGAIN;
+
+	do {
+		int len;
+
+		len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
+		pkt_sz -= len;
+		bytes_read += len;
+
+	} while (pkt_sz > 0);
+
+	if (pkt_sz < 0) {
+		pr_err("rpm_smd pkt_sz is less than zero\n");
+		goto error;
+	}
+	return 0;
+error:
+	WARN_ON(1);
+
+	return 0;
+}
+
+/* Tasklet body: drain every pending ack from SMD and dispatch it. */
+static void data_fn_tasklet(unsigned long data)
+{
+	uint32_t msg_id;
+	int errno;
+	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+
+	spin_lock(&msm_rpm_data.smd_lock_read);
+	while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+		if (msm_rpm_read_smd_data(buf))
+			break;
+		msg_id = msm_rpm_get_msg_id_from_ack(buf);
+		errno = msm_rpm_get_error_from_ack(buf);
+		trace_rpm_smd_ack_recvd(0, msg_id, errno);
+		msm_rpm_process_ack(msg_id, errno);
+	}
+	spin_unlock(&msm_rpm_data.smd_lock_read);
+}
+
+/*
+ * Render @cdata as a single log line, honoring the pretty/raw debug
+ * mask bits.  Uses pr_info("%s", buf) rather than printk(buf): the
+ * buffer contains client-supplied key names and values, so it must
+ * never be interpreted as a format string.
+ */
+static void msm_rpm_log_request(struct msm_rpm_request *cdata)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE];
+	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+	char name[5];
+	u32 value;
+	uint32_t i;
+	int j, prev_valid;
+	int valid_count = 0;
+	int pos = 0;
+	uint32_t res_type, rsc_id;
+
+	name[4] = 0;
+
+	for (i = 0; i < cdata->write_idx; i++)
+		if (cdata->kvp[i].valid)
+			valid_count++;
+
+	pos += scnprintf(buf + pos, buflen - pos, "RPM req: ");
+	if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
+		pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
+				get_msg_id(cdata->client_buf));
+	pos += scnprintf(buf + pos, buflen - pos, "s=%s",
+		(get_set_type(cdata->client_buf) ==
+				MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
+
+	res_type = get_rsc_type(cdata->client_buf);
+	rsc_id = get_rsc_id(cdata->client_buf);
+	if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
+	    && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
+		/* Both pretty and raw formatting */
+		memcpy(name, &res_type, sizeof(uint32_t));
+		pos += scnprintf(buf + pos, buflen - pos,
+			", rsc_type=0x%08X (%s), rsc_id=%u; ",
+			res_type, name, rsc_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+			pos += scnprintf(buf + pos, buflen - pos,
+					"[key=0x%08X (%s), value=%s",
+					cdata->kvp[i].key, name,
+					(cdata->kvp[i].nbytes ? "0x" : "null"));
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j++)
+				pos += scnprintf(buf + pos, buflen - pos,
+						"%02X ",
+						cdata->kvp[i].value[j]);
+
+			if (cdata->kvp[i].nbytes)
+				pos += scnprintf(buf + pos, buflen - pos, "(");
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+				value = 0;
+				memcpy(&value, &cdata->kvp[i].value[j],
+					min_t(uint32_t, sizeof(uint32_t),
+						cdata->kvp[i].nbytes - j));
+				pos += scnprintf(buf + pos, buflen - pos, "%u",
+						value);
+				if (j + 4 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+						buflen - pos, " ");
+			}
+			if (cdata->kvp[i].nbytes)
+				pos += scnprintf(buf + pos, buflen - pos, ")");
+			pos += scnprintf(buf + pos, buflen - pos, "]");
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	} else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
+		/* Pretty formatting only */
+		memcpy(name, &res_type, sizeof(uint32_t));
+		pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
+			rsc_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+			pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
+				name, (cdata->kvp[i].nbytes ? "" : "null"));
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+				value = 0;
+				memcpy(&value, &cdata->kvp[i].value[j],
+					min_t(uint32_t, sizeof(uint32_t),
+						cdata->kvp[i].nbytes - j));
+				pos += scnprintf(buf + pos, buflen - pos, "%u",
+						value);
+
+				if (j + 4 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+						buflen - pos, " ");
+			}
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	} else {
+		/* Raw formatting only */
+		pos += scnprintf(buf + pos, buflen - pos,
+			", rsc_type=0x%08X, rsc_id=%u; ", res_type, rsc_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			pos += scnprintf(buf + pos, buflen - pos,
+					"[key=0x%08X, value=%s",
+					cdata->kvp[i].key,
+					(cdata->kvp[i].nbytes ? "0x" : "null"));
+			for (j = 0; j < cdata->kvp[i].nbytes; j++) {
+				pos += scnprintf(buf + pos, buflen - pos,
+						"%02X",
+						cdata->kvp[i].value[j]);
+				if (j + 1 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+							buflen - pos, " ");
+			}
+			pos += scnprintf(buf + pos, buflen - pos, "]");
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	pr_info("%s", buf);
+}
+
+/*
+ * Write @buf to the RPM SMD channel, spinning until enough write
+ * space is available.  In noirq mode the wait is a udelay with the
+ * lock held; otherwise the lock is dropped between polls.  Returns
+ * the number of bytes written or a negative SMD error.
+ */
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+
+	/* the loop condition polls write space; the old standalone
+	 * smd_write_avail() call before it was a dead store */
+	while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
+		if (ret < 0)
+			break;
+		if (!noirq) {
+			spin_unlock_irqrestore(
+				&msm_rpm_data.smd_lock_write, flags);
+			cpu_relax();
+			spin_lock_irqsave(
+				&msm_rpm_data.smd_lock_write, flags);
+		} else
+			udelay(5);
+	}
+
+	if (ret < 0) {
+		pr_err("SMD not initialized\n");
+		spin_unlock_irqrestore(
+			&msm_rpm_data.smd_lock_write, flags);
+		return ret;
+	}
+
+	ret = smd_write(msm_rpm_data.ch_info, buf, size);
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+	return ret;
+}
+
+/*
+ * Transmit @buf over glink, retrying up to 50 times while the
+ * transport reports busy/no-space.  Returns @size on success or 0
+ * when the retries were exhausted.
+ */
+static int msm_rpm_glink_send_buffer(char *buf, uint32_t size, bool noirq)
+{
+	int ret;
+	unsigned long flags;
+	int timeout = 50;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+	do {
+		ret = glink_tx(glink_data->glink_handle, buf, buf,
+					size, GLINK_TX_SINGLE_THREADED);
+		if (ret == -EBUSY || ret == -ENOSPC) {
+			/* transient congestion: back off and retry */
+			if (!noirq) {
+				spin_unlock_irqrestore(
+					&msm_rpm_data.smd_lock_write, flags);
+				cpu_relax();
+				spin_lock_irqsave(
+					&msm_rpm_data.smd_lock_write, flags);
+			} else {
+				udelay(5);
+			}
+			timeout--;
+		} else {
+			ret = 0;
+		}
+	} while (ret && timeout);
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+
+	if (!timeout)
+		return 0;
+	else
+		return size;
+}
+
+/*
+ * Serialize @cdata into wire format and hand it to the transport.
+ *
+ * Returns the assigned message id on success, 1 when nothing had to
+ * be sent (empty request, or a sleep-set write that was buffered),
+ * 0 on allocation/transport failure, or the probe error.
+ */
+static int msm_rpm_send_data(struct msm_rpm_request *cdata,
+		int msg_type, bool noirq, bool noack)
+{
+	uint8_t *tmpbuff;
+	int ret;
+	uint32_t i;
+	uint32_t msg_size;
+	int msg_hdr_sz, req_hdr_sz;
+	uint32_t data_len = get_data_len(cdata->client_buf);
+	uint32_t set = get_set_type(cdata->client_buf);
+	uint32_t msg_id;
+
+	if (probe_status)
+		return probe_status;
+
+	if (!data_len)
+		return 1;
+
+	msg_hdr_sz = rpm_msg_fmt_ver ? sizeof(struct rpm_message_header_v1) :
+			sizeof(struct rpm_message_header_v0);
+
+	req_hdr_sz = RPM_HDR_SIZE;
+	set_msg_type(cdata->client_buf, msg_type);
+
+	set_req_len(cdata->client_buf, data_len + msg_hdr_sz - req_hdr_sz);
+	msg_size = get_req_len(cdata->client_buf) + req_hdr_sz;
+
+	/* populate data_len */
+	if (msg_size > cdata->numbytes) {
+		/* grow the wire buffer to fit this request */
+		kfree(cdata->buf);
+		cdata->numbytes = msg_size;
+		cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
+	}
+
+	if (!cdata->buf) {
+		pr_err("Failed malloc\n");
+		return 0;
+	}
+
+	tmpbuff = cdata->buf;
+
+	/* pack each valid pair as key word, length word, payload */
+	tmpbuff += msg_hdr_sz;
+	for (i = 0; (i < cdata->write_idx); i++) {
+		/* Sanity check */
+		WARN_ON((tmpbuff - cdata->buf) > cdata->numbytes);
+
+		if (!cdata->kvp[i].valid)
+			continue;
+
+		memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
+		tmpbuff += cdata->kvp[i].nbytes;
+
+		if (set == MSM_RPM_CTX_SLEEP_SET)
+			msm_rpm_notify_sleep_chain(cdata->client_buf,
+					&cdata->kvp[i]);
+
+	}
+
+	memcpy(cdata->buf, cdata->client_buf, msg_hdr_sz);
+	/* sleep-set writes are cached and flushed at suspend time */
+	if ((set == MSM_RPM_CTX_SLEEP_SET) &&
+		!msm_rpm_smd_buffer_request(cdata, msg_size,
+			GFP_FLAG(noirq)))
+		return 1;
+
+	msg_id = msm_rpm_get_next_msg_id();
+	/* Set the version bit for new protocol */
+	set_msg_ver(cdata->buf, rpm_msg_fmt_ver);
+	set_msg_id(cdata->buf, msg_id);
+	set_msg_id(cdata->client_buf, msg_id);
+
+	if (msm_rpm_debug_mask
+	    & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
+		msm_rpm_log_request(cdata);
+
+	if (standalone) {
+		/* no transport in standalone mode; report success */
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+
+		set_data_len(cdata->client_buf, 0);
+		ret = msg_id;
+		return ret;
+	}
+
+	msm_rpm_add_wait_list(msg_id, noack);
+
+	ret = msm_rpm_send_buffer(&cdata->buf[0], msg_size, noirq);
+
+	if (ret == msg_size) {
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+		set_data_len(cdata->client_buf, 0);
+		ret = msg_id;
+		trace_rpm_smd_send_active_set(msg_id,
+			get_rsc_type(cdata->client_buf),
+			get_rsc_id(cdata->client_buf));
+	} else if (ret < msg_size) {
+		struct msm_rpm_wait_data *rc;
+
+		ret = 0;
+		/* NOTE(review): ret was just zeroed, so this always logs
+		 * ret:0 rather than the transport's return value */
+		pr_err("Failed to write data msg_size:%d ret:%d msg_id:%d\n",
+				msg_size, ret, msg_id);
+		rc = msm_rpm_get_entry_from_msg_id(msg_id);
+		if (rc)
+			msm_rpm_free_list_entry(rc);
+	}
+	return ret;
+}
+
+/*
+ * Serialize all active-set sends behind one mutex so concurrent
+ * clients cannot interleave their writes on the transport.
+ */
+static int _msm_rpm_send_request(struct msm_rpm_request *handle, bool noack)
+{
+	int ret;
+	static DEFINE_MUTEX(send_mtx);
+
+	mutex_lock(&send_mtx);
+	ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false, noack);
+	mutex_unlock(&send_mtx);
+
+	return ret;
+}
+
+/* Send a request and return its message id (or an error code). */
+int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+	return _msm_rpm_send_request(handle, false);
+}
+EXPORT_SYMBOL(msm_rpm_send_request);
+
+/* Atomic-context send: bypasses the mutex and busy-waits on the FIFO. */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+	return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true, false);
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noirq);
+
+/* Fire-and-forget send: NULL on success, ERR_PTR on failure. */
+void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
+{
+	int rc = _msm_rpm_send_request(handle, true);
+
+	return rc < 0 ? ERR_PTR(rc) : NULL;
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noack);
+
+/*
+ * Block until the ack for @msg_id arrives and return the error RPM
+ * reported.  Id 1 (unsent/unchanged data) and standalone mode return
+ * 0 immediately; id 0 is the driver's error sentinel.
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *elem;
+	int rc = 0;
+
+	if (!msg_id) {
+		pr_err("Invalid msg id\n");
+		return -ENOMEM;
+	}
+
+	if (msg_id == 1)
+		return rc;
+
+	if (standalone)
+		return rc;
+
+	/* no entry means the ack was already consumed elsewhere */
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+	if (!elem)
+		return rc;
+
+	wait_for_completion(&elem->ack);
+	trace_rpm_smd_ack_recvd(0, msg_id, 0xDEADFEED);
+
+	rc = elem->errno;
+	msm_rpm_free_list_entry(elem);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack);
+
+/*
+ * Busy-poll the SMD channel until the ack for @msg_id has been read,
+ * dispatching every ack seen on the way.  Runs with the read lock
+ * held and interrupts masked, hence no sleeping.
+ */
+static void msm_rpm_smd_read_data_noirq(uint32_t msg_id)
+{
+	uint32_t id = 0;
+
+	while (id != msg_id) {
+		if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+			int errno;
+			char buf[MAX_ERR_BUFFER_SIZE] = {};
+
+			msm_rpm_read_smd_data(buf);
+			id = msm_rpm_get_msg_id_from_ack(buf);
+			errno = msm_rpm_get_error_from_ack(buf);
+			trace_rpm_smd_ack_recvd(1, msg_id, errno);
+			msm_rpm_process_ack(id, errno);
+		}
+	}
+}
+
+/*
+ * Busy-poll glink until @elem's ack has been processed.
+ * NOTE(review): the loop exits only when elem->errno reaches 0; a
+ * non-zero errno delivered by the ack would keep this spinning —
+ * confirm whether that case can occur here.
+ */
+static void msm_rpm_glink_read_data_noirq(struct msm_rpm_wait_data *elem)
+{
+	int ret;
+
+	/* Use rx_poll method to read the message from RPM */
+	while (elem->errno) {
+		ret = glink_rpm_rx_poll(glink_data->glink_handle);
+		if (ret >= 0) {
+			/*
+			 * We might have received the notification.
+			 * Now we have to check whether the notification
+			 * received is what we are interested in.
+			 * Wait for a few usec to get the notification
+			 * before re-trying the poll again.
+			 */
+			udelay(50);
+		} else {
+			pr_err("rx poll return error = %d\n", ret);
+		}
+	}
+}
+
+/*
+ * Non-sleeping variant of msm_rpm_wait_for_ack(): polls the transport
+ * directly with the read lock held.  Returns the errno from the ack.
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *elem;
+	unsigned long flags;
+	int rc = 0;
+
+	if (!msg_id)  {
+		pr_err("Invalid msg id\n");
+		return -ENOMEM;
+	}
+
+	if (msg_id == 1)
+		return 0;
+
+	if (standalone)
+		return 0;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+
+	if (!elem)
+		/* Should this be a bug
+		 * Is it ok for another thread to read the msg?
+		 */
+		goto wait_ack_cleanup;
+
+	/* ack may already have been processed by the tasklet */
+	if (elem->errno != INIT_ERROR) {
+		rc = elem->errno;
+		msm_rpm_free_list_entry(elem);
+		goto wait_ack_cleanup;
+	}
+
+	if (!glink_enabled)
+		msm_rpm_smd_read_data_noirq(msg_id);
+	else
+		msm_rpm_glink_read_data_noirq(elem);
+
+	rc = elem->errno;
+
+	msm_rpm_free_list_entry(elem);
+wait_ack_cleanup:
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+
+	/* let the tasklet drain anything that arrived meanwhile */
+	if (!glink_enabled)
+		if (smd_is_pkt_avail(msm_rpm_data.ch_info))
+			tasklet_schedule(&data_tasklet);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
+
+/*
+ * Build, fill and send a fire-and-forget message.  Returns NULL on
+ * success or an ERR_PTR-encoded negative errno on failure.
+ */
+void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	int i, rc;
+	struct msm_rpm_request *req =
+		msm_rpm_create_request_common(set, rsc_type, rsc_id, nelems,
+			       false);
+
+	if (IS_ERR(req))
+		return req;
+
+	if (!req)
+		/* ERR_PTR() needs a negative errno; the former +ENOMEM
+		 * encoded a bogus pointer IS_ERR() would not flag */
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < nelems; i++) {
+		rc = msm_rpm_add_kvp_data(req, kvp[i].key,
+				kvp[i].data, kvp[i].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = PTR_ERR(msm_rpm_send_request_noack(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc < 0 ? ERR_PTR(rc) : NULL;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noack);
+
+/*
+ * Convenience wrapper: create a request, stage all @nelems pairs,
+ * send it and wait for the ack.  Returns the ack's error code.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	struct msm_rpm_request *req;
+	int idx, rc;
+
+	req = msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+	if (!req)
+		return -ENOMEM;
+
+	for (idx = 0; idx < nelems; idx++) {
+		rc = msm_rpm_add_kvp_data(req, kvp[idx].key,
+				kvp[idx].data, kvp[idx].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message);
+
+/*
+ * Atomic-context variant of msm_rpm_send_message(): create, fill,
+ * send and wait without sleeping.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	struct msm_rpm_request *req;
+	int idx, rc;
+
+	req = msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+	if (!req)
+		return -ENOMEM;
+
+	for (idx = 0; idx < nelems; idx++) {
+		rc = msm_rpm_add_kvp_data_noirq(req, kvp[idx].key,
+					kvp[idx].data, kvp[idx].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+
+/**
+ * During power collapse, the rpm driver disables the SMD interrupts to
+ * make sure that the interrupt doesn't wake us from sleep.  Dirty
+ * sleep-set entries are then flushed; if the flush fails the
+ * interrupt mask is rolled back.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
+{
+	int ret = 0;
+
+	if (standalone)
+		return 0;
+
+	if (!glink_enabled)
+		ret = smd_mask_receive_interrupt(msm_rpm_data.ch_info,
+								true, cpumask);
+	else
+		ret = glink_rpm_mask_rx_interrupt(glink_data->glink_handle,
+							true, (void *)cpumask);
+
+	if (!ret) {
+		ret = msm_rpm_flush_requests(print);
+
+		if (ret) {
+			/* flush failed: re-enable rx interrupts */
+			if (!glink_enabled)
+				smd_mask_receive_interrupt(
+					msm_rpm_data.ch_info, false, NULL);
+			else
+				glink_rpm_mask_rx_interrupt(
+					glink_data->glink_handle, false, NULL);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(msm_rpm_enter_sleep);
+
+/**
+ * When the system resumes from power collapse, the SMD interrupt disabled by
+ * enter function has to be re-enabled to continue processing SMD messages.
+ */
+void msm_rpm_exit_sleep(void)
+{
+	int ret;
+
+	if (standalone)
+		return;
+
+	do  {
+		ret =  msm_rpm_read_sleep_ack();
+	} while (ret > 0);
+
+	if (!glink_enabled)
+		smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL);
+	else
+		glink_rpm_mask_rx_interrupt(glink_data->glink_handle,
+								false, NULL);
+}
+EXPORT_SYMBOL(msm_rpm_exit_sleep);
+
+/*
+ * Whenever there is data from RPM, notify_rx will be called.
+ * This function is invoked in either interrupt or polling context.
+ */
+static void msm_rpm_trans_notify_rx(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr, size_t size)
+{
+	uint32_t msg_id;
+	int errno;
+	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+	struct msm_rpm_wait_data *elem;
+	static DEFINE_SPINLOCK(rx_notify_lock);
+	unsigned long flags;
+
+	if (!size)
+		return;
+
+	WARN_ON(size > MAX_ERR_BUFFER_SIZE);
+
+	spin_lock_irqsave(&rx_notify_lock, flags);
+	memcpy(buf, ptr, size);
+	msg_id = msm_rpm_get_msg_id_from_ack(buf);
+	errno = msm_rpm_get_error_from_ack(buf);
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+
+	/*
+	 * It is applicable for sleep set requests
+	 * Sleep set requests are not added to the
+	 * wait queue list. Without this check we
+	 * run into a NULL pointer dereference issue.
+	 */
+	if (!elem) {
+		spin_unlock_irqrestore(&rx_notify_lock, flags);
+		glink_rx_done(handle, ptr, 0);
+		return;
+	}
+
+	msm_rpm_process_ack(msg_id, errno);
+	spin_unlock_irqrestore(&rx_notify_lock, flags);
+
+	glink_rx_done(handle, ptr, 0);
+}
+
+static void msm_rpm_trans_notify_state(void *handle, const void *priv,
+				   unsigned int event)
+{
+	switch (event) {
+	case GLINK_CONNECTED:
+		glink_data->glink_handle = handle;
+
+		if (IS_ERR_OR_NULL(glink_data->glink_handle)) {
+			pr_err("glink_handle %d\n",
+					(int)PTR_ERR(glink_data->glink_handle));
+			WARN_ON(1);
+		}
+
+		/*
+		 * Do not allow clients to send data to RPM until glink
+		 * is fully open.
+		 */
+		probe_status = 0;
+		pr_info("glink config params: transport=%s, edge=%s, name=%s\n",
+			glink_data->xprt,
+			glink_data->edge,
+			glink_data->name);
+		break;
+	default:
+		pr_err("Unrecognized event %d\n", event);
+		break;
+	};
+}
+
+static void msm_rpm_trans_notify_tx_done(void *handle, const void *priv,
+					const void *pkt_priv, const void *ptr)
+{
+}
+
+static void msm_rpm_glink_open_work(struct work_struct *work)
+{
+	pr_debug("Opening glink channel\n");
+	glink_data->glink_handle = glink_open(glink_data->open_cfg);
+
+	if (IS_ERR_OR_NULL(glink_data->glink_handle)) {
+		pr_err("Error: glink_open failed %d\n",
+				(int)PTR_ERR(glink_data->glink_handle));
+		WARN_ON(1);
+	}
+}
+
+static void msm_rpm_glink_notifier_cb(struct glink_link_state_cb_info *cb_info,
+					void *priv)
+{
+	struct glink_open_config *open_config;
+	static bool first = true;
+
+	if (!cb_info) {
+		pr_err("Missing callback data\n");
+		return;
+	}
+
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		if (first)
+			first = false;
+		else
+			break;
+		open_config = kzalloc(sizeof(*open_config), GFP_KERNEL);
+		if (!open_config) {
+			pr_err("Could not allocate memory\n");
+			break;
+		}
+
+		glink_data->open_cfg = open_config;
+		pr_debug("glink link state up cb receieved\n");
+		INIT_WORK(&glink_data->work, msm_rpm_glink_open_work);
+
+		open_config->priv = glink_data;
+		open_config->name = glink_data->name;
+		open_config->edge = glink_data->edge;
+		open_config->notify_rx = msm_rpm_trans_notify_rx;
+		open_config->notify_tx_done = msm_rpm_trans_notify_tx_done;
+		open_config->notify_state = msm_rpm_trans_notify_state;
+		schedule_work(&glink_data->work);
+		break;
+	default:
+		pr_err("Unrecognised state = %d\n", cb_info->link_state);
+		break;
+	};
+}
+
+static int msm_rpm_glink_dt_parse(struct platform_device *pdev,
+				struct glink_apps_rpm_data *glink_data)
+{
+	char *key = NULL;
+	int ret;
+
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,rpm-glink")) {
+		glink_enabled = true;
+	} else {
+		pr_warn("qcom,rpm-glink compatible not matches\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	key = "qcom,glink-edge";
+	ret = of_property_read_string(pdev->dev.of_node, key,
+							&glink_data->edge);
+	if (ret) {
+		pr_err("Failed to read node: %s, key=%s\n",
+			pdev->dev.of_node->full_name, key);
+		return ret;
+	}
+
+	key = "rpm-channel-name";
+	ret = of_property_read_string(pdev->dev.of_node, key,
+							&glink_data->name);
+	if (ret)
+		pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+			pdev->dev.of_node->full_name, key);
+
+	return ret;
+}
+
+static int msm_rpm_glink_link_setup(struct glink_apps_rpm_data *glink_data,
+						struct platform_device *pdev)
+{
+	struct glink_link_info *link_info;
+	void *link_state_cb_handle;
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+
+	link_info = devm_kzalloc(dev, sizeof(struct glink_link_info),
+								GFP_KERNEL);
+	if (!link_info) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	glink_data->link_info = link_info;
+
+	/*
+	 * Setup link info parameters
+	 */
+	link_info->edge = glink_data->edge;
+	link_info->glink_link_state_notif_cb =
+					msm_rpm_glink_notifier_cb;
+	link_state_cb_handle = glink_register_link_state_cb(link_info, NULL);
+	if (IS_ERR_OR_NULL(link_state_cb_handle)) {
+		pr_err("Could not register cb\n");
+		ret = PTR_ERR(link_state_cb_handle);
+		return ret;
+	}
+
+	spin_lock_init(&msm_rpm_data.smd_lock_read);
+	spin_lock_init(&msm_rpm_data.smd_lock_write);
+
+	return ret;
+}
+
+static int msm_rpm_dev_glink_probe(struct platform_device *pdev)
+{
+	int ret = -ENOMEM;
+	struct device *dev = &pdev->dev;
+
+	glink_data = devm_kzalloc(dev, sizeof(*glink_data), GFP_KERNEL);
+	if (!glink_data)
+		return ret;
+
+	ret = msm_rpm_glink_dt_parse(pdev, glink_data);
+	if (ret < 0) {
+		devm_kfree(dev, glink_data);
+		return ret;
+	}
+
+	ret = msm_rpm_glink_link_setup(glink_data, pdev);
+	if (ret < 0) {
+		/*
+		 * If the glink setup fails there is no
+		 * fall back mechanism to SMD.
+		 */
+		pr_err("GLINK setup fail ret = %d\n", ret);
+		WARN_ON(1);
+	}
+
+	return ret;
+}
+
+static int msm_rpm_dev_probe(struct platform_device *pdev)
+{
+	char *key = NULL;
+	int ret = 0;
+	void __iomem *reg_base;
+	uint32_t version = V0_PROTOCOL_VERSION; /* set to default v0 format */
+
+	/*
+	 * Check for standalone support
+	 */
+	key = "rpm-standalone";
+	standalone = of_property_read_bool(pdev->dev.of_node, key);
+	if (standalone) {
+		probe_status = ret;
+		goto skip_init;
+	}
+
+	reg_base = of_iomap(pdev->dev.of_node, 0);
+
+	if (reg_base) {
+		version = readq_relaxed(reg_base);
+		iounmap(reg_base);
+	}
+
+	if (version == V1_PROTOCOL_VERSION)
+		rpm_msg_fmt_ver = RPM_MSG_V1_FMT;
+
+	pr_debug("RPM-SMD running version %d/n", rpm_msg_fmt_ver);
+
+	ret = msm_rpm_dev_glink_probe(pdev);
+	if (!ret) {
+		pr_info("APSS-RPM communication over GLINK\n");
+		msm_rpm_send_buffer = msm_rpm_glink_send_buffer;
+		of_platform_populate(pdev->dev.of_node, NULL, NULL,
+							&pdev->dev);
+		return ret;
+	}
+	msm_rpm_send_buffer = msm_rpm_send_smd_buffer;
+
+	key = "rpm-channel-name";
+	ret = of_property_read_string(pdev->dev.of_node, key,
+					&msm_rpm_data.ch_name);
+	if (ret) {
+		pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+			pdev->dev.of_node->full_name, key);
+		goto fail;
+	}
+
+	key = "rpm-channel-type";
+	ret = of_property_read_u32(pdev->dev.of_node, key,
+					&msm_rpm_data.ch_type);
+	if (ret) {
+		pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+			pdev->dev.of_node->full_name, key);
+		goto fail;
+	}
+
+	ret = smd_named_open_on_edge(msm_rpm_data.ch_name,
+				msm_rpm_data.ch_type,
+				&msm_rpm_data.ch_info,
+				&msm_rpm_data,
+				msm_rpm_notify);
+	if (ret) {
+		if (ret != -EPROBE_DEFER) {
+			pr_err("%s: Cannot open RPM channel %s %d\n",
+				__func__, msm_rpm_data.ch_name,
+				msm_rpm_data.ch_type);
+		}
+		goto fail;
+	}
+
+	spin_lock_init(&msm_rpm_data.smd_lock_write);
+	spin_lock_init(&msm_rpm_data.smd_lock_read);
+	tasklet_init(&data_tasklet, data_fn_tasklet, 0);
+
+	wait_for_completion(&msm_rpm_data.smd_open);
+
+	smd_disable_read_intr(msm_rpm_data.ch_info);
+
+	msm_rpm_smd_wq = alloc_workqueue("rpm-smd",
+				WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+	if (!msm_rpm_smd_wq) {
+		pr_err("%s: Unable to alloc rpm-smd workqueue\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
+
+	probe_status = ret;
+skip_init:
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	if (standalone)
+		pr_info("RPM running in standalone mode\n");
+fail:
+	return probe_status;
+}
+
+static const struct of_device_id msm_rpm_match_table[] =  {
+	{.compatible = "qcom,rpm-smd"},
+	{.compatible = "qcom,rpm-glink"},
+	{},
+};
+
+static struct platform_driver msm_rpm_device_driver = {
+	.probe = msm_rpm_dev_probe,
+	.driver = {
+		.name = "rpm-smd",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_rpm_match_table,
+	},
+};
+
+int __init msm_rpm_driver_init(void)
+{
+	static bool registered;
+
+	if (registered)
+		return 0;
+	registered = true;
+
+	return platform_driver_register(&msm_rpm_device_driver);
+}
+EXPORT_SYMBOL(msm_rpm_driver_init);
+arch_initcall(msm_rpm_driver_init);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 7663e3c..80c3f91 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1325,9 +1325,11 @@
 	if ((long)state < 0)
 		return -EINVAL;
 
+	mutex_lock(&cdev->lock);
 	cdev->sysfs_cur_state_req = state;
 
 	cdev->updated = false;
+	mutex_unlock(&cdev->lock);
 	thermal_cdev_update(cdev);
 
 	return count;
@@ -1349,9 +1351,11 @@
 	if ((long)state < 0)
 		return -EINVAL;
 
+	mutex_lock(&cdev->lock);
 	cdev->sysfs_min_state_req = state;
 
 	cdev->updated = false;
+	mutex_unlock(&cdev->lock);
 	thermal_cdev_update(cdev);
 
 	return count;
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 899524d..89c1681 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -127,7 +127,7 @@
 } while (0)
 
 #define DMA_RX_BUF_SIZE		(2048)
-#define CONSOLE_YIELD_LEN	(8 * 1024)
+#define UART_CONSOLE_RX_WM	(2)
 struct msm_geni_serial_port {
 	struct uart_port uport;
 	char name[20];
@@ -163,7 +163,6 @@
 	unsigned int cur_baud;
 	int ioctl_count;
 	int edge_count;
-	unsigned int tx_yield_count;
 	bool manual_flow;
 };
 
@@ -1196,20 +1195,13 @@
 	unsigned int fifo_width_bytes =
 		(uart_console(uport) ? 1 : (msm_port->tx_fifo_width >> 3));
 	unsigned int geni_m_irq_en;
+	int temp_tail = 0;
 
-	xmit->tail = (xmit->tail + msm_port->xmit_size) & (UART_XMIT_SIZE - 1);
-	msm_port->xmit_size = 0;
-	if (uart_console(uport) &&
-	    (uport->icount.tx - msm_port->tx_yield_count) > CONSOLE_YIELD_LEN) {
-		msm_port->tx_yield_count = uport->icount.tx;
-		msm_geni_serial_stop_tx(uport);
-		uart_write_wakeup(uport);
-		goto exit_handle_tx;
-	}
-
+	xmit_size = uart_circ_chars_pending(xmit);
 	tx_fifo_status = geni_read_reg_nolog(uport->membase,
 					SE_GENI_TX_FIFO_STATUS);
-	if (uart_circ_empty(xmit) && !tx_fifo_status) {
+	/* Both FIFO and framework buffer are drained */
+	if ((xmit_size == msm_port->xmit_size) && !tx_fifo_status) {
 		/*
 		 * This will balance out the power vote put in during start_tx
 		 * allowing the device to suspend.
@@ -1219,9 +1211,12 @@
 				"%s.Power Off.\n", __func__);
 			msm_geni_serial_power_off(uport);
 		}
+		msm_port->xmit_size = 0;
+		uart_circ_clear(xmit);
 		msm_geni_serial_stop_tx(uport);
 		goto exit_handle_tx;
 	}
+	xmit_size -= msm_port->xmit_size;
 
 	if (!uart_console(uport)) {
 		geni_m_irq_en = geni_read_reg_nolog(uport->membase,
@@ -1235,9 +1230,9 @@
 
 	avail_fifo_bytes = (msm_port->tx_fifo_depth - msm_port->tx_wm) *
 							fifo_width_bytes;
-	xmit_size = uart_circ_chars_pending(xmit);
-	if (xmit_size > (UART_XMIT_SIZE - xmit->tail))
-		xmit_size = UART_XMIT_SIZE - xmit->tail;
+	temp_tail = (xmit->tail + msm_port->xmit_size) & (UART_XMIT_SIZE - 1);
+	if (xmit_size > (UART_XMIT_SIZE - temp_tail))
+		xmit_size = (UART_XMIT_SIZE - temp_tail);
 	if (xmit_size > avail_fifo_bytes)
 		xmit_size = avail_fifo_bytes;
 
@@ -1247,33 +1242,29 @@
 	msm_geni_serial_setup_tx(uport, xmit_size);
 
 	bytes_remaining = xmit_size;
-	dump_ipc(msm_port->ipc_log_tx, "Tx", (char *)&xmit->buf[xmit->tail], 0,
+	dump_ipc(msm_port->ipc_log_tx, "Tx", (char *)&xmit->buf[temp_tail], 0,
 								xmit_size);
 	while (i < xmit_size) {
 		unsigned int tx_bytes;
 		unsigned int buf = 0;
-		int temp_tail;
 		int c;
 
 		tx_bytes = ((bytes_remaining < fifo_width_bytes) ?
 					bytes_remaining : fifo_width_bytes);
 
-		temp_tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);
 		for (c = 0; c < tx_bytes ; c++)
 			buf |= (xmit->buf[temp_tail + c] << (c * 8));
 		geni_write_reg_nolog(buf, uport->membase, SE_GENI_TX_FIFOn);
 		i += tx_bytes;
+		temp_tail = (temp_tail + tx_bytes) & (UART_XMIT_SIZE - 1);
 		uport->icount.tx += tx_bytes;
 		bytes_remaining -= tx_bytes;
 		/* Ensure FIFO write goes through */
 		wmb();
 	}
-	if (uart_console(uport)) {
+	if (uart_console(uport))
 		msm_geni_serial_poll_cancel_tx(uport);
-		xmit->tail = (xmit->tail + xmit_size) & (UART_XMIT_SIZE - 1);
-	} else {
-		msm_port->xmit_size = xmit_size;
-	}
+	msm_port->xmit_size += xmit_size;
 exit_handle_tx:
 	uart_write_wakeup(uport);
 	return ret;
@@ -1368,6 +1359,7 @@
 	unsigned long flags;
 	unsigned int m_irq_en;
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+	struct tty_port *tport = &uport->state->port;
 	bool drop_rx = false;
 
 	spin_lock_irqsave(&uport->lock, flags);
@@ -1397,7 +1389,8 @@
 	}
 
 	if (s_irq_status & S_RX_FIFO_WR_ERR_EN) {
-		uport->icount.buf_overrun++;
+		uport->icount.overrun++;
+		tty_insert_flip_char(tport, 0, TTY_OVERRUN);
 		IPC_LOG_MSG(msm_port->ipc_log_misc,
 			"%s.sirq 0x%x buf_overrun:%d\n",
 			__func__, s_irq_status, uport->icount.buf_overrun);
@@ -1536,7 +1529,10 @@
 	 * TX WM level at 10% TX_FIFO_DEPTH.
 	 */
 	port->rx_rfr = port->rx_fifo_depth - 2;
-	port->rx_wm = port->rx_fifo_depth >>  1;
+	if (!uart_console(&port->uport))
+		port->rx_wm = port->rx_fifo_depth >>  1;
+	else
+		port->rx_wm = UART_CONSOLE_RX_WM;
 	port->tx_wm = 2;
 }
 
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index b6ad39b..874499d 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -245,6 +245,10 @@
 	struct notifier_block	dwc3_cpu_notifier;
 	struct notifier_block	usbdev_nb;
 	bool			hc_died;
+	/* for usb connector either type-C or microAB */
+	bool			type_c;
+	/* whether to vote for VBUS reg in host mode */
+	bool			no_vbus_vote_type_c;
 
 	struct extcon_dev	*extcon_vbus;
 	struct extcon_dev	*extcon_id;
@@ -2898,7 +2902,7 @@
 	return NOTIFY_DONE;
 }
 
-static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
+static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc, int start_idx)
 {
 	struct device_node *node = mdwc->dev->of_node;
 	struct extcon_dev *edev;
@@ -2907,8 +2911,11 @@
 	if (!of_property_read_bool(node, "extcon"))
 		return 0;
 
-	/* Use first phandle (mandatory) for USB vbus status notification */
-	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
+	/*
+	 * Use mandatory phandle (index 0 for type-C; index 3 for microUSB)
+	 * for USB vbus status notification
+	 */
+	edev = extcon_get_edev_by_phandle(mdwc->dev, start_idx);
 	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
 		return PTR_ERR(edev);
 
@@ -2923,9 +2930,12 @@
 		}
 	}
 
-	/* Use second phandle (optional) for USB ID status notification */
-	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
-		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
+	/*
+	 * Use optional phandle (index 1 for type-C; index 4 for microUSB)
+	 * for USB ID status notification
+	 */
+	if (of_count_phandle_with_args(node, "extcon", NULL) > start_idx + 1) {
+		edev = extcon_get_edev_by_phandle(mdwc->dev, start_idx + 1);
 		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
 			ret = PTR_ERR(edev);
 			goto err;
@@ -2953,12 +2963,12 @@
 	}
 
 	edev = NULL;
-	/* Use third phandle (optional) for EUD based detach/attach events */
+	/* Use optional phandle (index 2) for EUD based detach/attach events */
 	if (of_count_phandle_with_args(node, "extcon", NULL) > 2) {
 		edev = extcon_get_edev_by_phandle(mdwc->dev, 2);
 		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
 			ret = PTR_ERR(edev);
-			goto err1;
+			goto err2;
 		}
 	}
 
@@ -3408,10 +3418,6 @@
 	if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
 		pm_runtime_get_noresume(mdwc->dev);
 
-	ret = dwc3_msm_extcon_register(mdwc);
-	if (ret)
-		goto put_dwc3;
-
 	ret = of_property_read_u32(node, "qcom,pm-qos-latency",
 				&mdwc->pm_qos_latency);
 	if (ret) {
@@ -3419,15 +3425,34 @@
 		mdwc->pm_qos_latency = 0;
 	}
 
+	mdwc->no_vbus_vote_type_c = of_property_read_bool(node,
+					"qcom,no-vbus-vote-with-type-C");
+
+	/* Mark type-C as true by default */
+	mdwc->type_c = true;
+
 	mdwc->usb_psy = power_supply_get_by_name("usb");
 	if (!mdwc->usb_psy) {
 		dev_warn(mdwc->dev, "Could not get usb power_supply\n");
 		pval.intval = -EINVAL;
 	} else {
 		power_supply_get_property(mdwc->usb_psy,
+			POWER_SUPPLY_PROP_CONNECTOR_TYPE, &pval);
+		if (pval.intval == POWER_SUPPLY_CONNECTOR_MICRO_USB)
+			mdwc->type_c = false;
+		power_supply_get_property(mdwc->usb_psy,
 			POWER_SUPPLY_PROP_PRESENT, &pval);
 	}
 
+	/*
+	 * Extcon phandles starting indices in DT:
+	 * type-C : 0
+	 * microUSB : 3
+	 */
+	ret = dwc3_msm_extcon_register(mdwc, mdwc->type_c ? 0 : 3);
+	if (ret)
+		goto put_psy;
+
 	mutex_init(&mdwc->suspend_resume_mutex);
 	/* Update initial VBUS/ID state from extcon */
 	if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
@@ -3456,6 +3481,12 @@
 
 	return 0;
 
+put_psy:
+	if (mdwc->usb_psy)
+		power_supply_put(mdwc->usb_psy);
+
+	if (cpu_to_affin)
+		unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
 put_dwc3:
 	if (mdwc->bus_perf_client)
 		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
@@ -3478,6 +3509,8 @@
 	int ret_pm;
 
 	device_remove_file(&pdev->dev, &dev_attr_mode);
+	if (mdwc->usb_psy)
+		power_supply_put(mdwc->usb_psy);
 
 	if (cpu_to_affin)
 		unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
@@ -3680,7 +3713,8 @@
 	 * IS_ERR: regulator could not be obtained, so skip using it
 	 * Valid pointer otherwise
 	 */
-	if (!mdwc->vbus_reg) {
+	if (!mdwc->vbus_reg && (!mdwc->type_c ||
+				(mdwc->type_c && !mdwc->no_vbus_vote_type_c))) {
 		mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
 					"vbus_dwc3");
 		if (IS_ERR(mdwc->vbus_reg) &&
@@ -3705,7 +3739,7 @@
 		pm_runtime_get_sync(mdwc->dev);
 		dbg_event(0xFF, "StrtHost gync",
 			atomic_read(&mdwc->dev->power.usage_count));
-		if (!IS_ERR(mdwc->vbus_reg))
+		if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
 			ret = regulator_enable(mdwc->vbus_reg);
 		if (ret) {
 			dev_err(mdwc->dev, "unable to enable vbus_reg\n");
@@ -3729,7 +3763,7 @@
 			dev_err(mdwc->dev,
 				"%s: failed to add XHCI pdev ret=%d\n",
 				__func__, ret);
-			if (!IS_ERR(mdwc->vbus_reg))
+			if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
 				regulator_disable(mdwc->vbus_reg);
 			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
 			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
@@ -3770,7 +3804,7 @@
 		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
 
 		usb_unregister_atomic_notify(&mdwc->usbdev_nb);
-		if (!IS_ERR(mdwc->vbus_reg))
+		if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
 			ret = regulator_disable(mdwc->vbus_reg);
 		if (ret) {
 			dev_err(mdwc->dev, "unable to disable vbus_reg\n");
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 98509f2..31b2731 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -269,7 +269,7 @@
 {
 	int	value = -EINVAL;
 
-	DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
+	DBG(config->cdev, "adding '%s'/%pK to config '%s'/%pK\n",
 			function->name, function,
 			config->label, config);
 
@@ -312,7 +312,7 @@
 
 done:
 	if (value)
-		DBG(config->cdev, "adding '%s'/%p --> %d\n",
+		DBG(config->cdev, "adding '%s'/%pK --> %d\n",
 				function->name, function, value);
 	return value;
 }
@@ -933,7 +933,7 @@
 
 		result = f->set_alt(f, tmp, 0);
 		if (result < 0) {
-			DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
+			DBG(cdev, "interface %d (%s/%pK) alt 0 --> %d\n",
 					tmp, f->name, f, result);
 
 			reset_config(cdev);
@@ -1006,7 +1006,7 @@
 	if (!bind)
 		goto done;
 
-	DBG(cdev, "adding config #%u '%s'/%p\n",
+	DBG(cdev, "adding config #%u '%s'/%pK\n",
 			config->bConfigurationValue,
 			config->label, config);
 
@@ -1023,7 +1023,7 @@
 					struct usb_function, list);
 			list_del(&f->list);
 			if (f->unbind) {
-				DBG(cdev, "unbind function '%s'/%p\n",
+				DBG(cdev, "unbind function '%s'/%pK\n",
 					f->name, f);
 				f->unbind(config, f);
 				/* may free memory for "f" */
@@ -1034,7 +1034,7 @@
 	} else {
 		unsigned	i;
 
-		DBG(cdev, "cfg %d/%p speeds:%s%s%s%s\n",
+		DBG(cdev, "cfg %d/%pK speeds:%s%s%s%s\n",
 			config->bConfigurationValue, config,
 			config->superspeed_plus ? " superplus" : "",
 			config->superspeed ? " super" : "",
@@ -1050,7 +1050,7 @@
 
 			if (!f)
 				continue;
-			DBG(cdev, "  interface %d = %s/%p\n",
+			DBG(cdev, "  interface %d = %s/%pK\n",
 				i, f->name, f);
 		}
 	}
@@ -1076,14 +1076,14 @@
 				struct usb_function, list);
 		list_del(&f->list);
 		if (f->unbind) {
-			DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
+			DBG(cdev, "unbind function '%s'/%pK\n", f->name, f);
 			f->unbind(config, f);
 			/* may free memory for "f" */
 		}
 	}
 	list_del(&config->list);
 	if (config->unbind) {
-		DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
+		DBG(cdev, "unbind config '%s'/%pK\n", config->label, config);
 		config->unbind(config);
 			/* may free memory for "c" */
 	}
@@ -1491,7 +1491,7 @@
 	else if (cdev->os_desc_req == req)
 		cdev->os_desc_pending = false;
 	else
-		WARN(1, "unknown request %p\n", req);
+		WARN(1, "unknown request %pK\n", req);
 }
 
 static int composite_ep0_queue(struct usb_composite_dev *cdev,
@@ -1506,7 +1506,7 @@
 		else if (cdev->os_desc_req == req)
 			cdev->os_desc_pending = true;
 		else
-			WARN(1, "unknown request %p\n", req);
+			WARN(1, "unknown request %pK\n", req);
 	}
 
 	return ret;
@@ -2545,7 +2545,13 @@
 	spin_lock_irqsave(&cdev->lock, flags);
 
 	if (cdev->delayed_status == 0) {
+		if (!cdev->config) {
+			spin_unlock_irqrestore(&cdev->lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&cdev->lock, flags);
 		WARN(cdev, "%s: Unexpected call\n", __func__);
+		return;
 
 	} else if (--cdev->delayed_status == 0) {
 		DBG(cdev, "%s: Completing delayed status\n", __func__);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index f915e55..16b6619 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -163,7 +163,7 @@
 		if (!str)
 			return -ENOMEM;
 	}
-	strncpy(str, s, MAX_USB_STRING_WITH_NULL_LEN);
+	strlcpy(str, s, MAX_USB_STRING_WITH_NULL_LEN);
 	if (str[ret - 1] == '\n')
 		str[ret - 1] = '\0';
 	*s_copy = str;
@@ -1261,7 +1261,7 @@
 			list_move_tail(&f->list, &cfg->func_list);
 			if (f->unbind) {
 				dev_dbg(&gi->cdev.gadget->dev,
-				         "unbind function '%s'/%p\n",
+					"unbind function '%s'/%pK\n",
 				         f->name, f);
 				f->unbind(c, f);
 			}
@@ -1464,7 +1464,7 @@
 	}
 
 	if (!uevent_sent) {
-		pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+		pr_info("%s: did not send uevent (%d %d %pK)\n", __func__,
 			gi->connected, gi->sw_connected, cdev->config);
 	}
 }
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 9240956..899cbf1 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -564,7 +564,7 @@
 	struct usb_ep *ep;
 	int i;
 
-	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+	DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
 
 	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
 	if (!ep) {
@@ -655,7 +655,7 @@
 		r = -EIO;
 		goto done;
 	} else {
-		pr_debug("rx %p queue\n", req);
+		pr_debug("rx %pK queue\n", req);
 	}
 
 	/* wait for a request to complete */
@@ -678,7 +678,7 @@
 		if (req->actual == 0)
 			goto requeue_req;
 
-		pr_debug("rx %p %u\n", req, req->actual);
+		pr_debug("rx %pK %u\n", req, req->actual);
 		xfer = (req->actual < count) ? req->actual : count;
 		r = xfer;
 		if (copy_to_user(buf, req->buf, xfer))
@@ -993,7 +993,7 @@
 	int			id;
 	int			ret;
 
-	DBG(cdev, "acc_function_bind dev: %p\n", dev);
+	DBG(cdev, "acc_function_bind dev: %pK\n", dev);
 
 	if (configfs) {
 		if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
@@ -1180,7 +1180,7 @@
 	list_for_each_safe(entry, temp, &new_list) {
 		hid = list_entry(entry, struct acc_hid_dev, list);
 		if (acc_hid_init(hid)) {
-			pr_err("can't add HID device %p\n", hid);
+			pr_err("can't add HID device %pK\n", hid);
 			acc_hid_delete(hid);
 		} else {
 			spin_lock_irqsave(&dev->lock, flags);
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 5e3828d..4f2b847 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -704,7 +704,7 @@
 	if (acm->notify_req)
 		gs_free_req(acm->notify, acm->notify_req);
 
-	ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+	ERROR(cdev, "%s/%pK: can't bind, err %d\n", f->name, f, status);
 
 	return status;
 }
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 5804840..4cb2817 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -851,7 +851,7 @@
 	int i;
 	struct usb_request *req;
 
-	pr_debug("ep:%p head:%p num:%d size:%d cb:%p",
+	pr_debug("ep:%pK head:%p num:%d size:%d cb:%p",
 				ep, head, num, size, cb);
 
 	for (i = 0; i < num; i++) {
@@ -901,7 +901,7 @@
 		ret = usb_ep_queue(ep, req, GFP_KERNEL);
 		spin_lock_irqsave(&port->port_lock, flags);
 		if (ret) {
-			pr_err("port(%d):%p usb ep(%s) queue failed\n",
+			pr_err("port(%d):%pK usb ep(%s) queue failed\n",
 					port->port_num, port, ep->name);
 			list_add(&req->list, pool);
 			break;
@@ -916,7 +916,7 @@
 	struct f_cdev *port = ep->driver_data;
 	unsigned long flags;
 
-	pr_debug("ep:(%p)(%s) port:%p req_status:%d req->actual:%u\n",
+	pr_debug("ep:(%pK)(%s) port:%p req_status:%d req->actual:%u\n",
 			ep, ep->name, port, req->status, req->actual);
 	if (!port) {
 		pr_err("port is null\n");
@@ -942,7 +942,7 @@
 	unsigned long flags;
 	struct f_cdev *port = ep->driver_data;
 
-	pr_debug("ep:(%p)(%s) port:%p req_stats:%d\n",
+	pr_debug("ep:(%pK)(%s) port:%p req_stats:%d\n",
 			ep, ep->name, port, req->status);
 
 	if (!port) {
@@ -975,7 +975,7 @@
 	int ret = -ENODEV;
 	unsigned long	flags;
 
-	pr_debug("port: %p\n", port);
+	pr_debug("port: %pK\n", port);
 
 	spin_lock_irqsave(&port->port_lock, flags);
 	if (!port->is_connected)
@@ -1018,7 +1018,7 @@
 	struct usb_ep	*out;
 	unsigned long	flags;
 
-	pr_debug("port:%p\n", port);
+	pr_debug("port:%pK\n", port);
 
 	in = port->port_usb.in;
 	out = port->port_usb.out;
@@ -1061,7 +1061,7 @@
 	}
 
 	file->private_data = port;
-	pr_debug("opening port(%s)(%p)\n", port->name, port);
+	pr_debug("opening port(%s)(%pK)\n", port->name, port);
 	ret = wait_event_interruptible(port->open_wq,
 					port->is_connected);
 	if (ret) {
@@ -1074,7 +1074,7 @@
 	spin_unlock_irqrestore(&port->port_lock, flags);
 	usb_cser_start_rx(port);
 
-	pr_debug("port(%s)(%p) open is success\n", port->name, port);
+	pr_debug("port(%s)(%pK) open is success\n", port->name, port);
 
 	return 0;
 }
@@ -1094,7 +1094,7 @@
 	port->port_open = false;
 	port->cbits_updated = false;
 	spin_unlock_irqrestore(&port->port_lock, flags);
-	pr_debug("port(%s)(%p) is closed.\n", port->name, port);
+	pr_debug("port(%s)(%pK) is closed.\n", port->name, port);
 
 	return 0;
 }
@@ -1118,7 +1118,7 @@
 		return -EINVAL;
 	}
 
-	pr_debug("read on port(%s)(%p) count:%zu\n", port->name, port, count);
+	pr_debug("read on port(%s)(%pK) count:%zu\n", port->name, port, count);
 	spin_lock_irqsave(&port->port_lock, flags);
 	current_rx_req = port->current_rx_req;
 	pending_rx_bytes = port->pending_rx_bytes;
@@ -1219,7 +1219,7 @@
 	}
 
 	spin_lock_irqsave(&port->port_lock, flags);
-	pr_debug("write on port(%s)(%p)\n", port->name, port);
+	pr_debug("write on port(%s)(%pK)\n", port->name, port);
 
 	if (!port->is_connected) {
 		spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1388,7 +1388,7 @@
 	case TIOCMBIC:
 	case TIOCMBIS:
 	case TIOCMSET:
-		pr_debug("TIOCMSET on port(%s)%p\n", port->name, port);
+		pr_debug("TIOCMSET on port(%s)%pK\n", port->name, port);
 		i = get_user(val, (uint32_t *)arg);
 		if (i) {
 			pr_err("Error getting TIOCMSET value\n");
@@ -1397,7 +1397,7 @@
 		ret = f_cdev_tiocmset(port, val, ~val);
 		break;
 	case TIOCMGET:
-		pr_debug("TIOCMGET on port(%s)%p\n", port->name, port);
+		pr_debug("TIOCMGET on port(%s)%pK\n", port->name, port);
 		ret = f_cdev_tiocmget(port);
 		if (ret >= 0) {
 			ret = put_user(ret, (uint32_t *)arg);
@@ -1447,14 +1447,14 @@
 		return -ENODEV;
 	}
 
-	pr_debug("port(%s) (%p)\n", port->name, port);
+	pr_debug("port(%s) (%pK)\n", port->name, port);
 
 	cser = &port->port_usb;
 	cser->notify_modem = usb_cser_notify_modem;
 
 	ret = usb_ep_enable(cser->in);
 	if (ret) {
-		pr_err("usb_ep_enable failed eptype:IN ep:%p, err:%d",
+		pr_err("usb_ep_enable failed eptype:IN ep:%pK, err:%d",
 					cser->in, ret);
 		return ret;
 	}
@@ -1462,7 +1462,7 @@
 
 	ret = usb_ep_enable(cser->out);
 	if (ret) {
-		pr_err("usb_ep_enable failed eptype:OUT ep:%p, err: %d",
+		pr_err("usb_ep_enable failed eptype:OUT ep:%pK, err: %d",
 					cser->out, ret);
 		cser->in->driver_data = 0;
 		return ret;
@@ -1574,7 +1574,7 @@
 		goto err_create_dev;
 	}
 
-	pr_info("port_name:%s (%p) portno:(%d)\n",
+	pr_info("port_name:%s (%pK) portno:(%d)\n",
 			port->name, port, port->port_num);
 	return port;
 
diff --git a/drivers/usb/gadget/function/f_diag.c b/drivers/usb/gadget/function/f_diag.c
index be22de048..f08e443 100644
--- a/drivers/usb/gadget/function/f_diag.c
+++ b/drivers/usb/gadget/function/f_diag.c
@@ -291,7 +291,7 @@
 	}
 
 update_dload:
-	pr_debug("%s: dload:%p pid:%x serial_num:%s\n",
+	pr_debug("%s: dload:%pK pid:%x serial_num:%s\n",
 				__func__, diag_dload, local_diag_dload.pid,
 				local_diag_dload.serial_number);
 
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index ce5e85a..f0042ec 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -766,7 +766,7 @@
 	if (ep && ep->req && likely(req->context)) {
 		struct ffs_ep *ep = _ep->driver_data;
 		ep->status = req->status ? req->status : req->actual;
-		ffs_log("ep status %d for req %p", ep->status, req);
+		ffs_log("ep status %d for req %pK", ep->status, req);
 		/* Set is_busy false to indicate completion of last request */
 		ep->is_busy = false;
 		complete(req->context);
@@ -1912,7 +1912,7 @@
 	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
 		ffs->setup_state, ffs->flags);
 
-	pr_debug("%s: ffs->gadget= %p, ffs->flags= %lu\n",
+	pr_debug("%s: ffs->gadget= %pK, ffs->flags= %lu\n",
 				__func__, ffs->gadget, ffs->flags);
 	ffs_closed(ffs);
 
@@ -2003,7 +2003,7 @@
 
 	ffs->gadget = cdev->gadget;
 
-	ffs_log("exit: state %d setup_state %d flag %lu gadget %p\n",
+	ffs_log("exit: state %d setup_state %d flag %lu gadget %pK\n",
 			ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
 
 	ffs_data_get(ffs);
@@ -2019,7 +2019,7 @@
 		ffs->ep0req = NULL;
 		ffs->gadget = NULL;
 		clear_bit(FFS_FL_BOUND, &ffs->flags);
-		ffs_log("state %d setup_state %d flag %lu gadget %p\n",
+		ffs_log("state %d setup_state %d flag %lu gadget %pK\n",
 			ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
 		ffs_data_put(ffs);
 	}
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index a26d6df..71e84fd 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -930,7 +930,7 @@
 	struct gsi_inst_status *inst_cur;
 
 	if (!c_port) {
-		pr_err_ratelimited("%s: gsi ctrl port %p", __func__, c_port);
+		pr_err_ratelimited("%s: gsi ctrl port %pK", __func__, c_port);
 		return -ENODEV;
 	}
 
@@ -1019,7 +1019,7 @@
 	gsi = inst_cur->opts->gsi;
 	c_port = &inst_cur->opts->gsi->c_port;
 	if (!c_port) {
-		log_event_err("%s: gsi ctrl port %p", __func__, c_port);
+		log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
 		return -ENODEV;
 	}
 
@@ -1108,7 +1108,7 @@
 	req = c_port->notify_req;
 
 	if (!c_port || !req || !req->buf) {
-		log_event_err("%s: c_port %p req %p req->buf %p",
+		log_event_err("%s: c_port %pK req %p req->buf %p",
 			__func__, c_port, req, req ? req->buf : req);
 		return -ENODEV;
 	}
@@ -1186,7 +1186,7 @@
 	c_port = &gsi->c_port;
 
 	if (!c_port) {
-		log_event_err("%s: gsi ctrl port %p", __func__, c_port);
+		log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
 		return -ENODEV;
 	}
 
@@ -1325,7 +1325,7 @@
 	gsi = inst_cur->opts->gsi;
 	c_port = &inst_cur->opts->gsi->c_port;
 	if (!c_port) {
-		log_event_err("%s: gsi ctrl port %p", __func__, c_port);
+		log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
 		return -ENODEV;
 	}
 
@@ -1454,7 +1454,7 @@
 	struct gsi_data_port *d_port;
 
 	if (!gsi) {
-		pr_err("%s: gsi prot ctx is %p", __func__, gsi);
+		pr_err("%s: gsi prot ctx is %pK", __func__, gsi);
 		return;
 	}
 
@@ -1678,7 +1678,7 @@
 	struct f_gsi *gsi = req->context;
 	struct gsi_ntb_info *ntb = NULL;
 
-	log_event_dbg("dev:%p", gsi);
+	log_event_dbg("dev:%pK", gsi);
 
 	req->context = NULL;
 	if (req->status || req->actual != req->length) {
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index ca8ed69..aea32e4 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -499,7 +499,7 @@
 	struct usb_ep *ep;
 	int i;
 
-	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+	DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
 
 	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
 	if (!ep) {
@@ -644,7 +644,7 @@
 		r = -EIO;
 		goto done;
 	} else {
-		DBG(cdev, "rx %p queue\n", req);
+		DBG(cdev, "rx %pK queue\n", req);
 	}
 
 	/* wait for a request to complete */
@@ -670,7 +670,7 @@
 		if (req->actual == 0)
 			goto requeue_req;
 
-		DBG(cdev, "rx %p %d\n", req, req->actual);
+		DBG(cdev, "rx %pK %d\n", req, req->actual);
 		xfer = (req->actual < count) ? req->actual : count;
 		r = xfer;
 		if (copy_to_user(buf, req->buf, xfer))
@@ -955,7 +955,7 @@
 		}
 
 		if (write_req) {
-			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+			DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
 			start_time = ktime_get();
 			mutex_lock(&dev->read_mutex);
 			if (dev->state == STATE_OFFLINE) {
@@ -1410,7 +1410,7 @@
 	struct mtp_instance *fi_mtp;
 
 	dev->cdev = cdev;
-	DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+	DBG(cdev, "mtp_function_bind dev: %pK\n", dev);
 
 	/* allocate interface ID(s) */
 	id = usb_interface_id(c, f);
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index d43e86c..649ff4d 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -377,7 +377,7 @@
 	return 0;
 
 fail:
-	ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+	ERROR(cdev, "%s/%pK: can't bind, err %d\n", f->name, f, status);
 
 	return status;
 }
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 5605c1e..c330814 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -134,6 +134,7 @@
 
 	/* The ALSA Sound Card it represents on the USB-Client side */
 	struct snd_uac2_chip uac2;
+	struct device *gdev;
 };
 
 static inline
@@ -463,7 +464,7 @@
 	c_chmask = opts->c_chmask;
 
 	/* Choose any slot, with no id */
-	err = snd_card_new(&pdev->dev, -1, NULL, THIS_MODULE, 0, &card);
+	err = snd_card_new(audio_dev->gdev, -1, NULL, THIS_MODULE, 0, &card);
 	if (err < 0)
 		return err;
 
@@ -1237,6 +1238,7 @@
 		goto err;
 	}
 
+	agdev->gdev = &gadget->dev;
 	ret = alsa_uac2_init(agdev);
 	if (ret)
 		goto err;
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 7d3bb62..d1649f8 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -96,7 +96,7 @@
  * Driver specific constants
  */
 
-#define UVC_NUM_REQUESTS			4
+#define UVC_NUM_REQUESTS			16
 #define UVC_MAX_REQUEST_SIZE			64
 #define UVC_MAX_EVENTS				4
 
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 8820e11..984b1d7 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -2150,6 +2150,7 @@
 		if (ret)
 			return ret;
 		grp = &f->fmt->group;
+		j = 0;
 		list_for_each_entry(item, &grp->cg_children, ci_entry) {
 			frm = to_uvcg_frame(item);
 			ret = fun(frm, priv2, priv3, j++, UVCG_FRAME);
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 0f01c04..2ceb3ce 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -239,7 +239,11 @@
 	unsigned int i;
 	int ret = -ENOMEM;
 
-	BUG_ON(video->req_size);
+	if (video->req_size) {
+		pr_err("%s: close the video node and reopen it\n",
+				__func__);
+		return -EBUSY;
+	}
 
 	req_size = video->ep->maxpacket
 		 * max_t(unsigned int, video->ep->maxburst, 1)
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index e355e35..f7ff9e8f 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -187,15 +187,14 @@
 	return ret;
 }
 
-static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
-						bool toggle_vdd)
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
 {
 	int ret = 0;
 
 	dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n",
 			__func__, on ? "on" : "off", qphy->power_enabled);
 
-	if (toggle_vdd && qphy->power_enabled == on) {
+	if (qphy->power_enabled == on) {
 		dev_dbg(qphy->phy.dev, "PHYs' regulators are already ON.\n");
 		return 0;
 	}
@@ -203,19 +202,17 @@
 	if (!on)
 		goto disable_vdda33;
 
-	if (toggle_vdd) {
-		ret = qusb_phy_config_vdd(qphy, true);
-		if (ret) {
-			dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
-								ret);
-			goto err_vdd;
-		}
+	ret = qusb_phy_config_vdd(qphy, true);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+							ret);
+		goto err_vdd;
+	}
 
-		ret = regulator_enable(qphy->vdd);
-		if (ret) {
-			dev_err(qphy->phy.dev, "Unable to enable VDD\n");
-			goto unconfig_vdd;
-		}
+	ret = regulator_enable(qphy->vdd);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+		goto unconfig_vdd;
 	}
 
 	ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
@@ -258,8 +255,7 @@
 		goto unset_vdd33;
 	}
 
-	if (toggle_vdd)
-		qphy->power_enabled = true;
+	qphy->power_enabled = true;
 
 	pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
 	return ret;
@@ -297,21 +293,18 @@
 		dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
 
 disable_vdd:
-	if (toggle_vdd) {
-		ret = regulator_disable(qphy->vdd);
-		if (ret)
-			dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+	ret = regulator_disable(qphy->vdd);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
 								ret);
 
 unconfig_vdd:
-		ret = qusb_phy_config_vdd(qphy, false);
-		if (ret)
-			dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
+	ret = qusb_phy_config_vdd(qphy, false);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable unconfig VDD:%d\n",
 								ret);
-	}
 err_vdd:
-	if (toggle_vdd)
-		qphy->power_enabled = false;
+	qphy->power_enabled = false;
 	dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
 	return ret;
 }
@@ -375,7 +368,7 @@
 
 	dev_dbg(phy->dev, "%s\n", __func__);
 
-	ret = qusb_phy_enable_power(qphy, true, true);
+	ret = qusb_phy_enable_power(qphy, true);
 	if (ret)
 		return ret;
 
@@ -623,7 +616,7 @@
 
 
 			qusb_phy_enable_clocks(qphy, false);
-			qusb_phy_enable_power(qphy, false, true);
+			qusb_phy_enable_power(qphy, false);
 		}
 		qphy->suspended = true;
 	} else {
@@ -635,7 +628,7 @@
 			writel_relaxed(0x00,
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 		} else {
-			qusb_phy_enable_power(qphy, true, true);
+			qusb_phy_enable_power(qphy, true);
 			qusb_phy_enable_clocks(qphy, true);
 		}
 		qphy->suspended = false;
@@ -677,7 +670,7 @@
 				__func__, qphy->dpdm_enable);
 
 	if (!qphy->dpdm_enable) {
-		ret = qusb_phy_enable_power(qphy, true, false);
+		ret = qusb_phy_enable_power(qphy, true);
 		if (ret < 0) {
 			dev_dbg(qphy->phy.dev,
 				"dpdm regulator enable failed:%d\n", ret);
@@ -698,11 +691,15 @@
 				__func__, qphy->dpdm_enable);
 
 	if (qphy->dpdm_enable) {
-		ret = qusb_phy_enable_power(qphy, false, false);
-		if (ret < 0) {
-			dev_dbg(qphy->phy.dev,
-				"dpdm regulator disable failed:%d\n", ret);
-			return ret;
+		if (!qphy->cable_connected) {
+			dev_dbg(qphy->phy.dev, "turn off for HVDCP case\n");
+			ret = qusb_phy_enable_power(qphy, false);
+			if (ret < 0) {
+				dev_dbg(qphy->phy.dev,
+					"dpdm regulator disable failed:%d\n",
+					ret);
+				return ret;
+			}
 		}
 		qphy->dpdm_enable = false;
 	}
@@ -1029,7 +1026,7 @@
 		qphy->clocks_enabled = false;
 	}
 
-	qusb_phy_enable_power(qphy, false, true);
+	qusb_phy_enable_power(qphy, false);
 
 	return 0;
 }
diff --git a/include/linux/msm_dma_iommu_mapping.h b/include/linux/msm_dma_iommu_mapping.h
index a46eb87..44dabb1 100644
--- a/include/linux/msm_dma_iommu_mapping.h
+++ b/include/linux/msm_dma_iommu_mapping.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,21 @@
 		   enum dma_data_direction dir, struct dma_buf *dma_buf,
 		   unsigned long attrs);
 
+/*
+ * This function takes an extra reference to the dma_buf.
+ * What this means is that calling msm_dma_unmap_sg will not result in buffer's
+ * iommu mapping being removed, which means that subsequent calls to lazy map
+ * will simply re-use the existing iommu mapping.
+ * The iommu unmapping of the buffer will occur when the ION buffer is
+ * destroyed.
+ * Using lazy mapping can provide a performance benefit because subsequent
+ * mappings are faster.
+ *
+ * The limitations of using this API are that all subsequent iommu mappings
+ * must be the same as the original mapping, i.e. they must map the same part
+ * of the buffer with the same dma data direction. Also there can't be
+ * multiple mappings of different parts of the buffer.
+ */
 static inline int msm_dma_map_sg_lazy(struct device *dev,
 			       struct scatterlist *sg, int nents,
 			       enum dma_data_direction dir,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6e38683..33440fa 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -717,6 +717,9 @@
 #endif
 
 	struct list_head		sb_list;
+
+	/* Is this event shared with other events */
+	bool					shared;
 #endif /* CONFIG_PERF_EVENTS */
 };
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 73da337..63ce902 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4231,19 +4231,6 @@
 }
 
 /**
- * ieee80211_data_from_8023 - convert an 802.3 frame to 802.11
- * @skb: the 802.3 frame
- * @addr: the device MAC address
- * @iftype: the virtual interface type
- * @bssid: the network bssid (used only for iftype STATION and ADHOC)
- * @qos: build 802.11 QoS data frame
- * Return: 0 on success, or a negative error code.
- */
-int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
-			     enum nl80211_iftype iftype, const u8 *bssid,
-			     bool qos);
-
-/**
  * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
  *
  * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index e58a522..4a7b0d6 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -13,10 +13,15 @@
 #define _ICNSS_WLAN_H_
 
 #include <linux/interrupt.h>
+#include <linux/device.h>
 
 #define ICNSS_MAX_IRQ_REGISTRATIONS    12
 #define ICNSS_MAX_TIMESTAMP_LEN        32
 
+#ifndef ICNSS_API_WITH_DEV
+#define ICNSS_API_WITH_DEV
+#endif
+
 enum icnss_uevent {
 	ICNSS_UEVENT_FW_READY,
 	ICNSS_UEVENT_FW_CRASHED,
@@ -34,6 +39,8 @@
 
 struct icnss_driver_ops {
 	char *name;
+	unsigned long drv_state;
+	struct device_driver driver;
 	int (*probe)(struct device *dev);
 	void (*remove)(struct device *dev);
 	void (*shutdown)(struct device *dev);
@@ -99,35 +106,41 @@
 	char fw_build_timestamp[ICNSS_MAX_TIMESTAMP_LEN + 1];
 };
 
-extern int icnss_register_driver(struct icnss_driver_ops *driver);
-extern int icnss_unregister_driver(struct icnss_driver_ops *driver);
-extern int icnss_wlan_enable(struct icnss_wlan_enable_cfg *config,
+#define icnss_register_driver(ops)		\
+	__icnss_register_driver(ops, THIS_MODULE, KBUILD_MODNAME)
+extern int __icnss_register_driver(struct icnss_driver_ops *ops,
+				   struct module *owner, const char *mod_name);
+
+extern int icnss_unregister_driver(struct icnss_driver_ops *ops);
+
+extern int icnss_wlan_enable(struct device *dev,
+			     struct icnss_wlan_enable_cfg *config,
 			     enum icnss_driver_mode mode,
 			     const char *host_version);
-extern int icnss_wlan_disable(enum icnss_driver_mode mode);
-extern void icnss_enable_irq(unsigned int ce_id);
-extern void icnss_disable_irq(unsigned int ce_id);
-extern int icnss_get_soc_info(struct icnss_soc_info *info);
-extern int icnss_ce_free_irq(unsigned int ce_id, void *ctx);
-extern int icnss_ce_request_irq(unsigned int ce_id,
+extern int icnss_wlan_disable(struct device *dev, enum icnss_driver_mode mode);
+extern void icnss_enable_irq(struct device *dev, unsigned int ce_id);
+extern void icnss_disable_irq(struct device *dev, unsigned int ce_id);
+extern int icnss_get_soc_info(struct device *dev, struct icnss_soc_info *info);
+extern int icnss_ce_free_irq(struct device *dev, unsigned int ce_id, void *ctx);
+extern int icnss_ce_request_irq(struct device *dev, unsigned int ce_id,
 	irqreturn_t (*handler)(int, void *),
 	unsigned long flags, const char *name, void *ctx);
-extern int icnss_get_ce_id(int irq);
-extern int icnss_set_fw_log_mode(uint8_t fw_log_mode);
+extern int icnss_get_ce_id(struct device *dev, int irq);
+extern int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode);
 extern int icnss_athdiag_read(struct device *dev, uint32_t offset,
 			      uint32_t mem_type, uint32_t data_len,
 			      uint8_t *output);
 extern int icnss_athdiag_write(struct device *dev, uint32_t offset,
 			       uint32_t mem_type, uint32_t data_len,
 			       uint8_t *input);
-extern int icnss_get_irq(int ce_id);
+extern int icnss_get_irq(struct device *dev, int ce_id);
 extern int icnss_power_on(struct device *dev);
 extern int icnss_power_off(struct device *dev);
 extern struct dma_iommu_mapping *icnss_smmu_get_mapping(struct device *dev);
 extern int icnss_smmu_map(struct device *dev, phys_addr_t paddr,
 			  uint32_t *iova_addr, size_t size);
 extern unsigned int icnss_socinfo_get_serial_number(struct device *dev);
-extern bool icnss_is_qmi_disable(void);
+extern bool icnss_is_qmi_disable(struct device *dev);
 extern bool icnss_is_fw_ready(void);
 extern int icnss_trigger_recovery(struct device *dev);
 #endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/rpm-notifier.h b/include/soc/qcom/rpm-notifier.h
new file mode 100644
index 0000000..22e1e26
--- /dev/null
+++ b/include/soc/qcom/rpm-notifier.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+
+struct msm_rpm_notifier_data {
+	uint32_t rsc_type;
+	uint32_t rsc_id;
+	uint32_t key;
+	uint32_t size;
+	uint8_t *value;
+};
+/**
+ * msm_rpm_register_notifier - Register for sleep set notifications
+ *
+ * @nb - notifier block to register
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister previously registered notifications
+ *
+ * @nb - notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ *
+ * @print - flag to enable printing the contents of the sleep buffer.
+ * @cpumask - cpumask of next wakeup cpu
+ *
+ * return 0 on success errno on failure.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
+
+/**
+ * msm_rpm_waiting_for_ack - Indicate if there is RPM message
+ *				pending acknowledgment.
+ * returns true for pending messages and false otherwise
+ */
+bool msm_rpm_waiting_for_ack(void);
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff --git a/include/soc/qcom/rpm-smd.h b/include/soc/qcom/rpm-smd.h
new file mode 100644
index 0000000..c84d02d
--- /dev/null
+++ b/include/soc/qcom/rpm-smd.h
@@ -0,0 +1,308 @@
+/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+	MSM_RPM_CTX_ACTIVE_SET,
+	MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+	uint32_t key;
+	uint32_t length;
+	uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_create_request() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVPs pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVPs pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a key-value pair to an existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key:  unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size:   size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a key-value pair to an existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key:  unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size:   size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int size);
+
+/** msm_rpm_free_request() - clean up the RPM request handle created with
+ * msm_rpm_create_request
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noack() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages, but this API does not wait
+ * on the ACK for this message id and it does not add the message id to the wait
+ * list.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns NULL on success and PTR_ERR on a failed transaction.
+ */
+void *msm_rpm_send_request_noack(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() -Wrapper function for clients to send data given an
+ * array of key value pairs.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelem: number of KVPs pairs associated with the message.
+ *
+ * returns  0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noack() -Wrapper function for clients to send data
+ * given an array of key value pairs without waiting for ack.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelem: number of KVPs pairs associated with the message.
+ *
+ * returns  NULL on success and PTR_ERR(errno) on failure.
+ */
+void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noirq() -Wrapper function for clients to send data
+ * given an array of key value pairs. This function is similar to the
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should choose the irq version when possible for system
+ * performance.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelem: number of KVPs pairs associated with the message.
+ *
+ * returns  0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers for a
+ * rpm platform driver.
+ *
+ * returns 0 on success.
+ */
+int __init msm_rpm_driver_init(void);
+
+#else
+
+static inline struct msm_rpm_request *msm_rpm_create_request(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return NULL;
+}
+
+static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
+		enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, int num_elements)
+{
+	return NULL;
+
+}
+static inline uint32_t msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+		uint32_t key, const uint8_t *data, int count)
+{
+	return 0;
+}
+static inline uint32_t msm_rpm_add_kvp_data_noirq(
+		struct msm_rpm_request *handle, uint32_t key,
+		const uint8_t *data, int count)
+{
+	return 0;
+}
+
+static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+}
+
+static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+	return 0;
+}
+
+static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+	return 0;
+
+}
+
+static inline void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
+{
+	return NULL;
+}
+
+static inline int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	return 0;
+}
+
+static inline int msm_rpm_send_message_noirq(enum msm_rpm_set set,
+		uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+		int nelems)
+{
+	return 0;
+}
+
+static inline void *msm_rpm_send_message_noack(enum msm_rpm_set set,
+		uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+		int nelems)
+{
+	return NULL;
+}
+
+static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+	return 0;
+
+}
+static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+	return 0;
+}
+
+static inline int __init msm_rpm_driver_init(void)
+{
+	return 0;
+}
+#endif
+#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
index ac8b2eb..63698cf 100644
--- a/include/soc/qcom/scm.h
+++ b/include/soc/qcom/scm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -229,7 +229,7 @@
 	return 0;
 }
 
-inline bool scm_is_secure_device(void)
+static inline bool scm_is_secure_device(void)
 {
 	return false;
 }
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 255e228..4be890a 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -201,7 +201,7 @@
 	TP_ARGS(dev, iova, flags)
 );
 
-DECLARE_EVENT_CLASS(iommu_errata_tlbi,
+DECLARE_EVENT_CLASS(iommu_tlbi,
 
 	TP_PROTO(struct device *dev, u64 time),
 
@@ -222,35 +222,35 @@
 	)
 );
 
-DEFINE_EVENT(iommu_errata_tlbi, errata_tlbi_start,
+DEFINE_EVENT(iommu_tlbi, tlbi_start,
 
 	TP_PROTO(struct device *dev, u64 time),
 
 	TP_ARGS(dev, time)
 );
 
-DEFINE_EVENT(iommu_errata_tlbi, errata_tlbi_end,
+DEFINE_EVENT(iommu_tlbi, tlbi_end,
 
 	TP_PROTO(struct device *dev, u64 time),
 
 	TP_ARGS(dev, time)
 );
 
-DEFINE_EVENT(iommu_errata_tlbi, errata_throttle_start,
+DEFINE_EVENT(iommu_tlbi, tlbi_throttle_start,
 
 	TP_PROTO(struct device *dev, u64 time),
 
 	TP_ARGS(dev, time)
 );
 
-DEFINE_EVENT(iommu_errata_tlbi, errata_throttle_end,
+DEFINE_EVENT(iommu_tlbi, tlbi_throttle_end,
 
 	TP_PROTO(struct device *dev, u64 time),
 
 	TP_ARGS(dev, time)
 );
 
-DEFINE_EVENT(iommu_errata_tlbi, errata_failed,
+DEFINE_EVENT(iommu_tlbi, tlbsync_timeout,
 
 	TP_PROTO(struct device *dev, u64 time),
 
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index a3b01c6..8dc7ad5 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -786,6 +786,11 @@
 	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
 	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
 );
+
+DEFINE_EVENT(sched_task_util, sched_task_util_need_idle,
+	TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle),
+	TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle)
+);
 #endif
 
 /*
diff --git a/include/trace/events/trace_rpm_smd.h b/include/trace/events/trace_rpm_smd.h
new file mode 100644
index 0000000..1afc06b
--- /dev/null
+++ b/include/trace/events/trace_rpm_smd.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2012, 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm_smd
+
+#if !defined(_TRACE_RPM_SMD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPM_SMD_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rpm_smd_ack_recvd,
+
+	TP_PROTO(unsigned int irq, unsigned int msg_id, int errno),
+
+	TP_ARGS(irq, msg_id, errno),
+
+	TP_STRUCT__entry(
+		__field(int, irq)
+		__field(int, msg_id)
+		__field(int, errno)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+		__entry->msg_id = msg_id;
+		__entry->errno = errno;
+	),
+
+	TP_printk("ctx:%s msg_id:%d errno:%08x",
+		__entry->irq ? "noslp" : "sleep",
+		__entry->msg_id,
+		__entry->errno)
+);
+
+TRACE_EVENT(rpm_smd_interrupt_notify,
+
+	TP_PROTO(char *dummy),
+
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(char *, dummy)
+	),
+
+	TP_fast_assign(
+		__entry->dummy = dummy;
+	),
+
+	TP_printk("%s", __entry->dummy)
+);
+
+DECLARE_EVENT_CLASS(rpm_send_msg,
+
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+
+	TP_ARGS(msg_id, rsc_type, rsc_id),
+
+	TP_STRUCT__entry(
+		__field(u32, msg_id)
+		__field(u32, rsc_type)
+		__field(u32, rsc_id)
+		__array(char, name, 5)
+	),
+
+	TP_fast_assign(
+		__entry->msg_id = msg_id;
+		__entry->name[4] = 0;
+		__entry->rsc_type = rsc_type;
+		__entry->rsc_id = rsc_id;
+		memcpy(__entry->name, &rsc_type, sizeof(uint32_t));
+
+	),
+
+	TP_printk("msg_id:%d, rsc_type:0x%08x(%s), rsc_id:0x%08x",
+			__entry->msg_id,
+			__entry->rsc_type, __entry->name,
+			__entry->rsc_id)
+);
+
+DEFINE_EVENT(rpm_send_msg, rpm_smd_sleep_set,
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+	TP_ARGS(msg_id, rsc_type, rsc_id)
+);
+
+DEFINE_EVENT(rpm_send_msg, rpm_smd_send_sleep_set,
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+	TP_ARGS(msg_id, rsc_type, rsc_id)
+);
+
+DEFINE_EVENT(rpm_send_msg, rpm_smd_send_active_set,
+	TP_PROTO(unsigned int msg_id, unsigned int rsc_type,
+		unsigned int rsc_id),
+	TP_ARGS(msg_id, rsc_type, rsc_id)
+);
+
+#endif
+#define TRACE_INCLUDE_FILE trace_rpm_smd
+#include <trace/define_trace.h>
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 6f33a4a..d0341cd 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -69,6 +69,12 @@
 
 #define HDR_PRIMARIES_COUNT   3
 
+/* HDR EOTF */
+#define HDR_EOTF_SDR_LUM_RANGE	0x0
+#define HDR_EOTF_HDR_LUM_RANGE	0x1
+#define HDR_EOTF_SMTPE_ST2084	0x2
+#define HDR_EOTF_HLG		0x3
+
 #define DRM_MSM_EXT_HDR_METADATA
 struct drm_msm_ext_hdr_metadata {
 	__u32 hdr_state;        /* HDR state */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 233d84e..6846b8f 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -49,6 +49,10 @@
 #define CAM_REQ_MGR_SOF_EVENT_SUCCESS           0
 #define CAM_REQ_MGR_SOF_EVENT_ERROR             1
 
+/* Link control operations */
+#define CAM_REQ_MGR_LINK_ACTIVATE               0
+#define CAM_REQ_MGR_LINK_DEACTIVATE             1
+
 /**
  * Request Manager : flush_type
  * @CAM_REQ_MGR_FLUSH_TYPE_ALL: Req mgr will remove all the pending
@@ -63,6 +67,15 @@
 #define CAM_REQ_MGR_FLUSH_TYPE_MAX          2
 
 /**
+ * Request Manager : Sync Mode type
+ * @CAM_REQ_MGR_SYNC_MODE_NO_SYNC: Req mgr will apply non-sync mode for this
+ * request.
+ * @CAM_REQ_MGR_SYNC_MODE_SYNC: Req mgr will apply sync mode for this request.
+ */
+#define CAM_REQ_MGR_SYNC_MODE_NO_SYNC   0
+#define CAM_REQ_MGR_SYNC_MODE_SYNC      1
+
+/**
  * struct cam_req_mgr_event_data
  * @session_hdl: session handle
  * @link_hdl: link handle
@@ -148,33 +161,35 @@
  * inluding itself.
  * @bubble_enable: Input Param - Cam req mgr will do bubble recovery if this
  * flag is set.
- * @reserved: reserved field for alignment
+ * @sync_mode: Type of Sync mode for this request
  * @req_id: Input Param - Request Id from which all requests will be flushed
  */
 struct cam_req_mgr_sched_request {
 	int32_t session_hdl;
 	int32_t link_hdl;
 	int32_t bubble_enable;
-	int32_t reserved;
+	int32_t sync_mode;
 	int64_t req_id;
 };
 
 /**
  * struct cam_req_mgr_sync_mode
  * @session_hdl:         Input param - Identifier for CSL session
- * @sync_enable:         Input Param -Enable sync mode or disable
+ * @sync_mode:           Input Param - Type of sync mode
  * @num_links:           Input Param - Num of links in sync mode (Valid only
- *                             when sync_enable is TRUE)
+ *                             when sync_mode is one of SYNC enabled modes)
  * @link_hdls:           Input Param - Array of link handles to be in sync mode
- *                             (Valid only when sync_enable is TRUE)
+ *                             (Valid only when sync_mode is one of SYNC
+ *                             enabled modes)
  * @master_link_hdl:     Input Param - To dictate which link's SOF drives system
- *                             (Valid only when sync_enable is TRUE)
+ *                             (Valid only when sync_mode is one of SYNC
+ *                             enabled modes)
  *
  * @opcode: CAM_REQ_MGR_SYNC_MODE
  */
 struct cam_req_mgr_sync_mode {
 	int32_t session_hdl;
-	int32_t sync_enable;
+	int32_t sync_mode;
 	int32_t num_links;
 	int32_t link_hdls[MAX_LINKS_PER_SESSION];
 	int32_t master_link_hdl;
@@ -182,6 +197,24 @@
 };
 
 /**
+ * struct cam_req_mgr_link_control
+ * @ops:                 Link operations: activate/deactivate
+ * @session_hdl:         Input param - Identifier for CSL session
+ * @num_links:           Input Param - Num of links
+ * @reserved:            reserved field
+ * @link_hdls:           Input Param - Links to be activated/deactivated
+ *
+ * @opcode: CAM_REQ_MGR_LINK_CONTROL
+ */
+struct cam_req_mgr_link_control {
+	int32_t ops;
+	int32_t session_hdl;
+	int32_t num_links;
+	int32_t reserved;
+	int32_t link_hdls[MAX_LINKS_PER_SESSION];
+};
+
+/**
  * cam_req_mgr specific opcode ids
  */
 #define CAM_REQ_MGR_CREATE_DEV_NODES            (CAM_COMMON_OPCODE_MAX + 1)
@@ -196,6 +229,7 @@
 #define CAM_REQ_MGR_MAP_BUF                     (CAM_COMMON_OPCODE_MAX + 10)
 #define CAM_REQ_MGR_RELEASE_BUF                 (CAM_COMMON_OPCODE_MAX + 11)
 #define CAM_REQ_MGR_CACHE_OPS                   (CAM_COMMON_OPCODE_MAX + 12)
+#define CAM_REQ_MGR_LINK_CONTROL                (CAM_COMMON_OPCODE_MAX + 13)
 /* end of cam_req_mgr opcodes */
 
 #define CAM_MEM_FLAG_HW_READ_WRITE              (1<<0)
diff --git a/init/Kconfig b/init/Kconfig
index 7b3006a..d4a2e32 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1869,6 +1869,16 @@
 
 	  Say Y if unsure.
 
+config PERF_USER_SHARE
+	bool "Perf event sharing with user-space"
+	help
+	  Say yes here to enable the user-space sharing of events. The events
+	  can be shared among other user-space events or with kernel created
+	  events that have the same config and type event attributes.
+
+	  Say N if unsure.
+
+
 config DEBUG_PERF_USE_VMALLOC
 	default n
 	bool "Debug: use vmalloc to back perf mmap() buffers"
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b784662..ed27a8c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1747,6 +1747,10 @@
 	if (event->group_leader != event) {
 		list_del_init(&event->group_entry);
 		event->group_leader->nr_siblings--;
+
+		if (event->shared)
+			event->group_leader = event;
+
 		goto out;
 	}
 
@@ -4280,15 +4284,23 @@
 
 	if (!is_kernel_event(event)) {
 		perf_remove_from_owner(event);
-	} else {
-		if (perf_event_delete_kernel_shared(event) > 0)
-			return 0;
 	}
 
 	ctx = perf_event_ctx_lock(event);
 	WARN_ON_ONCE(ctx->parent_ctx);
 	perf_remove_from_context(event, DETACH_GROUP);
 
+	if (perf_event_delete_kernel_shared(event) > 0) {
+		perf_event__state_init(event);
+		perf_install_in_context(ctx, event, event->cpu);
+
+		perf_event_ctx_unlock(event, ctx);
+
+		perf_event_enable(event);
+
+		return 0;
+	}
+
 	raw_spin_lock_irq(&ctx->lock);
 	/*
 	 * Mark this even as STATE_DEAD, there is no external reference to it
@@ -9218,6 +9230,122 @@
 	account_pmu_sb_event(event);
 }
 
+static struct perf_event *
+perf_event_create_kernel_shared_check(struct perf_event_attr *attr, int cpu,
+		struct task_struct *task,
+		perf_overflow_handler_t overflow_handler,
+		struct perf_event *group_leader)
+{
+	unsigned long idx;
+	struct perf_event *event;
+	struct shared_events_str *shrd_events;
+
+	/*
+	 * Have to be per cpu events for sharing
+	 */
+	if (!shared_events || (u32)cpu >= nr_cpu_ids)
+		return NULL;
+
+	/*
+	 * Can't handle these type requests for sharing right now.
+	 */
+	if (task || overflow_handler || attr->sample_period ||
+	    (attr->type != PERF_TYPE_HARDWARE &&
+	     attr->type != PERF_TYPE_RAW)) {
+		return NULL;
+	}
+
+	/*
+	 * Using per_cpu_ptr (or could do cross cpu call which is what most of
+	 * perf does to access per cpu data structures)
+	 */
+	shrd_events = per_cpu_ptr(shared_events, cpu);
+
+	mutex_lock(&shrd_events->list_mutex);
+
+	event = NULL;
+	for_each_set_bit(idx, shrd_events->used_mask, SHARED_EVENTS_MAX) {
+		/* Do the comparisons field by field on the attr structure.
+		 * This is because the user-space and kernel-space might
+		 * be using different versions of perf. As a result,
+		 * the fields' position in the memory and the size might not
+		 * be the same. Hence memcmp() is not the best way to
+		 * compare.
+		 */
+		if (attr->type == shrd_events->attr[idx].type &&
+			attr->config == shrd_events->attr[idx].config) {
+
+			event = shrd_events->events[idx];
+
+			/* Do not change the group for this shared event */
+			if (group_leader && event->group_leader != event) {
+				event = NULL;
+				continue;
+			}
+
+			event->shared = true;
+			atomic_inc(&shrd_events->refcount[idx]);
+			break;
+		}
+	}
+	mutex_unlock(&shrd_events->list_mutex);
+
+	return event;
+}
+
+static void
+perf_event_create_kernel_shared_add(struct perf_event_attr *attr, int cpu,
+				 struct task_struct *task,
+				 perf_overflow_handler_t overflow_handler,
+				 void *context,
+				 struct perf_event *event)
+{
+	unsigned long idx;
+	struct shared_events_str *shrd_events;
+
+	/*
+	 * Have to be per cpu events for sharing
+	 */
+	if (!shared_events || (u32)cpu >= nr_cpu_ids)
+		return;
+
+	/*
+	 * Can't handle these type requests for sharing right now.
+	 */
+	if (overflow_handler || attr->sample_period ||
+	    (attr->type != PERF_TYPE_HARDWARE &&
+	     attr->type != PERF_TYPE_RAW)) {
+		return;
+	}
+
+	/*
+	 * Using per_cpu_ptr (or could do cross cpu call which is what most of
+	 * perf does to access per cpu data structures)
+	 */
+	shrd_events = per_cpu_ptr(shared_events, cpu);
+
+	mutex_lock(&shrd_events->list_mutex);
+
+	/*
+	 * If we are in this routine, we know that this event isn't already in
+	 * the shared list. Check if a slot is available in the shared list.
+	 */
+	idx = find_first_zero_bit(shrd_events->used_mask, SHARED_EVENTS_MAX);
+
+	if (idx >= SHARED_EVENTS_MAX)
+		goto out;
+
+	/*
+	 * The event isn't in the list and there is an empty slot so add it.
+	 */
+	shrd_events->attr[idx]   = *attr;
+	shrd_events->events[idx] = event;
+	set_bit(idx, shrd_events->used_mask);
+	atomic_set(&shrd_events->refcount[idx], 1);
+out:
+	mutex_unlock(&shrd_events->list_mutex);
+}
+
 /*
  * Allocate and initialize a event structure
  */
@@ -9684,6 +9812,31 @@
 	return gctx;
 }
 
+#ifdef CONFIG_PERF_USER_SHARE
+static void perf_group_shared_event(struct perf_event *event,
+		struct perf_event *group_leader)
+{
+	if (!event->shared || !group_leader)
+		return;
+
+	/* Do not attempt to change the group for this shared event */
+	if (event->group_leader != event)
+		return;
+
+	/*
+	 * Single events have the group leaders as themselves.
+	 * As we now have a new group to attach to, remove from
+	 * the previous group and attach it to the new group.
+	 */
+	perf_remove_from_context(event, DETACH_GROUP);
+
+	event->group_leader	= group_leader;
+	perf_event__state_init(event);
+
+	perf_install_in_context(group_leader->ctx, event, event->cpu);
+}
+#endif
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -9697,7 +9850,7 @@
 		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
 	struct perf_event *group_leader = NULL, *output_event = NULL;
-	struct perf_event *event, *sibling;
+	struct perf_event *event = NULL, *sibling;
 	struct perf_event_attr attr;
 	struct perf_event_context *ctx, *uninitialized_var(gctx);
 	struct file *event_file = NULL;
@@ -9811,11 +9964,17 @@
 	if (flags & PERF_FLAG_PID_CGROUP)
 		cgroup_fd = pid;
 
-	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
-				 NULL, NULL, cgroup_fd);
-	if (IS_ERR(event)) {
-		err = PTR_ERR(event);
-		goto err_cred;
+#ifdef CONFIG_PERF_USER_SHARE
+	event = perf_event_create_kernel_shared_check(&attr, cpu, task, NULL,
+			group_leader);
+#endif
+	if (!event) {
+		event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
+					 NULL, NULL, cgroup_fd);
+		if (IS_ERR(event)) {
+			err = PTR_ERR(event);
+			goto err_cred;
+		}
 	}
 
 	if (is_sampling_event(event)) {
@@ -9982,7 +10141,7 @@
 	 * Must be under the same ctx::mutex as perf_install_in_context(),
 	 * because we need to serialize with concurrent event creation.
 	 */
-	if (!exclusive_event_installable(event, ctx)) {
+	if (!event->shared && !exclusive_event_installable(event, ctx)) {
 		/* exclusive and group stuff are assumed mutually exclusive */
 		WARN_ON_ONCE(move_group);
 
@@ -10059,10 +10218,17 @@
 	perf_event__header_size(event);
 	perf_event__id_header_size(event);
 
-	event->owner = current;
+#ifdef CONFIG_PERF_USER_SHARE
+	if (event->shared && group_leader)
+		perf_group_shared_event(event, group_leader);
+#endif
 
-	perf_install_in_context(ctx, event, event->cpu);
-	perf_unpin_context(ctx);
+	if (!event->shared) {
+		event->owner = current;
+
+		perf_install_in_context(ctx, event, event->cpu);
+		perf_unpin_context(ctx);
+	}
 
 	if (move_group)
 		perf_event_ctx_unlock(group_leader, gctx);
@@ -10077,9 +10243,11 @@
 
 	put_online_cpus();
 
-	mutex_lock(&current->perf_event_mutex);
-	list_add_tail(&event->owner_entry, &current->perf_event_list);
-	mutex_unlock(&current->perf_event_mutex);
+	if (!event->shared) {
+		mutex_lock(&current->perf_event_mutex);
+		list_add_tail(&event->owner_entry, &current->perf_event_list);
+		mutex_unlock(&current->perf_event_mutex);
+	}
 
 	/*
 	 * Drop the reference on the group_event after placing the
@@ -10089,6 +10257,14 @@
 	 */
 	fdput(group);
 	fd_install(event_fd, event_file);
+
+#ifdef CONFIG_PERF_USER_SHARE
+	/* Add the event to the shared events list */
+	if (!event->shared)
+		perf_event_create_kernel_shared_add(&attr, cpu,
+				 task, NULL, ctx, event);
+#endif
+
 	return event_fd;
 
 err_locked:
@@ -10124,102 +10300,6 @@
 	return err;
 }
 
-static struct perf_event *
-perf_event_create_kernel_shared_check(struct perf_event_attr *attr, int cpu,
-				 struct task_struct *task,
-				 perf_overflow_handler_t overflow_handler,
-				 void *context)
-{
-	unsigned long idx;
-	struct perf_event *event;
-	struct shared_events_str *shrd_events;
-
-	/*
-	 * Have to be per cpu events for sharing
-	 */
-	if (!shared_events || (u32)cpu >= nr_cpu_ids)
-		return NULL;
-
-	/*
-	 * Can't handle these type requests for sharing right now.
-	 */
-	if (task || context || overflow_handler ||
-	    (attr->type != PERF_TYPE_HARDWARE &&
-	     attr->type != PERF_TYPE_RAW))
-		return NULL;
-
-	/*
-	 * Using per_cpu_ptr (or could do cross cpu call which is what most of
-	 * perf does to access per cpu data structures
-	 */
-	shrd_events = per_cpu_ptr(shared_events, cpu);
-
-	mutex_lock(&shrd_events->list_mutex);
-
-	event = NULL;
-	for_each_set_bit(idx, shrd_events->used_mask, SHARED_EVENTS_MAX) {
-		if (memcmp(attr, &shrd_events->attr[idx],
-		    sizeof(shrd_events->attr[idx])) == 0) {
-			atomic_inc(&shrd_events->refcount[idx]);
-			event = shrd_events->events[idx];
-			break;
-		}
-	}
-	mutex_unlock(&shrd_events->list_mutex);
-	return event;
-}
-
-static void
-perf_event_create_kernel_shared_add(struct perf_event_attr *attr, int cpu,
-				 struct task_struct *task,
-				 perf_overflow_handler_t overflow_handler,
-				 void *context,
-				 struct perf_event *event)
-{
-	unsigned long idx;
-	struct shared_events_str *shrd_events;
-
-	/*
-	 * Have to be per cpu events for sharing
-	 */
-	if (!shared_events || (u32)cpu >= nr_cpu_ids)
-		return;
-
-	/*
-	 * Can't handle these type requests for sharing right now.
-	 */
-	if (task || context || overflow_handler ||
-	    (attr->type != PERF_TYPE_HARDWARE &&
-	     attr->type != PERF_TYPE_RAW))
-		return;
-
-	/*
-	 * Using per_cpu_ptr (or could do cross cpu call which is what most of
-	 * perf does to access per cpu data structures
-	 */
-	shrd_events = per_cpu_ptr(shared_events, cpu);
-
-	mutex_lock(&shrd_events->list_mutex);
-
-	/*
-	 * If we are in this routine, we know that this event isn't already in
-	 * the shared list. Check if slot available in shared list
-	 */
-	idx = find_first_zero_bit(shrd_events->used_mask, SHARED_EVENTS_MAX);
-
-	if (idx >= SHARED_EVENTS_MAX)
-		goto out;
-
-	/*
-	 * The event isn't in the list and there is an empty slot so add it.
-	 */
-	shrd_events->attr[idx]   = *attr;
-	shrd_events->events[idx] = event;
-	set_bit(idx, shrd_events->used_mask);
-	atomic_set(&shrd_events->refcount[idx], 1);
-out:
-	mutex_unlock(&shrd_events->list_mutex);
-}
 
 /**
  * perf_event_create_kernel_counter
@@ -10238,28 +10318,26 @@
 	struct perf_event *event;
 	int err;
 
-	/*
-	 * Check if the requested attributes match a shared event
-	 */
-	event = perf_event_create_kernel_shared_check(attr, cpu,
-				 task, overflow_handler, context);
-	if (event)
-		return event;
-
-	/*
-	 * Get the target context (task or percpu):
-	 */
-
-	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
-				 overflow_handler, context, -1);
-	if (IS_ERR(event)) {
-		err = PTR_ERR(event);
-		goto err;
+	event = perf_event_create_kernel_shared_check(attr, cpu, task,
+						overflow_handler, NULL);
+	if (!event) {
+		event = perf_event_alloc(attr, cpu, task, NULL, NULL,
+				overflow_handler, context, -1);
+		if (IS_ERR(event)) {
+			err = PTR_ERR(event);
+			goto err;
+		}
 	}
 
 	/* Mark owner so we could distinguish it from user events. */
 	event->owner = TASK_TOMBSTONE;
 
+	if (event->shared)
+		return event;
+
+	/*
+	 * Get the target context (task or percpu):
+	 */
 	ctx = find_get_context(event->pmu, task, event);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c
index 1ccd19d..09ad1f0 100644
--- a/kernel/sched/boost.c
+++ b/kernel/sched/boost.c
@@ -90,7 +90,7 @@
 		return;
 	}
 
-	if (min_possible_efficiency != max_possible_efficiency) {
+	if (sysctl_sched_is_big_little) {
 		boost_policy = SCHED_BOOST_ON_BIG;
 		return;
 	}
diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c
index c32defa..420cb52 100644
--- a/kernel/sched/energy.c
+++ b/kernel/sched/energy.c
@@ -28,6 +28,8 @@
 #include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 
+#include "sched.h"
+
 struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
 
 static void free_resources(void)
@@ -269,6 +271,7 @@
 
 	kfree(max_frequencies);
 
+	walt_sched_energy_populated_callback();
 	dev_info(&pdev->dev, "Sched-energy-costs capacity updated\n");
 	return 0;
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 42be34a..130bbb7 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6746,107 +6746,6 @@
 	return target;
 }
  
-static inline int find_best_target(struct task_struct *p, bool boosted, bool prefer_idle)
-{
-	int iter_cpu;
-	int target_cpu = -1;
-	int target_util = 0;
-	int backup_capacity = 0;
-	int best_idle_cpu = -1;
-	int best_idle_cstate = INT_MAX;
-	int backup_cpu = -1;
-	unsigned long task_util_boosted, new_util;
-
-	task_util_boosted = boosted_task_util(p);
-	for (iter_cpu = 0; iter_cpu < NR_CPUS; iter_cpu++) {
-		int cur_capacity;
-		struct rq *rq;
-		int idle_idx;
-
-		/*
-		 * Iterate from higher cpus for boosted tasks.
-		 */
-		int i = boosted ? NR_CPUS-iter_cpu-1 : iter_cpu;
-
-		if (!cpu_online(i) || !cpumask_test_cpu(i, tsk_cpus_allowed(p)))
-			continue;
-
-		/*
-		 * p's blocked utilization is still accounted for on prev_cpu
-		 * so prev_cpu will receive a negative bias due to the double
-		 * accounting. However, the blocked utilization may be zero.
-		 */
-		new_util = cpu_util(i) + task_util_boosted;
-
-		/*
-		 * Ensure minimum capacity to grant the required boost.
-		 * The target CPU can be already at a capacity level higher
-		 * than the one required to boost the task.
-		 */
-		if (new_util > capacity_orig_of(i))
-			continue;
-
-#ifdef CONFIG_SCHED_WALT
-		if (sched_cpu_high_irqload(i))
-			continue;
-#endif
-		/*
-		 * Unconditionally favoring tasks that prefer idle cpus to
-		 * improve latency.
-		 */
-		if (idle_cpu(i) && prefer_idle) {
-			if (best_idle_cpu < 0)
-				best_idle_cpu = i;
-			continue;
-		}
-
-		cur_capacity = capacity_curr_of(i);
-		rq = cpu_rq(i);
-		idle_idx = idle_get_state_idx(rq);
-
-		if (new_util < cur_capacity) {
-			if (cpu_rq(i)->nr_running) {
-				if(prefer_idle) {
-					// Find a target cpu with lowest
-					// utilization.
-					if (target_util == 0 ||
-						target_util < new_util) {
-						target_cpu = i;
-						target_util = new_util;
-					}
-				} else {
-					// Find a target cpu with highest
-					// utilization.
-					if (target_util == 0 ||
-						target_util > new_util) {
-						target_cpu = i;
-						target_util = new_util;
-					}
-				}
-			} else if (!prefer_idle) {
-				if (best_idle_cpu < 0 ||
-					(sysctl_sched_cstate_aware &&
-						best_idle_cstate > idle_idx)) {
-					best_idle_cstate = idle_idx;
-					best_idle_cpu = i;
-				}
-			}
-		} else if (backup_capacity == 0 ||
-				backup_capacity > cur_capacity) {
-			// Find a backup cpu with least capacity.
-			backup_capacity = cur_capacity;
-			backup_cpu = i;
-		}
-	}
-
-	if (prefer_idle && best_idle_cpu >= 0)
-		target_cpu = best_idle_cpu;
-	else if (target_cpu < 0)
-		target_cpu = best_idle_cpu >= 0 ? best_idle_cpu : backup_cpu;
-
-	return target_cpu;
-}
-
 /*
  * Should task be woken to any available idle cpu?
  *
@@ -6920,12 +6819,14 @@
 	return cpu_cap_idx_pack == cpu_cap_idx_spread;
 }
 
+unsigned int sched_smp_overlap_capacity = SCHED_CAPACITY_SCALE;
+
 static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync)
 {
 	struct sched_domain *sd;
-	struct sched_group *sg, *sg_target;
+	struct sched_group *sg, *sg_target, *start_sg;
 	int target_max_cap = INT_MAX;
-	int target_cpu, targeted_cpus = 0;
+	int target_cpu = -1, targeted_cpus = 0;
 	unsigned long task_util_boosted = 0, curr_util = 0;
 	long new_util, new_util_cum;
 	int i;
@@ -6949,10 +6850,11 @@
 	struct related_thread_group *grp;
 	cpumask_t search_cpus;
 	int prev_cpu = task_cpu(p);
+	int start_cpu = walt_start_cpu(prev_cpu);
 	bool do_rotate = false;
 	bool avoid_prev_cpu = false;
 
-	sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
+	sd = rcu_dereference(per_cpu(sd_ea, start_cpu));
 
 	if (!sd)
 		return target;
@@ -6977,9 +6879,8 @@
 		return cpu;
 	}
 
+	task_util_boosted = boosted_task_util(p);
 	if (sysctl_sched_is_big_little) {
-		task_util_boosted = boosted_task_util(p);
-
 		/*
 		 * Find group with sufficient capacity. We only get here if no cpu is
 		 * overutilized. We may end up overutilizing a cpu by adding the task,
@@ -7032,198 +6933,195 @@
 				target_max_cap = capacity_of(max_cap_cpu);
 			}
 		} while (sg = sg->next, sg != sd->groups);
+	}
 
-		target_cpu = -1;
+	start_sg = sg_target;
+next_sg:
+	cpumask_copy(&search_cpus, tsk_cpus_allowed(p));
+	cpumask_and(&search_cpus, &search_cpus,
+		    sched_group_cpus(sg_target));
 
-		cpumask_copy(&search_cpus, tsk_cpus_allowed(p));
-		cpumask_and(&search_cpus, &search_cpus,
-			    sched_group_cpus(sg_target));
-
-		i = find_first_cpu_bit(p, &search_cpus, sg_target,
-				       &avoid_prev_cpu, &do_rotate,
-				       &first_cpu_bit_env);
+	i = find_first_cpu_bit(p, &search_cpus, sg_target,
+			       &avoid_prev_cpu, &do_rotate,
+			       &first_cpu_bit_env);
 
 retry:
-		/* Find cpu with sufficient capacity */
-		while ((i = cpumask_next(i, &search_cpus)) < nr_cpu_ids) {
-			cpumask_clear_cpu(i, &search_cpus);
+	/* Find cpu with sufficient capacity */
+	while ((i = cpumask_next(i, &search_cpus)) < nr_cpu_ids) {
+		cpumask_clear_cpu(i, &search_cpus);
 
-			if (cpu_isolated(i))
-				continue;
+		if (cpu_isolated(i))
+			continue;
 
-			if (isolated_candidate == -1)
-				isolated_candidate = i;
+		if (isolated_candidate == -1)
+			isolated_candidate = i;
 
-			if (avoid_prev_cpu && i == prev_cpu)
-				continue;
+		if (avoid_prev_cpu && i == prev_cpu)
+			continue;
 
-			if (is_reserved(i))
-				continue;
+		if (is_reserved(i))
+			continue;
 
-			if (sched_cpu_high_irqload(i))
-				continue;
+		if (sched_cpu_high_irqload(i))
+			continue;
 
-			/*
-			 * Since this code is inside sched_is_big_little,
-			 * we are going to assume that boost policy is
-			 * SCHED_BOOST_ON_BIG.
-			 */
-			if (placement_boost != SCHED_BOOST_NONE) {
-				new_util = cpu_util(i);
-				if (new_util < min_util) {
-					min_util_cpu = i;
-					min_util = new_util;
-				}
-				continue;
-			}
-
-			/*
-			 * p's blocked utilization is still accounted for on prev_cpu
-			 * so prev_cpu will receive a negative bias due to the double
-			 * accounting. However, the blocked utilization may be zero.
-			 */
-			new_util = cpu_util(i) + task_util_boosted;
-
-			if (task_in_cum_window_demand(cpu_rq(i), p))
-				new_util_cum = cpu_util_cum(i, 0) +
-					       task_util_boosted - task_util(p);
-			else
-				new_util_cum = cpu_util_cum(i, 0) +
-					       task_util_boosted;
-
-			if (sync && i == cpu)
-				new_util -= curr_util;
-
-			trace_sched_cpu_util(p, i, task_util_boosted, curr_util,
-					     new_util_cum, sync);
-
-			/*
-			 * Ensure minimum capacity to grant the required boost.
-			 * The target CPU can be already at a capacity level higher
-			 * than the one required to boost the task.
-			 */
-			if (new_util > capacity_orig_of(i))
-				continue;
-
-			cpu_idle_idx = idle_get_state_idx(cpu_rq(i));
-
-			if (!need_idle &&
-			    add_capacity_margin(new_util_cum, i) <
-			    capacity_curr_of(i)) {
-				if (sysctl_sched_cstate_aware) {
-					if (cpu_idle_idx < min_idle_idx) {
-						min_idle_idx = cpu_idle_idx;
-						min_idle_idx_cpu = i;
-						target_cpu = i;
-						target_cpu_util = new_util;
-						target_cpu_new_util_cum =
-						    new_util_cum;
-						targeted_cpus = 1;
-					} else if (cpu_idle_idx ==
-						   min_idle_idx &&
-						   (target_cpu_util >
-						    new_util ||
-						    (target_cpu_util ==
-						     new_util &&
-						     (i == prev_cpu ||
-						      (target_cpu !=
-						       prev_cpu &&
-						       target_cpu_new_util_cum >
-						       new_util_cum))))) {
-						min_idle_idx_cpu = i;
-						target_cpu = i;
-						target_cpu_util = new_util;
-						target_cpu_new_util_cum =
-						    new_util_cum;
-						targeted_cpus++;
-					}
-				} else if (cpu_rq(i)->nr_running) {
-					target_cpu = i;
-					do_rotate = false;
-					break;
-				}
-			} else if (!need_idle) {
-				/*
-				 * At least one CPU other than target_cpu is
-				 * going to raise CPU's OPP higher than current
-				 * because current CPU util is more than current
-				 * capacity + margin.  We can safely do task
-				 * packing without worrying about doing such
-				 * itself raises OPP.
-				 */
-				safe_to_pack = true;
-			}
-
-			/*
-			 * cpu has capacity at higher OPP, keep it as
-			 * fallback.
-			 */
+		/*
+		 * Since this code is inside sched_is_big_little,
+		 * we are going to assume that boost policy is
+		 * SCHED_BOOST_ON_BIG.
+		 */
+		if (placement_boost != SCHED_BOOST_NONE) {
+			new_util = cpu_util(i);
 			if (new_util < min_util) {
 				min_util_cpu = i;
 				min_util = new_util;
+			}
+			continue;
+		}
+
+		/*
+		 * p's blocked utilization is still accounted for on prev_cpu
+		 * so prev_cpu will receive a negative bias due to the double
+		 * accounting. However, the blocked utilization may be zero.
+		 */
+		new_util = cpu_util(i) + task_util_boosted;
+
+		if (task_in_cum_window_demand(cpu_rq(i), p))
+			new_util_cum = cpu_util_cum(i, 0) +
+				       task_util_boosted - task_util(p);
+		else
+			new_util_cum = cpu_util_cum(i, 0) +
+				       task_util_boosted;
+
+		if (sync && i == cpu)
+			new_util -= curr_util;
+
+		trace_sched_cpu_util(p, i, task_util_boosted, curr_util,
+				     new_util_cum, sync);
+
+		/*
+		 * Ensure minimum capacity to grant the required boost.
+		 * The target CPU can be already at a capacity level higher
+		 * than the one required to boost the task.
+		 */
+		if (new_util > capacity_orig_of(i))
+			continue;
+
+		cpu_idle_idx = idle_get_state_idx(cpu_rq(i));
+
+		if (!need_idle &&
+		    add_capacity_margin(new_util_cum, i) <
+		    capacity_curr_of(i)) {
+			if (sysctl_sched_cstate_aware) {
+				if (cpu_idle_idx < min_idle_idx) {
+					min_idle_idx = cpu_idle_idx;
+					min_idle_idx_cpu = i;
+					target_cpu = i;
+					target_cpu_util = new_util;
+					target_cpu_new_util_cum =
+					    new_util_cum;
+					targeted_cpus = 1;
+				} else if (cpu_idle_idx ==
+					   min_idle_idx &&
+					   (target_cpu_util >
+					    new_util ||
+					    (target_cpu_util ==
+					     new_util &&
+					     (i == prev_cpu ||
+					      (target_cpu !=
+					       prev_cpu &&
+					       target_cpu_new_util_cum >
+					       new_util_cum))))) {
+					min_idle_idx_cpu = i;
+					target_cpu = i;
+					target_cpu_util = new_util;
+					target_cpu_new_util_cum =
+					    new_util_cum;
+					targeted_cpus++;
+				}
+			} else if (cpu_rq(i)->nr_running) {
+				target_cpu = i;
+				do_rotate = false;
+				break;
+			}
+		} else if (!need_idle) {
+			/*
+			 * At least one CPU other than target_cpu is
+			 * going to raise CPU's OPP higher than current
+			 * because current CPU util is more than current
+			 * capacity + margin.  We can safely do task
+			 * packing without worrying about doing such
+			 * itself raises OPP.
+			 */
+			safe_to_pack = true;
+		}
+
+		/*
+		 * cpu has capacity at higher OPP, keep it as
+		 * fallback.
+		 */
+		if (new_util < min_util) {
+			min_util_cpu = i;
+			min_util = new_util;
+			min_util_cpu_idle_idx = cpu_idle_idx;
+			min_util_cpu_util_cum = new_util_cum;
+		} else if (sysctl_sched_cstate_aware &&
+			   min_util == new_util) {
+			if (min_util_cpu == task_cpu(p))
+				continue;
+
+			if (i == task_cpu(p) ||
+			    (cpu_idle_idx < min_util_cpu_idle_idx ||
+			     (cpu_idle_idx == min_util_cpu_idle_idx &&
+			      min_util_cpu_util_cum > new_util_cum))) {
+				min_util_cpu = i;
 				min_util_cpu_idle_idx = cpu_idle_idx;
 				min_util_cpu_util_cum = new_util_cum;
-			} else if (sysctl_sched_cstate_aware &&
-				   min_util == new_util) {
-				if (min_util_cpu == task_cpu(p))
-					continue;
-
-				if (i == task_cpu(p) ||
-				    (cpu_idle_idx < min_util_cpu_idle_idx ||
-				     (cpu_idle_idx == min_util_cpu_idle_idx &&
-				      min_util_cpu_util_cum > new_util_cum))) {
-					min_util_cpu = i;
-					min_util_cpu_idle_idx = cpu_idle_idx;
-					min_util_cpu_util_cum = new_util_cum;
-				}
 			}
 		}
+	}
 
-		if (do_rotate) {
-			/*
-			 * We started iteration somewhere in the middle of
-			 * cpumask.  Iterate once again from bit 0 to the
-			 * previous starting point bit.
-			 */
-			do_rotate = false;
-			i = -1;
-			goto retry;
-		}
-
-		if (target_cpu == -1 ||
-		    (target_cpu != min_util_cpu && !safe_to_pack &&
-		     !is_packing_eligible(p, task_util_boosted, sg_target,
-					  target_cpu_new_util_cum,
-					  targeted_cpus))) {
-			if (likely(min_util_cpu != -1))
-				target_cpu = min_util_cpu;
-			else if (cpu_isolated(task_cpu(p)) &&
-					isolated_candidate != -1)
-				target_cpu = isolated_candidate;
-			else
-				target_cpu = task_cpu(p);
-		}
-	} else {
+	if (do_rotate) {
 		/*
-		 * Find a cpu with sufficient capacity
+		 * We started iteration somewhere in the middle of
+		 * cpumask.  Iterate once again from bit 0 to the
+		 * previous starting point bit.
 		 */
-#ifdef CONFIG_CGROUP_SCHEDTUNE
-		bool boosted = schedtune_task_boost(p) > 0;
-		bool prefer_idle = schedtune_prefer_idle(p) > 0;
-#else
-		bool boosted = 0;
-		bool prefer_idle = 0;
-#endif
-		int tmp_target = find_best_target(p, boosted, prefer_idle);
+		do_rotate = false;
+		i = -1;
+		goto retry;
+	}
 
-		target_cpu = task_cpu(p);
-		if (tmp_target >= 0) {
-			target_cpu = tmp_target;
-			if ((boosted || prefer_idle) && idle_cpu(target_cpu))
-				return target_cpu;
+	/*
+	 * If we don't find a CPU that fits this task without
+	 * increasing OPP above sched_smp_overlap_capacity or
+	 * when placement boost is active, expand the search to
+	 * the other groups on a SMP system.
+	 */
+	if (!sysctl_sched_is_big_little &&
+			(placement_boost == SCHED_BOOST_ON_ALL ||
+			(target_cpu == -1 && min_util_cpu_util_cum >
+					     sched_smp_overlap_capacity))) {
+		if (sg_target->next != start_sg) {
+			sg_target = sg_target->next;
+			goto next_sg;
 		}
 	}
 
+	if (target_cpu == -1 ||
+	    (target_cpu != min_util_cpu && !safe_to_pack &&
+	     !is_packing_eligible(p, task_util_boosted, sg_target,
+				  target_cpu_new_util_cum,
+				  targeted_cpus))) {
+		if (likely(min_util_cpu != -1))
+			target_cpu = min_util_cpu;
+		else if (cpu_isolated(task_cpu(p)) &&
+				isolated_candidate != -1)
+			target_cpu = isolated_candidate;
+		else
+			target_cpu = task_cpu(p);
+	}
+
 	if (target_cpu != task_cpu(p) && !avoid_prev_cpu &&
 	    !cpu_isolated(task_cpu(p))) {
 		struct energy_env eenv = {
@@ -7246,6 +7144,14 @@
 			return target_cpu;
 		}
 
+		if (need_idle) {
+			trace_sched_task_util_need_idle(p, task_cpu(p),
+						task_util(p),
+						target_cpu, target_cpu,
+						0, need_idle);
+			return target_cpu;
+		}
+
 		/*
 		 * We always want to migrate the task to the best CPU when
 		 * placement boost is active.
@@ -9403,7 +9309,7 @@
 	if (energy_aware() && !env->dst_rq->rd->overutilized) {
 		int cpu_local, cpu_busiest;
 		long util_cum;
-		unsigned long capacity_local, capacity_busiest;
+		unsigned long energy_local, energy_busiest;
 
 		if (env->idle != CPU_NEWLY_IDLE)
 			goto out_balanced;
@@ -9414,12 +9320,12 @@
 		cpu_local = group_first_cpu(sds.local);
 		cpu_busiest = group_first_cpu(sds.busiest);
 
-		 /* TODO: don't assume same cap cpus are in same domain */
-		capacity_local = capacity_orig_of(cpu_local);
-		capacity_busiest = capacity_orig_of(cpu_busiest);
-		if (capacity_local > capacity_busiest) {
+		 /* TODO: don't assume same energy cpus are in same domain */
+		energy_local = cpu_max_power_cost(cpu_local);
+		energy_busiest = cpu_max_power_cost(cpu_busiest);
+		if (energy_local > energy_busiest) {
 			goto out_balanced;
-		} else if (capacity_local == capacity_busiest) {
+		} else if (energy_local == energy_busiest) {
 			if (cpu_rq(cpu_busiest)->nr_running < 2)
 				goto out_balanced;
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c97b779..1294950 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1757,7 +1757,9 @@
 	unsigned long tutil = task_util(task);
 	int best_cpu_idle_idx = INT_MAX;
 	int cpu_idle_idx = -1;
-	bool placement_boost;
+	enum sched_boost_policy placement_boost;
+	int prev_cpu = task_cpu(task);
+	int start_cpu = walt_start_cpu(prev_cpu);
 	bool do_rotate = false;
 	bool avoid_prev_cpu = false;
 
@@ -1771,19 +1773,16 @@
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 		return -1; /* No targets found */
 
-	if (energy_aware() && sysctl_sched_is_big_little) {
+	if (energy_aware()) {
 		sg_target = NULL;
 		best_cpu = -1;
 
-		/*
-		 * Since this code is inside sched_is_big_little, we are going
-		 * to assume that boost policy is SCHED_BOOST_ON_BIG
-		 */
-		placement_boost = sched_boost() == FULL_THROTTLE_BOOST;
+		placement_boost = sched_boost() == FULL_THROTTLE_BOOST ?
+				  sched_boost_policy() : SCHED_BOOST_NONE;
 		best_capacity = placement_boost ? 0 : ULONG_MAX;
 
 		rcu_read_lock();
-		sd = rcu_dereference(per_cpu(sd_ea, task_cpu(task)));
+		sd = rcu_dereference(per_cpu(sd_ea, start_cpu));
 		if (!sd) {
 			rcu_read_unlock();
 			goto noea;
@@ -1795,6 +1794,11 @@
 						sched_group_cpus(sg)))
 				continue;
 
+			if (!sysctl_sched_is_big_little) {
+				sg_target = sg;
+				break;
+			}
+
 			cpu = group_first_cpu(sg);
 			cpu_capacity = capacity_orig_of(cpu);
 
@@ -1838,7 +1842,7 @@
 			 */
 			util = cpu_util(cpu);
 
-			if (avoid_prev_cpu && cpu == task_cpu(task))
+			if (avoid_prev_cpu && cpu == prev_cpu)
 				continue;
 
 			if (__cpu_overutilized(cpu, util + tutil))
@@ -1898,12 +1902,13 @@
 			goto retry;
 		}
 
-		if (best_cpu != -1) {
+		if (best_cpu != -1 && placement_boost != SCHED_BOOST_ON_ALL) {
 			return best_cpu;
 		} else if (!cpumask_empty(&backup_search_cpu)) {
 			cpumask_copy(&search_cpu, &backup_search_cpu);
 			cpumask_clear(&backup_search_cpu);
 			cpu = -1;
+			placement_boost = SCHED_BOOST_NONE;
 			goto retry;
 		}
 	}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 494ab14..c85928b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -11,6 +11,7 @@
 #include <linux/irq_work.h>
 #include <linux/tick.h>
 #include <linux/slab.h>
+#include <linux/sched_energy.h>
 
 #include "cpupri.h"
 #include "cpudeadline.h"
@@ -26,6 +27,7 @@
 struct cpuidle_state;
 
 extern __read_mostly bool sched_predl;
+extern unsigned int sched_smp_overlap_capacity;
 
 #ifdef CONFIG_SCHED_WALT
 extern unsigned int sched_ravg_window;
@@ -2708,11 +2710,21 @@
 extern void clear_ed_task(struct task_struct *p, struct rq *rq);
 extern bool early_detection_notify(struct rq *rq, u64 wallclock);
 
-static inline unsigned int power_cost(int cpu, u64 demand)
+static inline unsigned int power_cost(int cpu, bool max)
 {
-	return cpu_max_possible_capacity(cpu);
+	struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];
+
+	if (!sge || !sge->nr_cap_states)
+		return cpu_max_possible_capacity(cpu);
+
+	if (max)
+		return sge->cap_states[sge->nr_cap_states - 1].power;
+	else
+		return sge->cap_states[0].power;
 }
 
+extern void walt_sched_energy_populated_callback(void);
+
 #else	/* CONFIG_SCHED_WALT */
 
 struct walt_sched_stats;
@@ -2804,6 +2816,11 @@
 {
 	return cpu_rq(cpu)->cpu_capacity_orig;
 }
+
+static inline int cpu_max_power_cost(int cpu)
+{
+	return capacity_orig_of(cpu);
+}
 #endif
 
 static inline void clear_walt_request(int cpu) { }
@@ -2829,11 +2846,13 @@
 	return 0;
 }
 
-static inline unsigned int power_cost(int cpu, u64 demand)
+static inline unsigned int power_cost(int cpu, bool max)
 {
 	return SCHED_CAPACITY_SCALE;
 }
 
+static inline void walt_sched_energy_populated_callback(void) { }
+
 #endif	/* CONFIG_SCHED_WALT */
 
 static inline bool energy_aware(void)
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index da7c0f0..f941d92 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -2145,7 +2145,7 @@
 	return ret;
 }
 
-void sort_clusters(void)
+static void sort_clusters(void)
 {
 	struct sched_cluster *cluster;
 	struct list_head new_head;
@@ -2155,9 +2155,9 @@
 
 	for_each_sched_cluster(cluster) {
 		cluster->max_power_cost = power_cost(cluster_first_cpu(cluster),
-							       max_task_load());
+							       true);
 		cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
-							       0);
+							       false);
 
 		if (cluster->max_power_cost > tmp_max)
 			tmp_max = cluster->max_power_cost;
@@ -2176,6 +2176,59 @@
 	move_list(&cluster_head, &new_head, false);
 }
 
+int __read_mostly min_power_cpu;
+
+void walt_sched_energy_populated_callback(void)
+{
+	struct sched_cluster *cluster;
+	int prev_max = 0, next_min = 0;
+
+	mutex_lock(&cluster_lock);
+
+	if (num_clusters == 1) {
+		sysctl_sched_is_big_little = 0;
+		mutex_unlock(&cluster_lock);
+		return;
+	}
+
+	sort_clusters();
+
+	for_each_sched_cluster(cluster) {
+		if (cluster->min_power_cost > prev_max) {
+			prev_max = cluster->max_power_cost;
+			continue;
+		}
+		/*
+		 * We assume no overlap in the power curves of
+		 * clusters on a big.LITTLE system.
+		 */
+		sysctl_sched_is_big_little = 0;
+		next_min = cluster->min_power_cost;
+	}
+
+	/*
+	 * Find the OPP at which the lower power cluster
+	 * power is overlapping with the next cluster.
+	 */
+	if (!sysctl_sched_is_big_little) {
+		int cpu = cluster_first_cpu(sched_cluster[0]);
+		struct sched_group_energy *sge = sge_array[cpu][SD_LEVEL1];
+		int i;
+
+		for (i = 1; i < sge->nr_cap_states; i++) {
+			if (sge->cap_states[i].power >= next_min) {
+				sched_smp_overlap_capacity =
+						sge->cap_states[i-1].cap;
+				break;
+			}
+		}
+
+		min_power_cpu = cpu;
+	}
+
+	mutex_unlock(&cluster_lock);
+}
+
 static void update_all_clusters_stats(void)
 {
 	struct sched_cluster *cluster;
@@ -2337,10 +2390,58 @@
 	.notifier_call = cpufreq_notifier_policy
 };
 
+static int cpufreq_notifier_trans(struct notifier_block *nb,
+		unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
+	unsigned int cpu = freq->cpu, new_freq = freq->new;
+	unsigned long flags;
+	struct sched_cluster *cluster;
+	struct cpumask policy_cpus = cpu_rq(cpu)->freq_domain_cpumask;
+	int i, j;
+
+	if (val != CPUFREQ_POSTCHANGE)
+		return NOTIFY_DONE;
+
+	if (cpu_cur_freq(cpu) == new_freq)
+		return NOTIFY_OK;
+
+	for_each_cpu(i, &policy_cpus) {
+		cluster = cpu_rq(i)->cluster;
+
+		if (!use_cycle_counter) {
+			for_each_cpu(j, &cluster->cpus) {
+				struct rq *rq = cpu_rq(j);
+
+				raw_spin_lock_irqsave(&rq->lock, flags);
+				update_task_ravg(rq->curr, rq, TASK_UPDATE,
+						 ktime_get_ns(), 0);
+				raw_spin_unlock_irqrestore(&rq->lock, flags);
+			}
+		}
+
+		cluster->cur_freq = new_freq;
+		cpumask_andnot(&policy_cpus, &policy_cpus, &cluster->cpus);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block notifier_trans_block = {
+	.notifier_call = cpufreq_notifier_trans
+};
+
 static int register_walt_callback(void)
 {
-	return cpufreq_register_notifier(&notifier_policy_block,
-					 CPUFREQ_POLICY_NOTIFIER);
+	int ret;
+
+	ret = cpufreq_register_notifier(&notifier_policy_block,
+					CPUFREQ_POLICY_NOTIFIER);
+	if (!ret)
+		ret = cpufreq_register_notifier(&notifier_trans_block,
+						CPUFREQ_TRANSITION_NOTIFIER);
+
+	return ret;
 }
 /*
  * cpufreq callbacks can be registered at core_initcall or later time.
@@ -2459,6 +2560,11 @@
 	if (list_empty(&grp->tasks))
 		return;
 
+	if (!sysctl_sched_is_big_little) {
+		grp->preferred_cluster = sched_cluster[0];
+		return;
+	}
+
 	wallclock = ktime_get_ns();
 
 	/*
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 10f3e84..c8780cf 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -281,12 +281,16 @@
 	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
 }
 
-void sort_clusters(void);
-
 void walt_irq_work(struct irq_work *irq_work);
 
 void walt_sched_init(struct rq *rq);
 
+extern int __read_mostly min_power_cpu;
+static inline int walt_start_cpu(int prev_cpu)
+{
+	return sysctl_sched_is_big_little ? prev_cpu : min_power_cpu;
+}
+
 #else /* CONFIG_SCHED_WALT */
 
 static inline void walt_sched_init(struct rq *rq) { }
@@ -358,6 +362,11 @@
 {
 }
 
+static inline int walt_start_cpu(int prev_cpu)
+{
+	return prev_cpu;
+}
+
 #endif /* CONFIG_SCHED_WALT */
 
 #endif
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
index bc1829e..50d9b51 100644
--- a/net/rmnet_data/rmnet_data_config.c
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -25,7 +25,6 @@
 #include "rmnet_data_vnd.h"
 #include "rmnet_data_private.h"
 #include "rmnet_data_trace.h"
-#include "rmnet_map.h"
 
 RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);
 
@@ -870,8 +869,7 @@
 	conf->dev = dev;
 	spin_lock_init(&conf->agg_lock);
 	config->recycle = kfree_skb;
-	hrtimer_init(&conf->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	conf->hrtimer.function = rmnet_map_flush_packet_queue;
+
 	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
 
 	if (rc) {
@@ -1234,22 +1232,6 @@
 	config = _rmnet_get_phys_ep_config(dev);
 
 	if (config) {
-		unsigned long flags;
-
-		hrtimer_cancel(&config->hrtimer);
-		spin_lock_irqsave(&config->agg_lock, flags);
-		if (config->agg_state == RMNET_MAP_TXFER_SCHEDULED) {
-			if (config->agg_skb) {
-				kfree_skb(config->agg_skb);
-				config->agg_skb = NULL;
-				config->agg_count = 0;
-				memset(&config->agg_time, 0,
-				       sizeof(struct timespec));
-			}
-			config->agg_state = RMNET_MAP_AGG_IDLE;
-		}
-		spin_unlock_irqrestore(&config->agg_lock, flags);
-
 		cfg = &config->local_ep;
 
 		if (cfg && cfg->refcount)
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
index 4142656..aa8a0b5 100644
--- a/net/rmnet_data/rmnet_data_config.h
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -16,7 +16,6 @@
 #include <linux/time.h>
 #include <linux/spinlock.h>
 #include <net/rmnet_config.h>
-#include <linux/hrtimer.h>
 
 #ifndef _RMNET_DATA_CONFIG_H_
 #define _RMNET_DATA_CONFIG_H_
@@ -86,7 +85,6 @@
 	u8 agg_count;
 	struct timespec agg_time;
 	struct timespec agg_last;
-	struct hrtimer hrtimer;
 };
 
 int rmnet_config_init(void);
diff --git a/net/rmnet_data/rmnet_map.h b/net/rmnet_data/rmnet_map.h
index 718140c..3bab6d9 100644
--- a/net/rmnet_data/rmnet_map.h
+++ b/net/rmnet_data/rmnet_map.h
@@ -147,5 +147,4 @@
 				     struct net_device *orig_dev,
 				     u32 egress_data_format);
 int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset);
-enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t);
 #endif /* _RMNET_MAP_H_ */
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
index f24b157..1c0f1060 100644
--- a/net/rmnet_data/rmnet_map_data.c
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -18,6 +18,7 @@
 #include <linux/netdevice.h>
 #include <linux/rmnet_data.h>
 #include <linux/spinlock.h>
+#include <linux/workqueue.h>
 #include <linux/time.h>
 #include <linux/net_map.h>
 #include <linux/ip.h>
@@ -47,6 +48,11 @@
 module_param(agg_bypass_time, long, 0644);
 MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");
 
+struct agg_work {
+	struct delayed_work work;
+	struct rmnet_phys_ep_config *config;
+};
+
 #define RMNET_MAP_DEAGGR_SPACING  64
 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
 
@@ -160,21 +166,24 @@
 }
 
 /* rmnet_map_flush_packet_queue() - Transmits aggregeted frame on timeout
+ * @work:        struct agg_work containing delayed work and skb to flush
  *
- * This function is scheduled to run in a specified number of ns after
+ * This function is scheduled to run in a specified number of jiffies after
  * the last frame transmitted by the network stack. When run, the buffer
  * containing aggregated packets is finally transmitted on the underlying link.
  *
  */
-enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
+static void rmnet_map_flush_packet_queue(struct work_struct *work)
 {
+	struct agg_work *real_work;
 	struct rmnet_phys_ep_config *config;
 	unsigned long flags;
 	struct sk_buff *skb;
 	int rc, agg_count = 0;
 
-	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);
 	skb = 0;
+	real_work = (struct agg_work *)work;
+	config = real_work->config;
 	LOGD("%s", "Entering flush thread");
 	spin_lock_irqsave(&config->agg_lock, flags);
 	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
@@ -202,8 +211,7 @@
 		rc = dev_queue_xmit(skb);
 		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
 	}
-
-	return HRTIMER_NORESTART;
+	kfree(work);
 }
 
 /* rmnet_map_aggregate() - Software aggregates multiple packets.
@@ -218,6 +226,7 @@
 void rmnet_map_aggregate(struct sk_buff *skb,
 			 struct rmnet_phys_ep_config *config) {
 	u8 *dest_buff;
+	struct agg_work *work;
 	unsigned long flags;
 	struct sk_buff *agg_skb;
 	struct timespec diff, last;
@@ -281,9 +290,7 @@
 		config->agg_skb = 0;
 		config->agg_count = 0;
 		memset(&config->agg_time, 0, sizeof(struct timespec));
-		config->agg_state = RMNET_MAP_AGG_IDLE;
 		spin_unlock_irqrestore(&config->agg_lock, flags);
-		hrtimer_cancel(&config->hrtimer);
 		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
 		     diff.tv_nsec, agg_count);
 		trace_rmnet_map_aggregate(skb, agg_count);
@@ -300,9 +307,19 @@
 
 schedule:
 	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work) {
+			LOGE("Failed to allocate work item for packet %s",
+			     "transfer. DATA PATH LIKELY BROKEN!");
+			config->agg_state = RMNET_MAP_AGG_IDLE;
+			spin_unlock_irqrestore(&config->agg_lock, flags);
+			return;
+		}
+		INIT_DELAYED_WORK((struct delayed_work *)work,
+				  rmnet_map_flush_packet_queue);
+		work->config = config;
 		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
-		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
-			      HRTIMER_MODE_REL);
+		schedule_delayed_work((struct delayed_work *)work, 1);
 	}
 	spin_unlock_irqrestore(&config->agg_lock, flags);
 }
diff --git a/net/wireless/util.c b/net/wireless/util.c
index a4ab20a..29c5661 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -532,123 +532,6 @@
 }
 EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr);
 
-int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
-			     enum nl80211_iftype iftype,
-			     const u8 *bssid, bool qos)
-{
-	struct ieee80211_hdr hdr;
-	u16 hdrlen, ethertype;
-	__le16 fc;
-	const u8 *encaps_data;
-	int encaps_len, skip_header_bytes;
-	int nh_pos, h_pos;
-	int head_need;
-
-	if (unlikely(skb->len < ETH_HLEN))
-		return -EINVAL;
-
-	nh_pos = skb_network_header(skb) - skb->data;
-	h_pos = skb_transport_header(skb) - skb->data;
-
-	/* convert Ethernet header to proper 802.11 header (based on
-	 * operation mode) */
-	ethertype = (skb->data[12] << 8) | skb->data[13];
-	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
-
-	switch (iftype) {
-	case NL80211_IFTYPE_AP:
-	case NL80211_IFTYPE_AP_VLAN:
-	case NL80211_IFTYPE_P2P_GO:
-		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
-		/* DA BSSID SA */
-		memcpy(hdr.addr1, skb->data, ETH_ALEN);
-		memcpy(hdr.addr2, addr, ETH_ALEN);
-		memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
-		hdrlen = 24;
-		break;
-	case NL80211_IFTYPE_STATION:
-	case NL80211_IFTYPE_P2P_CLIENT:
-		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
-		/* BSSID SA DA */
-		memcpy(hdr.addr1, bssid, ETH_ALEN);
-		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
-		memcpy(hdr.addr3, skb->data, ETH_ALEN);
-		hdrlen = 24;
-		break;
-	case NL80211_IFTYPE_OCB:
-	case NL80211_IFTYPE_ADHOC:
-		/* DA SA BSSID */
-		memcpy(hdr.addr1, skb->data, ETH_ALEN);
-		memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
-		memcpy(hdr.addr3, bssid, ETH_ALEN);
-		hdrlen = 24;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	if (qos) {
-		fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
-		hdrlen += 2;
-	}
-
-	hdr.frame_control = fc;
-	hdr.duration_id = 0;
-	hdr.seq_ctrl = 0;
-
-	skip_header_bytes = ETH_HLEN;
-	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
-		encaps_data = bridge_tunnel_header;
-		encaps_len = sizeof(bridge_tunnel_header);
-		skip_header_bytes -= 2;
-	} else if (ethertype >= ETH_P_802_3_MIN) {
-		encaps_data = rfc1042_header;
-		encaps_len = sizeof(rfc1042_header);
-		skip_header_bytes -= 2;
-	} else {
-		encaps_data = NULL;
-		encaps_len = 0;
-	}
-
-	skb_pull(skb, skip_header_bytes);
-	nh_pos -= skip_header_bytes;
-	h_pos -= skip_header_bytes;
-
-	head_need = hdrlen + encaps_len - skb_headroom(skb);
-
-	if (head_need > 0 || skb_cloned(skb)) {
-		head_need = max(head_need, 0);
-		if (head_need)
-			skb_orphan(skb);
-
-		if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC))
-			return -ENOMEM;
-
-		skb->truesize += head_need;
-	}
-
-	if (encaps_data) {
-		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
-		nh_pos += encaps_len;
-		h_pos += encaps_len;
-	}
-
-	memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);
-
-	nh_pos += hdrlen;
-	h_pos += hdrlen;
-
-	/* Update skb pointers to various headers since this modified frame
-	 * is going to go through Linux networking code that may potentially
-	 * need things like pointer to IP header. */
-	skb_reset_mac_header(skb);
-	skb_set_network_header(skb, nh_pos);
-	skb_set_transport_header(skb, h_pos);
-
-	return 0;
-}
-EXPORT_SYMBOL(ieee80211_data_from_8023);
-
 static void
 __frame_add_frag(struct sk_buff *skb, struct page *page,
 		 void *ptr, int len, int size)
diff --git a/scripts/Makefile.dtbo b/scripts/Makefile.dtbo
index b298f4a..d7938c3 100644
--- a/scripts/Makefile.dtbo
+++ b/scripts/Makefile.dtbo
@@ -10,7 +10,12 @@
 ifneq ($(DTC_OVERLAY_TEST_EXT),)
 DTC_OVERLAY_TEST = $(DTC_OVERLAY_TEST_EXT)
 quiet_cmd_dtbo_verify	= VERIFY  $@
-cmd_dtbo_verify = $(DTC_OVERLAY_TEST) $(addprefix $(obj)/,$($(@F)-base)) $@ $(dot-target).tmp
+cmd_dtbo_verify = $(foreach m,\
+	$(addprefix $(obj)/,$($(@F)-base)),\
+		$(if $(m),\
+			$(DTC_OVERLAY_TEST) $(m) $@ \
+			$(dot-target).$(patsubst $(obj)/%.dtb,%,$(m)).tmp;))\
+			true
 else
 cmd_dtbo_verify = true
 endif
diff --git a/techpack/.gitignore b/techpack/.gitignore
index 58da0b8..829fdf6 100644
--- a/techpack/.gitignore
+++ b/techpack/.gitignore
@@ -1,2 +1,3 @@
 # ignore all subdirs except stub
+*
 !/stub/