Merge "i3c: i3c-master-qcom-geni: Add support to make I3C driver GKI compliant"
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
index 97194b4..3592739 100644
--- a/arch/arm64/configs/vendor/bengal-perf_defconfig
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -570,6 +570,7 @@
 CONFIG_QCOM_CX_IPEAK=y
 CONFIG_QTI_CRYPTO_COMMON=y
 CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_QTI_HW_KEY_MANAGER=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_QMI=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index c241006..4467b21 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -594,6 +594,7 @@
 CONFIG_QCOM_CX_IPEAK=y
 CONFIG_QTI_CRYPTO_COMMON=y
 CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_QTI_HW_KEY_MANAGER=y
 CONFIG_ICNSS=y
 CONFIG_ICNSS_DEBUG=y
 CONFIG_ICNSS_QMI=y
diff --git a/arch/arm64/configs/vendor/kona-iot-perf_defconfig b/arch/arm64/configs/vendor/kona-iot-perf_defconfig
index 92859b7..1b4e76f 100644
--- a/arch/arm64/configs/vendor/kona-iot-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-iot-perf_defconfig
@@ -422,8 +422,7 @@
 CONFIG_MSM_GLOBAL_SYNX=y
 CONFIG_DVB_MPQ=m
 CONFIG_DVB_MPQ_DEMUX=m
-CONFIG_DVB_MPQ_TSPP1=y
-CONFIG_TSPP=m
+CONFIG_DVB_MPQ_SW=y
 CONFIG_VIDEO_V4L2_VIDEOBUF2_CORE=y
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
diff --git a/arch/arm64/configs/vendor/kona-iot_defconfig b/arch/arm64/configs/vendor/kona-iot_defconfig
index d143528..dbce12e 100644
--- a/arch/arm64/configs/vendor/kona-iot_defconfig
+++ b/arch/arm64/configs/vendor/kona-iot_defconfig
@@ -438,8 +438,7 @@
 CONFIG_MSM_GLOBAL_SYNX=y
 CONFIG_DVB_MPQ=m
 CONFIG_DVB_MPQ_DEMUX=m
-CONFIG_DVB_MPQ_TSPP1=y
-CONFIG_TSPP=m
+CONFIG_DVB_MPQ_SW=y
 CONFIG_VIDEO_V4L2_VIDEOBUF2_CORE=y
 CONFIG_I2C_RTC6226_QCA=y
 CONFIG_DRM=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 74b58921..5713457 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -330,7 +330,6 @@
 CONFIG_CNSS2_QMI=y
 CONFIG_CNSS_ASYNC=y
 CONFIG_BUS_AUTO_SUSPEND=y
-CONFIG_CNSS_QCA6390=y
 CONFIG_CNSS_GENL=y
 CONFIG_NVM=y
 CONFIG_NVM_PBLK=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 46b77b9..5033e4a 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -344,7 +344,6 @@
 CONFIG_CNSS2_QMI=y
 CONFIG_CNSS_ASYNC=y
 CONFIG_BUS_AUTO_SUSPEND=y
-CONFIG_CNSS_QCA6390=y
 CONFIG_CNSS_GENL=y
 CONFIG_NVM=y
 CONFIG_NVM_PBLK=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index 7548051..d043592 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -281,6 +281,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -607,6 +608,7 @@
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_QCOM_CDSP_RM=y
 CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
 CONFIG_QTI_CRYPTO_COMMON=y
 CONFIG_QTI_CRYPTO_TZ=y
 CONFIG_ICNSS=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 9c80d86..ba806f7 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -287,6 +287,7 @@
 CONFIG_HDCP_QSEECOM=y
 CONFIG_QSEECOM=y
 CONFIG_UID_SYS_STATS=y
+CONFIG_FPR_FPC=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_SG=y
@@ -626,6 +627,7 @@
 CONFIG_QMP_DEBUGFS_CLIENT=y
 CONFIG_QCOM_CDSP_RM=y
 CONFIG_QCOM_QHEE_ENABLE_MEM_PROTECTION=y
+CONFIG_QCOM_CX_IPEAK=y
 CONFIG_QTI_CRYPTO_COMMON=y
 CONFIG_QTI_CRYPTO_TZ=y
 CONFIG_ICNSS=y
diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig
new file mode 100644
index 0000000..67089f8
--- /dev/null
+++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig
@@ -0,0 +1,647 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_PSI_FTRACE=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_HOTPLUG_SIZE_BITS=29
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM660=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+CONFIG_PRINT_VMEMLAYOUT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_CPU_FREQ_MSM=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=8
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_DMA_CMA=y
+CONFIG_MTD=m
+CONFIG_ZRAM=y
+CONFIG_ZRAM_DEDUP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_HDCP_QSEECOM=y
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_SCSI_UFS_CRYPTO_QTI=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_SKY2=y
+CONFIG_RMNET=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_SYNAPTICS_DSX is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_TCM is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_QTI_HAPTICS=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_FASTCVPD=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_QNOVO5=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_RC_CORE=m
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_MSM_HSUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_TYPEC=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_CQHCI_CRYPTO=y
+CONFIG_MMC_CQHCI_CRYPTO_QTI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_SYNC_FILE=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_QPNP_REVID=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_GSI=y
+CONFIG_MSM_11AD=m
+CONFIG_USB_BAM=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_TESTBUS_DUMP=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_GLINK_SPSS=y
+CONFIG_RPMSG_QCOM_GLINK_SPI=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_OVERRIDE_MEMORY_LIMIT=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_QCOM_FSA4480_I2C=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_WDOG_IPI_ENABLE=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_CX_IPEAK=y
+CONFIG_QTI_CRYPTO_COMMON=y
+CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_QMI=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_DEVFREQ_SIMPLE_DEV=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_QCOM_SPMI_VADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_MPM=y
+CONFIG_PHY_XGENE=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_SENSORS_SSC=y
+CONFIG_QCOM_KGSL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig
new file mode 100644
index 0000000..6e561f2
--- /dev/null
+++ b/arch/arm64/configs/vendor/sdm660_defconfig
@@ -0,0 +1,698 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_PSI_FTRACE=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_HOTPLUG_SIZE_BITS=29
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_SDM660=y
+CONFIG_PCI=y
+CONFIG_PCI_MSM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+CONFIG_PRINT_VMEMLAYOUT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_ALLOW_WRITE_DEBUGFS=y
+CONFIG_CMA_AREAS=8
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_BTFM_SLIM_WCN3990=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_HDCP_QSEECOM=y
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_SCSI_UFS_CRYPTO_QTI=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_SYNAPTICS_DSX is not set
+# CONFIG_TOUCHSCREEN_SYNAPTICS_TCM is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_QTI_HAPTICS=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_FASTCVPD=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_QPNP_LABIBB=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_QPNP_OLEDB=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_MSM_HSUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_TYPEC=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_CQHCI_CRYPTO=y
+CONFIG_MMC_CQHCI_CRYPTO_QTI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
+CONFIG_SYNC_FILE=y
+CONFIG_DEBUG_DMA_BUF_REF=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_QPNP_REVID=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_GSI=y
+CONFIG_MSM_11AD=m
+CONFIG_USB_BAM=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_TLBSYNC_DEBUG=y
+CONFIG_ARM_SMMU_TESTBUS_DUMP=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_GLINK_SPSS=y
+CONFIG_RPMSG_QCOM_GLINK_SPI=y
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_MSM_RPM_SMD=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_OVERRIDE_MEMORY_LIMIT=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_QCOM_FSA4480_I2C=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_WDOG_IPI_ENABLE=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
+CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_CX_IPEAK=y
+CONFIG_QTI_CRYPTO_COMMON=y
+CONFIG_QTI_CRYPTO_TZ=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_ICNSS_QMI=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_QCOM_SPMI_VADC=y
+CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_MPM=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
+CONFIG_SENSORS_SSC=y
+CONFIG_QCOM_KGSL=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_PREEMPTIRQ_EVENTS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=m
+CONFIG_MEMTEST=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 9237651..f09c333 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -1322,7 +1322,7 @@
 		chan = MHI_TRE_GET_EV_CHID(local_rp);
 		if (chan >= mhi_cntrl->max_chan) {
 			MHI_ERR("invalid channel id %u\n", chan);
-			continue;
+			goto next_er_element;
 		}
 		mhi_chan = &mhi_cntrl->mhi_chan[chan];
 
@@ -1334,6 +1334,7 @@
 			event_quota--;
 		}
 
+next_er_element:
 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 		local_rp = ev_ring->rp;
 		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
@@ -1445,29 +1446,12 @@
 	struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
 	int result, ret = 0;
 
-	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
-		MHI_LOG("No EV access, PM_STATE:%s\n",
-			to_mhi_pm_state_str(mhi_cntrl->pm_state));
-		ret = -EIO;
-		goto exit_no_lock;
-	}
-
-	ret = __mhi_device_get_sync(mhi_cntrl);
-	if (ret)
-		goto exit_no_lock;
-
-	mutex_lock(&mhi_cntrl->pm_mutex);
-
 	spin_lock_bh(&mhi_event->lock);
 	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
 
 	if (ev_ring->rp == dev_rp) {
 		spin_unlock_bh(&mhi_event->lock);
-		read_lock_bh(&mhi_cntrl->pm_lock);
-		mhi_cntrl->wake_put(mhi_cntrl, false);
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		MHI_VERB("no pending event found\n");
-		goto exit_bw_process;
+		goto exit_bw_scale_process;
 	}
 
 	/* if rp points to base, we need to wrap it around */
@@ -1475,6 +1459,13 @@
 		dev_rp = ev_ring->base + ev_ring->len;
 	dev_rp--;
 
+	/* fast forward to currently processed element and recycle er */
+	ev_ring->rp = dev_rp;
+	ev_ring->wp = dev_rp - 1;
+	if (ev_ring->wp < ev_ring->base)
+		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
+	mhi_recycle_fwd_ev_ring_element(mhi_cntrl, ev_ring);
+
 	MHI_ASSERT(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT,
 		   "!BW SCALE REQ event");
 
@@ -1487,19 +1478,22 @@
 		 link_info.target_link_speed,
 		 link_info.target_link_width);
 
-	/* fast forward to currently processed element and recycle er */
-	ev_ring->rp = dev_rp;
-	ev_ring->wp = dev_rp - 1;
-	if (ev_ring->wp < ev_ring->base)
-		ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
-	mhi_recycle_fwd_ev_ring_element(mhi_cntrl, ev_ring);
-
 	read_lock_bh(&mhi_cntrl->pm_lock);
 	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
 		mhi_ring_er_db(mhi_event);
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 	spin_unlock_bh(&mhi_event->lock);
 
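+	/*
+	 * Keep device and bus votes (and a pending_pkts reference) held
+	 * for the duration of the bw_scale callback; both are dropped on
+	 * the exit path below.
+	 */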
+	atomic_inc(&mhi_cntrl->pending_pkts);
+	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev,
+				  MHI_VOTE_DEVICE | MHI_VOTE_BUS);
+	if (ret) {
+		atomic_dec(&mhi_cntrl->pending_pkts);
+		goto exit_bw_scale_process;
+	}
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
 	ret = mhi_cntrl->bw_scale(mhi_cntrl, &link_info);
 	if (!ret)
 		*cur_info = link_info;
@@ -1509,17 +1503,17 @@
 	read_lock_bh(&mhi_cntrl->pm_lock);
 	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
 		mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
-			      MHI_BW_SCALE_RESULT(result,
-						  link_info.sequence_num));
-
-	mhi_cntrl->wake_put(mhi_cntrl, false);
+				     MHI_BW_SCALE_RESULT(result,
+				     link_info.sequence_num));
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
-exit_bw_process:
+	mhi_device_put(mhi_cntrl->mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
+	atomic_dec(&mhi_cntrl->pending_pkts);
+
 	mutex_unlock(&mhi_cntrl->pm_mutex);
 
-exit_no_lock:
-	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+exit_bw_scale_process:
+	MHI_VERB("exit er_index:%u ret:%d\n", mhi_event->er_index, ret);
 
 	return ret;
 }
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 6dd25c0..883c872 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -3213,7 +3213,7 @@
 		pr_err("adsprpc: ERROR: %s: user application %s trying to unmap without initialization\n",
 			 __func__, current->comm);
 		err = EBADR;
-		goto bail;
+		return err;
 	}
 	mutex_lock(&fl->internal_map_mutex);
 
@@ -3262,6 +3262,11 @@
 	return err;
 }
 
+/*
+ *	fastrpc_internal_munmap_fd() can only be used on buffers
+ *	mapped with the persist attribute, and it can be called
+ *	only once for any persist buffer.
+ */
 static int fastrpc_internal_munmap_fd(struct fastrpc_file *fl,
 				struct fastrpc_ioctl_munmap_fd *ud)
 {
@@ -3270,14 +3275,15 @@
 
 	VERIFY(err, (fl && ud));
 	if (err)
-		goto bail;
+		return err;
 	VERIFY(err, fl->dsp_proc_init == 1);
 	if (err) {
 		pr_err("adsprpc: ERROR: %s: user application %s trying to unmap without initialization\n",
 			__func__, current->comm);
 		err = EBADR;
-		goto bail;
+		return err;
 	}
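+	/* same lock order as mmap/munmap: internal_map_mutex, then map_mutex */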
+	mutex_lock(&fl->internal_map_mutex);
 	mutex_lock(&fl->map_mutex);
 	if (fastrpc_mmap_find(fl, ud->fd, ud->va, ud->len, 0, 0, &map)) {
 		pr_err("adsprpc: mapping not found to unmap fd 0x%x, va 0x%llx, len 0x%x\n",
@@ -3287,10 +3293,13 @@
 		mutex_unlock(&fl->map_mutex);
 		goto bail;
 	}
-	if (map)
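+	/* drop FASTRPC_ATTR_KEEP_MAP so fastrpc_mmap_free releases the map */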
+	if (map && (map->attr & FASTRPC_ATTR_KEEP_MAP)) {
+		map->attr = map->attr & (~FASTRPC_ATTR_KEEP_MAP);
 		fastrpc_mmap_free(map, 0);
+	}
 	mutex_unlock(&fl->map_mutex);
 bail:
+	mutex_unlock(&fl->internal_map_mutex);
 	return err;
 }
 
@@ -3309,7 +3318,7 @@
 		pr_err("adsprpc: ERROR: %s: user application %s trying to map without initialization\n",
 			__func__, current->comm);
 		err = EBADR;
-		goto bail;
+		return err;
 	}
 	mutex_lock(&fl->internal_map_mutex);
 	if ((ud->flags == ADSP_MMAP_ADD_PAGES) ||
@@ -4015,19 +4024,26 @@
 
 	fl->tgid = current->tgid;
 	snprintf(strpid, PID_SIZE, "%d", current->pid);
-	buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
-	fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
-	if (!fl->debug_buf) {
-		err = -ENOMEM;
-		return err;
-	}
-	snprintf(fl->debug_buf, UL_SIZE, "%.10s%s%d",
+	if (debugfs_root) {
+		buf_size = strlen(current->comm) + strlen("_")
+			+ strlen(strpid) + 1;
+		fl->debug_buf = kzalloc(buf_size, GFP_KERNEL);
+		if (!fl->debug_buf) {
+			err = -ENOMEM;
+			return err;
+		}
+		snprintf(fl->debug_buf, UL_SIZE, "%.10s%s%d",
 			current->comm, "_", current->pid);
-	fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
-					debugfs_root, fl, &debugfs_fops);
-	if (!fl->debugfs_file)
-		pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
+		fl->debugfs_file = debugfs_create_file(fl->debug_buf, 0644,
+			debugfs_root, fl, &debugfs_fops);
+		if (IS_ERR_OR_NULL(fl->debugfs_file)) {
+			pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
 				current->comm, __func__, fl->debug_buf);
+			fl->debugfs_file = NULL;
+			kfree(fl->debug_buf);
+			fl->debug_buf = NULL;
+		}
+	}
 	return err;
 }
 
@@ -4700,8 +4716,15 @@
 	}
 
 	chan->sesscount++;
-	debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
-							NULL, &debugfs_fops);
+	if (debugfs_root) {
+		debugfs_global_file = debugfs_create_file("global", 0644,
+			debugfs_root, NULL, &debugfs_fops);
+		if (IS_ERR_OR_NULL(debugfs_global_file)) {
+			pr_warn("Error: %s: %s: failed to create debugfs global file\n",
+				current->comm, __func__);
+			debugfs_global_file = NULL;
+		}
+	}
 bail:
 	return err;
 }
@@ -5010,6 +5033,12 @@
 	int err = 0, i;
 
 	debugfs_root = debugfs_create_dir("adsprpc", NULL);
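+	/* debugfs is optional; continue without it if the dir can't be created */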
+	if (IS_ERR_OR_NULL(debugfs_root)) {
+		pr_warn("Error: %s: %s: failed to create debugfs root dir\n",
+			current->comm, __func__);
+		debugfs_remove_recursive(debugfs_root);
+		debugfs_root = NULL;
+	}
 	memset(me, 0, sizeof(*me));
 	fastrpc_init(me);
 	me->dev = NULL;
@@ -5018,7 +5047,7 @@
 	if (err)
 		goto register_bail;
 	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, NUM_CHANNELS,
-					DEVICE_NAME));
+		DEVICE_NAME));
 	if (err)
 		goto alloc_chrdev_bail;
 	cdev_init(&me->cdev, &fops);
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5108bca..4f38dba 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1668,10 +1668,13 @@
 {
 	uint8_t retries = 0, max_retries = 50;
 	unsigned char *buf = NULL;
+	unsigned long flags;
 
 	do {
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 		buf = diagmem_alloc(driver, DIAG_MDM_BUF_SIZE,
 				    dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 		if (!buf) {
 			usleep_range(5000, 5100);
 			retries++;
@@ -1689,13 +1692,16 @@
 
 int diag_dci_write_done_bridge(int index, unsigned char *buf, int len)
 {
+	unsigned long flags;
 	int token = BRIDGE_TO_TOKEN(index);
 
 	if (!VALID_DCI_TOKEN(token)) {
 		pr_err("diag: Invalid DCI token %d in %s\n", token, __func__);
 		return -EINVAL;
 	}
+	spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 	diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+	spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 	return 0;
 }
 #endif
@@ -1709,6 +1715,7 @@
 	int dci_header_size = sizeof(struct diag_dci_header_t);
 	int ret = DIAG_DCI_NO_ERROR;
 	uint32_t write_len = 0;
+	unsigned long flags;
 
 	if (!data)
 		return -EIO;
@@ -1742,7 +1749,9 @@
 	if (ret) {
 		pr_err("diag: error writing dci pkt to remote proc, token: %d, err: %d\n",
 			token, ret);
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 	} else {
 		ret = DIAG_DCI_NO_ERROR;
 	}
@@ -1766,6 +1775,7 @@
 	struct diag_ctrl_dci_handshake_pkt ctrl_pkt;
 	unsigned char *buf = NULL;
 	struct diag_dci_header_t dci_header;
+	unsigned long flags;
 
 	if (!VALID_DCI_TOKEN(token)) {
 		pr_err("diag: In %s, invalid DCI token %d\n", __func__, token);
@@ -1805,7 +1815,9 @@
 	if (err) {
 		pr_err("diag: error writing ack packet to remote proc, token: %d, err: %d\n",
 		       token, err);
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 		return err;
 	}
 
@@ -2469,6 +2481,7 @@
 	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
 	unsigned char *event_mask_ptr = NULL;
 	uint32_t write_len = 0;
+	unsigned long flags;
 
 	mutex_lock(&dci_event_mask_mutex);
 	event_mask_ptr = dci_ops_tbl[token].event_mask_composite;
@@ -2514,7 +2527,9 @@
 	if (err) {
 		pr_err("diag: error writing event mask to remote proc, token: %d, err: %d\n",
 		       token, err);
+		spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+		spin_unlock_irqrestore(&driver->dci_mempool_lock, flags);
 		ret = err;
 	} else {
 		ret = DIAG_DCI_NO_ERROR;
@@ -2671,6 +2686,7 @@
 	int i, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
 	int updated;
 	uint32_t write_len = 0;
+	unsigned long flags;
 
 	mutex_lock(&dci_log_mask_mutex);
 	log_mask_ptr = dci_ops_tbl[token].log_mask_composite;
@@ -2710,7 +2726,10 @@
 		if (err) {
 			pr_err("diag: error writing log mask to remote processor, equip_id: %d, token: %d, err: %d\n",
 			       i, token, err);
+			spin_lock_irqsave(&driver->dci_mempool_lock, flags);
 			diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
+			spin_unlock_irqrestore(&driver->dci_mempool_lock,
+				flags);
 			updated = 0;
 		}
 		if (updated)
@@ -2850,6 +2869,7 @@
 	mutex_init(&dci_log_mask_mutex);
 	mutex_init(&dci_event_mask_mutex);
 	spin_lock_init(&ws_lock);
+	spin_lock_init(&driver->dci_mempool_lock);
 
 	ret = diag_dci_init_ops_tbl();
 	if (ret)
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 0516429..880cbeb 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -797,6 +797,7 @@
 	struct mutex diag_id_mutex;
 	struct mutex diagid_v2_mutex;
 	struct mutex cmd_reg_mutex;
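+	/* protects DCI mempool alloc/free, also taken from atomic context */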
+	spinlock_t dci_mempool_lock;
 	uint32_t cmd_reg_count;
 	struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS];
 	/* Sizes that reflect memory pool sizes */
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index a337b63..8e9ee2c 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2008-2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2020, The Linux Foundation. All rights reserved.
  */
 #include <linux/slab.h>
 #include <linux/init.h>
@@ -1378,8 +1378,8 @@
 			}
 		}
 		mutex_unlock(&driver->md_session_lock);
-		diag_update_md_clients(HDLC_SUPPORT_TYPE);
 		mutex_unlock(&driver->hdlc_disable_mutex);
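+		/* notify md clients only after hdlc_disable_mutex is dropped */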
+		diag_update_md_clients(HDLC_SUPPORT_TYPE);
 		return 0;
 	}
 #endif
diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c
index 4479b1d..541fa71 100644
--- a/drivers/char/hw_random/msm_rng.c
+++ b/drivers/char/hw_random/msm_rng.c
@@ -285,6 +285,10 @@
 					"qcom,msm-rng-iface-clk")) {
 				msm_rng_dev->prng_clk = clk_get(&pdev->dev,
 							"iface_clk");
+			} else if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,msm-rng-hwkm-clk")) {
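+				/* HW key manager variants clock the PRNG from km_clk_src */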
+				msm_rng_dev->prng_clk = clk_get(&pdev->dev,
+							 "km_clk_src");
 			} else {
 				msm_rng_dev->prng_clk = clk_get(&pdev->dev,
 							 "core_clk");
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 8c1fd83..b5481ab 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -413,8 +413,10 @@
 		dev_dbg(dev, "index=%d freq=%d, core_count %d\n",
 			i, c->table[i].frequency, core_count);
 
-		if (core_count != c->max_cores)
+		if (core_count != c->max_cores) {
 			cur_freq = CPUFREQ_ENTRY_INVALID;
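+			/* entries limited to fewer than max_cores become boost-only */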
+			c->table[i].flags = CPUFREQ_BOOST_FREQ;
+		}
 
 		/*
 		 * Two of the same frequencies with the same core counts means
@@ -431,12 +433,12 @@
 		prev_cc = core_count;
 		prev_freq = cur_freq;
 
-		cur_freq *= 1000;
 		for_each_cpu(cpu, &c->related_cpus) {
 			cpu_dev = get_cpu_device(cpu);
 			if (!cpu_dev)
 				continue;
-			dev_pm_opp_add(cpu_dev, cur_freq, volt);
+			dev_pm_opp_add(cpu_dev, c->table[i].frequency * 1000,
+							volt);
 		}
 	}
 
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c23396f..bc0c4cf 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,6 +33,7 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
 obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 812ba67..9f19508 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -2308,7 +2308,7 @@
 
 	_debug_dent = debugfs_create_dir("qcedev", NULL);
 	if (IS_ERR(_debug_dent)) {
-		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+		pr_debug("qcedev debugfs_create_dir fail, error %ld\n",
 				PTR_ERR(_debug_dent));
 		return PTR_ERR(_debug_dent);
 	}
@@ -2318,7 +2318,7 @@
 	dent = debugfs_create_file(name, 0644, _debug_dent,
 			&_debug_qcedev, &_debug_stats_ops);
 	if (dent == NULL) {
-		pr_err("qcedev debugfs_create_file fail, error %ld\n",
+		pr_debug("qcedev debugfs_create_file fail, error %ld\n",
 				PTR_ERR(dent));
 		rc = PTR_ERR(dent);
 		goto err;
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 6a8e0d2..9578c3a 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -5523,7 +5523,7 @@
 
 	_debug_dent = debugfs_create_dir("qcrypto", NULL);
 	if (IS_ERR(_debug_dent)) {
-		pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
+		pr_debug("qcrypto debugfs_create_dir fail, error %ld\n",
 				PTR_ERR(_debug_dent));
 		return PTR_ERR(_debug_dent);
 	}
@@ -5533,7 +5533,7 @@
 	dent = debugfs_create_file(name, 0644, _debug_dent,
 				&_debug_qcrypto, &_debug_stats_ops);
 	if (dent == NULL) {
-		pr_err("qcrypto debugfs_create_file fail, error %ld\n",
+		pr_debug("qcrypto debugfs_create_file fail, error %ld\n",
 				PTR_ERR(dent));
 		rc = PTR_ERR(dent);
 		goto err;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 65f5613..7221983 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -623,6 +623,7 @@
 	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 	dmabuf->buf_name = bufname;
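+	/* also publish the buffer name via the generic dma_buf name field */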
+	dmabuf->name = bufname;
 	dmabuf->ktime = ktime_get();
 	atomic_set(&dmabuf->dent_count, 1);
 
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 9be7cc4..06125b1 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -881,6 +881,17 @@
 }
 EXPORT_SYMBOL_GPL(extcon_set_property_capability);
 
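+/**
+ * extcon_set_mutually_exclusive() - Set the mutually exclusive cable sets
+ * @edev:	the extcon device
+ * @exclusive:	array of mutually exclusive cable bit sets, ending with 0
+ *
+ * Returns 0 on success, or -EINVAL if @edev is NULL.
+ */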
+int extcon_set_mutually_exclusive(struct extcon_dev *edev,
+				const u32 *exclusive)
+{
+	if (!edev)
+		return -EINVAL;
+
+	edev->mutually_exclusive = exclusive;
+	return 0;
+}
+EXPORT_SYMBOL(extcon_set_mutually_exclusive);
+
 /**
  * extcon_get_extcon_dev() - Get the extcon device instance from the name.
  * @extcon_name:	the extcon name provided with extcon_dev_register()
diff --git a/drivers/gpu/drm/bridge/lt9611uxc.c b/drivers/gpu/drm/bridge/lt9611uxc.c
index e37e770..3a40d05 100644
--- a/drivers/gpu/drm/bridge/lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lt9611uxc.c
@@ -893,7 +893,7 @@
 		gpio_set_value(pdata->reset_gpio, 0);
 		msleep(20);
 		gpio_set_value(pdata->reset_gpio, 1);
-		msleep(300);
+		msleep(180);
 	} else {
 		gpio_set_value(pdata->reset_gpio, 0);
 	}
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index bbf1059..b3cd958 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -2645,7 +2645,7 @@
 static void a6xx_clk_set_options(struct adreno_device *adreno_dev,
 	const char *name, struct clk *clk, bool on)
 {
-	if (!adreno_is_a610(adreno_dev))
+	if (!adreno_is_a610(adreno_dev) && !adreno_is_a702(adreno_dev))
 		return;
 
 	/* Handle clock settings for GFX PSCBCs */
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index ef4c8f2..ba1520f 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -346,11 +346,11 @@
 	/* CP */
 	0x0800, 0x0803, 0x0806, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821,
 	0x0823, 0x0824, 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0845,
-	0x084F, 0x086F, 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4,
-	0x08D0, 0x08DD, 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911,
-	0x0928, 0x093E, 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996,
-	0x0998, 0x099E, 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1,
-	0x09C2, 0x09C8, 0x0A00, 0x0A03,
+	0x084F, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
+	0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
+	0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
+	0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B4, 0x09C2, 0x09C9,
+	0x0A00, 0x0A04,
 	/* VSC */
 	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
 	/* UCHE */
@@ -404,6 +404,7 @@
 	A6XX_DBGBUS_RAS          = 0xc,
 	A6XX_DBGBUS_VSC          = 0xd,
 	A6XX_DBGBUS_COM          = 0xe,
+	A6XX_DBGBUS_COM_1        = 0xf,
 	A6XX_DBGBUS_LRZ          = 0x10,
 	A6XX_DBGBUS_A2D          = 0x11,
 	A6XX_DBGBUS_CCUFCHE      = 0x12,
@@ -515,6 +516,11 @@
 	{ A6XX_DBGBUS_SPTP_5, 0x100, },
 };
 
+static const struct adreno_debugbus_block a702_dbgc_debugbus_blocks[] = {
+	{ A6XX_DBGBUS_COM_1, 0x100, },
+	{ A6XX_DBGBUS_SPTP_0, 0x100, },
+};
+
 #define A6XX_NUM_SHADER_BANKS 3
 #define A6XX_SHADER_STATETYPE_SHIFT 8
 
@@ -1528,6 +1534,15 @@
 		}
 	}
 
+	if (adreno_is_a702(adreno_dev)) {
+		for (i = 0; i < ARRAY_SIZE(a702_dbgc_debugbus_blocks); i++) {
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+				snapshot, a6xx_snapshot_dbgc_debugbus_block,
+				(void *) &a702_dbgc_debugbus_blocks[i]);
+		}
+	}
+
 	/*
 	 * GBIF has same debugbus as of other GPU blocks hence fall back to
 	 * default path if GPU uses GBIF.
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 606df36..ca22326 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -3284,6 +3284,41 @@
 EXPORT_SYMBOL(kgsl_pwr_limits_set_freq);
 
 /**
+ * kgsl_pwr_limits_set_gpu_fmax() - Set the requested limit for the
+ * client. If the requested freq value is larger than the supported
+ * fmax, the function returns success.
+ * @limit_ptr: Client handle
+ * @freq: Client requested frequency
+ *
+ * Set the new limit for the client and adjust the clocks
+ */
+int kgsl_pwr_limits_set_gpu_fmax(void *limit_ptr, unsigned int freq)
+{
+	struct kgsl_pwrctrl *pwr;
+	struct kgsl_pwr_limit *limit = limit_ptr;
+	int level;
+
+	if (IS_ERR_OR_NULL(limit))
+		return -EINVAL;
+
+	pwr = &limit->device->pwrctrl;
+
+	/*
+	 * When requested frequency is greater than fmax,
+	 * requested limit is implicit, return success here.
+	 */
+	if (freq >= pwr->pwrlevels[0].gpu_freq)
+		return 0;
+
+	level = _get_nearest_pwrlevel(pwr, freq);
+	if (level < 0)
+		return -EINVAL;
+	_update_limits(limit, KGSL_PWR_SET_LIMIT, level);
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_pwr_limits_set_gpu_fmax);
+
+/**
  * kgsl_pwr_limits_set_default() - Set the default thermal limit for the client
  * @limit_ptr: Client handle
  *
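A sketch of how a client is expected to use the new export, assuming the existing kgsl_pwr_limits_add() handle API that the neighbouring kgsl_pwr_limits_* exports pair with; the 700 MHz cap is illustrative:

	void *handle = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);

	if (!IS_ERR_OR_NULL(handle)) {
		/*
		 * Clamps to the nearest supported power level; a request
		 * at or above fmax is treated as implicit and returns 0.
		 */
		if (kgsl_pwr_limits_set_gpu_fmax(handle, 700000000))
			pr_err("gpu fmax limit rejected\n");
	}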
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 6079b70..2634bf6 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -9,6 +9,7 @@
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
 #include <linux/shmem_fs.h>
+#include <linux/bitfield.h>
 
 #include "kgsl_device.h"
 #include "kgsl_sharedmem.h"
@@ -847,6 +848,7 @@
 {
 	struct kgsl_mmu *mmu = &device->mmu;
 	unsigned int align;
+	u32 cachemode;
 
 	memset(memdesc, 0, sizeof(*memdesc));
 	/* Turn off SVM if the system doesn't support it */
@@ -861,6 +863,17 @@
 	if (!MMU_FEATURE(mmu, KGSL_MMU_IO_COHERENT))
 		flags &= ~((uint64_t) KGSL_MEMFLAGS_IOCOHERENT);
 
+	/*
+	 * We can't enable I/O coherency on uncached surfaces because of
+	 * situations where hardware might snoop the CPU caches, which can
+	 * hold stale data. This happens primarily due to the limitations
+	 * of the DMA caching APIs available on arm64.
+	 */
+	cachemode = FIELD_GET(KGSL_CACHEMODE_MASK, flags);
+	if (cachemode == KGSL_CACHEMODE_WRITECOMBINE ||
+	    cachemode == KGSL_CACHEMODE_UNCACHED)
+		flags &= ~((u64) KGSL_MEMFLAGS_IOCOHERENT);
+
 	if (MMU_FEATURE(mmu, KGSL_MMU_NEED_GUARD_PAGE))
 		memdesc->priv |= KGSL_MEMDESC_GUARD_PAGE;
 
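FIELD_GET() from the newly included <linux/bitfield.h> derives the shift from the mask itself, which is why the hunk above can extract the cache mode field with only KGSL_CACHEMODE_MASK; a self-contained illustration:

	#include <linux/bitfield.h>

	u32 reg = 0x0000b000;
	u32 field = FIELD_GET(GENMASK(15, 12), reg);	/* field == 0xb */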
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index 8be9ed1..1d9f40f 100644
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, 2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/amba/bus.h>
@@ -84,6 +84,18 @@
 	return rc;
 }
 
+static bool is_replicator_disabled(struct coresight_device *csdev)
+{
+	int i;
+
+	for (i = 0; i < csdev->nr_outport; i++) {
+		if (atomic_read(&csdev->refcnt[i]) > 0)
+			return false;
+	}
+
+	return true;
+}
+
 static int replicator_enable(struct coresight_device *csdev, int inport,
 			     int outport)
 {
@@ -93,6 +105,10 @@
 	bool first_enable = false;
 
 	spin_lock_irqsave(&drvdata->spinlock, flags);
+
+	if (is_replicator_disabled(csdev))
+		replicator_reset(drvdata);
+
 	if (atomic_read(&csdev->refcnt[outport]) == 0) {
 		rc = dynamic_replicator_enable(drvdata, inport, outport);
 		if (!rc)
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index d8e69f8..5e98878 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1570,6 +1570,8 @@
 		}
 
 		for (i = 0; i < csdev->nr_outport; i++) {
+			if (desc->pdata->child_names[i] == NULL)
+				continue;
 			conns[i].outport = desc->pdata->outports[i];
 			conns[i].child_name = desc->pdata->child_names[i];
 			conns[i].child_port = desc->pdata->child_ports[i];
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 32ac255..36c62dc 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -50,16 +50,22 @@
 {
 	struct device_node *ep = NULL;
 	int in = 0, out = 0;
+	struct of_endpoint endpoint;
 
 	do {
 		ep = of_graph_get_next_endpoint(node, ep);
 		if (!ep)
 			break;
 
+		if (of_graph_parse_endpoint(ep, &endpoint))
+			continue;
+
 		if (of_property_read_bool(ep, "slave-mode"))
-			in++;
+			in = (endpoint.port + 1 > in) ?
+				endpoint.port + 1 : in;
 		else
-			out++;
+			out = (endpoint.port + 1 > out) ?
+				endpoint.port + 1 : out;
 
 	} while (ep);
 
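Worked example of why the counting above changed: a node with endpoints on ports 0 and 2 has only two endpoints, so the old in++/out++ logic sized the connection arrays at 2, yet coresight indexes those arrays by port number and port 2 needs 3 slots. Taking max(port) + 1 sizes them correctly, and the coresight.c hunk above skips the resulting hole at port 1 via the child_names[i] == NULL check.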
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 06ca3f7..053a18c 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -934,3 +934,21 @@
 			       chan->channel, buf, len);
 }
 EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
+
+int iio_write_channel_processed(struct iio_channel *chan, int val)
+{
+	int ret;
+
+	mutex_lock(&chan->indio_dev->info_exist_lock);
+	if (chan->indio_dev->info == NULL) {
+		ret = -ENODEV;
+		goto err_unlock;
+	}
+
+	ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_PROCESSED);
+err_unlock:
+	mutex_unlock(&chan->indio_dev->info_exist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(iio_write_channel_processed);
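Consumer-side sketch of the new write path, mirroring the existing iio_read_channel_processed(); the channel name and value are illustrative:

	struct iio_channel *chan;
	int ret;

	chan = iio_channel_get(dev, "led_current");	/* hypothetical mapping */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_write_channel_processed(chan, 100000);	/* processed units */
	iio_channel_release(chan);
	return ret;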
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e3a9948..3a01526 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -88,5 +88,5 @@
 obj-$(CONFIG_GOLDFISH_PIC) 		+= irq-goldfish-pic.o
 obj-$(CONFIG_NDS32)			+= irq-ativic32.o
 obj-$(CONFIG_QCOM_PDC)			+= qcom-pdc.o
-obj-$(CONFIG_QCOM_MPM)			+= qcom-mpm.o qcom-mpm-bengal.o qcom-mpm-scuba.o
+obj-$(CONFIG_QCOM_MPM)			+= qcom-mpm.o qcom-mpm-bengal.o qcom-mpm-scuba.o qcom-mpm-sdm660.o
 obj-$(CONFIG_SIFIVE_PLIC)		+= irq-sifive-plic.o
diff --git a/drivers/irqchip/qcom-mpm-sdm660.c b/drivers/irqchip/qcom-mpm-sdm660.c
new file mode 100644
index 0000000..2e482e0
--- /dev/null
+++ b/drivers/irqchip/qcom-mpm-sdm660.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <soc/qcom/mpm.h>
+
+const struct mpm_pin mpm_sdm660_gic_chip_data[] = {
+	{2, 216}, /* tsens1_tsens_upper_lower_int */
+	{52, 275}, /* qmp_usb3_lfps_rxterm_irq_cx */
+	{61, 209}, /* lpi_dir_conn_irq_apps[1] */
+	{79, 379}, /* qusb2phy_intr for Dm */
+	{80, 380}, /* qusb2phy_intr for Dm for secondary PHY */
+	{81, 379}, /* qusb2phy_intr for Dp */
+	{82, 380}, /* qusb2phy_intr for Dp for secondary PHY */
+	{87, 358}, /* ee0_apps_hlos_spmi_periph_irq */
+	{91, 519}, /* lpass_pmu_tmr_timeout_irq_cx */
+	{-1},
+};
diff --git a/drivers/irqchip/qcom-mpm.c b/drivers/irqchip/qcom-mpm.c
index f7f4864..ab8a3b3 100644
--- a/drivers/irqchip/qcom-mpm.c
+++ b/drivers/irqchip/qcom-mpm.c
@@ -592,6 +592,10 @@
 		.compatible = "qcom,mpm-gic-scuba",
 		.data = mpm_scuba_gic_chip_data,
 	},
+	{
+		.compatible = "qcom,mpm-gic-sdm660",
+		.data = mpm_sdm660_gic_chip_data,
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(of, mpm_gic_chip_data_table);
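Each entry in the new table maps an MPM wake-capable pin to the parent GIC hwirq it mirrors, with {-1} terminating the list. For the match entry above to link, a declaration is presumably added to include/soc/qcom/mpm.h alongside the other SoCs, roughly:

	extern const struct mpm_pin mpm_sdm660_gic_chip_data[];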
diff --git a/drivers/leds/leds-qti-flash.c b/drivers/leds/leds-qti-flash.c
index 0e02e78..b0ca27b 100644
--- a/drivers/leds/leds-qti-flash.c
+++ b/drivers/leds/leds-qti-flash.c
@@ -277,7 +277,7 @@
 
 	for (i = 0; i < 60; i++) {
 		/* wait for the flash vreg_ok to be set */
-		usleep_range(5000, 5500);
+		mdelay(5);
 
 		rc = power_supply_get_property(led->main_psy,
 					POWER_SUPPLY_PROP_FLASH_TRIGGER, &pval);
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 1854e9b..ab4a6af 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -138,6 +138,7 @@
 	{ .compatible = "qcom,kona-spcs-global", .data = (void *)0 },
 	{ .compatible = "qcom,bengal-apcs-hmss-global", .data = (void *)8 },
 	{ .compatible = "qcom,scuba-apcs-hmss-global", .data = (void *)8 },
+	{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = (void *)8 },
 	{}
 };
 MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c
index 19be201..ea29ebc 100644
--- a/drivers/md/dm-default-key.c
+++ b/drivers/md/dm-default-key.c
@@ -133,9 +133,11 @@
 	return 0;
 }
 
-void default_key_adjust_sector_size_and_iv(char **argv, struct dm_target *ti,
-					   struct default_key_c **dkc, u8 *raw,
-					   u32 size)
+static void default_key_adjust_sector_size_and_iv(char **argv,
+						  struct dm_target *ti,
+						  struct default_key_c **dkc,
+						  u8 *raw, u32 size,
+						  bool is_legacy)
 {
 	struct dm_dev *dev;
 	int i;
@@ -146,7 +148,7 @@
 
 	dev = (*dkc)->dev;
 
-	if (!strcmp(argv[0], "AES-256-XTS")) {
+	if (is_legacy) {
 		memcpy(key_new.bytes, raw, size);
 
 		for (i = 0; i < ARRAY_SIZE(key_new.words); i++)
@@ -179,6 +181,24 @@
 	unsigned long long tmpll;
 	char dummy;
 	int err;
+	char *_argv[10];
+	bool is_legacy = false;
+
+	if (argc >= 4 && !strcmp(argv[0], "AES-256-XTS")) {
+		argc = 0;
+		_argv[argc++] = "aes-xts-plain64";
+		_argv[argc++] = argv[1];
+		_argv[argc++] = "0";
+		_argv[argc++] = argv[2];
+		_argv[argc++] = argv[3];
+		_argv[argc++] = "3";
+		_argv[argc++] = "allow_discards";
+		_argv[argc++] = "sector_size:4096";
+		_argv[argc++] = "iv_large_sectors";
+		_argv[argc] = NULL;
+		argv = _argv;
+		is_legacy = true;
+	}
 
 	if (argc < 5) {
 		ti->error = "Not enough arguments";
@@ -254,7 +274,7 @@
 	}
 
 	default_key_adjust_sector_size_and_iv(argv, ti, &dkc, raw_key,
-					      raw_key_size);
+					      raw_key_size, is_legacy);
 
 	dkc->sector_bits = ilog2(dkc->sector_size);
 	if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) {
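Effect of the legacy shim above, with <key>, <dev> and <start> as placeholders: an old four-argument table line

	AES-256-XTS <key> <dev> <start>

is rewritten in place to the modern form before parsing continues:

	aes-xts-plain64 <key> 0 <dev> <start> 3 allow_discards sector_size:4096 iv_large_sectors

and is_legacy additionally selects the key-swizzling branch in default_key_adjust_sector_size_and_iv().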
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index 0d2816f..5ea693b 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  */
 
 /* -------------------------------------------------------------------------
@@ -367,8 +367,6 @@
 	/* Update qhdr_write_idx */
 	queue.qhdr_write_idx = new_write_idx;
 
-	*is_rx_req_set = (queue.qhdr_rx_req == 1) ? 1 : 0;
-
 	/* Update Write pointer -- queue.qhdr_write_idx */
 exit:
 	/* Update TX request -- queue.qhdr_tx_req */
@@ -379,6 +377,13 @@
 		(size_t)&(queue.qhdr_write_idx) - (size_t)&queue))),
 		&queue.qhdr_write_idx, sizeof(queue.qhdr_write_idx));
 
+	/* check if irq is required after write_idx is updated */
+	MEMR(npu_dev, (void *)((size_t)(offset + (uint32_t)(
+		(size_t)&(queue.qhdr_rx_req) - (size_t)&queue))),
+		(uint8_t *)&queue.qhdr_rx_req,
+		sizeof(queue.qhdr_rx_req));
+	*is_rx_req_set = (queue.qhdr_rx_req == 1) ? 1 : 0;
+
 	return status;
 }
 
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-common.c b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
index dfc8096..2e50c4f 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-common.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
@@ -385,8 +385,30 @@
 			next_freq_khz, radio->registers[RSSI] & RSSI_RSSI);
 
 		if (radio->registers[STATUS] & STATUS_SF) {
-			FMDERR("%s band limit reached. Seek one more.\n",
+			FMDERR("%s Seek one more time if lower freq is valid\n",
 					__func__);
+			retval = rtc6226_set_seek(radio, SRCH_UP, WRAP_ENABLE);
+			if (retval < 0) {
+				FMDERR("%s seek fail %d\n", __func__, retval);
+				goto seek_tune_fail;
+			}
+			if (!wait_for_completion_timeout(&radio->completion,
+					msecs_to_jiffies(WAIT_TIMEOUT_MSEC))) {
+				FMDERR("timeout didn't receive STC for seek\n");
+			} else {
+				FMDERR("%s: received STC for seek\n", __func__);
+				retval = rtc6226_get_freq(radio,
+						&next_freq_khz);
+				if (retval < 0) {
+					FMDERR("%s getFreq failed\n", __func__);
+					goto seek_tune_fail;
+				}
+				if ((radio->recv_conf.band_low_limit *
+						TUNE_STEP_SIZE) ==
+							next_freq_khz)
+					rtc6226_q_event(radio,
+						RTC6226_EVT_TUNE_SUCC);
+			}
 			break;
 		}
 		if (radio->g_search_mode == SCAN)
@@ -438,13 +460,12 @@
 		if (!wait_for_completion_timeout(&radio->completion,
 			msecs_to_jiffies(WAIT_TIMEOUT_MSEC)))
 			FMDERR("%s: didn't receive STD for tune\n", __func__);
-		else {
+		else
 			FMDERR("%s: received STD for tune\n", __func__);
-			rtc6226_q_event(radio, RTC6226_EVT_TUNE_SUCC);
-		}
 	}
 seek_cancelled:
 	rtc6226_q_event(radio, RTC6226_EVT_SEEK_COMPLETE);
+	rtc6226_q_event(radio, RTC6226_EVT_TUNE_SUCC);
 	radio->seek_tune_status = NO_SEEK_TUNE_PENDING;
 	FMDERR("%s seek cancelled %d\n", __func__, retval);
 	return;
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c b/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
index f4e62c1..4ea5011 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-i2c.c
@@ -523,13 +523,9 @@
 int rtc6226_fops_open(struct file *file)
 {
 	struct rtc6226_device *radio = video_drvdata(file);
-	int retval = v4l2_fh_open(file);
+	int retval;
 
 	FMDBG("%s enter user num = %d\n", __func__, radio->users);
-	if (retval) {
-		FMDERR("%s fail to open v4l2\n", __func__);
-		return retval;
-	}
 	if (atomic_inc_return(&radio->users) != 1) {
 		FMDERR("Device already in use. Try again later\n");
 		atomic_dec(&radio->users);
@@ -560,7 +556,6 @@
 	rtc6226_fm_power_cfg(radio, TURNING_OFF);
 open_err_setup:
 	atomic_dec(&radio->users);
-	v4l2_fh_release(file);
 	return retval;
 }
 
@@ -573,18 +568,16 @@
 	int retval = 0;
 
 	FMDBG("%s : Exit\n", __func__);
-	if (v4l2_fh_is_singular_file(file)) {
-		if (radio->mode != FM_OFF) {
-			rtc6226_power_down(radio);
-			radio->mode = FM_OFF;
-		}
+	if (radio->mode != FM_OFF) {
+		rtc6226_power_down(radio);
+		radio->mode = FM_OFF;
 	}
 	rtc6226_disable_irq(radio);
 	atomic_dec(&radio->users);
 	retval = rtc6226_fm_power_cfg(radio, TURNING_OFF);
 	if (retval < 0)
 		FMDERR("%s: failed to apply voltage\n", __func__);
-	return v4l2_fh_release(file);
+	return retval;
 }
 
 static int rtc6226_parse_dt(struct device *dev,
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 1ca6820..81c7405 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -2616,6 +2616,11 @@
 		case QSEOS_RESULT_SUCCESS:
 		case QSEOS_RESULT_INCOMPLETE:
 			break;
+		case QSEOS_RESULT_CBACK_REQUEST:
+			pr_warn("got cback req: app_id = %d, resp->data = %d\n",
+				data->client.app_id, resp->data);
+			resp->resp_type = SMCINVOKE_RESULT_INBOUND_REQ_NEEDED;
+			break;
 		default:
 			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
 				resp->result, data->client.app_id, lstnr);
@@ -9291,6 +9296,15 @@
 		goto exit_del_cdev;
 	}
 
+	if (!qseecom.dev->dma_parms) {
+		qseecom.dev->dma_parms =
+			kzalloc(sizeof(*qseecom.dev->dma_parms), GFP_KERNEL);
+		if (!qseecom.dev->dma_parms) {
+			rc = -ENOMEM;
+			goto exit_del_cdev;
+		}
+	}
+	dma_set_max_seg_size(qseecom.dev, DMA_BIT_MASK(32));
 	return 0;
 
 exit_del_cdev:
@@ -9307,6 +9321,8 @@
 
 static void qseecom_deinit_dev(void)
 {
+	kfree(qseecom.dev->dma_parms);
+	qseecom.dev->dma_parms = NULL;
 	cdev_del(&qseecom.cdev);
 	device_destroy(qseecom.driver_class, qseecom.qseecom_device_no);
 	class_destroy(qseecom.driver_class);
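Why the allocation matters: dma_set_max_seg_size() only records the limit when dev->dma_parms is populated; the core helper is roughly the following (per include/linux/dma-mapping.h on kernels of this vintage):

	static inline int dma_set_max_seg_size(struct device *dev,
					       unsigned int size)
	{
		if (dev->dma_parms) {
			dev->dma_parms->max_segment_size = size;
			return 0;
		}
		return -EIO;
	}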
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index d83d0e3..f483926 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -114,7 +114,6 @@
 				mmc_cqe_recovery_notifier(mrq);
 			return BLK_EH_RESET_TIMER;
 		}
-		/* No timeout (XXX: huh? comment doesn't make much sense) */
 
 		pr_info("%s: %s: Timeout even before req reaching LDD, completing the req. Active reqs: %d Req: %p Tag: %d\n",
 				mmc_hostname(host), __func__,
@@ -122,7 +121,7 @@
 		mmc_log_string(host,
 				"Timeout even before req reaching LDD,completing the req. Active reqs: %d Req: %p Tag: %d\n",
 				mmc_cqe_qcnt(mq), req, req->tag);
-		blk_mq_complete_request(req);
+		/* The request has gone already */
 		return BLK_EH_DONE;
 	default:
 		/* Timeout is handled by mmc core */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 02b5509..9d097b8 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -163,6 +163,8 @@
 
 #define INVALID_TUNING_PHASE	-1
 #define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
+#define sdhci_is_valid_gpio_testbus_trigger_int(_h) \
+	((_h)->pdata->testbus_trigger_irq >= 0)
 
 #define NUM_TUNING_PHASES		16
 #define MAX_DRV_TYPES_SUPPORTED_HS200	4
@@ -1210,7 +1212,115 @@
 			drv_type);
 }
 
+#define MAX_TESTBUS 127
 #define IPCAT_MINOR_MASK(val) ((val & 0x0fff0000) >> 0x10)
+#define TB_CONF_MASK 0x7f
+#define TB_TRIG_CONF 0xff80ffff
+#define TB_WRITE_STATUS BIT(8)
+
+/*
+ * This function programs the mask and match pattern received
+ * either from the cmdline or from sysfs
+ */
+void sdhci_msm_mm_dbg_configure(struct sdhci_host *host, u32 mask,
+			u32 match, u32 bit_shift, u32 testbus)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct platform_device *pdev = msm_host->pdev;
+	u32 val;
+	u32 enable_dbg_feature = 0;
+	int ret = 0;
+
+	if (testbus > MAX_TESTBUS) {
+		dev_err(&pdev->dev, "%s: testbus should be less than 128.\n",
+						__func__);
+		return;
+	}
+
+	/* Enable debug mode */
+	writel_relaxed(ENABLE_DBG,
+			host->ioaddr + SDCC_TESTBUS_CONFIG);
+	writel_relaxed(DUMMY,
+			host->ioaddr + SDCC_DEBUG_EN_DIS_REG);
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			SDCC_TESTBUS_CONFIG) | TESTBUS_EN),
+			host->ioaddr + SDCC_TESTBUS_CONFIG);
+
+	/* Enable particular feature */
+	enable_dbg_feature |= MM_TRIGGER_DISABLE;
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			SDCC_DEBUG_FEATURE_CFG_REG) | enable_dbg_feature),
+			host->ioaddr + SDCC_DEBUG_FEATURE_CFG_REG);
+
+	/* Configure mask & match pattern */
+	writel_relaxed((mask << bit_shift),
+			host->ioaddr + SDCC_DEBUG_MASK_PATTERN_REG);
+	writel_relaxed((match << bit_shift),
+			host->ioaddr + SDCC_DEBUG_MATCH_PATTERN_REG);
+
+	/* Configure test bus for above mm */
+	writel_relaxed((testbus & TB_CONF_MASK), host->ioaddr +
+			SDCC_DEBUG_MM_TB_CFG_REG);
+	/* Initiate conf shifting */
+	writel_relaxed(BIT(8),
+			host->ioaddr + SDCC_DEBUG_MM_TB_CFG_REG);
+
+	/* Wait for test bus to be configured */
+	ret = readl_poll_timeout(host->ioaddr + SDCC_DEBUG_MM_TB_CFG_REG,
+			val, !(val & TB_WRITE_STATUS), 50, 1000);
+	if (ret == -ETIMEDOUT)
+		pr_err("%s: Unable to set mask & match\n",
+				mmc_hostname(host->mmc));
+
+	/* Direct test bus to GPIO */
+	writel_relaxed(((readl_relaxed(host->ioaddr +
+				SDCC_TESTBUS_CONFIG) & TB_TRIG_CONF)
+				| (testbus << 16)), host->ioaddr +
+				SDCC_TESTBUS_CONFIG);
+
+	/* Read back to ensure write went through */
+	readl_relaxed(host->ioaddr + SDCC_DEBUG_FEATURE_CFG_REG);
+}
+
+static ssize_t store_mask_and_match(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	unsigned long value;
+	char *token;
+	int i = 0;
+	u32 mask = 0, match = 0, bit_shift = 0, testbus = 0;
+
+	char *temp = (char *)buf;
+
+	if (!host)
+		return -EINVAL;
+
+	while ((token = strsep(&temp, " "))) {
+		if (kstrtoul(token, 0, &value))
+			return -EINVAL;
+		if (i == 0)
+			mask = value;
+		else if (i == 1)
+			match = value;
+		else if (i == 2)
+			bit_shift = value;
+		else if (i == 3) {
+			testbus = value;
+			break;
+		}
+		i++;
+	}
+
+	pr_info("%s: M&M parameters passed: %u %u %u %u\n",
+		mmc_hostname(host->mmc), mask, match, bit_shift, testbus);
+	pm_runtime_get_sync(dev);
+	sdhci_msm_mm_dbg_configure(host, mask, match, bit_shift, testbus);
+	pm_runtime_put_sync(dev);
+
+	pr_debug("%s: M&M debug enabled.\n", mmc_hostname(host->mmc));
+	return count;
+}
 
 /* Enter sdcc debug mode */
 void sdhci_msm_enter_dbg_mode(struct sdhci_host *host)
@@ -2861,6 +2971,16 @@
 	msm_host->is_sdiowakeup_enabled = enable;
 }
 
+static irqreturn_t sdhci_msm_testbus_trigger_irq(int irq, void *data)
+{
+	struct sdhci_host *host = (struct sdhci_host *)data;
+
+	pr_info("%s: match happened against mask\n",
+				mmc_hostname(host->mmc));
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
 {
 	struct sdhci_host *host = (struct sdhci_host *)data;
@@ -5564,6 +5684,22 @@
 		}
 	}
 
+	msm_host->pdata->testbus_trigger_irq = platform_get_irq_byname(pdev,
+							  "tb_trig_irq");
+	if (sdhci_is_valid_gpio_testbus_trigger_int(msm_host)) {
+		dev_info(&pdev->dev, "%s: testbus_trigger_irq = %d\n", __func__,
+				msm_host->pdata->testbus_trigger_irq);
+		ret = request_irq(msm_host->pdata->testbus_trigger_irq,
+				  sdhci_msm_testbus_trigger_irq,
+				  IRQF_SHARED | IRQF_TRIGGER_RISING,
+				  "sdhci-msm tb_trig", host);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: request tb_trig IRQ %d: failed: %d\n",
+				__func__, msm_host->pdata->testbus_trigger_irq,
+				ret);
+		}
+	}
+
 	if (of_device_is_compatible(node, "qcom,sdhci-msm-cqe")) {
 		dev_dbg(&pdev->dev, "node with qcom,sdhci-msm-cqe\n");
 		ret = sdhci_msm_cqe_add_host(host, pdev);
@@ -5622,6 +5758,20 @@
 		device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
 	}
 
+	if (IPCAT_MINOR_MASK(readl_relaxed(host->ioaddr +
+				SDCC_IP_CATALOG)) >= 2) {
+		msm_host->mask_and_match.store = store_mask_and_match;
+		sysfs_attr_init(&msm_host->mask_and_match.attr);
+		msm_host->mask_and_match.attr.name = "mask_and_match";
+		msm_host->mask_and_match.attr.mode = 0644;
+		ret = device_create_file(&pdev->dev,
+					&msm_host->mask_and_match);
+		if (ret) {
+			pr_err("%s: %s: failed creating M&M attr: %d\n",
+					mmc_hostname(host->mmc), __func__, ret);
+		}
+	}
+
 	if (sdhci_msm_is_bootdevice(&pdev->dev))
 		mmc_flush_detect_work(host->mmc);
 	/* Successful initialization */
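Usage sketch for the new sysfs attribute: it expects four space-separated values (mask, match, bit_shift, testbus) written from userspace; the device path and values are illustrative:

	# route testbus 20, matching 0x2000 under mask 0xffff at shift 0
	echo "0xffff 0x2000 0 20" > /sys/devices/platform/soc/.../mask_and_match

A rising edge on the routed tb_trig_irq GPIO then logs "match happened against mask".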
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index fa83f09..026faae 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -207,6 +207,7 @@
 	u32 *sup_clk_table;
 	unsigned char sup_clk_cnt;
 	int sdiowakeup_irq;
+	int testbus_trigger_irq;
 	struct sdhci_msm_pm_qos_data pm_qos_data;
 	u32 *bus_clk_table;
 	unsigned char bus_clk_cnt;
@@ -288,6 +289,7 @@
 	struct completion pwr_irq_completion;
 	struct sdhci_msm_bus_vote msm_bus_vote;
 	struct device_attribute	polling;
+	struct device_attribute mask_and_match;
 	u32 clk_rate; /* Keeps track of current clock rate that is set */
 	bool tuning_done;
 	bool calibration_done;
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
index 94e0a4d..87d2d8b 100644
--- a/drivers/net/wireless/cnss2/bus.c
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -95,6 +95,23 @@
 	}
 }
 
+void cnss_bus_add_fw_prefix_name(struct cnss_plat_data *plat_priv,
+				 char *prefix_name, char *name)
+{
+	if (!plat_priv)
+		return;
+
+	switch (plat_priv->bus_type) {
+	case CNSS_BUS_PCI:
+		return cnss_pci_add_fw_prefix_name(plat_priv->bus_priv,
+						   prefix_name, name);
+	default:
+		cnss_pr_err("Unsupported bus type: %d\n",
+			    plat_priv->bus_type);
+		return;
+	}
+}
+
 int cnss_bus_load_m3(struct cnss_plat_data *plat_priv)
 {
 	if (!plat_priv)
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
index 1e7cc0f..686b12d 100644
--- a/drivers/net/wireless/cnss2/bus.h
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -24,6 +24,8 @@
 struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
 int cnss_bus_init(struct cnss_plat_data *plat_priv);
 void cnss_bus_deinit(struct cnss_plat_data *plat_priv);
+void cnss_bus_add_fw_prefix_name(struct cnss_plat_data *plat_priv,
+				 char *prefix_name, char *name);
 int cnss_bus_load_m3(struct cnss_plat_data *plat_priv);
 int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv);
 int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv);
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 21ceda6..9002866 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -879,12 +879,6 @@
 
 	return 0;
 }
-#else
-static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
-{
-	return 0;
-}
-#endif
 
 int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
 {
@@ -910,6 +904,12 @@
 out:
 	return ret;
 }
+#else
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
+#endif
 
 void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv)
 {
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index ff15aed..6757ea9 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -665,6 +665,11 @@
 		return -ENODEV;
 	}
 
+	if (!mutex_trylock(&plat_priv->driver_ops_lock)) {
+		cnss_pr_dbg("Another driver operation is in progress, ignore idle restart\n");
+		return -EBUSY;
+	}
+
 	cnss_pr_dbg("Doing idle restart\n");
 
 	reinit_completion(&plat_priv->power_up_complete);
@@ -703,9 +708,11 @@
 		goto out;
 	}
 
+	mutex_unlock(&plat_priv->driver_ops_lock);
 	return 0;
 
 out:
+	mutex_unlock(&plat_priv->driver_ops_lock);
 	return ret;
 }
 EXPORT_SYMBOL(cnss_idle_restart);
@@ -1083,6 +1090,7 @@
 
 	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
 		cnss_pr_err("Recovery is already in progress\n");
+		CNSS_ASSERT(0);
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1135,7 +1143,8 @@
 	struct cnss_recovery_data *data;
 	int gfp = GFP_KERNEL;
 
-	cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
+	if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
+		cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
 
 	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
 	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
@@ -2154,6 +2163,7 @@
 	init_completion(&plat_priv->rddm_complete);
 	init_completion(&plat_priv->recovery_complete);
 	mutex_init(&plat_priv->dev_lock);
+	mutex_init(&plat_priv->driver_ops_lock);
 
 	return 0;
 }
@@ -2195,6 +2205,12 @@
 		    plat_priv->set_wlaon_pwr_ctrl);
 }
 
+static bool cnss_use_fw_path_with_prefix(struct cnss_plat_data *plat_priv)
+{
+	return of_property_read_bool(plat_priv->plat_dev->dev.of_node,
+				     "qcom,converged-dt");
+}
+
 static const struct platform_device_id cnss_platform_id_table[] = {
 	{ .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
 	{ .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
@@ -2260,6 +2276,8 @@
 	plat_priv->device_id = device_id->driver_data;
 	plat_priv->bus_type = cnss_get_bus_type(plat_priv->device_id);
 	plat_priv->use_nv_mac = cnss_use_nv_mac(plat_priv);
+	plat_priv->use_fw_path_with_prefix =
+		cnss_use_fw_path_with_prefix(plat_priv);
 	cnss_set_plat_priv(plat_dev, plat_priv);
 	platform_set_drvdata(plat_dev, plat_priv);
 	INIT_LIST_HEAD(&plat_priv->vreg_list);
@@ -2303,9 +2321,7 @@
 	if (ret)
 		goto deinit_event_work;
 
-	ret = cnss_debugfs_create(plat_priv);
-	if (ret)
-		goto deinit_qmi;
+	cnss_debugfs_create(plat_priv);
 
 	ret = cnss_misc_init(plat_priv);
 	if (ret)
@@ -2324,7 +2340,6 @@
 
 destroy_debugfs:
 	cnss_debugfs_destroy(plat_priv);
-deinit_qmi:
 	cnss_qmi_deinit(plat_priv);
 deinit_event_work:
 	cnss_event_work_deinit(plat_priv);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index e6b4eba..5dfd4a4 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -25,6 +25,7 @@
 #define TIME_CLOCK_FREQ_HZ		19200000
 #define CNSS_RAMDUMP_MAGIC		0x574C414E
 #define CNSS_RAMDUMP_VERSION		0
+#define MAX_FIRMWARE_NAME_LEN		20
 
 #define CNSS_EVENT_SYNC   BIT(0)
 #define CNSS_EVENT_UNINTERRUPTIBLE BIT(1)
@@ -361,6 +362,7 @@
 	struct completion power_up_complete;
 	struct completion cal_complete;
 	struct mutex dev_lock; /* mutex for register access through debugfs */
+	struct mutex driver_ops_lock; /* mutex for external driver ops */
 	u32 device_freq_hz;
 	u32 diag_reg_read_addr;
 	u32 diag_reg_read_mem_type;
@@ -368,7 +370,9 @@
 	u8 *diag_reg_read_buf;
 	u8 cal_done;
 	u8 powered_on;
-	char firmware_name[13];
+	u8 use_fw_path_with_prefix;
+	char firmware_name[MAX_FIRMWARE_NAME_LEN];
+	char fw_fallback_name[MAX_FIRMWARE_NAME_LEN];
 	struct completion rddm_complete;
 	struct completion recovery_complete;
 	struct cnss_control_params ctrl_params;
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 9bf676c..cc94890 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -38,7 +38,8 @@
 #define MHI_NODE_NAME			"qcom,mhi"
 #define MHI_MSI_NAME			"MHI"
 
-#define MAX_M3_FILE_NAME_LENGTH		13
+#define QCA6390_PATH_PREFIX		"qca6390/"
+#define QCA6490_PATH_PREFIX		"qca6490/"
 #define DEFAULT_M3_FILE_NAME		"m3.bin"
 #define DEFAULT_FW_FILE_NAME		"amss.bin"
 #define FW_V2_FILE_NAME			"amss20.bin"
@@ -2076,6 +2077,12 @@
 		return -EEXIST;
 	}
 
+	if (!driver_ops->id_table || !pci_dev_present(driver_ops->id_table)) {
+		cnss_pr_err("PCIe device ID 0x%x is not supported by the loading driver\n",
+			    pci_priv->device_id);
+		return -ENODEV;
+	}
+
 	if (!test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state))
 		goto register_driver;
 
@@ -2126,9 +2133,9 @@
 		return;
 	}
 
-	if (plat_priv->device_id == QCA6174_DEVICE_ID ||
-	    !(test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
-	      test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)))
+	mutex_lock(&plat_priv->driver_ops_lock);
+
+	if (plat_priv->device_id == QCA6174_DEVICE_ID)
 		goto skip_wait_power_up;
 
 	timeout = cnss_get_qmi_timeout(plat_priv);
@@ -2157,6 +2164,8 @@
 	cnss_driver_event_post(plat_priv,
 			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
 			       CNSS_EVENT_SYNC_UNKILLABLE, NULL);
+
+	mutex_unlock(&plat_priv->driver_ops_lock);
 }
 EXPORT_SYMBOL(cnss_wlan_unregister_driver);
 
@@ -3105,12 +3114,13 @@
 {
 	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
 	struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
-	char filename[MAX_M3_FILE_NAME_LENGTH];
+	char filename[MAX_FIRMWARE_NAME_LEN];
 	const struct firmware *fw_entry;
 	int ret = 0;
 
 	if (!m3_mem->va && !m3_mem->size) {
-		snprintf(filename, sizeof(filename), DEFAULT_M3_FILE_NAME);
+		cnss_pci_add_fw_prefix_name(pci_priv, filename,
+					    DEFAULT_M3_FILE_NAME);
 
 		ret = request_firmware(&fw_entry, filename,
 				       &pci_priv->pci_dev->dev);
@@ -4030,6 +4040,99 @@
 	cnss_pci_pm_runtime_put_noidle(pci_priv);
 }
 
+void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
+				 char *prefix_name, char *name)
+{
+	struct cnss_plat_data *plat_priv;
+
+	if (!pci_priv)
+		return;
+
+	plat_priv = pci_priv->plat_priv;
+
+	if (!plat_priv->use_fw_path_with_prefix) {
+		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
+		return;
+	}
+
+	switch (pci_priv->device_id) {
+	case QCA6390_DEVICE_ID:
+		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
+			  QCA6390_PATH_PREFIX "%s", name);
+		break;
+	case QCA6490_DEVICE_ID:
+		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
+			  QCA6490_PATH_PREFIX "%s", name);
+		break;
+	default:
+		scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
+		break;
+	}
+
+	cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
+}
+
+static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
+
+	plat_priv->device_version.family_number = mhi_ctrl->family_number;
+	plat_priv->device_version.device_number = mhi_ctrl->device_number;
+	plat_priv->device_version.major_version = mhi_ctrl->major_version;
+	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
+
+	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
+		    plat_priv->device_version.family_number,
+		    plat_priv->device_version.device_number,
+		    plat_priv->device_version.major_version,
+		    plat_priv->device_version.minor_version);
+
+	switch (pci_priv->device_id) {
+	case QCA6390_DEVICE_ID:
+		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
+			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
+				    pci_priv->device_id,
+				    plat_priv->device_version.major_version);
+			return -EINVAL;
+		}
+		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
+					    FW_V2_FILE_NAME);
+		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
+			 FW_V2_FILE_NAME);
+		break;
+	case QCA6490_DEVICE_ID:
+		switch (plat_priv->device_version.major_version) {
+		case FW_V2_NUMBER:
+			cnss_pci_add_fw_prefix_name(pci_priv,
+						    plat_priv->firmware_name,
+						    FW_V2_FILE_NAME);
+			snprintf(plat_priv->fw_fallback_name,
+				 MAX_FIRMWARE_NAME_LEN, FW_V2_FILE_NAME);
+			break;
+		default:
+			cnss_pci_add_fw_prefix_name(pci_priv,
+						    plat_priv->firmware_name,
+						    DEFAULT_FW_FILE_NAME);
+			snprintf(plat_priv->fw_fallback_name,
+				 MAX_FIRMWARE_NAME_LEN, DEFAULT_FW_FILE_NAME);
+			break;
+		}
+		break;
+	default:
+		cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
+					    DEFAULT_FW_FILE_NAME);
+		snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
+			 DEFAULT_FW_FILE_NAME);
+		break;
+	}
+
+	cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
+		    mhi_ctrl->fw_image, mhi_ctrl->fw_image_fallback);
+
+	return 0;
+}
+
 static char *cnss_mhi_notify_status_to_str(enum MHI_CB status)
 {
 	switch (status) {
@@ -4043,6 +4146,8 @@
 		return "FATAL_ERROR";
 	case MHI_CB_EE_MISSION_MODE:
 		return "MISSION_MODE";
+	case MHI_CB_FW_FALLBACK_IMG:
+		return "FW_FALLBACK";
 	default:
 		return "UNKNOWN";
 	}
@@ -4115,6 +4220,10 @@
 		cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
 		cnss_reason = CNSS_REASON_RDDM;
 		break;
+	case MHI_CB_FW_FALLBACK_IMG:
+		plat_priv->use_fw_path_with_prefix = false;
+		cnss_pci_update_fw_name(pci_priv);
+		return;
 	default:
 		cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
 		return;
@@ -4152,55 +4261,6 @@
 	return 0;
 }
 
-static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
-{
-	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
-	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;
-
-	plat_priv->device_version.family_number = mhi_ctrl->family_number;
-	plat_priv->device_version.device_number = mhi_ctrl->device_number;
-	plat_priv->device_version.major_version = mhi_ctrl->major_version;
-	plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
-
-	cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
-		    plat_priv->device_version.family_number,
-		    plat_priv->device_version.device_number,
-		    plat_priv->device_version.major_version,
-		    plat_priv->device_version.minor_version);
-
-	switch (pci_priv->device_id) {
-	case QCA6390_DEVICE_ID:
-		if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
-			cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
-				    pci_priv->device_id,
-				    plat_priv->device_version.major_version);
-			return -EINVAL;
-		}
-		scnprintf(plat_priv->firmware_name,
-			  sizeof(plat_priv->firmware_name), FW_V2_FILE_NAME);
-		mhi_ctrl->fw_image = plat_priv->firmware_name;
-		break;
-	case QCA6490_DEVICE_ID:
-		switch (plat_priv->device_version.major_version) {
-		case FW_V2_NUMBER:
-			scnprintf(plat_priv->firmware_name,
-				  sizeof(plat_priv->firmware_name),
-				  FW_V2_FILE_NAME);
-			break;
-		default:
-			break;
-		}
-
-		break;
-	default:
-		break;
-	}
-
-	cnss_pr_dbg("Firmware name is %s\n", mhi_ctrl->fw_image);
-
-	return 0;
-}
-
 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -4225,6 +4285,7 @@
 	mhi_ctrl->slot = PCI_SLOT(pci_dev->devfn);
 
 	mhi_ctrl->fw_image = plat_priv->firmware_name;
+	mhi_ctrl->fw_image_fallback = plat_priv->fw_fallback_name;
 
 	mhi_ctrl->regs = pci_priv->bar;
 	cnss_pr_dbg("BAR starts at %pa\n",
@@ -4355,8 +4416,6 @@
 	cnss_set_pci_priv(pci_dev, pci_priv);
 	plat_priv->device_id = pci_dev->device;
 	plat_priv->bus_priv = pci_priv;
-	snprintf(plat_priv->firmware_name, sizeof(plat_priv->firmware_name),
-		 DEFAULT_FW_FILE_NAME);
 	mutex_init(&pci_priv->bus_lock);
 
 	ret = of_reserved_mem_device_init(dev);
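Net effect of the firmware-name rework for a QCA6390 on a converged DT: firmware_name becomes "qca6390/amss20.bin" while fw_fallback_name stays "amss20.bin"; if MHI reports MHI_CB_FW_FALLBACK_IMG, use_fw_path_with_prefix is cleared and the names are recomputed without the prefix. MAX_FIRMWARE_NAME_LEN of 20 is exactly enough for the 8-character "qca6390/" prefix plus "amss20.bin" and the terminating NUL.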
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 2984273..a05ad7c 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -167,6 +167,8 @@
 int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
 int cnss_pci_init(struct cnss_plat_data *plat_priv);
 void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
+void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
+				 char *prefix_name, char *name);
 int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
 int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv);
 void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv);
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index ea73016..e7dc9ff 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -12,7 +12,6 @@
 
 #define WLFW_SERVICE_INS_ID_V01		1
 #define WLFW_CLIENT_ID			0x4b4e454c
-#define MAX_BDF_FILE_NAME		13
 #define BDF_FILE_NAME_PREFIX		"bdwlan"
 #define ELF_BDF_FILE_NAME		"bdwlan.elf"
 #define ELF_BDF_FILE_NAME_PREFIX	"bdwlan.e"
@@ -459,42 +458,43 @@
 				  u32 bdf_type, char *filename,
 				  u32 filename_len)
 {
+	char filename_tmp[MAX_FIRMWARE_NAME_LEN];
 	int ret = 0;
 
 	switch (bdf_type) {
 	case CNSS_BDF_ELF:
 		if (plat_priv->board_info.board_id == 0xFF)
-			snprintf(filename, filename_len, ELF_BDF_FILE_NAME);
+			snprintf(filename_tmp, filename_len, ELF_BDF_FILE_NAME);
 		else if (plat_priv->board_info.board_id < 0xFF)
-			snprintf(filename, filename_len,
+			snprintf(filename_tmp, filename_len,
 				 ELF_BDF_FILE_NAME_PREFIX "%02x",
 				 plat_priv->board_info.board_id);
 		else
-			snprintf(filename, filename_len,
+			snprintf(filename_tmp, filename_len,
 				 BDF_FILE_NAME_PREFIX "%02x.e%02x",
 				 plat_priv->board_info.board_id >> 8 & 0xFF,
 				 plat_priv->board_info.board_id & 0xFF);
 		break;
 	case CNSS_BDF_BIN:
 		if (plat_priv->board_info.board_id == 0xFF)
-			snprintf(filename, filename_len, BIN_BDF_FILE_NAME);
+			snprintf(filename_tmp, filename_len, BIN_BDF_FILE_NAME);
 		else if (plat_priv->board_info.board_id < 0xFF)
-			snprintf(filename, filename_len,
+			snprintf(filename_tmp, filename_len,
 				 BIN_BDF_FILE_NAME_PREFIX "%02x",
 				 plat_priv->board_info.board_id);
 		else
-			snprintf(filename, filename_len,
+			snprintf(filename_tmp, filename_len,
 				 BDF_FILE_NAME_PREFIX "%02x.b%02x",
 				 plat_priv->board_info.board_id >> 8 & 0xFF,
 				 plat_priv->board_info.board_id & 0xFF);
 		break;
 	case CNSS_BDF_REGDB:
-		snprintf(filename, filename_len, REGDB_FILE_NAME);
+		snprintf(filename_tmp, filename_len, REGDB_FILE_NAME);
 		break;
 	case CNSS_BDF_DUMMY:
 		cnss_pr_dbg("CNSS_BDF_DUMMY is set, sending dummy BDF\n");
-		snprintf(filename, filename_len, DUMMY_BDF_FILE_NAME);
-		ret = MAX_BDF_FILE_NAME;
+		snprintf(filename_tmp, filename_len, DUMMY_BDF_FILE_NAME);
+		ret = MAX_FIRMWARE_NAME_LEN;
 		break;
 	default:
 		cnss_pr_err("Invalid BDF type: %d\n",
@@ -502,6 +502,10 @@
 		ret = -EINVAL;
 		break;
 	}
+
+	if (ret >= 0)
+		cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
+
 	return ret;
 }
 
@@ -511,7 +515,7 @@
 	struct wlfw_bdf_download_req_msg_v01 *req;
 	struct wlfw_bdf_download_resp_msg_v01 *resp;
 	struct qmi_txn txn;
-	char filename[MAX_BDF_FILE_NAME];
+	char filename[MAX_FIRMWARE_NAME_LEN];
 	const struct firmware *fw_entry = NULL;
 	const u8 *temp;
 	unsigned int remaining;
@@ -534,7 +538,7 @@
 				     filename, sizeof(filename));
 	if (ret > 0) {
 		temp = DUMMY_BDF_FILE_NAME;
-		remaining = MAX_BDF_FILE_NAME;
+		remaining = MAX_FIRMWARE_NAME_LEN;
 		goto bypass_bdf;
 	} else if (ret < 0) {
 		goto err_req_fw;
@@ -2092,6 +2096,12 @@
 	if (!plat_priv)
 		return -ENODEV;
 
+	if (test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state)) {
+		cnss_pr_err("Unexpected WLFW server arrival\n");
+		CNSS_ASSERT(0);
+		return -EINVAL;
+	}
+
 	ret = cnss_wlfw_connect_to_server(plat_priv, data);
 	if (ret < 0)
 		goto out;
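Worked example of the BDF naming above: board_id 0xFF yields "bdwlan.elf", board_id 0x12 yields "bdwlan.e12", and board_id 0x0104 yields "bdwlan01.e04"; cnss_bus_add_fw_prefix_name() then produces e.g. "qca6390/bdwlan.e12" when the converged-DT prefix is in use.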
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 1d49640..ab47b32 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -22,6 +22,7 @@
 #include <linux/compat.h>
 #endif
 #include <linux/jiffies.h>
+#include <linux/regulator/consumer.h>
 
 struct nqx_platform_data {
 	unsigned int irq_gpio;
@@ -29,6 +30,8 @@
 	unsigned int clkreq_gpio;
 	unsigned int firm_gpio;
 	unsigned int ese_gpio;
+	int vdd_levels[2];
+	int max_current;
 	const char *clk_src_name;
 	/* NFC_CLK pin voting state */
 	bool clk_pin_voting;
@@ -67,6 +70,8 @@
 	/* NFC_IRQ wake-up state */
 	bool			irq_wake_up;
 	bool			cold_reset_rsp_pending;
+	bool			is_vreg_enabled;
+	bool			is_ese_session_active;
 	uint8_t			cold_reset_status;
 	spinlock_t		irq_enabled_lock;
 	unsigned int		count_irq;
@@ -81,6 +86,7 @@
 	size_t kbuflen;
 	u8 *kbuf;
 	struct nqx_platform_data *pdata;
+	struct regulator *reg;
 };
 
 static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
@@ -455,6 +461,7 @@
 		} else {
 			dev_dbg(&nqx_dev->client->dev, "en_gpio already HIGH\n");
 		}
+		nqx_dev->is_ese_session_active = true;
 		r = 0;
 	} else if (arg == ESE_POWER_OFF) {
 		if (!nqx_dev->nfc_ven_enabled) {
@@ -465,6 +472,7 @@
 		} else {
 			dev_dbg(&nqx_dev->client->dev, "keep en_gpio high as NFC is enabled\n");
 		}
+		nqx_dev->is_ese_session_active = false;
 		r = 0;
 	} else if (arg == ESE_COLD_RESET) {
 		// set default value for status as failure
@@ -618,6 +626,123 @@
 	return r;
 }
 
+/**
+ * nfc_ldo_vote()
+ * @nqx_dev: NFC device containing regulator handle
+ *
+ * LDO voting based on voltage and current entries in DT
+ *
+ * Return: 0 on success and -ve on failure
+ */
+static int nfc_ldo_vote(struct nqx_dev *nqx_dev)
+{
+	struct device *dev = &nqx_dev->client->dev;
+	int ret;
+
+	ret = regulator_set_voltage(nqx_dev->reg,
+			nqx_dev->pdata->vdd_levels[0],
+			nqx_dev->pdata->vdd_levels[1]);
+	if (ret < 0) {
+		dev_err(dev, "%s:set voltage failed\n", __func__);
+		return ret;
+	}
+
+	/* pass expected current from NFC in uA */
+	ret = regulator_set_load(nqx_dev->reg, nqx_dev->pdata->max_current);
+	if (ret < 0) {
+		dev_err(dev, "%s:set load failed\n", __func__);
+		return ret;
+	}
+
+	ret = regulator_enable(nqx_dev->reg);
+	if (ret < 0)
+		dev_err(dev, "%s:regulator_enable failed\n", __func__);
+	else
+		nqx_dev->is_vreg_enabled = true;
+	return ret;
+}
+
+/**
+ * nfc_ldo_config()
+ * @client: I2C client instance, containing node to read DT entry
+ * @nqx_dev: NFC device containing regulator handle
+ *
+ * Configure the LDO if an entry is present in the DT file, otherwise
+ * return with success as the LDO is optional
+ *
+ * Return: 0 on success and -ve on failure
+ */
+static int nfc_ldo_config(struct i2c_client *client, struct nqx_dev *nqx_dev)
+{
+	int r;
+
+	if (of_get_property(client->dev.of_node, NFC_LDO_SUPPLY_NAME, NULL)) {
+		// Get the regulator handle
+		nqx_dev->reg = regulator_get(&client->dev,
+					NFC_LDO_SUPPLY_DT_NAME);
+		if (IS_ERR(nqx_dev->reg)) {
+			r = PTR_ERR(nqx_dev->reg);
+			nqx_dev->reg = NULL;
+			dev_err(&client->dev,
+				"%s: regulator_get failed, ret = %d\n",
+				__func__, r);
+			return r;
+		}
+	} else {
+		nqx_dev->reg = NULL;
+		dev_err(&client->dev,
+			"%s: regulator entry not present\n", __func__);
+		// return success as it's optional to configure LDO
+		return 0;
+	}
+
+	// LDO config supported by platform DT
+	r = nfc_ldo_vote(nqx_dev);
+	if (r < 0) {
+		dev_err(&client->dev,
+			"%s: LDO voting failed, ret = %d\n", __func__, r);
+		regulator_put(nqx_dev->reg);
+	}
+	return r;
+}
+
+/**
+ * nfc_ldo_unvote()
+ * @nqx_dev: NFC device containing regulator handle
+ *
+ * Set voltage and load to zero and disable the regulator
+ *
+ * Return: 0 on success and -ve on failure
+ */
+static int nfc_ldo_unvote(struct nqx_dev *nqx_dev)
+{
+	struct device *dev = &nqx_dev->client->dev;
+	int ret;
+
+	if (!nqx_dev->is_vreg_enabled) {
+		dev_err(dev, "%s: regulator already disabled\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = regulator_disable(nqx_dev->reg);
+	if (ret < 0) {
+		dev_err(dev, "%s:regulator_disable failed\n", __func__);
+		return ret;
+	}
+	nqx_dev->is_vreg_enabled = false;
+
+	ret = regulator_set_voltage(nqx_dev->reg, 0, NFC_VDDIO_MAX);
+	if (ret < 0) {
+		dev_err(dev, "%s:set voltage failed\n", __func__);
+		return ret;
+	}
+
+	ret = regulator_set_load(nqx_dev->reg, 0);
+	if (ret < 0)
+		dev_err(dev, "%s:set load failed\n", __func__);
+	return ret;
+}
+
 static int nfc_open(struct inode *inode, struct file *filp)
 {
 	struct nqx_dev *nqx_dev = container_of(inode->i_cdev,
@@ -1219,9 +1344,29 @@
 	else
 		pdata->clk_pin_voting = true;
 
+	// optional property
+	r = of_property_read_u32_array(np, NFC_LDO_VOL_DT_NAME,
+			(u32 *) pdata->vdd_levels,
+			ARRAY_SIZE(pdata->vdd_levels));
+	if (r) {
+		dev_err(dev, "error reading NFC VDDIO min and max value\n");
+		// set default as per datasheet
+		pdata->vdd_levels[0] = NFC_VDDIO_MIN;
+		pdata->vdd_levels[1] = NFC_VDDIO_MAX;
+	}
+
+	// optional property
+	r = of_property_read_u32(np, NFC_LDO_CUR_DT_NAME, &pdata->max_current);
+	if (r) {
+		dev_err(dev, "error reading NFC current value\n");
+		// set default as per datasheet
+		pdata->max_current = NFC_CURRENT_MAX;
+	}
+
 	pdata->clkreq_gpio = of_get_named_gpio(np, "qcom,nq-clkreq", 0);
 
-	return r;
+	// return success as above properties are optional
+	return 0;
 }
 
 static inline int gpio_input_init(const struct device * const dev,
@@ -1466,6 +1611,12 @@
 	}
 	nqx_disable_irq(nqx_dev);
 
+	r = nfc_ldo_config(client, nqx_dev);
+	if (r) {
+		dev_err(&client->dev, "%s: LDO config failed\n", __func__);
+		goto err_ldo_config_failed;
+	}
+
 	/*
 	 * To be efficient we need to test whether nfcc hardware is physically
 	 * present before attempting further hardware initialisation.
@@ -1507,6 +1658,7 @@
 	nqx_dev->irq_wake_up = false;
 	nqx_dev->cold_reset_rsp_pending = false;
 	nqx_dev->nfc_enabled = false;
+	nqx_dev->is_ese_session_active = false;
 
 	dev_err(&client->dev,
 	"%s: probing NFCC NQxxx exited successfully\n",
@@ -1518,6 +1670,11 @@
 	unregister_reboot_notifier(&nfcc_notifier);
 #endif
 err_request_hw_check_failed:
+	if (nqx_dev->reg) {
+		nfc_ldo_unvote(nqx_dev);
+		regulator_put(nqx_dev->reg);
+	}
+err_ldo_config_failed:
 	free_irq(client->irq, nqx_dev);
 err_request_irq_failed:
 	device_destroy(nqx_dev->nqx_class, nqx_dev->devno);
@@ -1568,6 +1725,13 @@
 		goto err;
 	}
 
+	gpio_set_value(nqx_dev->en_gpio, 0);
+	// HW dependent delay before LDO goes into LPM mode
+	usleep_range(10000, 10100);
+	if (nqx_dev->reg) {
+		ret = nfc_ldo_unvote(nqx_dev);
+		regulator_put(nqx_dev->reg);
+	}
 	unregister_reboot_notifier(&nfcc_notifier);
 	free_irq(client->irq, nqx_dev);
 	cdev_del(&nqx_dev->c_dev);
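Illustrative DT fragment for the new optional LDO support, using the property names defined in nq-nci.h below; the bus node and regulator phandle are hypothetical:

	&qupv3_se1_i2c {
		nq@28 {
			/* existing nq-nci properties elided */
			qcom,nq-vdd-1p8-supply = <&pm660_l11>;	/* hypothetical LDO */
			qcom,nq-vdd-1p8-voltage = <1650000 1950000>;	/* uV */
			qcom,nq-vdd-1p8-current = <157000>;	/* uA */
		};
	};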
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
index 8d807ec..dee13be 100644
--- a/drivers/nfc/nq-nci.h
+++ b/drivers/nfc/nq-nci.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __NQ_NCI_H
@@ -50,6 +50,15 @@
 #define PAYLOAD_LENGTH_MAX		(256)
 #define BYTE				(0x8)
 #define NCI_IDENTIFIER			(0x10)
+#define NFC_LDO_SUPPLY_DT_NAME		"qcom,nq-vdd-1p8"
+#define NFC_LDO_SUPPLY_NAME		"qcom,nq-vdd-1p8-supply"
+#define NFC_LDO_VOL_DT_NAME		"qcom,nq-vdd-1p8-voltage"
+#define NFC_LDO_CUR_DT_NAME		"qcom,nq-vdd-1p8-current"
+
+// as per SN1x0 datasheet
+#define NFC_VDDIO_MIN			1650000 // in uV
+#define NFC_VDDIO_MAX			1950000 // in uV
+#define NFC_CURRENT_MAX			157000 // in uA
 
 enum ese_ioctl_request {
 	/* eSE POWER ON */
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index dc6efb1..e773c73 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -201,4 +201,12 @@
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
 	  Technologies Inc SCUBA platform.
 
+config PINCTRL_SDM660
+	tristate "Qualcomm Technologies, Inc SDM660 pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for
+	  the Qualcomm Technologies Inc TLMM block found on the
+	  Qualcomm Technologies, Inc. SDM660 platform.
 endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 996270d..e178e71 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -25,3 +25,4 @@
 obj-$(CONFIG_PINCTRL_BENGAL) += pinctrl-bengal.o
 obj-$(CONFIG_PINCTRL_LAGOON) += pinctrl-lagoon.o
 obj-$(CONFIG_PINCTRL_SCUBA) += pinctrl-scuba.o
+obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
new file mode 100644
index 0000000..2d900b4
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -0,0 +1,1750 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname)					\
+	[msm_mux_##fname] = {				\
+		.name = #fname,				\
+		.groups = fname##_groups,		\
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
+
+#define NORTH	0x00900000
+#define CENTER	0x00500000
+#define SOUTH	0x00100000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+	{					        \
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			msm_mux_gpio, /* gpio mode */	\
+			msm_mux_##f1,			\
+			msm_mux_##f2,			\
+			msm_mux_##f3,			\
+			msm_mux_##f4,			\
+			msm_mux_##f5,			\
+			msm_mux_##f6,			\
+			msm_mux_##f7,			\
+			msm_mux_##f8,			\
+			msm_mux_##f9			\
+		},				        \
+		.nfuncs = 10,				\
+		.ctl_reg = base + REG_SIZE * id,	\
+		.io_reg = base + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,		\
+		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_target_kpss_val = 3,	\
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+	}
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = ctl,				\
+		.io_reg = 0,				\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = pull,			\
+		.drv_bit = drv,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = -1,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+#define UFS_RESET(pg_name, offset)			\
+	{						\
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = offset,			\
+		.io_reg = offset + 0x4,			\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = 3,				\
+		.drv_bit = 0,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = 0,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+static const struct pinctrl_pin_desc sdm660_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(58, "GPIO_58"),
+	PINCTRL_PIN(59, "GPIO_59"),
+	PINCTRL_PIN(60, "GPIO_60"),
+	PINCTRL_PIN(61, "GPIO_61"),
+	PINCTRL_PIN(62, "GPIO_62"),
+	PINCTRL_PIN(63, "GPIO_63"),
+	PINCTRL_PIN(64, "GPIO_64"),
+	PINCTRL_PIN(65, "GPIO_65"),
+	PINCTRL_PIN(66, "GPIO_66"),
+	PINCTRL_PIN(67, "GPIO_67"),
+	PINCTRL_PIN(68, "GPIO_68"),
+	PINCTRL_PIN(69, "GPIO_69"),
+	PINCTRL_PIN(70, "GPIO_70"),
+	PINCTRL_PIN(71, "GPIO_71"),
+	PINCTRL_PIN(72, "GPIO_72"),
+	PINCTRL_PIN(73, "GPIO_73"),
+	PINCTRL_PIN(74, "GPIO_74"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(83, "GPIO_83"),
+	PINCTRL_PIN(84, "GPIO_84"),
+	PINCTRL_PIN(85, "GPIO_85"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(92, "GPIO_92"),
+	PINCTRL_PIN(93, "GPIO_93"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+	PINCTRL_PIN(100, "GPIO_100"),
+	PINCTRL_PIN(101, "GPIO_101"),
+	PINCTRL_PIN(102, "GPIO_102"),
+	PINCTRL_PIN(103, "GPIO_103"),
+	PINCTRL_PIN(104, "GPIO_104"),
+	PINCTRL_PIN(105, "GPIO_105"),
+	PINCTRL_PIN(106, "GPIO_106"),
+	PINCTRL_PIN(107, "GPIO_107"),
+	PINCTRL_PIN(108, "GPIO_108"),
+	PINCTRL_PIN(109, "GPIO_109"),
+	PINCTRL_PIN(110, "GPIO_110"),
+	PINCTRL_PIN(111, "GPIO_111"),
+	PINCTRL_PIN(112, "GPIO_112"),
+	PINCTRL_PIN(113, "GPIO_113"),
+	PINCTRL_PIN(114, "SDC1_CLK"),
+	PINCTRL_PIN(115, "SDC1_CMD"),
+	PINCTRL_PIN(116, "SDC1_DATA"),
+	PINCTRL_PIN(117, "SDC2_CLK"),
+	PINCTRL_PIN(118, "SDC2_CMD"),
+	PINCTRL_PIN(119, "SDC2_DATA"),
+	PINCTRL_PIN(120, "SDC1_RCLK"),
+	PINCTRL_PIN(121, "UFS_RESET"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+	static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+
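+/* Fixed-function pads occupy the pin numbers above the 114 GPIOs. */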
+static const unsigned int sdc1_clk_pins[] = { 114 };
+static const unsigned int sdc1_cmd_pins[] = { 115 };
+static const unsigned int sdc1_data_pins[] = { 116 };
+static const unsigned int sdc2_clk_pins[] = { 117 };
+static const unsigned int sdc2_cmd_pins[] = { 118 };
+static const unsigned int sdc2_data_pins[] = { 119 };
+static const unsigned int sdc1_rclk_pins[] = { 120 };
+static const unsigned int ufs_reset_pins[] = { 121 };
+
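+/*
+ * One enum entry per selectable pad function; msm_mux_NA marks the
+ * unused mux positions in the sdm660_groups[] table further down.
+ */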
+enum sdm660_functions {
+	msm_mux_blsp_spi1,
+	msm_mux_gpio,
+	msm_mux_blsp_uim1,
+	msm_mux_tgu_ch0,
+	msm_mux_qdss_gpio4,
+	msm_mux_atest_gpsadc1,
+	msm_mux_blsp_uart1,
+	msm_mux_SMB_STAT,
+	msm_mux_phase_flag14,
+	msm_mux_blsp_i2c2,
+	msm_mux_phase_flag31,
+	msm_mux_blsp_spi3,
+	msm_mux_blsp_spi3_cs1,
+	msm_mux_blsp_spi3_cs2,
+	msm_mux_wlan1_adc1,
+	msm_mux_atest_usb13,
+	msm_mux_tgu_ch1,
+	msm_mux_qdss_gpio5,
+	msm_mux_atest_gpsadc0,
+	msm_mux_blsp_i2c1,
+	msm_mux_ddr_bist,
+	msm_mux_atest_tsens2,
+	msm_mux_atest_usb1,
+	msm_mux_blsp_spi2,
+	msm_mux_blsp_uim2,
+	msm_mux_phase_flag3,
+	msm_mux_bimc_dte1,
+	msm_mux_wlan1_adc0,
+	msm_mux_atest_usb12,
+	msm_mux_bimc_dte0,
+	msm_mux_blsp_i2c3,
+	msm_mux_wlan2_adc1,
+	msm_mux_atest_usb11,
+	msm_mux_dbg_out,
+	msm_mux_wlan2_adc0,
+	msm_mux_atest_usb10,
+	msm_mux_RCM_MARKER,
+	msm_mux_blsp_spi4,
+	msm_mux_pri_mi2s,
+	msm_mux_phase_flag26,
+	msm_mux_qdss_cti0_a,
+	msm_mux_qdss_cti0_b,
+	msm_mux_qdss_cti1_a,
+	msm_mux_qdss_cti1_b,
+	msm_mux_DP_HOT,
+	msm_mux_pri_mi2s_ws,
+	msm_mux_phase_flag27,
+	msm_mux_blsp_i2c4,
+	msm_mux_phase_flag28,
+	msm_mux_blsp_uart5,
+	msm_mux_blsp_spi5,
+	msm_mux_blsp_uim5,
+	msm_mux_phase_flag5,
+	msm_mux_blsp_i2c5,
+	msm_mux_blsp_spi6,
+	msm_mux_blsp_uart2,
+	msm_mux_blsp_uim6,
+	msm_mux_phase_flag11,
+	msm_mux_vsense_data0,
+	msm_mux_blsp_i2c6,
+	msm_mux_phase_flag12,
+	msm_mux_vsense_data1,
+	msm_mux_phase_flag13,
+	msm_mux_vsense_mode,
+	msm_mux_blsp_spi7,
+	msm_mux_blsp_uart6_a,
+	msm_mux_blsp_uart6_b,
+	msm_mux_sec_mi2s,
+	msm_mux_sndwire_clk,
+	msm_mux_phase_flag17,
+	msm_mux_vsense_clkout,
+	msm_mux_sndwire_data,
+	msm_mux_phase_flag18,
+	msm_mux_WSA_SPKR,
+	msm_mux_blsp_i2c7,
+	msm_mux_phase_flag19,
+	msm_mux_vfr_1,
+	msm_mux_phase_flag20,
+	msm_mux_NFC_INT,
+	msm_mux_blsp_spi8_cs1,
+	msm_mux_blsp_spi8_cs2,
+	msm_mux_m_voc,
+	msm_mux_phase_flag21,
+	msm_mux_NFC_EN,
+	msm_mux_phase_flag22,
+	msm_mux_NFC_DWL,
+	msm_mux_blsp_i2c8_a,
+	msm_mux_blsp_i2c8_b,
+	msm_mux_phase_flag23,
+	msm_mux_NFC_ESE,
+	msm_mux_pwr_modem,
+	msm_mux_phase_flag24,
+	msm_mux_qdss_gpio,
+	msm_mux_cam_mclk,
+	msm_mux_pwr_nav,
+	msm_mux_qdss_gpio0,
+	msm_mux_qspi_data0,
+	msm_mux_pwr_crypto,
+	msm_mux_qdss_gpio1,
+	msm_mux_qspi_data1,
+	msm_mux_agera_pll,
+	msm_mux_qdss_gpio2,
+	msm_mux_qspi_data2,
+	msm_mux_jitter_bist,
+	msm_mux_qdss_gpio3,
+	msm_mux_qdss_gpio7,
+	msm_mux_FL_R3LED,
+	msm_mux_CCI_TIMER0,
+	msm_mux_FL_STROBE,
+	msm_mux_CCI_TIMER1,
+	msm_mux_CAM_LDO1,
+	msm_mux_mdss_vsync0,
+	msm_mux_mdss_vsync1,
+	msm_mux_mdss_vsync2,
+	msm_mux_mdss_vsync3,
+	msm_mux_qdss_gpio9,
+	msm_mux_CAM_IRQ,
+	msm_mux_atest_usb2,
+	msm_mux_cci_i2c,
+	msm_mux_pll_bypassnl,
+	msm_mux_atest_tsens,
+	msm_mux_atest_usb21,
+	msm_mux_pll_reset,
+	msm_mux_atest_usb23,
+	msm_mux_qdss_gpio6,
+	msm_mux_CCI_TIMER3,
+	msm_mux_CCI_ASYNC,
+	msm_mux_qspi_cs,
+	msm_mux_qdss_gpio10,
+	msm_mux_CAM3_STANDBY,
+	msm_mux_CCI_TIMER4,
+	msm_mux_qdss_gpio11,
+	msm_mux_CAM_LDO2,
+	msm_mux_cci_async,
+	msm_mux_qdss_gpio12,
+	msm_mux_CAM0_RST,
+	msm_mux_qdss_gpio13,
+	msm_mux_CAM1_RST,
+	msm_mux_qspi_clk,
+	msm_mux_phase_flag30,
+	msm_mux_qdss_gpio14,
+	msm_mux_qspi_resetn,
+	msm_mux_phase_flag1,
+	msm_mux_qdss_gpio15,
+	msm_mux_CAM0_STANDBY,
+	msm_mux_phase_flag2,
+	msm_mux_CAM1_STANDBY,
+	msm_mux_phase_flag9,
+	msm_mux_CAM2_STANDBY,
+	msm_mux_qspi_data3,
+	msm_mux_phase_flag15,
+	msm_mux_qdss_gpio8,
+	msm_mux_CAM3_RST,
+	msm_mux_CCI_TIMER2,
+	msm_mux_phase_flag16,
+	msm_mux_LCD0_RESET,
+	msm_mux_phase_flag6,
+	msm_mux_SD_CARD,
+	msm_mux_phase_flag29,
+	msm_mux_DP_EN,
+	msm_mux_phase_flag25,
+	msm_mux_USBC_ORIENTATION,
+	msm_mux_phase_flag10,
+	msm_mux_atest_usb20,
+	msm_mux_gcc_gp1,
+	msm_mux_phase_flag4,
+	msm_mux_atest_usb22,
+	msm_mux_USB_PHY,
+	msm_mux_gcc_gp2,
+	msm_mux_atest_char,
+	msm_mux_mdp_vsync,
+	msm_mux_gcc_gp3,
+	msm_mux_atest_char3,
+	msm_mux_FORCE_TOUCH,
+	msm_mux_cri_trng0,
+	msm_mux_atest_char2,
+	msm_mux_cri_trng1,
+	msm_mux_atest_char1,
+	msm_mux_AUDIO_USBC,
+	msm_mux_audio_ref,
+	msm_mux_MDP_VSYNC,
+	msm_mux_cri_trng,
+	msm_mux_atest_char0,
+	msm_mux_US_EURO,
+	msm_mux_LCD_BACKLIGHT,
+	msm_mux_blsp_spi8_a,
+	msm_mux_blsp_spi8_b,
+	msm_mux_sp_cmu,
+	msm_mux_nav_pps_a,
+	msm_mux_nav_pps_b,
+	msm_mux_nav_pps_c,
+	msm_mux_gps_tx_a,
+	msm_mux_gps_tx_b,
+	msm_mux_gps_tx_c,
+	msm_mux_adsp_ext,
+	msm_mux_TS_RESET,
+	msm_mux_ssc_irq,
+	msm_mux_isense_dbg,
+	msm_mux_phase_flag0,
+	msm_mux_phase_flag7,
+	msm_mux_phase_flag8,
+	msm_mux_tsense_pwm1,
+	msm_mux_tsense_pwm2,
+	msm_mux_SENSOR_RST,
+	msm_mux_WMSS_RESETN,
+	msm_mux_HAPTICS_PWM,
+	msm_mux_GPS_eLNA,
+	msm_mux_mss_lte,
+	msm_mux_uim2_data,
+	msm_mux_uim2_clk,
+	msm_mux_uim2_reset,
+	msm_mux_uim2_present,
+	msm_mux_uim1_data,
+	msm_mux_uim1_clk,
+	msm_mux_uim1_reset,
+	msm_mux_uim1_present,
+	msm_mux_uim_batt,
+	msm_mux_pa_indicator,
+	msm_mux_ldo_en,
+	msm_mux_ldo_update,
+	msm_mux_qlink_request,
+	msm_mux_qlink_enable,
+	msm_mux_prng_rosc,
+	msm_mux_LCD_PWR,
+	msm_mux_NA,
+};
+
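+/*
+ * For each function, list the pin groups on which it can be selected.
+ * These tables back the FUNCTION() entries in sdm660_functions[] below.
+ */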
+static const char * const blsp_spi1_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio46",
+};
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+	"gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+	"gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+	"gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+	"gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+	"gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+	"gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+	"gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+	"gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+	"gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+	"gpio111", "gpio112", "gpio113",
+};
+static const char * const blsp_uim1_groups[] = {
+	"gpio0", "gpio1",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio0",
+};
+static const char * const qdss_gpio4_groups[] = {
+	"gpio0", "gpio36",
+};
+static const char * const atest_gpsadc1_groups[] = {
+	"gpio0",
+};
+static const char * const blsp_uart1_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const SMB_STAT_groups[] = {
+	"gpio5",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio5",
+};
+static const char * const blsp_i2c2_groups[] = {
+	"gpio6", "gpio7",
+};
+static const char * const phase_flag31_groups[] = {
+	"gpio6",
+};
+static const char * const blsp_spi3_groups[] = {
+	"gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const blsp_spi3_cs1_groups[] = {
+	"gpio30",
+};
+static const char * const blsp_spi3_cs2_groups[] = {
+	"gpio65",
+};
+static const char * const wlan1_adc1_groups[] = {
+	"gpio8",
+};
+static const char * const atest_usb13_groups[] = {
+	"gpio8",
+};
+static const char * const tgu_ch1_groups[] = {
+	"gpio1",
+};
+static const char * const qdss_gpio5_groups[] = {
+	"gpio1", "gpio37",
+};
+static const char * const atest_gpsadc0_groups[] = {
+	"gpio1",
+};
+static const char * const blsp_i2c1_groups[] = {
+	"gpio2", "gpio3",
+};
+static const char * const ddr_bist_groups[] = {
+	"gpio3", "gpio8", "gpio9", "gpio10",
+};
+static const char * const atest_tsens2_groups[] = {
+	"gpio3",
+};
+static const char * const atest_usb1_groups[] = {
+	"gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+	"gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uim2_groups[] = {
+	"gpio4", "gpio5",
+};
+static const char * const phase_flag3_groups[] = {
+	"gpio4",
+};
+static const char * const bimc_dte1_groups[] = {
+	"gpio8", "gpio10",
+};
+static const char * const wlan1_adc0_groups[] = {
+	"gpio9",
+};
+static const char * const atest_usb12_groups[] = {
+	"gpio9",
+};
+static const char * const bimc_dte0_groups[] = {
+	"gpio9", "gpio11",
+};
+static const char * const blsp_i2c3_groups[] = {
+	"gpio10", "gpio11",
+};
+static const char * const wlan2_adc1_groups[] = {
+	"gpio10",
+};
+static const char * const atest_usb11_groups[] = {
+	"gpio10",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio11",
+};
+static const char * const wlan2_adc0_groups[] = {
+	"gpio11",
+};
+static const char * const atest_usb10_groups[] = {
+	"gpio11",
+};
+static const char * const RCM_MARKER_groups[] = {
+	"gpio12", "gpio13",
+};
+static const char * const blsp_spi4_groups[] = {
+	"gpio12", "gpio13", "gpio14", "gpio15",
+};
+static const char * const pri_mi2s_groups[] = {
+	"gpio12", "gpio14", "gpio15", "gpio61",
+};
+static const char * const phase_flag26_groups[] = {
+	"gpio12",
+};
+static const char * const qdss_cti0_a_groups[] = {
+	"gpio49", "gpio50",
+};
+static const char * const qdss_cti0_b_groups[] = {
+	"gpio13", "gpio21",
+};
+static const char * const qdss_cti1_a_groups[] = {
+	"gpio53", "gpio55",
+};
+static const char * const qdss_cti1_b_groups[] = {
+	"gpio12", "gpio66",
+};
+static const char * const DP_HOT_groups[] = {
+	"gpio13",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+	"gpio13",
+};
+static const char * const phase_flag27_groups[] = {
+	"gpio13",
+};
+static const char * const blsp_i2c4_groups[] = {
+	"gpio14", "gpio15",
+};
+static const char * const phase_flag28_groups[] = {
+	"gpio14",
+};
+static const char * const blsp_uart5_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_spi5_groups[] = {
+	"gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_uim5_groups[] = {
+	"gpio16", "gpio17",
+};
+static const char * const phase_flag5_groups[] = {
+	"gpio17",
+};
+static const char * const blsp_i2c5_groups[] = {
+	"gpio18", "gpio19",
+};
+static const char * const blsp_spi6_groups[] = {
+	"gpio49", "gpio52", "gpio22", "gpio23",
+};
+static const char * const blsp_uart2_groups[] = {
+	"gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const blsp_uim6_groups[] = {
+	"gpio20", "gpio21",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio21",
+};
+static const char * const vsense_data0_groups[] = {
+	"gpio21",
+};
+static const char * const blsp_i2c6_groups[] = {
+	"gpio22", "gpio23",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio22",
+};
+static const char * const vsense_data1_groups[] = {
+	"gpio22",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio23",
+};
+static const char * const vsense_mode_groups[] = {
+	"gpio23",
+};
+static const char * const blsp_spi7_groups[] = {
+	"gpio24", "gpio25", "gpio26", "gpio27",
+};
+static const char * const blsp_uart6_a_groups[] = {
+	"gpio24", "gpio25", "gpio26", "gpio27",
+};
+static const char * const blsp_uart6_b_groups[] = {
+	"gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char * const sec_mi2s_groups[] = {
+	"gpio24", "gpio25", "gpio26", "gpio27", "gpio62",
+};
+static const char * const sndwire_clk_groups[] = {
+	"gpio24",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio24",
+};
+static const char * const vsense_clkout_groups[] = {
+	"gpio24",
+};
+static const char * const sndwire_data_groups[] = {
+	"gpio25",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio25",
+};
+static const char * const WSA_SPKR_groups[] = {
+	"gpio26", "gpio27",
+};
+static const char * const blsp_i2c7_groups[] = {
+	"gpio26", "gpio27",
+};
+static const char * const phase_flag19_groups[] = {
+	"gpio26",
+};
+static const char * const vfr_1_groups[] = {
+	"gpio27",
+};
+static const char * const phase_flag20_groups[] = {
+	"gpio27",
+};
+static const char * const NFC_INT_groups[] = {
+	"gpio28",
+};
+static const char * const blsp_spi8_a_groups[] = {
+	"gpio28", "gpio29", "gpio30", "gpio31",
+};
+static const char * const blsp_spi8_b_groups[] = {
+	"gpio40", "gpio41", "gpio44", "gpio52",
+};
+static const char * const m_voc_groups[] = {
+	"gpio28",
+};
+static const char * const phase_flag21_groups[] = {
+	"gpio28",
+};
+static const char * const NFC_EN_groups[] = {
+	"gpio29",
+};
+static const char * const phase_flag22_groups[] = {
+	"gpio29",
+};
+static const char * const NFC_DWL_groups[] = {
+	"gpio30",
+};
+static const char * const blsp_i2c8_a_groups[] = {
+	"gpio30", "gpio31",
+};
+static const char * const blsp_i2c8_b_groups[] = {
+	"gpio44", "gpio52",
+};
+static const char * const phase_flag23_groups[] = {
+	"gpio30",
+};
+static const char * const NFC_ESE_groups[] = {
+	"gpio31",
+};
+static const char * const pwr_modem_groups[] = {
+	"gpio31",
+};
+static const char * const phase_flag24_groups[] = {
+	"gpio31",
+};
+static const char * const qdss_gpio_groups[] = {
+	"gpio31", "gpio52", "gpio68", "gpio69",
+};
+static const char * const cam_mclk_groups[] = {
+	"gpio32", "gpio33", "gpio34", "gpio35",
+};
+static const char * const pwr_nav_groups[] = {
+	"gpio32",
+};
+static const char * const qdss_gpio0_groups[] = {
+	"gpio32", "gpio67",
+};
+static const char * const qspi_data0_groups[] = {
+	"gpio33",
+};
+static const char * const pwr_crypto_groups[] = {
+	"gpio33",
+};
+static const char * const qdss_gpio1_groups[] = {
+	"gpio33", "gpio63",
+};
+static const char * const qspi_data1_groups[] = {
+	"gpio34",
+};
+static const char * const agera_pll_groups[] = {
+	"gpio34", "gpio36",
+};
+static const char * const qdss_gpio2_groups[] = {
+	"gpio34", "gpio64",
+};
+static const char * const qspi_data2_groups[] = {
+	"gpio35",
+};
+static const char * const jitter_bist_groups[] = {
+	"gpio35",
+};
+static const char * const qdss_gpio3_groups[] = {
+	"gpio35", "gpio56",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio39", "gpio71",
+};
+static const char * const FL_R3LED_groups[] = {
+	"gpio40",
+};
+static const char * const CCI_TIMER0_groups[] = {
+	"gpio40",
+};
+static const char * const FL_STROBE_groups[] = {
+	"gpio41",
+};
+static const char * const CCI_TIMER1_groups[] = {
+	"gpio41",
+};
+static const char * const CAM_LDO1_groups[] = {
+	"gpio42",
+};
+static const char * const mdss_vsync0_groups[] = {
+	"gpio42",
+};
+static const char * const mdss_vsync1_groups[] = {
+	"gpio42",
+};
+static const char * const mdss_vsync2_groups[] = {
+	"gpio42",
+};
+static const char * const mdss_vsync3_groups[] = {
+	"gpio42",
+};
+static const char * const qdss_gpio9_groups[] = {
+	"gpio42", "gpio76",
+};
+static const char * const CAM_IRQ_groups[] = {
+	"gpio43",
+};
+static const char * const atest_usb2_groups[] = {
+	"gpio35",
+};
+static const char * const cci_i2c_groups[] = {
+	"gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const pll_bypassnl_groups[] = {
+	"gpio36",
+};
+static const char * const atest_tsens_groups[] = {
+	"gpio36",
+};
+static const char * const atest_usb21_groups[] = {
+	"gpio36",
+};
+static const char * const pll_reset_groups[] = {
+	"gpio37",
+};
+static const char * const atest_usb23_groups[] = {
+	"gpio37",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio38", "gpio70",
+};
+static const char * const CCI_TIMER3_groups[] = {
+	"gpio43",
+};
+static const char * const CCI_ASYNC_groups[] = {
+	"gpio43", "gpio44",
+};
+static const char * const qspi_cs_groups[] = {
+	"gpio43", "gpio50",
+};
+static const char * const qdss_gpio10_groups[] = {
+	"gpio43", "gpio77",
+};
+static const char * const CAM3_STANDBY_groups[] = {
+	"gpio44",
+};
+static const char * const CCI_TIMER4_groups[] = {
+	"gpio44",
+};
+static const char * const qdss_gpio11_groups[] = {
+	"gpio44", "gpio79",
+};
+static const char * const CAM_LDO2_groups[] = {
+	"gpio45",
+};
+static const char * const cci_async_groups[] = {
+	"gpio45",
+};
+static const char * const qdss_gpio12_groups[] = {
+	"gpio45", "gpio80",
+};
+static const char * const CAM0_RST_groups[] = {
+	"gpio46",
+};
+static const char * const qdss_gpio13_groups[] = {
+	"gpio46", "gpio78",
+};
+static const char * const CAM1_RST_groups[] = {
+	"gpio47",
+};
+static const char * const qspi_clk_groups[] = {
+	"gpio47",
+};
+static const char * const phase_flag30_groups[] = {
+	"gpio47",
+};
+static const char * const qdss_gpio14_groups[] = {
+	"gpio47", "gpio72",
+};
+static const char * const qspi_resetn_groups[] = {
+	"gpio48",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio48",
+};
+static const char * const qdss_gpio15_groups[] = {
+	"gpio48", "gpio73",
+};
+static const char * const CAM0_STANDBY_groups[] = {
+	"gpio49",
+};
+static const char * const phase_flag2_groups[] = {
+	"gpio49",
+};
+static const char * const CAM1_STANDBY_groups[] = {
+	"gpio50",
+};
+static const char * const phase_flag9_groups[] = {
+	"gpio50",
+};
+static const char * const CAM2_STANDBY_groups[] = {
+	"gpio51",
+};
+static const char * const qspi_data3_groups[] = {
+	"gpio51",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio51",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio51", "gpio75",
+};
+static const char * const CAM3_RST_groups[] = {
+	"gpio52",
+};
+static const char * const CCI_TIMER2_groups[] = {
+	"gpio52",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio52",
+};
+static const char * const LCD0_RESET_groups[] = {
+	"gpio53",
+};
+static const char * const phase_flag6_groups[] = {
+	"gpio53",
+};
+static const char * const SD_CARD_groups[] = {
+	"gpio54",
+};
+static const char * const phase_flag29_groups[] = {
+	"gpio54",
+};
+static const char * const DP_EN_groups[] = {
+	"gpio55",
+};
+static const char * const phase_flag25_groups[] = {
+	"gpio55",
+};
+static const char * const USBC_ORIENTATION_groups[] = {
+	"gpio56",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio56",
+};
+static const char * const atest_usb20_groups[] = {
+	"gpio56",
+};
+static const char * const gcc_gp1_groups[] = {
+	"gpio57", "gpio78",
+};
+static const char * const phase_flag4_groups[] = {
+	"gpio57",
+};
+static const char * const atest_usb22_groups[] = {
+	"gpio57",
+};
+static const char * const USB_PHY_groups[] = {
+	"gpio58",
+};
+static const char * const gcc_gp2_groups[] = {
+	"gpio58", "gpio81",
+};
+static const char * const atest_char_groups[] = {
+	"gpio58",
+};
+static const char * const mdp_vsync_groups[] = {
+	"gpio59", "gpio74",
+};
+static const char * const gcc_gp3_groups[] = {
+	"gpio59", "gpio82",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio59",
+};
+static const char * const FORCE_TOUCH_groups[] = {
+	"gpio60", "gpio73",
+};
+static const char * const cri_trng0_groups[] = {
+	"gpio60",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio60",
+};
+static const char * const cri_trng1_groups[] = {
+	"gpio61",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio61",
+};
+static const char * const AUDIO_USBC_groups[] = {
+	"gpio62",
+};
+static const char * const audio_ref_groups[] = {
+	"gpio62",
+};
+static const char * const MDP_VSYNC_groups[] = {
+	"gpio62",
+};
+static const char * const cri_trng_groups[] = {
+	"gpio62",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio62",
+};
+static const char * const US_EURO_groups[] = {
+	"gpio63",
+};
+static const char * const LCD_BACKLIGHT_groups[] = {
+	"gpio64",
+};
+static const char * const blsp_spi8_cs1_groups[] = {
+	"gpio64",
+};
+static const char * const blsp_spi8_cs2_groups[] = {
+	"gpio76",
+};
+static const char * const sp_cmu_groups[] = {
+	"gpio64",
+};
+static const char * const nav_pps_a_groups[] = {
+	"gpio65",
+};
+static const char * const nav_pps_b_groups[] = {
+	"gpio98",
+};
+static const char * const nav_pps_c_groups[] = {
+	"gpio80",
+};
+static const char * const gps_tx_a_groups[] = {
+	"gpio65",
+};
+static const char * const gps_tx_b_groups[] = {
+	"gpio98",
+};
+static const char * const gps_tx_c_groups[] = {
+	"gpio80",
+};
+static const char * const adsp_ext_groups[] = {
+	"gpio65",
+};
+static const char * const TS_RESET_groups[] = {
+	"gpio66",
+};
+static const char * const ssc_irq_groups[] = {
+	"gpio67", "gpio68", "gpio69", "gpio70", "gpio71", "gpio72", "gpio74",
+	"gpio75", "gpio76",
+};
+static const char * const isense_dbg_groups[] = {
+	"gpio68",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio68",
+};
+static const char * const phase_flag7_groups[] = {
+	"gpio69",
+};
+static const char * const phase_flag8_groups[] = {
+	"gpio70",
+};
+static const char * const tsense_pwm1_groups[] = {
+	"gpio71",
+};
+static const char * const tsense_pwm2_groups[] = {
+	"gpio71",
+};
+static const char * const SENSOR_RST_groups[] = {
+	"gpio77",
+};
+static const char * const WMSS_RESETN_groups[] = {
+	"gpio78",
+};
+static const char * const HAPTICS_PWM_groups[] = {
+	"gpio79",
+};
+static const char * const GPS_eLNA_groups[] = {
+	"gpio80",
+};
+static const char * const mss_lte_groups[] = {
+	"gpio81", "gpio82",
+};
+static const char * const uim2_data_groups[] = {
+	"gpio83",
+};
+static const char * const uim2_clk_groups[] = {
+	"gpio84",
+};
+static const char * const uim2_reset_groups[] = {
+	"gpio85",
+};
+static const char * const uim2_present_groups[] = {
+	"gpio86",
+};
+static const char * const uim1_data_groups[] = {
+	"gpio87",
+};
+static const char * const uim1_clk_groups[] = {
+	"gpio88",
+};
+static const char * const uim1_reset_groups[] = {
+	"gpio89",
+};
+static const char * const uim1_present_groups[] = {
+	"gpio90",
+};
+static const char * const uim_batt_groups[] = {
+	"gpio91",
+};
+static const char * const pa_indicator_groups[] = {
+	"gpio92",
+};
+static const char * const ldo_en_groups[] = {
+	"gpio97",
+};
+static const char * const ldo_update_groups[] = {
+	"gpio98",
+};
+static const char * const qlink_request_groups[] = {
+	"gpio99",
+};
+static const char * const qlink_enable_groups[] = {
+	"gpio100",
+};
+static const char * const prng_rosc_groups[] = {
+	"gpio102",
+};
+static const char * const LCD_PWR_groups[] = {
+	"gpio113",
+};
+
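+/*
+ * FUNCTION(fname), defined alongside the other helper macros earlier in
+ * this file, ties each msm_mux_ enum entry to its fname##_groups[] list.
+ */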
+static const struct msm_function sdm660_functions[] = {
+	FUNCTION(blsp_spi1),
+	FUNCTION(gpio),
+	FUNCTION(blsp_uim1),
+	FUNCTION(tgu_ch0),
+	FUNCTION(qdss_gpio4),
+	FUNCTION(atest_gpsadc1),
+	FUNCTION(blsp_uart1),
+	FUNCTION(SMB_STAT),
+	FUNCTION(phase_flag14),
+	FUNCTION(blsp_i2c2),
+	FUNCTION(phase_flag31),
+	FUNCTION(blsp_spi3),
+	FUNCTION(blsp_spi3_cs1),
+	FUNCTION(blsp_spi3_cs2),
+	FUNCTION(wlan1_adc1),
+	FUNCTION(atest_usb13),
+	FUNCTION(tgu_ch1),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(atest_gpsadc0),
+	FUNCTION(blsp_i2c1),
+	FUNCTION(ddr_bist),
+	FUNCTION(atest_tsens2),
+	FUNCTION(atest_usb1),
+	FUNCTION(blsp_spi2),
+	FUNCTION(blsp_uim2),
+	FUNCTION(phase_flag3),
+	FUNCTION(bimc_dte1),
+	FUNCTION(wlan1_adc0),
+	FUNCTION(atest_usb12),
+	FUNCTION(bimc_dte0),
+	FUNCTION(blsp_i2c3),
+	FUNCTION(wlan2_adc1),
+	FUNCTION(atest_usb11),
+	FUNCTION(dbg_out),
+	FUNCTION(wlan2_adc0),
+	FUNCTION(atest_usb10),
+	FUNCTION(RCM_MARKER),
+	FUNCTION(blsp_spi4),
+	FUNCTION(pri_mi2s),
+	FUNCTION(phase_flag26),
+	FUNCTION(qdss_cti0_a),
+	FUNCTION(qdss_cti0_b),
+	FUNCTION(qdss_cti1_a),
+	FUNCTION(qdss_cti1_b),
+	FUNCTION(DP_HOT),
+	FUNCTION(pri_mi2s_ws),
+	FUNCTION(phase_flag27),
+	FUNCTION(blsp_i2c4),
+	FUNCTION(phase_flag28),
+	FUNCTION(blsp_uart5),
+	FUNCTION(blsp_spi5),
+	FUNCTION(blsp_uim5),
+	FUNCTION(phase_flag5),
+	FUNCTION(blsp_i2c5),
+	FUNCTION(blsp_spi6),
+	FUNCTION(blsp_uart2),
+	FUNCTION(blsp_uim6),
+	FUNCTION(phase_flag11),
+	FUNCTION(vsense_data0),
+	FUNCTION(blsp_i2c6),
+	FUNCTION(phase_flag12),
+	FUNCTION(vsense_data1),
+	FUNCTION(phase_flag13),
+	FUNCTION(vsense_mode),
+	FUNCTION(blsp_spi7),
+	FUNCTION(blsp_uart6_a),
+	FUNCTION(blsp_uart6_b),
+	FUNCTION(sec_mi2s),
+	FUNCTION(sndwire_clk),
+	FUNCTION(phase_flag17),
+	FUNCTION(vsense_clkout),
+	FUNCTION(sndwire_data),
+	FUNCTION(phase_flag18),
+	FUNCTION(WSA_SPKR),
+	FUNCTION(blsp_i2c7),
+	FUNCTION(phase_flag19),
+	FUNCTION(vfr_1),
+	FUNCTION(phase_flag20),
+	FUNCTION(NFC_INT),
+	FUNCTION(blsp_spi8_cs1),
+	FUNCTION(blsp_spi8_cs2),
+	FUNCTION(m_voc),
+	FUNCTION(phase_flag21),
+	FUNCTION(NFC_EN),
+	FUNCTION(phase_flag22),
+	FUNCTION(NFC_DWL),
+	FUNCTION(blsp_i2c8_a),
+	FUNCTION(blsp_i2c8_b),
+	FUNCTION(phase_flag23),
+	FUNCTION(NFC_ESE),
+	FUNCTION(pwr_modem),
+	FUNCTION(phase_flag24),
+	FUNCTION(qdss_gpio),
+	FUNCTION(cam_mclk),
+	FUNCTION(pwr_nav),
+	FUNCTION(qdss_gpio0),
+	FUNCTION(qspi_data0),
+	FUNCTION(pwr_crypto),
+	FUNCTION(qdss_gpio1),
+	FUNCTION(qspi_data1),
+	FUNCTION(agera_pll),
+	FUNCTION(qdss_gpio2),
+	FUNCTION(qspi_data2),
+	FUNCTION(jitter_bist),
+	FUNCTION(qdss_gpio3),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(FL_R3LED),
+	FUNCTION(CCI_TIMER0),
+	FUNCTION(FL_STROBE),
+	FUNCTION(CCI_TIMER1),
+	FUNCTION(CAM_LDO1),
+	FUNCTION(mdss_vsync0),
+	FUNCTION(mdss_vsync1),
+	FUNCTION(mdss_vsync2),
+	FUNCTION(mdss_vsync3),
+	FUNCTION(qdss_gpio9),
+	FUNCTION(CAM_IRQ),
+	FUNCTION(atest_usb2),
+	FUNCTION(cci_i2c),
+	FUNCTION(pll_bypassnl),
+	FUNCTION(atest_tsens),
+	FUNCTION(atest_usb21),
+	FUNCTION(pll_reset),
+	FUNCTION(atest_usb23),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(CCI_TIMER3),
+	FUNCTION(CCI_ASYNC),
+	FUNCTION(qspi_cs),
+	FUNCTION(qdss_gpio10),
+	FUNCTION(CAM3_STANDBY),
+	FUNCTION(CCI_TIMER4),
+	FUNCTION(qdss_gpio11),
+	FUNCTION(CAM_LDO2),
+	FUNCTION(cci_async),
+	FUNCTION(qdss_gpio12),
+	FUNCTION(CAM0_RST),
+	FUNCTION(qdss_gpio13),
+	FUNCTION(CAM1_RST),
+	FUNCTION(qspi_clk),
+	FUNCTION(phase_flag30),
+	FUNCTION(qdss_gpio14),
+	FUNCTION(qspi_resetn),
+	FUNCTION(phase_flag1),
+	FUNCTION(qdss_gpio15),
+	FUNCTION(CAM0_STANDBY),
+	FUNCTION(phase_flag2),
+	FUNCTION(CAM1_STANDBY),
+	FUNCTION(phase_flag9),
+	FUNCTION(CAM2_STANDBY),
+	FUNCTION(qspi_data3),
+	FUNCTION(phase_flag15),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(CAM3_RST),
+	FUNCTION(CCI_TIMER2),
+	FUNCTION(phase_flag16),
+	FUNCTION(LCD0_RESET),
+	FUNCTION(phase_flag6),
+	FUNCTION(SD_CARD),
+	FUNCTION(phase_flag29),
+	FUNCTION(DP_EN),
+	FUNCTION(phase_flag25),
+	FUNCTION(USBC_ORIENTATION),
+	FUNCTION(phase_flag10),
+	FUNCTION(atest_usb20),
+	FUNCTION(gcc_gp1),
+	FUNCTION(phase_flag4),
+	FUNCTION(atest_usb22),
+	FUNCTION(USB_PHY),
+	FUNCTION(gcc_gp2),
+	FUNCTION(atest_char),
+	FUNCTION(mdp_vsync),
+	FUNCTION(gcc_gp3),
+	FUNCTION(atest_char3),
+	FUNCTION(FORCE_TOUCH),
+	FUNCTION(cri_trng0),
+	FUNCTION(atest_char2),
+	FUNCTION(cri_trng1),
+	FUNCTION(atest_char1),
+	FUNCTION(AUDIO_USBC),
+	FUNCTION(audio_ref),
+	FUNCTION(MDP_VSYNC),
+	FUNCTION(cri_trng),
+	FUNCTION(atest_char0),
+	FUNCTION(US_EURO),
+	FUNCTION(LCD_BACKLIGHT),
+	FUNCTION(blsp_spi8_a),
+	FUNCTION(blsp_spi8_b),
+	FUNCTION(sp_cmu),
+	FUNCTION(nav_pps_a),
+	FUNCTION(nav_pps_b),
+	FUNCTION(nav_pps_c),
+	FUNCTION(gps_tx_a),
+	FUNCTION(gps_tx_b),
+	FUNCTION(gps_tx_c),
+	FUNCTION(adsp_ext),
+	FUNCTION(TS_RESET),
+	FUNCTION(ssc_irq),
+	FUNCTION(isense_dbg),
+	FUNCTION(phase_flag0),
+	FUNCTION(phase_flag7),
+	FUNCTION(phase_flag8),
+	FUNCTION(tsense_pwm1),
+	FUNCTION(tsense_pwm2),
+	FUNCTION(SENSOR_RST),
+	FUNCTION(WMSS_RESETN),
+	FUNCTION(HAPTICS_PWM),
+	FUNCTION(GPS_eLNA),
+	FUNCTION(mss_lte),
+	FUNCTION(uim2_data),
+	FUNCTION(uim2_clk),
+	FUNCTION(uim2_reset),
+	FUNCTION(uim2_present),
+	FUNCTION(uim1_data),
+	FUNCTION(uim1_clk),
+	FUNCTION(uim1_reset),
+	FUNCTION(uim1_present),
+	FUNCTION(uim_batt),
+	FUNCTION(pa_indicator),
+	FUNCTION(ldo_en),
+	FUNCTION(ldo_update),
+	FUNCTION(qlink_request),
+	FUNCTION(qlink_enable),
+	FUNCTION(prng_rosc),
+	FUNCTION(LCD_PWR),
+};
+
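+/*
+ * Each PINGROUP() entry gives the pin's register tile (NORTH/CENTER/
+ * SOUTH base) followed by up to nine alternate functions; NA pads the
+ * unused mux slots so every group carries ten functions including the
+ * default GPIO mode.
+ */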
+static const struct msm_pingroup sdm660_groups[] = {
+	PINGROUP(0, SOUTH, blsp_spi1, blsp_uart1, blsp_uim1, tgu_ch0, NA, NA,
+		 qdss_gpio4, atest_gpsadc1, NA),
+	PINGROUP(1, SOUTH, blsp_spi1, blsp_uart1, blsp_uim1, tgu_ch1, NA, NA,
+		 qdss_gpio5, atest_gpsadc0, NA),
+	PINGROUP(2, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(3, SOUTH, blsp_spi1, blsp_uart1, blsp_i2c1, ddr_bist, NA, NA,
+		 atest_tsens2, atest_usb1, NA),
+	PINGROUP(4, NORTH, blsp_spi2, blsp_uim2, blsp_uart2, phase_flag3, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(5, SOUTH, blsp_spi2, blsp_uim2, blsp_uart2, phase_flag14, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(6, SOUTH, blsp_spi2, blsp_i2c2, blsp_uart2, phase_flag31, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(7, SOUTH, blsp_spi2, blsp_i2c2, blsp_uart2, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(8, NORTH, blsp_spi3, ddr_bist, NA, NA, NA, wlan1_adc1,
+		 atest_usb13, bimc_dte1, NA),
+	PINGROUP(9, NORTH, blsp_spi3, ddr_bist, NA, NA, NA, wlan1_adc0,
+		 atest_usb12, bimc_dte0, NA),
+	PINGROUP(10, NORTH, blsp_spi3, blsp_i2c3, ddr_bist, NA, NA, wlan2_adc1,
+		 atest_usb11, bimc_dte1, NA),
+	PINGROUP(11, NORTH, blsp_spi3, blsp_i2c3, NA, dbg_out, wlan2_adc0,
+		 atest_usb10, bimc_dte0, NA, NA),
+	PINGROUP(12, NORTH, blsp_spi4, pri_mi2s, NA, phase_flag26, qdss_cti1_b,
+		 NA, NA, NA, NA),
+	PINGROUP(13, NORTH, blsp_spi4, DP_HOT, pri_mi2s_ws, NA, NA,
+		 phase_flag27, qdss_cti0_b, NA, NA),
+	PINGROUP(14, NORTH, blsp_spi4, blsp_i2c4, pri_mi2s, NA, phase_flag28,
+		 NA, NA, NA, NA),
+	PINGROUP(15, NORTH, blsp_spi4, blsp_i2c4, pri_mi2s, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(16, CENTER, blsp_uart5, blsp_spi5, blsp_uim5, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(17, CENTER, blsp_uart5, blsp_spi5, blsp_uim5, NA, phase_flag5,
+		 NA, NA, NA, NA),
+	PINGROUP(18, CENTER, blsp_uart5, blsp_spi5, blsp_i2c5, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(19, CENTER, blsp_uart5, blsp_spi5, blsp_i2c5, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(20, SOUTH, NA, NA, blsp_uim6, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(21, SOUTH, NA, NA, blsp_uim6, NA, phase_flag11,
+		 qdss_cti0_b, vsense_data0, NA, NA),
+	PINGROUP(22, CENTER, blsp_spi6, NA, blsp_i2c6, NA,
+		 phase_flag12, vsense_data1, NA, NA, NA),
+	PINGROUP(23, CENTER, blsp_spi6, NA, blsp_i2c6, NA,
+		 phase_flag13, vsense_mode, NA, NA, NA),
+	PINGROUP(24, NORTH, blsp_spi7, blsp_uart6_a, sec_mi2s, sndwire_clk, NA,
+		 NA, phase_flag17, vsense_clkout, NA),
+	PINGROUP(25, NORTH, blsp_spi7, blsp_uart6_a, sec_mi2s, sndwire_data, NA,
+		 NA, phase_flag18, NA, NA),
+	PINGROUP(26, NORTH, blsp_spi7, blsp_uart6_a, blsp_i2c7, sec_mi2s, NA,
+		 phase_flag19, NA, NA, NA),
+	PINGROUP(27, NORTH, blsp_spi7, blsp_uart6_a, blsp_i2c7, vfr_1, sec_mi2s,
+		 NA, phase_flag20, NA, NA),
+	PINGROUP(28, CENTER, blsp_spi8_a, blsp_uart6_b, m_voc, NA, phase_flag21,
+		 NA, NA, NA, NA),
+	PINGROUP(29, CENTER, blsp_spi8_a, blsp_uart6_b, NA, NA, phase_flag22,
+		 NA, NA, NA, NA),
+	PINGROUP(30, CENTER, blsp_spi8_a, blsp_uart6_b, blsp_i2c8_a,
+		 blsp_spi3_cs1, NA, phase_flag23, NA, NA, NA),
+	PINGROUP(31, CENTER, blsp_spi8_a, blsp_uart6_b, blsp_i2c8_a, pwr_modem,
+		 NA, phase_flag24, qdss_gpio, NA, NA),
+	PINGROUP(32, SOUTH, cam_mclk, pwr_nav, NA, NA, qdss_gpio0, NA, NA, NA,
+		 NA),
+	PINGROUP(33, SOUTH, cam_mclk, qspi_data0, pwr_crypto, NA, NA,
+		 qdss_gpio1, NA, NA, NA),
+	PINGROUP(34, SOUTH, cam_mclk, qspi_data1, agera_pll, NA, NA,
+		 qdss_gpio2, NA, NA, NA),
+	PINGROUP(35, SOUTH, cam_mclk, qspi_data2, jitter_bist, NA, NA,
+		 qdss_gpio3, NA, atest_usb2, NA),
+	PINGROUP(36, SOUTH, cci_i2c, pll_bypassnl, agera_pll, NA, NA,
+		 qdss_gpio4, atest_tsens, atest_usb21, NA),
+	PINGROUP(37, SOUTH, cci_i2c, pll_reset, NA, NA, qdss_gpio5,
+		 atest_usb23, NA, NA, NA),
+	PINGROUP(38, SOUTH, cci_i2c, NA, NA, qdss_gpio6, NA, NA, NA, NA, NA),
+	PINGROUP(39, SOUTH, cci_i2c, NA, NA, qdss_gpio7, NA, NA, NA, NA, NA),
+	PINGROUP(40, SOUTH, CCI_TIMER0, NA, blsp_spi8_b, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(41, SOUTH, CCI_TIMER1, NA, blsp_spi8_b, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(42, SOUTH, mdss_vsync0, mdss_vsync1, mdss_vsync2, mdss_vsync3,
+		 NA, NA, qdss_gpio9, NA, NA),
+	PINGROUP(43, SOUTH, CCI_TIMER3, CCI_ASYNC, qspi_cs, NA, NA,
+		 qdss_gpio10, NA, NA, NA),
+	PINGROUP(44, SOUTH, CCI_TIMER4, CCI_ASYNC, blsp_spi8_b, blsp_i2c8_b, NA,
+		 NA, qdss_gpio11, NA, NA),
+	PINGROUP(45, SOUTH, cci_async, NA, NA, qdss_gpio12, NA, NA, NA, NA, NA),
+	PINGROUP(46, SOUTH, blsp_spi1, NA, NA, qdss_gpio13, NA, NA, NA, NA, NA),
+	PINGROUP(47, SOUTH, qspi_clk, NA, phase_flag30, qdss_gpio14, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(48, SOUTH, NA, phase_flag1, qdss_gpio15, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(49, SOUTH, blsp_spi6, phase_flag2, qdss_cti0_a, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(50, SOUTH, qspi_cs, NA, phase_flag9, qdss_cti0_a, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(51, SOUTH, qspi_data3, NA, phase_flag15, qdss_gpio8, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(52, SOUTH, CCI_TIMER2, blsp_spi8_b, blsp_i2c8_b, blsp_spi6,
+		 phase_flag16, qdss_gpio, NA, NA, NA),
+	PINGROUP(53, NORTH, NA, phase_flag6, qdss_cti1_a, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(54, NORTH, NA, NA, phase_flag29, NA, NA, NA, NA, NA, NA),
+	PINGROUP(55, SOUTH, NA, phase_flag25, qdss_cti1_a, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(56, SOUTH, NA, phase_flag10, qdss_gpio3, NA, atest_usb20, NA,
+		 NA, NA, NA),
+	PINGROUP(57, SOUTH, gcc_gp1, NA, phase_flag4, atest_usb22, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(58, SOUTH, USB_PHY, gcc_gp2, NA, NA, atest_char, NA, NA, NA,
+		 NA),
+	PINGROUP(59, NORTH, mdp_vsync, gcc_gp3, NA, NA, atest_char3, NA, NA,
+		 NA, NA),
+	PINGROUP(60, NORTH, cri_trng0, NA, NA, atest_char2, NA, NA, NA, NA, NA),
+	PINGROUP(61, NORTH, pri_mi2s, cri_trng1, NA, NA, atest_char1, NA, NA,
+		 NA, NA),
+	PINGROUP(62, NORTH, sec_mi2s, audio_ref, MDP_VSYNC, cri_trng, NA, NA,
+		 atest_char0, NA, NA),
+	PINGROUP(63, NORTH, NA, NA, NA, qdss_gpio1, NA, NA, NA, NA, NA),
+	PINGROUP(64, SOUTH, blsp_spi8_cs1, sp_cmu, NA, NA, qdss_gpio2, NA, NA,
+		 NA, NA),
+	PINGROUP(65, SOUTH, NA, nav_pps_a, nav_pps_a, gps_tx_a, blsp_spi3_cs2,
+		 adsp_ext, NA, NA, NA),
+	PINGROUP(66, NORTH, NA, NA, qdss_cti1_b, NA, NA, NA, NA, NA, NA),
+	PINGROUP(67, NORTH, NA, NA, qdss_gpio0, NA, NA, NA, NA, NA, NA),
+	PINGROUP(68, NORTH, isense_dbg, NA, phase_flag0, qdss_gpio, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(69, NORTH, NA, phase_flag7, qdss_gpio, NA, NA, NA, NA, NA, NA),
+	PINGROUP(70, NORTH, NA, phase_flag8, qdss_gpio6, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(71, NORTH, NA, NA, qdss_gpio7, tsense_pwm1, tsense_pwm2, NA,
+		 NA, NA, NA),
+	PINGROUP(72, NORTH, NA, qdss_gpio14, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(73, NORTH, NA, NA, qdss_gpio15, NA, NA, NA, NA, NA, NA),
+	PINGROUP(74, NORTH, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(75, NORTH, NA, NA, qdss_gpio8, NA, NA, NA, NA, NA, NA),
+	PINGROUP(76, NORTH, blsp_spi8_cs2, NA, NA, NA, qdss_gpio9, NA, NA, NA,
+		 NA),
+	PINGROUP(77, NORTH, NA, NA, qdss_gpio10, NA, NA, NA, NA, NA, NA),
+	PINGROUP(78, NORTH, gcc_gp1, NA, qdss_gpio13, NA, NA, NA, NA, NA, NA),
+	PINGROUP(79, SOUTH, NA, NA, qdss_gpio11, NA, NA, NA, NA, NA, NA),
+	PINGROUP(80, SOUTH, nav_pps_c, nav_pps_c, gps_tx_c, NA, NA, qdss_gpio12,
+		 NA, NA, NA),
+	PINGROUP(81, CENTER, mss_lte, gcc_gp2, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(82, CENTER, mss_lte, gcc_gp3, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(83, SOUTH, uim2_data, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(84, SOUTH, uim2_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(85, SOUTH, uim2_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(86, SOUTH, uim2_present, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(87, SOUTH, uim1_data, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(88, SOUTH, uim1_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(89, SOUTH, uim1_reset, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(90, SOUTH, uim1_present, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(91, SOUTH, uim_batt, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(92, SOUTH, NA, NA, pa_indicator, NA, NA, NA, NA, NA, NA),
+	PINGROUP(93, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(94, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(95, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(96, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(97, SOUTH, NA, ldo_en, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(98, SOUTH, NA, nav_pps_b, nav_pps_b, gps_tx_b, ldo_update, NA,
+		 NA, NA, NA),
+	PINGROUP(99, SOUTH, qlink_request, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(100, SOUTH, qlink_enable, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(101, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(102, SOUTH, NA, prng_rosc, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(103, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(104, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(105, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(106, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(107, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(108, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(109, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(110, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(111, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(112, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(113, SOUTH, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+	SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6),
+	SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6),
+	SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0),
+	UFS_RESET(ufs_reset, 0x9a3000),
+};
+
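+/* Aggregate SoC description handed to the common MSM pinctrl core. */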
+static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
+	.pins = sdm660_pins,
+	.npins = ARRAY_SIZE(sdm660_pins),
+	.functions = sdm660_functions,
+	.nfunctions = ARRAY_SIZE(sdm660_functions),
+	.groups = sdm660_groups,
+	.ngroups = ARRAY_SIZE(sdm660_groups),
+	.ngpios = 114,
+};
+
+static int sdm660_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &sdm660_pinctrl);
+}
+
+static const struct of_device_id sdm660_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdm660-pinctrl", },
+	{ },
+};
+
+static struct platform_driver sdm660_pinctrl_driver = {
+	.driver = {
+		.name = "sdm660-pinctrl",
+		.of_match_table = sdm660_pinctrl_of_match,
+	},
+	.probe = sdm660_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init sdm660_pinctrl_init(void)
+{
+	return platform_driver_register(&sdm660_pinctrl_driver);
+}
+arch_initcall(sdm660_pinctrl_init);
+
+static void __exit sdm660_pinctrl_exit(void)
+{
+	platform_driver_unregister(&sdm660_pinctrl_driver);
+}
+module_exit(sdm660_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sdm660 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sdm660_pinctrl_of_match);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 4fe58bd..25cf988 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -87,7 +87,7 @@
 
 #define IPA_QMAP_ID_BYTE 0
 
-#define IPA_TX_MAX_DESC (20)
+#define IPA_TX_MAX_DESC (50)
 
 static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
@@ -197,6 +197,15 @@
 	ipa3_wq_write_done_common(sys, tx_pkt);
 }
 
+static void ipa3_tasklet_schd_work(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+
+	sys = container_of(work, struct ipa3_sys_context, tasklet_work);
+	if (atomic_read(&sys->xmit_eot_cnt))
+		tasklet_schedule(&sys->tasklet);
+}
+
 /**
  * ipa_write_done() - this function will be (eventually) called when a Tx
  * operation is complete
@@ -235,10 +244,13 @@
 		 * to watchdog bark. For avoiding these scenarios exit from
 		 * tasklet after reaching max limit.
 		 */
-		if (max_tx_pkt == IPA_TX_MAX_DESC)
+		if (max_tx_pkt >= IPA_TX_MAX_DESC)
 			break;
 	}
 	spin_unlock_bh(&sys->spinlock);
+
+	if (max_tx_pkt >= IPA_TX_MAX_DESC)
+		queue_work(sys->tasklet_wq, &sys->tasklet_work);
 }
 
 
@@ -1040,6 +1052,16 @@
 			goto fail_wq2;
 		}
 
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipataskletwq%d",
+				sys_in->client);
+		ep->sys->tasklet_wq = alloc_workqueue(buff,
+				WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
+		if (!ep->sys->tasklet_wq) {
+			IPAERR("failed to create rep wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq3;
+		}
 		INIT_LIST_HEAD(&ep->sys->head_desc_list);
 		INIT_LIST_HEAD(&ep->sys->rcycl_list);
 		spin_lock_init(&ep->sys->spinlock);
@@ -1088,6 +1110,8 @@
 	atomic_set(&ep->sys->xmit_eot_cnt, 0);
 	tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
 			(unsigned long) ep->sys);
+	INIT_WORK(&ep->sys->tasklet_work,
+		ipa3_tasklet_schd_work);
 	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
 	if (ipa3_assign_policy(sys_in, ep->sys)) {
 		IPAERR("failed to sys ctx for client %d\n", sys_in->client);
@@ -1273,6 +1297,8 @@
 fail_gen2:
 	ipa_pm_deregister(ep->sys->pm_hdl);
 fail_pm:
+	destroy_workqueue(ep->sys->tasklet_wq);
+fail_wq3:
 	destroy_workqueue(ep->sys->repl_wq);
 fail_wq2:
 	destroy_workqueue(ep->sys->wq);
@@ -1402,6 +1428,8 @@
 	}
 	if (ep->sys->repl_wq)
 		flush_workqueue(ep->sys->repl_wq);
+	if (ep->sys->tasklet_wq)
+		flush_workqueue(ep->sys->tasklet_wq);
 	if (IPA_CLIENT_IS_CONS(ep->client))
 		ipa3_cleanup_rx(ep->sys);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
index 43fb446..1b900a2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
@@ -2235,6 +2235,81 @@
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
 }
 
+static ssize_t ipa_debugfs_enable_disable_drop_stats(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	unsigned int pipe_num = 0;
+	bool enable_pipe = true;
+	u32 pipe_bitmask = ipa3_ctx->hw_stats.drop.init.enabled_bitmask;
+	char separator = ',';
+	int i, j;
+	bool is_pipe = false;
+	ssize_t ret;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (sizeof(dbg_buff) < count + 1) {
+		ret = -EFAULT;
+		goto bail;
+	}
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing) {
+		ret = -EFAULT;
+		goto bail;
+	}
+	dbg_buff[count] = '\0';
+	IPADBG("data is %s", dbg_buff);
+
+	i = 0;
+	while (i < count && dbg_buff[i] != ' ')
+		i++;
+	j = i;
+	i++;
+	if (i < count) {
+		if (dbg_buff[i] == '0') {
+			enable_pipe = false;
+			IPADBG("Drop stats will be disabled for pipes:");
+		}
+	}
+
+	for (i = 0; i < j; i++) {
+		if (dbg_buff[i] >= '0' && dbg_buff[i] <= '9') {
+			pipe_num = (pipe_num * 10) + (dbg_buff[i] - '0');
+			is_pipe = true;
+		}
+		if (dbg_buff[i] == separator) {
+			if (pipe_num < ipa3_ctx->ipa_num_pipes
+				&& ipa3_get_client_by_pipe(pipe_num) <
+				IPA_CLIENT_MAX) {
+				IPADBG("pipe number %u\n", pipe_num);
+				if (enable_pipe)
+					pipe_bitmask = pipe_bitmask |
+							(1 << pipe_num);
+				else
+					pipe_bitmask = pipe_bitmask &
+							(~(1 << pipe_num));
+			}
+			pipe_num = 0;
+			is_pipe = false;
+		}
+	}
+	if (is_pipe && pipe_num < ipa3_ctx->ipa_num_pipes &&
+		ipa3_get_client_by_pipe(pipe_num) < IPA_CLIENT_MAX) {
+		IPADBG("pipe number %u\n", pipe_num);
+		if (enable_pipe)
+			pipe_bitmask = pipe_bitmask | (1 << pipe_num);
+		else
+			pipe_bitmask = pipe_bitmask & (~(1 << pipe_num));
+	}
+
+	ipa_init_drop_stats(pipe_bitmask);
+	ret = count;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
 static const struct file_operations ipa3_quota_ops = {
 	.read = ipa_debugfs_print_quota_stats,
 	.write = ipa_debugfs_reset_quota_stats,
@@ -2255,10 +2330,14 @@
 	.write = ipa_debugfs_reset_drop_stats,
 };
 
+static const struct file_operations ipa3_enable_drop_ops = {
+	.write = ipa_debugfs_enable_disable_drop_stats,
+};
 
 int ipa_debugfs_init_stats(struct dentry *parent)
 {
 	const mode_t read_write_mode = 0664;
+	const mode_t write_mode = 0220;
 	struct dentry *file;
 	struct dentry *dent;
 
@@ -2285,6 +2364,13 @@
 		goto fail;
 	}
 
+	file = debugfs_create_file("enable_drop_stats", write_mode, dent, NULL,
+		&ipa3_enable_drop_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		IPAERR("fail to create file %s\n", "enable_drop_stats");
+		goto fail;
+	}
+
 	file = debugfs_create_file("tethering", read_write_mode, dent, NULL,
 		&ipa3_tethering_ops);
 	if (IS_ERR_OR_NULL(file)) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index bb6f7a0..a270f44 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1031,6 +1031,7 @@
 	struct list_head pending_pkts[GSI_VEID_MAX];
 	atomic_t xmit_eot_cnt;
 	struct tasklet_struct tasklet;
+	struct work_struct tasklet_work;
 
 	/* ordering is important - mutable fields go above */
 	struct ipa3_ep_context *ep;
@@ -1044,6 +1045,7 @@
 	u32 pm_hdl;
 	unsigned int napi_sch_cnt;
 	unsigned int napi_comp_cnt;
+	struct workqueue_struct *tasklet_wq;
 	/* ordering is important - other immutable fields go below */
 };
 
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index a87725b..8a119ca 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -43,6 +43,7 @@
 #define FCC_STEPPER_VOTER		"FCC_STEPPER_VOTER"
 #define FCC_VOTER			"FCC_VOTER"
 #define MAIN_FCC_VOTER			"MAIN_FCC_VOTER"
+#define PD_VOTER			"PD_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -190,30 +191,57 @@
 	return pval.intval;
 }
 
-static int get_hvdcp3_icl_limit(struct pl_data *chip)
+static int get_adapter_icl_based_ilim(struct pl_data *chip)
 {
-	int main_icl, target_icl = -EINVAL;
+	int main_icl = -EINVAL, adapter_icl = -EINVAL, rc, final_icl = -EINVAL;
+	union power_supply_propval pval = {0, };
 
+	rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_PD_ACTIVE, &pval);
+	if (rc < 0)
+		pr_err("Failed to read PD_ACTIVE status rc=%d\n",
+				rc);
+	/* Check for QC 3, 3.5 and PPS adapters; return if it's none of them */
 	if (chip->charger_type != POWER_SUPPLY_TYPE_USB_HVDCP_3 &&
-		chip->charger_type != POWER_SUPPLY_TYPE_USB_HVDCP_3P5)
-		return target_icl;
+		chip->charger_type != POWER_SUPPLY_TYPE_USB_HVDCP_3P5 &&
+		pval.intval != POWER_SUPPLY_PD_PPS_ACTIVE)
+		return final_icl;
 
 	/*
-	 * For HVDCP3 adapters, limit max. ILIM as follows:
-	 * HVDCP3_ICL: Maximum ICL of HVDCP3 adapter(from DT configuration)
-	 * For Parallel input configurations:
-	 * VBUS: target_icl = HVDCP3_ICL - main_ICL
-	 * VMID: target_icl = HVDCP3_ICL
+	 * For HVDCP3/HVDCP_3P5 adapters, limit max. ILIM as:
+	 * HVDCP3_ICL: Maximum ICL of HVDCP3 adapter (from DT
+	 * configuration).
+	 *
+	 * For PPS adapters, limit max. ILIM to
+	 * MIN(qc4_max_icl, PD_CURRENT_MAX)
 	 */
-	target_icl = chip->chg_param->hvdcp3_max_icl_ua;
+	if (pval.intval == POWER_SUPPLY_PD_PPS_ACTIVE) {
+		adapter_icl = min_t(int, chip->chg_param->qc4_max_icl_ua,
+				get_client_vote_locked(chip->usb_icl_votable,
+				PD_VOTER));
+		if (adapter_icl <= 0)
+			adapter_icl = chip->chg_param->qc4_max_icl_ua;
+	} else {
+		adapter_icl = chip->chg_param->hvdcp3_max_icl_ua;
+	}
+
+	/*
+	 * For Parallel input configurations:
+	 * VBUS: final_icl = adapter_icl - main_ICL
+	 * VMID: final_icl = adapter_icl
+	 */
+	final_icl = adapter_icl;
 	if (cp_get_parallel_mode(chip, PARALLEL_INPUT_MODE)
 					== POWER_SUPPLY_PL_USBIN_USBIN) {
 		main_icl = get_effective_result_locked(chip->usb_icl_votable);
-		if ((main_icl >= 0) && (main_icl < target_icl))
-			target_icl -= main_icl;
+		if ((main_icl >= 0) && (main_icl < adapter_icl))
+			final_icl = adapter_icl - main_icl;
 	}
 
-	return target_icl;
+	pr_debug("charger_type=%d final_icl=%d adapter_icl=%d main_icl=%d\n",
+		chip->charger_type, final_icl, adapter_icl, main_icl);
+
+	return final_icl;
 }
 
 /*
@@ -244,7 +272,7 @@
 					== POWER_SUPPLY_PL_OUTPUT_VPH)
 		return;
 
-	target_icl = get_hvdcp3_icl_limit(chip);
+	target_icl = get_adapter_icl_based_ilim(chip);
 	ilim = (target_icl > 0) ? min(ilim, target_icl) : ilim;
 
 	rc = power_supply_get_property(chip->cp_master_psy,
@@ -742,7 +770,7 @@
 		if (!chip->cp_ilim_votable)
 			chip->cp_ilim_votable = find_votable("CP_ILIM");
 
-		target_icl = get_hvdcp3_icl_limit(chip) * 2;
+		target_icl = get_adapter_icl_based_ilim(chip) * 2;
 		total_fcc_ua -= chip->main_fcc_ua;
 
 		/*
diff --git a/drivers/power/supply/qcom/battery.h b/drivers/power/supply/qcom/battery.h
index acb4984..c614544 100644
--- a/drivers/power/supply/qcom/battery.h
+++ b/drivers/power/supply/qcom/battery.h
@@ -13,6 +13,7 @@
 	u32 hvdcp2_max_icl_ua;
 	u32 hvdcp3_max_icl_ua;
 	u32 forced_main_fcc;
+	u32 qc4_max_icl_ua;
 };
 
 int qcom_batt_init(struct charger_param *param);
diff --git a/drivers/power/supply/qcom/fg-alg.c b/drivers/power/supply/qcom/fg-alg.c
index ead4839..c17c61e 100644
--- a/drivers/power/supply/qcom/fg-alg.c
+++ b/drivers/power/supply/qcom/fg-alg.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"ALG: %s: " fmt, __func__
@@ -714,7 +714,7 @@
 	pr_debug("Aborting cap_learning\n");
 	cl->active = false;
 	cl->init_cap_uah = 0;
-	mutex_lock(&cl->lock);
+	mutex_unlock(&cl->lock);
 }
 
 /**
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 0acae67..4a29912 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -4427,6 +4427,7 @@
 		return rc;
 	}
 	sleep_fifo_length &= SLEEP_IBAT_QUALIFIED_LENGTH_MASK;
+	sleep_fifo_length++;
 
 	if (chip->dt.qg_sleep_config) {
 		qg_dbg(chip, QG_DEBUG_STATUS, "Suspend: Forcing S2_SLEEP\n");
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 103ca3a..5672c31 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -433,6 +433,7 @@
 #define MICRO_P1A			100000
 #define MICRO_1PA			1000000
 #define MICRO_3PA			3000000
+#define MICRO_4PA			4000000
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
 #define DEFAULT_WD_BARK_TIME		64
 #define DEFAULT_WD_SNARL_TIME_8S	0x07
@@ -605,6 +606,12 @@
 	if (chg->chg_param.hvdcp2_max_icl_ua <= 0)
 		chg->chg_param.hvdcp2_max_icl_ua = MICRO_3PA;
 
+	/* Used only in Adapter CV mode of operation */
+	of_property_read_u32(node, "qcom,qc4-max-icl-ua",
+					&chg->chg_param.qc4_max_icl_ua);
+	if (chg->chg_param.qc4_max_icl_ua <= 0)
+		chg->chg_param.qc4_max_icl_ua = MICRO_4PA;
+
 	return 0;
 }
 
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 651d212..9219018 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -2200,7 +2200,8 @@
 			 * If Vbatt is within 40mV above Vfloat, then don't
 			 * treat it as overvoltage.
 			 */
-			effective_fv_uv = get_effective_result(chg->fv_votable);
+			effective_fv_uv = get_effective_result_locked(
+							chg->fv_votable);
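+			/* e.g. with fv = 4.4 V, vbat below 4.44 V is tolerated */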
 			if (pval.intval >= effective_fv_uv + 40000) {
 				val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
 				smblib_err(chg, "battery over-voltage vbat_fg = %duV, fv = %duV\n",
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 5e96575..7e7dd02 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1057,6 +1057,14 @@
 	  This driver provides support for the voltage regulators on the
 	  WM8994 CODEC.
 
+config REGULATOR_MEM_ACC
+	tristate "QTI Memory accelerator regulator driver"
+	help
+	  Say y here to enable the memory accelerator driver for
+	  Qualcomm Technologies, Inc. (QTI) chips. The accelerator
+	  controls delays applied for memory accesses. This driver
+	  configures the power mode (corner) of the memory accelerator.
+
 config REGULATOR_REFGEN
 	tristate "Qualcomm Technologies, Inc. REFGEN regulator driver"
 	depends on OF
@@ -1086,6 +1094,16 @@
 	  be used on systems which contain an RPM which communicates with the
 	  application processor over SMD.
 
+config REGULATOR_SPM
+	bool "SPM regulator driver"
+	depends on SPMI
+	help
+	  Enable support for the SPM regulator driver which is used for
+	  setting voltages of processor supply regulators via the SPM module
+	  found inside chips of Qualcomm Technologies, Inc. The SPM regulator
+	  driver can be used on QTI SoCs where the APSS processor cores are
+	  supplied by their own PMIC regulator.
+
 config REGULATOR_STUB
 	tristate "Stub Regulator"
 	help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 9d6e77d..2f8b0ba 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -135,8 +135,9 @@
 obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
 obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
 obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
-
+obj-$(CONFIG_REGULATOR_MEM_ACC) += mem-acc-regulator.o
 obj-$(CONFIG_REGULATOR_REFGEN) += refgen.o
+obj-$(CONFIG_REGULATOR_SPM) += spm-regulator.o
 obj-$(CONFIG_REGULATOR_RPMH) += rpmh-regulator.o
 obj-$(CONFIG_REGULATOR_STUB) += stub-regulator.o
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f2360fa..6968971 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2734,6 +2734,40 @@
 EXPORT_SYMBOL_GPL(regulator_list_hardware_vsel);
 
 /**
+ * regulator_list_corner_voltage - return the maximum voltage in microvolts that
+ *	can be physically configured for the regulator when operating at the
+ *	specified voltage corner
+ * @regulator: regulator source
+ * @corner: voltage corner value
+ * Context: can sleep
+ *
+ * This function can be used for regulators which allow scaling between
+ * different voltage corners as opposed to different absolute voltages.  The
+ * absolute voltage for a given corner may vary part-to-part or for a given part
+ * at runtime based upon various factors.
+ *
+ * Returns a voltage corresponding to the specified voltage corner or a negative
+ * errno if the corner value can't be used on this system.
+ */
+int regulator_list_corner_voltage(struct regulator *regulator, int corner)
+{
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret;
+
+	if (corner < rdev->constraints->min_uV ||
+	    corner > rdev->constraints->max_uV ||
+	    !rdev->desc->ops->list_corner_voltage)
+		return -EINVAL;
+
+	mutex_lock(&rdev->mutex);
+	ret = rdev->desc->ops->list_corner_voltage(rdev, corner);
+	mutex_unlock(&rdev->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(regulator_list_corner_voltage);
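+
+/*
+ * Usage sketch for the API above (hypothetical consumer; the "mem-acc"
+ * supply name and the corner value are illustrative only):
+ *
+ *	struct regulator *reg = devm_regulator_get(dev, "mem-acc");
+ *	int uV = regulator_list_corner_voltage(reg, corner);
+ *
+ *	if (uV < 0)
+ *		dev_err(dev, "corner %d not usable on this part\n", corner);
+ */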
+
+/**
  * regulator_get_linear_step - return the voltage step size between VSEL values
  * @regulator: regulator source
  *
diff --git a/drivers/regulator/mem-acc-regulator.c b/drivers/regulator/mem-acc-regulator.c
new file mode 100644
index 0000000..07c867b
--- /dev/null
+++ b/drivers/regulator/mem-acc-regulator.c
@@ -0,0 +1,1506 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"ACC: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/string.h>
+#include <soc/qcom/scm.h>
+
+#define MEM_ACC_DEFAULT_SEL_SIZE	2
+
+#define BYTES_PER_FUSE_ROW		8
+
+/* mem-acc config flags */
+enum {
+	MEM_ACC_USE_CORNER_ACC_MAP	= BIT(0),
+	MEM_ACC_USE_ADDR_VAL_MAP	= BIT(1),
+};
+
+#define FUSE_MAP_NO_MATCH		(-1)
+#define FUSE_PARAM_MATCH_ANY		(-1)
+#define PARAM_MATCH_ANY			(-1)
+
+enum {
+	MEMORY_L1,
+	MEMORY_L2,
+	MEMORY_MAX,
+};
+
+#define MEM_ACC_TYPE_MAX		6
+
+/**
+ * struct acc_reg_value - Acc register configuration structure
+ * @addr_index:	An index into phys_reg_addr_list and remap_reg_addr_list
+ *		to get the ACC register physical address and remapped address.
+ * @reg_val:	Value to program into the register mapped by addr_index.
+ */
+struct acc_reg_value {
+	u32		addr_index;
+	u32		reg_val;
+};
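+
+/*
+ * Each <addr_index reg_val> pair parsed from a qcom,corner<n>-reg-config
+ * style property fills one entry; e.g. (values illustrative) the tuple
+ * <1 0x55> programs 0x55 into the register whose physical address is
+ * phys_reg_addr_list[1].
+ */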
+
+struct corner_acc_reg_config {
+	struct acc_reg_value	*reg_config_list;
+	int			max_reg_config_len;
+};
+
+struct mem_acc_regulator {
+	struct device		*dev;
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+
+	int			corner;
+	bool			mem_acc_supported[MEMORY_MAX];
+	bool			mem_acc_custom_supported[MEMORY_MAX];
+
+	u32			*acc_sel_mask[MEMORY_MAX];
+	u32			*acc_sel_bit_pos[MEMORY_MAX];
+	u32			acc_sel_bit_size[MEMORY_MAX];
+	u32			num_acc_sel[MEMORY_MAX];
+	u32			*acc_en_bit_pos;
+	u32			num_acc_en;
+	u32			*corner_acc_map;
+	u32			num_corners;
+	u32			override_fuse_value;
+	int			override_map_match;
+	int			override_map_count;
+
+	void __iomem		*acc_sel_base[MEMORY_MAX];
+	void __iomem		*acc_en_base;
+	phys_addr_t		acc_sel_addr[MEMORY_MAX];
+	phys_addr_t		acc_en_addr;
+	u32			flags;
+
+	void __iomem		*acc_custom_addr[MEMORY_MAX];
+	u32			*acc_custom_data[MEMORY_MAX];
+
+	phys_addr_t		mem_acc_type_addr[MEM_ACC_TYPE_MAX];
+	u32			*mem_acc_type_data;
+
+	/* eFuse parameters */
+	phys_addr_t		efuse_addr;
+	void __iomem		*efuse_base;
+
+	u32			num_acc_reg;
+	u32			*phys_reg_addr_list;
+	void __iomem		**remap_reg_addr_list;
+	struct corner_acc_reg_config	*corner_acc_reg_config;
+	u32			*override_acc_range_fuse_list;
+	int			override_acc_range_fuse_num;
+};
+
+static DEFINE_MUTEX(mem_acc_memory_mutex);
+
+static u64 mem_acc_read_efuse_row(struct mem_acc_regulator *mem_acc_vreg,
+					u32 row_num, bool use_tz_api)
+{
+	int rc;
+	u64 efuse_bits;
+	struct scm_desc desc = {0};
+
+	if (!use_tz_api) {
+		efuse_bits = readq_relaxed(mem_acc_vreg->efuse_base
+			+ row_num * BYTES_PER_FUSE_ROW);
+		return efuse_bits;
+	}
+
+	desc.args[0] = mem_acc_vreg->efuse_addr + row_num * BYTES_PER_FUSE_ROW;
+	desc.args[1] = 0;
+	desc.arginfo = SCM_ARGS(2);
+	efuse_bits = 0;
+
+	rc = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE, SCM_FUSE_READ), &desc);
+	if (rc)
+		pr_err("read row %d failed, err code = %d\n", row_num, rc);
+	else
+		efuse_bits = ((u64)(desc.ret[1]) << 32) + (u64)desc.ret[0];
+
+	return efuse_bits;
+}
+
+static inline u32 apc_to_acc_corner(struct mem_acc_regulator *mem_acc_vreg,
+								int corner)
+{
+	/*
+	 * corner_acc_map is indexed from 0, whereas APC corner values
+	 * start from 1.
+	 */
+	return mem_acc_vreg->corner_acc_map[corner - 1];
+}
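+
+/*
+ * e.g. (map contents hypothetical) with qcom,corner-acc-map = <0 1 2>,
+ * an APC corner of 3 resolves to corner_acc_map[2] == 2.
+ */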
+
+static void __update_acc_sel(struct mem_acc_regulator *mem_acc_vreg,
+						int corner, int mem_type)
+{
+	u32 acc_data, acc_data_old, i, bit, acc_corner;
+
+	acc_data = readl_relaxed(mem_acc_vreg->acc_sel_base[mem_type]);
+	acc_data_old = acc_data;
+	for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+		bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+		acc_data &= ~mem_acc_vreg->acc_sel_mask[mem_type][i];
+		acc_corner = apc_to_acc_corner(mem_acc_vreg, corner);
+		acc_data |= (acc_corner << bit) &
+			mem_acc_vreg->acc_sel_mask[mem_type][i];
+	}
+	pr_debug("corner=%d old_acc_sel=0x%02x new_acc_sel=0x%02x mem_type=%d\n",
+			corner, acc_data_old, acc_data, mem_type);
+	writel_relaxed(acc_data, mem_acc_vreg->acc_sel_base[mem_type]);
+}
+
+static void __update_acc_type(struct mem_acc_regulator *mem_acc_vreg,
+				int corner)
+{
+	int i, rc;
+
+	for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+		if (mem_acc_vreg->mem_acc_type_addr[i]) {
+			rc = scm_io_write(mem_acc_vreg->mem_acc_type_addr[i],
+				mem_acc_vreg->mem_acc_type_data[corner - 1 + i *
+				mem_acc_vreg->num_corners]);
+			if (rc)
+				pr_err("scm_io_write: %pa failure rc:%d\n",
+					&(mem_acc_vreg->mem_acc_type_addr[i]),
+					rc);
+		}
+	}
+}
+
+static void __update_acc_custom(struct mem_acc_regulator *mem_acc_vreg,
+						int corner, int mem_type)
+{
+	writel_relaxed(
+		mem_acc_vreg->acc_custom_data[mem_type][corner-1],
+		mem_acc_vreg->acc_custom_addr[mem_type]);
+	pr_debug("corner=%d mem_type=%d custom_data=0x%2x\n", corner,
+		mem_type, mem_acc_vreg->acc_custom_data[mem_type][corner-1]);
+}
+
+static void update_acc_sel(struct mem_acc_regulator *mem_acc_vreg, int corner)
+{
+	int i;
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		if (mem_acc_vreg->mem_acc_supported[i])
+			__update_acc_sel(mem_acc_vreg, corner, i);
+		if (mem_acc_vreg->mem_acc_custom_supported[i])
+			__update_acc_custom(mem_acc_vreg, corner, i);
+	}
+
+	if (mem_acc_vreg->mem_acc_type_data)
+		__update_acc_type(mem_acc_vreg, corner);
+}
+
+static void update_acc_reg(struct mem_acc_regulator *mem_acc_vreg, int corner)
+{
+	struct corner_acc_reg_config *corner_acc_reg_config;
+	struct acc_reg_value *reg_config_list;
+	int i, index;
+	u32 addr_index, reg_val;
+
+	corner_acc_reg_config =
+		&mem_acc_vreg->corner_acc_reg_config[mem_acc_vreg->corner];
+	reg_config_list = corner_acc_reg_config->reg_config_list;
+	for (i = 0; i < corner_acc_reg_config->max_reg_config_len; i++) {
+		/*
+		 * Use (corner - 1) in the below equation as
+		 * the reg_config_list[] stores the values starting from
+		 * index '0', whereas the minimum corner value allowed
+		 * in regulator framework is '1'.
+		 */
+		index = (corner - 1) * corner_acc_reg_config->max_reg_config_len
+			+ i;
+		addr_index = reg_config_list[index].addr_index;
+		reg_val = reg_config_list[index].reg_val;
+
+		if (addr_index == PARAM_MATCH_ANY)
+			break;
+
+		writel_relaxed(reg_val,
+				mem_acc_vreg->remap_reg_addr_list[addr_index]);
+		/* make sure write complete */
+		mb();
+
+		pr_debug("corner=%d register:0x%x value:0x%x\n", corner,
+			mem_acc_vreg->phys_reg_addr_list[addr_index], reg_val);
+	}
+}
+
+static int mem_acc_regulator_set_voltage(struct regulator_dev *rdev,
+		int corner, int corner_max, unsigned int *selector)
+{
+	struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+	int i;
+
+	if (corner > mem_acc_vreg->num_corners) {
+		pr_err("Invalid corner=%d requested\n", corner);
+		return -EINVAL;
+	}
+
+	pr_debug("old corner=%d, new corner=%d\n",
+			mem_acc_vreg->corner, corner);
+
+	if (corner == mem_acc_vreg->corner)
+		return 0;
+
+	/* Step up or down to the requested corner one level at a time */
+	mutex_lock(&mem_acc_memory_mutex);
+
+	if (mem_acc_vreg->flags & MEM_ACC_USE_ADDR_VAL_MAP) {
+		update_acc_reg(mem_acc_vreg, corner);
+	} else if (mem_acc_vreg->flags & MEM_ACC_USE_CORNER_ACC_MAP) {
+		if (corner > mem_acc_vreg->corner) {
+			for (i = mem_acc_vreg->corner + 1; i <= corner; i++) {
+				pr_debug("UP: to corner %d\n", i);
+				update_acc_sel(mem_acc_vreg, i);
+			}
+		} else {
+			for (i = mem_acc_vreg->corner - 1; i >= corner; i--) {
+				pr_debug("DOWN: to corner %d\n", i);
+				update_acc_sel(mem_acc_vreg, i);
+			}
+		}
+	}
+
+	mutex_unlock(&mem_acc_memory_mutex);
+
+	pr_debug("new voltage corner set %d\n", corner);
+
+	mem_acc_vreg->corner = corner;
+
+	return 0;
+}
+
+static int mem_acc_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+
+	return mem_acc_vreg->corner;
+}
+
+static struct regulator_ops mem_acc_corner_ops = {
+	.set_voltage		= mem_acc_regulator_set_voltage,
+	.get_voltage		= mem_acc_regulator_get_voltage,
+};
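+
+/*
+ * The set/get_voltage callbacks above traffic in corner numbers rather
+ * than microvolts. A consumer would request a corner with (sketch; the
+ * handle name is hypothetical):
+ *
+ *	regulator_set_voltage(mem_acc_reg, corner, corner);
+ */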
+
+static int __mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg,
+							int mem_type)
+{
+	int i;
+	u32 bit, mask;
+
+	mem_acc_vreg->acc_sel_mask[mem_type] = devm_kzalloc(mem_acc_vreg->dev,
+		mem_acc_vreg->num_acc_sel[mem_type] * sizeof(u32), GFP_KERNEL);
+	if (!mem_acc_vreg->acc_sel_mask[mem_type])
+		return -ENOMEM;
+
+	for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+		bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+		mask = BIT(mem_acc_vreg->acc_sel_bit_size[mem_type]) - 1;
+		mem_acc_vreg->acc_sel_mask[mem_type][i] = mask << bit;
+	}
+
+	return 0;
+}
+
+static int mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+	int i, rc;
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		if (mem_acc_vreg->mem_acc_supported[i]) {
+			rc = __mem_acc_sel_init(mem_acc_vreg, i);
+			if (rc) {
+				pr_err("Unable to initialize mem_type=%d rc=%d\n",
+					i, rc);
+				return rc;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void mem_acc_en_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+	int i, bit;
+	u32 acc_data;
+
+	acc_data = readl_relaxed(mem_acc_vreg->acc_en_base);
+	pr_debug("init: acc_en_register=%x\n", acc_data);
+	for (i = 0; i < mem_acc_vreg->num_acc_en; i++) {
+		bit = mem_acc_vreg->acc_en_bit_pos[i];
+		acc_data |= BIT(bit);
+	}
+	pr_debug("final: acc_en_register=%x\n", acc_data);
+	writel_relaxed(acc_data, mem_acc_vreg->acc_en_base);
+}
+
+static int populate_acc_data(struct mem_acc_regulator *mem_acc_vreg,
+			const char *prop_name, u32 **value, u32 *len)
+{
+	int rc;
+
+	if (!of_get_property(mem_acc_vreg->dev->of_node, prop_name, len)) {
+		pr_err("Unable to find %s property\n", prop_name);
+		return -EINVAL;
+	}
+	*len /= sizeof(u32);
+	if (!(*len)) {
+		pr_err("Incorrect entries in %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	*value = devm_kzalloc(mem_acc_vreg->dev, (*len) * sizeof(u32),
+							GFP_KERNEL);
+	if (!(*value)) {
+		pr_err("Unable to allocate memory for %s\n", prop_name);
+		return -ENOMEM;
+	}
+
+	pr_debug("Found %s, data-length = %d\n", prop_name, *len);
+
+	rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
+					prop_name, *value, *len);
+	if (rc) {
+		pr_err("Unable to populate %s rc=%d\n", prop_name, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int mem_acc_sel_setup(struct mem_acc_regulator *mem_acc_vreg,
+			struct resource *res, int mem_type)
+{
+	int len, rc;
+	char *mem_select_str;
+	char *mem_select_size_str;
+
+	mem_acc_vreg->acc_sel_addr[mem_type] = res->start;
+	len = resource_size(res);
+	pr_debug("'acc_sel_addr' = %pa mem_type=%d (len=%d)\n",
+					&res->start, mem_type, len);
+
+	mem_acc_vreg->acc_sel_base[mem_type] = devm_ioremap(mem_acc_vreg->dev,
+			mem_acc_vreg->acc_sel_addr[mem_type], len);
+	if (!mem_acc_vreg->acc_sel_base[mem_type]) {
+		pr_err("Unable to map 'acc_sel_addr' %pa for mem_type=%d\n",
+			&mem_acc_vreg->acc_sel_addr[mem_type], mem_type);
+		return -EINVAL;
+	}
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		mem_select_str = "qcom,acc-sel-l1-bit-pos";
+		mem_select_size_str = "qcom,acc-sel-l1-bit-size";
+		break;
+	case MEMORY_L2:
+		mem_select_str = "qcom,acc-sel-l2-bit-pos";
+		mem_select_size_str = "qcom,acc-sel-l2-bit-size";
+		break;
+	default:
+		pr_err("Invalid memory type: %d\n", mem_type);
+		return -EINVAL;
+	}
+
+	mem_acc_vreg->acc_sel_bit_size[mem_type] = MEM_ACC_DEFAULT_SEL_SIZE;
+	of_property_read_u32(mem_acc_vreg->dev->of_node, mem_select_size_str,
+			&mem_acc_vreg->acc_sel_bit_size[mem_type]);
+
+	rc = populate_acc_data(mem_acc_vreg, mem_select_str,
+			&mem_acc_vreg->acc_sel_bit_pos[mem_type],
+			&mem_acc_vreg->num_acc_sel[mem_type]);
+	if (rc)
+		pr_err("Unable to populate '%s' rc=%d\n", mem_select_str, rc);
+
+	return rc;
+}
+
+static int mem_acc_efuse_init(struct platform_device *pdev,
+				 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct resource *res;
+	int len;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+	if (!res || !res->start) {
+		mem_acc_vreg->efuse_base = NULL;
+		pr_debug("'efuse_addr' resource missing or not used.\n");
+		return 0;
+	}
+
+	mem_acc_vreg->efuse_addr = res->start;
+	len = resource_size(res);
+
+	pr_info("efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+	mem_acc_vreg->efuse_base = devm_ioremap(&pdev->dev,
+						mem_acc_vreg->efuse_addr, len);
+	if (!mem_acc_vreg->efuse_base) {
+		pr_err("Unable to map efuse_addr %pa\n",
+				&mem_acc_vreg->efuse_addr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mem_acc_custom_data_init(struct platform_device *pdev,
+				 struct mem_acc_regulator *mem_acc_vreg,
+				 int mem_type)
+{
+	struct resource *res;
+	char *custom_apc_addr_str, *custom_apc_data_str;
+	int len, rc = 0;
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		custom_apc_addr_str = "acc-l1-custom";
+		custom_apc_data_str = "qcom,l1-acc-custom-data";
+		break;
+	case MEMORY_L2:
+		custom_apc_addr_str = "acc-l2-custom";
+		custom_apc_data_str = "qcom,l2-acc-custom-data";
+		break;
+	default:
+		pr_err("Invalid memory type: %d\n", mem_type);
+		return -EINVAL;
+	}
+
+	if (!of_find_property(mem_acc_vreg->dev->of_node,
+				custom_apc_data_str, NULL)) {
+		pr_debug("%s custom_data not specified\n", custom_apc_data_str);
+		return 0;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						custom_apc_addr_str);
+	if (!res || !res->start) {
+		pr_debug("%s resource missing\n", custom_apc_addr_str);
+		return -EINVAL;
+	}
+
+	len = resource_size(res);
+	mem_acc_vreg->acc_custom_addr[mem_type] =
+		devm_ioremap(mem_acc_vreg->dev, res->start, len);
+	if (!mem_acc_vreg->acc_custom_addr[mem_type]) {
+		pr_err("Unable to map %s %pa\n",
+			custom_apc_addr_str, &res->start);
+		return -EINVAL;
+	}
+
+	rc = populate_acc_data(mem_acc_vreg, custom_apc_data_str,
+				&mem_acc_vreg->acc_custom_data[mem_type], &len);
+	if (rc) {
+		pr_err("Unable to find %s rc=%d\n", custom_apc_data_str, rc);
+		return rc;
+	}
+
+	if (mem_acc_vreg->num_corners != len) {
+		pr_err("Custom data is not present for all the corners\n");
+		return -EINVAL;
+	}
+
+	mem_acc_vreg->mem_acc_custom_supported[mem_type] = true;
+
+	return 0;
+}
+
+static int override_mem_acc_custom_data(struct mem_acc_regulator *mem_acc_vreg,
+		 int mem_type)
+{
+	char *custom_apc_data_str;
+	int len, rc = 0, i;
+	int tuple_count, tuple_match;
+	u32 index = 0, value = 0;
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		custom_apc_data_str = "qcom,override-l1-acc-custom-data";
+		break;
+	case MEMORY_L2:
+		custom_apc_data_str = "qcom,override-l2-acc-custom-data";
+		break;
+	default:
+		pr_err("Invalid memory type: %d\n", mem_type);
+		return -EINVAL;
+	}
+
+	if (!of_find_property(mem_acc_vreg->dev->of_node,
+				custom_apc_data_str, &len)) {
+		pr_debug("%s not specified\n", custom_apc_data_str);
+		return 0;
+	}
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != mem_acc_vreg->num_corners * tuple_count * sizeof(u32)) {
+		pr_err("%s length=%d is invalid\n", custom_apc_data_str, len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mem_acc_vreg->num_corners; i++) {
+		index = (tuple_match * mem_acc_vreg->num_corners) + i;
+		rc = of_property_read_u32_index(mem_acc_vreg->dev->of_node,
+					custom_apc_data_str, index, &value);
+		if (rc) {
+			pr_err("Unable read %s index %u, rc=%d\n",
+					custom_apc_data_str, index, rc);
+			return rc;
+		}
+		mem_acc_vreg->acc_custom_data[mem_type][i] = value;
+	}
+
+	return 0;
+}
+
+static int mem_acc_override_corner_map(struct mem_acc_regulator *mem_acc_vreg)
+{
+	int len = 0, i, rc;
+	int tuple_count, tuple_match;
+	u32 index = 0, value = 0;
+	char *prop_str = "qcom,override-corner-acc-map";
+
+	if (!of_find_property(mem_acc_vreg->dev->of_node, prop_str, &len))
+		return 0;
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	if (len != mem_acc_vreg->num_corners * tuple_count * sizeof(u32)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mem_acc_vreg->num_corners; i++) {
+		index = (tuple_match * mem_acc_vreg->num_corners) + i;
+		rc = of_property_read_u32_index(mem_acc_vreg->dev->of_node,
+						prop_str, index, &value);
+		if (rc) {
+			pr_err("Unable read %s index %u, rc=%d\n",
+						prop_str, index, rc);
+			return rc;
+		}
+		mem_acc_vreg->corner_acc_map[i] = value;
+	}
+
+	return 0;
+}
+
+static void mem_acc_read_efuse_param(struct mem_acc_regulator *mem_acc_vreg,
+		u32 *fuse_sel, int *val)
+{
+	u64 fuse_bits;
+
+	fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
+					   fuse_sel[3]);
+	/*
+	 * fuse_sel[0] = fuse row number
+	 * fuse_sel[1] = LSB position in row (shift)
+	 * fuse_sel[2] = num of bits (mask)
+	 * fuse_sel[3] = read via TZ API if nonzero
+	 */
+	*val = (fuse_bits >> fuse_sel[1]) & ((1 << fuse_sel[2]) - 1);
+}
+
+#define FUSE_TUPLE_SIZE 4
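+/*
+ * A fuse selector is a 4-tuple of <row lsb num-bits use-tz-api>, e.g.
+ * (values illustrative) qcom,override-acc-fuse-sel = <28 24 3 0>;
+ */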
+static int mem_acc_parse_override_fuse_version_map(
+			 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, rc, tuple_size;
+	int len = 0;
+	u32 *tmp;
+	u32 fuse_sel[4];
+	char *prop_str;
+
+	prop_str = "qcom,override-acc-fuse-sel";
+	rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+					FUSE_TUPLE_SIZE);
+	if (rc < 0) {
+		pr_err("Read failed - %s rc=%d\n", prop_str, rc);
+		return rc;
+	}
+
+	mem_acc_read_efuse_param(mem_acc_vreg, fuse_sel,
+				 &mem_acc_vreg->override_fuse_value);
+
+	prop_str = "qcom,override-fuse-version-map";
+	if (!of_find_property(of_node, prop_str, &len))
+		return -EINVAL;
+
+	tuple_size = 1;
+	mem_acc_vreg->override_map_count = len / (sizeof(u32) * tuple_size);
+	if (len == 0 || len % (sizeof(u32) * tuple_size)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node, prop_str, tmp,
+			mem_acc_vreg->override_map_count * tuple_size);
+	if (rc) {
+		pr_err("could not read %s rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	for (i = 0; i < mem_acc_vreg->override_map_count; i++) {
+		if (tmp[i * tuple_size] == mem_acc_vreg->override_fuse_value
+		    || tmp[i * tuple_size] == FUSE_PARAM_MATCH_ANY) {
+			mem_acc_vreg->override_map_match = i;
+			break;
+		}
+	}
+
+	if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
+		pr_info("override_fuse_val=%d, %s tuple match found: %d\n",
+			mem_acc_vreg->override_fuse_value, prop_str,
+			mem_acc_vreg->override_map_match);
+	else
+		pr_err("%s tuple match not found\n", prop_str);
+
+done:
+	kfree(tmp);
+	return rc;
+}
+
+static int mem_acc_parse_override_fuse_version_range(
+			 struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, j, rc, size, row_size;
+	int num_fuse_sel, len = 0;
+	u32 *tmp = NULL;
+	char *prop_str;
+	u32 *fuse_val, *fuse_sel;
+	char *buf = NULL;
+	int pos = 0, buflen;
+
+	prop_str = "qcom,override-acc-range-fuse-list";
+	if (!of_find_property(of_node, prop_str, &len)) {
+		pr_err("%s property is missing\n", prop_str);
+		return -EINVAL;
+	}
+
+	size = len / sizeof(u32);
+	if (len == 0 || (size % FUSE_TUPLE_SIZE)) {
+		pr_err("%s property length (%d) is invalid\n", prop_str, len);
+		return -EINVAL;
+	}
+
+	num_fuse_sel = size / FUSE_TUPLE_SIZE;
+	fuse_val = devm_kcalloc(mem_acc_vreg->dev, num_fuse_sel,
+				sizeof(*fuse_val), GFP_KERNEL);
+	if (!fuse_val)
+		return -ENOMEM;
+	mem_acc_vreg->override_acc_range_fuse_list = fuse_val;
+	mem_acc_vreg->override_acc_range_fuse_num = num_fuse_sel;
+
+	fuse_sel = kzalloc(len, GFP_KERNEL);
+	if (!fuse_sel) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = of_property_read_u32_array(of_node, prop_str, fuse_sel,
+					size);
+	if (rc) {
+		pr_err("%s read failed, rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	for (i = 0; i < num_fuse_sel; i++) {
+		mem_acc_read_efuse_param(mem_acc_vreg, &fuse_sel[i * 4],
+					 &fuse_val[i]);
+	}
+
+	prop_str = "qcom,override-fuse-range-map";
+	if (!of_find_property(of_node, prop_str, &len))
+		goto done;
+
+	row_size = num_fuse_sel * 2;
+	mem_acc_vreg->override_map_count = len / (sizeof(u32) * row_size);
+
+	if (len == 0 || len % (sizeof(u32) * row_size)) {
+		pr_err("%s length=%d is invalid\n", prop_str, len);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	tmp = kzalloc(len, GFP_KERNEL);
+	if (!tmp) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	rc = of_property_read_u32_array(of_node, prop_str, tmp,
+				mem_acc_vreg->override_map_count * row_size);
+	if (rc) {
+		pr_err("could not read %s rc=%d\n", prop_str, rc);
+		goto done;
+	}
+
+	for (i = 0; i < mem_acc_vreg->override_map_count; i++) {
+		for (j = 0; j < num_fuse_sel; j++) {
+			if (tmp[i * row_size + j * 2] > fuse_val[j]
+				|| tmp[i * row_size + j * 2 + 1] < fuse_val[j])
+				break;
+		}
+
+		if (j == num_fuse_sel) {
+			mem_acc_vreg->override_map_match = i;
+			break;
+		}
+	}
+
+	/*
+	 * Log the parsed fuse parameter values since they are useful for
+	 * baseline MEM ACC logging.
+	 */
+	buflen = num_fuse_sel * sizeof("fuse_selxxxx = XXXX ");
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (!buf)
+		goto done;
+
+	for (j = 0; j < num_fuse_sel; j++)
+		pos += scnprintf(buf + pos, buflen - pos, "fuse_sel%d = %d ",
+				 j, fuse_val[j]);
+	buf[pos] = '\0';
+	if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
+		pr_info("%s %s tuple match found: %d\n", buf, prop_str,
+			mem_acc_vreg->override_map_match);
+	else
+		pr_err("%s %s tuple match not found\n", buf, prop_str);
+
+done:
+	kfree(fuse_sel);
+	kfree(tmp);
+	kfree(buf);
+	return rc;
+}
+
+#define MAX_CHARS_PER_INT	20
+
+static int mem_acc_reg_addr_val_dump(struct mem_acc_regulator *mem_acc_vreg,
+			struct corner_acc_reg_config *corner_acc_reg_config,
+			u32 corner)
+{
+	int i, k, index, pos = 0;
+	u32 addr_index;
+	size_t buflen;
+	char *buf;
+	struct acc_reg_value *reg_config_list =
+					corner_acc_reg_config->reg_config_list;
+	int max_reg_config_len = corner_acc_reg_config->max_reg_config_len;
+	int num_corners = mem_acc_vreg->num_corners;
+
+	/*
+	 * Log register and value mapping since they are useful for
+	 * baseline MEM ACC logging.
+	 */
+	buflen = max_reg_config_len * (MAX_CHARS_PER_INT + 6) * sizeof(*buf);
+	buf = kzalloc(buflen, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("Could not allocate memory for acc register and value logging\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_corners; i++) {
+		if (corner == i + 1)
+			continue;
+
+		pr_debug("Corner: %d --> %d:\n", corner, i + 1);
+		pos = 0;
+		for (k = 0; k < max_reg_config_len; k++) {
+			index = i * max_reg_config_len + k;
+			addr_index = reg_config_list[index].addr_index;
+			if (addr_index == PARAM_MATCH_ANY)
+				break;
+
+			pos += scnprintf(buf + pos, buflen - pos,
+				"<0x%x 0x%x> ",
+				mem_acc_vreg->phys_reg_addr_list[addr_index],
+				reg_config_list[index].reg_val);
+		}
+		buf[pos] = '\0';
+		pr_debug("%s\n", buf);
+	}
+
+	kfree(buf);
+	return 0;
+}
+
+static int mem_acc_get_reg_addr_val(struct device_node *of_node,
+		const char *prop_str, struct acc_reg_value *reg_config_list,
+		int list_offset, int list_size, u32 max_reg_index)
+{
+	int i, index, rc = 0;
+
+	for (i = 0; i < list_size / 2; i++) {
+		index = (list_offset * list_size) + i * 2;
+		rc = of_property_read_u32_index(of_node, prop_str, index,
+					&reg_config_list[i].addr_index);
+		rc |= of_property_read_u32_index(of_node, prop_str, index + 1,
+					&reg_config_list[i].reg_val);
+		if (rc) {
+			pr_err("could not read %s at tuple %u: rc=%d\n",
+				prop_str, index, rc);
+			return rc;
+		}
+
+		if (reg_config_list[i].addr_index == PARAM_MATCH_ANY)
+			continue;
+
+		if ((!reg_config_list[i].addr_index) ||
+			reg_config_list[i].addr_index > max_reg_index) {
+			pr_err("Invalid register index %u in %s at tuple %u\n",
+				reg_config_list[i].addr_index, prop_str, index);
+			return -EINVAL;
+		}
+	}
+
+	return rc;
+}
+
+static int mem_acc_override_reg_addr_val_init(
+			struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	struct corner_acc_reg_config *corner_acc_reg_config;
+	struct acc_reg_value *override_reg_config_list;
+	int i, tuple_count, tuple_match, len = 0, rc = 0;
+	u32 list_size, override_max_reg_config_len;
+	char prop_str[40];
+	struct property *prop;
+	int num_corners = mem_acc_vreg->num_corners;
+
+	if (!mem_acc_vreg->corner_acc_reg_config)
+		return 0;
+
+	if (mem_acc_vreg->override_map_count) {
+		if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+			return 0;
+		tuple_count = mem_acc_vreg->override_map_count;
+		tuple_match = mem_acc_vreg->override_map_match;
+	} else {
+		tuple_count = 1;
+		tuple_match = 0;
+	}
+
+	corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
+	for (i = 1; i <= num_corners; i++) {
+		snprintf(prop_str, sizeof(prop_str),
+			 "qcom,override-corner%d-addr-val-map", i);
+		prop = of_find_property(of_node, prop_str, &len);
+		list_size = len / (tuple_count * sizeof(u32));
+		if (!prop) {
+			pr_debug("%s property not specified\n", prop_str);
+			continue;
+		}
+
+		if ((!list_size) || list_size < (num_corners * 2)) {
+			pr_err("qcom,override-corner%d-addr-val-map property is missed or invalid length: len=%d\n",
+			i, len);
+			return -EINVAL;
+		}
+
+		override_max_reg_config_len = list_size / (num_corners * 2);
+		override_reg_config_list =
+				corner_acc_reg_config[i].reg_config_list;
+
+		if (corner_acc_reg_config[i].max_reg_config_len
+					!= override_max_reg_config_len) {
+			/* Free the previously allocated memory */
+			devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
+
+			/* Allocate memory for the new requirement */
+			override_reg_config_list =
+				devm_kcalloc(mem_acc_vreg->dev,
+				override_max_reg_config_len * num_corners,
+				sizeof(*override_reg_config_list), GFP_KERNEL);
+			if (!override_reg_config_list)
+				return -ENOMEM;
+
+			corner_acc_reg_config[i].max_reg_config_len =
+						override_max_reg_config_len;
+			corner_acc_reg_config[i].reg_config_list =
+						override_reg_config_list;
+		}
+
+		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+					override_reg_config_list, tuple_match,
+					list_size, mem_acc_vreg->num_acc_reg);
+		if (rc) {
+			pr_err("Failed to read %s property: rc=%d\n",
+				prop_str, rc);
+			return rc;
+		}
+
+		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+						&corner_acc_reg_config[i], i);
+		if (rc) {
+			pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int mem_acc_parse_override_config(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, rc = 0;
+
+	/* Specify default no match case. */
+	mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
+	mem_acc_vreg->override_map_count = 0;
+
+	if (of_find_property(of_node, "qcom,override-fuse-range-map",
+			     NULL)) {
+		rc = mem_acc_parse_override_fuse_version_range(mem_acc_vreg);
+		if (rc) {
+			pr_err("parsing qcom,override-fuse-range-map property failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else if (of_find_property(of_node, "qcom,override-fuse-version-map",
+				    NULL)) {
+		rc = mem_acc_parse_override_fuse_version_map(mem_acc_vreg);
+		if (rc) {
+			pr_err("parsing qcom,override-fuse-version-map property failed, rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		/* No override fuse configuration defined in device node */
+		return 0;
+	}
+
+	if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+		return 0;
+
+	rc = mem_acc_override_corner_map(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to override corner map rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to override reg_config_list init rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		rc = override_mem_acc_custom_data(mem_acc_vreg, i);
+		if (rc) {
+			pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int mem_acc_init_reg_config(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	int i, size, len = 0, rc = 0;
+	u32 addr_index, reg_val, index;
+	char *prop_str = "qcom,acc-init-reg-config";
+
+	if (!of_find_property(of_node, prop_str, &len)) {
+		/* Initial acc register configuration not specified */
+		return rc;
+	}
+
+	size = len / sizeof(u32);
+	if ((!size) || (size % 2)) {
+		pr_err("%s specified with invalid length: %d\n",
+			prop_str, size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size / 2; i++) {
+		index = i * 2;
+		rc = of_property_read_u32_index(of_node, prop_str, index,
+						&addr_index);
+		rc |= of_property_read_u32_index(of_node, prop_str, index + 1,
+						&reg_val);
+		if (rc) {
+			pr_err("could not read %s at tuple %u: rc=%d\n",
+				prop_str, index, rc);
+			return rc;
+		}
+
+		if ((!addr_index) || addr_index > mem_acc_vreg->num_acc_reg) {
+			pr_err("Invalid register index %u in %s at tuple %u\n",
+				addr_index, prop_str, index);
+			return -EINVAL;
+		}
+
+		writel_relaxed(reg_val,
+				mem_acc_vreg->remap_reg_addr_list[addr_index]);
+		/* make sure write complete */
+		mb();
+
+		pr_debug("acc initial config: register:0x%x value:0x%x\n",
+			mem_acc_vreg->phys_reg_addr_list[addr_index], reg_val);
+	}
+
+	return rc;
+}
+
+static int mem_acc_get_reg_addr(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	void __iomem **remap_reg_addr_list;
+	u32 *phys_reg_addr_list;
+	int i, num_acc_reg, len = 0, rc = 0;
+
+	if (!of_find_property(of_node, "qcom,acc-reg-addr-list", &len)) {
+		/* acc register address list not specified */
+		return rc;
+	}
+
+	num_acc_reg = len / sizeof(u32);
+	if (!num_acc_reg) {
+		pr_err("qcom,acc-reg-addr-list has invalid len = %d\n", len);
+		return -EINVAL;
+	}
+
+	phys_reg_addr_list = devm_kcalloc(mem_acc_vreg->dev, num_acc_reg + 1,
+				sizeof(*phys_reg_addr_list), GFP_KERNEL);
+	if (!phys_reg_addr_list)
+		return -ENOMEM;
+
+	remap_reg_addr_list = devm_kcalloc(mem_acc_vreg->dev, num_acc_reg + 1,
+				sizeof(*remap_reg_addr_list), GFP_KERNEL);
+	if (!remap_reg_addr_list)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of_node, "qcom,acc-reg-addr-list",
+					&phys_reg_addr_list[1], num_acc_reg);
+	if (rc) {
+		pr_err("Read- qcom,acc-reg-addr-list failed: rc=%d\n", rc);
+		return rc;
+	}
+
+	for (i = 1; i <= num_acc_reg; i++) {
+		remap_reg_addr_list[i] = devm_ioremap(mem_acc_vreg->dev,
+						phys_reg_addr_list[i], 0x4);
+		if (!remap_reg_addr_list[i]) {
+			pr_err("Unable to map register address 0x%x\n",
+					phys_reg_addr_list[i]);
+			return -EINVAL;
+		}
+	}
+
+	mem_acc_vreg->num_acc_reg = num_acc_reg;
+	mem_acc_vreg->phys_reg_addr_list = phys_reg_addr_list;
+	mem_acc_vreg->remap_reg_addr_list = remap_reg_addr_list;
+
+	return rc;
+}
+
+static int mem_acc_reg_config_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = mem_acc_vreg->dev->of_node;
+	struct acc_reg_value *reg_config_list;
+	int len = 0, size, rc, i, num_corners;
+	struct property *prop;
+	char prop_str[30];
+	struct corner_acc_reg_config *corner_acc_reg_config;
+
+	rc = of_property_read_u32(of_node, "qcom,num-acc-corners",
+				&num_corners);
+	if (rc) {
+		pr_err("could not read qcom,num-acc-corners: rc=%d\n", rc);
+		return rc;
+	}
+
+	mem_acc_vreg->num_corners = num_corners;
+
+	rc = of_property_read_u32(of_node, "qcom,boot-acc-corner",
+				&mem_acc_vreg->corner);
+	if (rc) {
+		pr_err("could not read qcom,boot-acc-corner: rc=%d\n", rc);
+		return rc;
+	}
+	pr_debug("boot acc corner = %d\n", mem_acc_vreg->corner);
+
+	corner_acc_reg_config = devm_kcalloc(mem_acc_vreg->dev, num_corners + 1,
+						sizeof(*corner_acc_reg_config),
+						GFP_KERNEL);
+	if (!corner_acc_reg_config)
+		return -ENOMEM;
+
+	for (i = 1; i <= num_corners; i++) {
+		snprintf(prop_str, sizeof(prop_str),
+				"qcom,corner%d-reg-config", i);
+		prop = of_find_property(of_node, prop_str, &len);
+		size = len / sizeof(u32);
+		if ((!prop) || (!size) || size < (num_corners * 2)) {
+			pr_err("%s property is missed or invalid length: len=%d\n",
+				prop_str, len);
+			return -EINVAL;
+		}
+
+		reg_config_list = devm_kcalloc(mem_acc_vreg->dev, size / 2,
+					sizeof(*reg_config_list), GFP_KERNEL);
+		if (!reg_config_list)
+			return -ENOMEM;
+
+		rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+						reg_config_list, 0, size,
+						mem_acc_vreg->num_acc_reg);
+		if (rc) {
+			pr_err("Failed to read %s property: rc=%d\n",
+				prop_str, rc);
+			return rc;
+		}
+
+		corner_acc_reg_config[i].max_reg_config_len =
+						size / (num_corners * 2);
+		corner_acc_reg_config[i].reg_config_list = reg_config_list;
+
+		rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+						&corner_acc_reg_config[i], i);
+		if (rc) {
+			pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
+				i, rc);
+			return rc;
+		}
+	}
+
+	mem_acc_vreg->corner_acc_reg_config = corner_acc_reg_config;
+	mem_acc_vreg->flags |= MEM_ACC_USE_ADDR_VAL_MAP;
+	return rc;
+}
+
+#define MEM_TYPE_STRING_LEN	20
+static int mem_acc_init(struct platform_device *pdev,
+		struct mem_acc_regulator *mem_acc_vreg)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	struct resource *res;
+	int len, rc, i, j;
+	bool acc_type_present = false;
+	char tmps[MEM_TYPE_STRING_LEN];
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-en");
+	if (!res || !res->start) {
+		pr_debug("'acc-en' resource missing or not used.\n");
+	} else {
+		mem_acc_vreg->acc_en_addr = res->start;
+		len = resource_size(res);
+		pr_debug("'acc_en_addr' = %pa (len=0x%x)\n", &res->start, len);
+
+		mem_acc_vreg->acc_en_base = devm_ioremap(mem_acc_vreg->dev,
+				mem_acc_vreg->acc_en_addr, len);
+		if (!mem_acc_vreg->acc_en_base) {
+			pr_err("Unable to map 'acc_en_addr' %pa\n",
+					&mem_acc_vreg->acc_en_addr);
+			return -EINVAL;
+		}
+
+		rc = populate_acc_data(mem_acc_vreg, "qcom,acc-en-bit-pos",
+				&mem_acc_vreg->acc_en_bit_pos,
+				&mem_acc_vreg->num_acc_en);
+		if (rc) {
+			pr_err("Unable to populate 'qcom,acc-en-bit-pos' rc=%d\n",
+					rc);
+			return rc;
+		}
+	}
+
+	rc = mem_acc_efuse_init(pdev, mem_acc_vreg);
+	if (rc) {
+		pr_err("Wrong eFuse address specified: rc=%d\n", rc);
+		return rc;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l1");
+	if (!res || !res->start) {
+		pr_debug("'acc-sel-l1' resource missing or not used.\n");
+	} else {
+		rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L1);
+		if (rc) {
+			pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+					MEMORY_L1, rc);
+			return rc;
+		}
+		mem_acc_vreg->mem_acc_supported[MEMORY_L1] = true;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l2");
+	if (!res || !res->start) {
+		pr_debug("'acc-sel-l2' resource missing or not used.\n");
+	} else {
+		rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L2);
+		if (rc) {
+			pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+					MEMORY_L2, rc);
+			return rc;
+		}
+		mem_acc_vreg->mem_acc_supported[MEMORY_L2] = true;
+	}
+
+	for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+		snprintf(tmps, MEM_TYPE_STRING_LEN, "mem-acc-type%d", i + 1);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, tmps);
+
+		if (!res || !res->start) {
+			pr_debug("'%s' resource missing or not used.\n", tmps);
+		} else {
+			mem_acc_vreg->mem_acc_type_addr[i] = res->start;
+			acc_type_present = true;
+		}
+	}
+
+	rc = mem_acc_get_reg_addr(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to get acc register addresses: rc=%d\n", rc);
+		return rc;
+	}
+
+	if (mem_acc_vreg->phys_reg_addr_list) {
+		rc = mem_acc_reg_config_init(mem_acc_vreg);
+		if (rc) {
+			pr_err("acc register address-value map failed: rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	if (of_find_property(of_node, "qcom,corner-acc-map", NULL)) {
+		rc = populate_acc_data(mem_acc_vreg, "qcom,corner-acc-map",
+			&mem_acc_vreg->corner_acc_map,
+			&mem_acc_vreg->num_corners);
+		if (rc) {
+			pr_err("Unable to populate 'qcom,corner-acc-map' rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		/* Check if at least one valid mem-acc config. is specified */
+		for (i = 0; i < MEMORY_MAX; i++) {
+			if (mem_acc_vreg->mem_acc_supported[i])
+				break;
+		}
+		if (i == MEMORY_MAX && !acc_type_present) {
+			pr_err("No mem-acc configuration specified\n");
+			return -EINVAL;
+		}
+
+		mem_acc_vreg->flags |= MEM_ACC_USE_CORNER_ACC_MAP;
+	}
+
+	if ((mem_acc_vreg->flags & MEM_ACC_USE_CORNER_ACC_MAP) &&
+		(mem_acc_vreg->flags & MEM_ACC_USE_ADDR_VAL_MAP)) {
+		pr_err("Invalid configuration, both qcom,corner-acc-map and qcom,cornerX-addr-val-map specified\n");
+		return -EINVAL;
+	}
+
+	pr_debug("num_corners = %d\n", mem_acc_vreg->num_corners);
+
+	if (mem_acc_vreg->num_acc_en)
+		mem_acc_en_init(mem_acc_vreg);
+
+	if (mem_acc_vreg->phys_reg_addr_list) {
+		rc = mem_acc_init_reg_config(mem_acc_vreg);
+		if (rc) {
+			pr_err("acc initial register configuration failed: rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	rc = mem_acc_sel_init(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to initialize mem_acc_sel reg rc=%d\n", rc);
+		return rc;
+	}
+
+	for (i = 0; i < MEMORY_MAX; i++) {
+		rc = mem_acc_custom_data_init(pdev, mem_acc_vreg, i);
+		if (rc) {
+			pr_err("Unable to initialize custom data for mem_type=%d rc=%d\n",
+					i, rc);
+			return rc;
+		}
+	}
+
+	rc = mem_acc_parse_override_config(mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to parse mem acc override configuration, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (acc_type_present) {
+		mem_acc_vreg->mem_acc_type_data = devm_kzalloc(
+			mem_acc_vreg->dev, mem_acc_vreg->num_corners *
+			MEM_ACC_TYPE_MAX * sizeof(u32), GFP_KERNEL);
+
+		if (!mem_acc_vreg->mem_acc_type_data) {
+			pr_err("Unable to allocate memory for mem_acc_type\n");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+			if (mem_acc_vreg->mem_acc_type_addr[i]) {
+				snprintf(tmps, MEM_TYPE_STRING_LEN,
+					"qcom,mem-acc-type%d", i + 1);
+
+				j = i * mem_acc_vreg->num_corners;
+				rc = of_property_read_u32_array(
+					mem_acc_vreg->dev->of_node,
+					tmps,
+					&mem_acc_vreg->mem_acc_type_data[j],
+					mem_acc_vreg->num_corners);
+				if (rc) {
+					pr_err("Unable to get property %s rc=%d\n",
+						tmps, rc);
+					return rc;
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int mem_acc_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct mem_acc_regulator *mem_acc_vreg;
+	struct regulator_desc *rdesc;
+	struct regulator_init_data *init_data;
+	int rc;
+
+	if (!pdev->dev.of_node) {
+		pr_err("Device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+					NULL);
+	if (!init_data) {
+		pr_err("regulator init data is missing\n");
+		return -EINVAL;
+	}
+
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
+
+	mem_acc_vreg = devm_kzalloc(&pdev->dev, sizeof(*mem_acc_vreg),
+			GFP_KERNEL);
+	if (!mem_acc_vreg)
+		return -ENOMEM;
+
+	mem_acc_vreg->dev = &pdev->dev;
+
+	rc = mem_acc_init(pdev, mem_acc_vreg);
+	if (rc) {
+		pr_err("Unable to initialize mem_acc configuration rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rdesc			= &mem_acc_vreg->rdesc;
+	rdesc->owner		= THIS_MODULE;
+	rdesc->type		= REGULATOR_VOLTAGE;
+	rdesc->ops		= &mem_acc_corner_ops;
+	rdesc->name		= init_data->constraints.name;
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = mem_acc_vreg;
+	reg_config.of_node = pdev->dev.of_node;
+	mem_acc_vreg->rdev = regulator_register(rdesc, &reg_config);
+	if (IS_ERR(mem_acc_vreg->rdev)) {
+		rc = PTR_ERR(mem_acc_vreg->rdev);
+		if (rc != -EPROBE_DEFER)
+			pr_err("regulator_register failed: rc=%d\n", rc);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, mem_acc_vreg);
+
+	return 0;
+}
+
+static int mem_acc_regulator_remove(struct platform_device *pdev)
+{
+	struct mem_acc_regulator *mem_acc_vreg = platform_get_drvdata(pdev);
+
+	regulator_unregister(mem_acc_vreg->rdev);
+
+	return 0;
+}
+
+static const struct of_device_id mem_acc_regulator_match_table[] = {
+	{ .compatible = "qcom,mem-acc-regulator", },
+	{}
+};
+
+static struct platform_driver mem_acc_regulator_driver = {
+	.probe		= mem_acc_regulator_probe,
+	.remove		= mem_acc_regulator_remove,
+	.driver		= {
+		.name		= "qcom,mem-acc-regulator",
+		.of_match_table = mem_acc_regulator_match_table,
+
+	},
+};
+
+int __init mem_acc_regulator_init(void)
+{
+	return platform_driver_register(&mem_acc_regulator_driver);
+}
+postcore_initcall(mem_acc_regulator_init);
+
+static void __exit mem_acc_regulator_exit(void)
+{
+	platform_driver_unregister(&mem_acc_regulator_driver);
+}
+module_exit(mem_acc_regulator_exit);
+
+MODULE_DESCRIPTION("MEM-ACC-SEL regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/qcom_pm8008-regulator.c b/drivers/regulator/qcom_pm8008-regulator.c
index 15e0829..3c14d23 100644
--- a/drivers/regulator/qcom_pm8008-regulator.c
+++ b/drivers/regulator/qcom_pm8008-regulator.c
@@ -48,8 +48,6 @@
 
 #define LDO_VSET_LB_REG(base)		(base + 0x40)
 
-#define LDO_VSET_VALID_LB_REG(base)	(base + 0x42)
-
 #define LDO_MODE_CTL1_REG(base)		(base + 0x45)
 #define MODE_PRIMARY_MASK		GENMASK(2, 0)
 #define LDO_MODE_NPM			7
@@ -157,7 +155,7 @@
 	int rc;
 
 	rc = pm8008_read(pm8008_reg->regmap,
-			LDO_VSET_VALID_LB_REG(pm8008_reg->base),
+			LDO_VSET_LB_REG(pm8008_reg->base),
 			vset_raw, 2);
 	if (rc < 0) {
 		pm8008_err(pm8008_reg,
diff --git a/drivers/regulator/spm-regulator.c b/drivers/regulator/spm-regulator.c
new file mode 100644
index 0000000..313dcee
--- /dev/null
+++ b/drivers/regulator/spm-regulator.c
@@ -0,0 +1,1351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2017, 2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/arm-smccc.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/spm-regulator.h>
+#include <soc/qcom/spm.h>
+
+#if !(defined(CONFIG_ARM64) || (defined(CONFIG_ARM) && defined(CONFIG_ARM_PSCI)))
+#define __invoke_psci_fn_smc(a, b, c, d) 0
+#endif
+
+#define SPM_REGULATOR_DRIVER_NAME "qcom,spm-regulator"
+
+struct voltage_range {
+	int min_uV;
+	int set_point_min_uV;
+	int max_uV;
+	int step_uV;
+};
+
+enum qpnp_regulator_uniq_type {
+	QPNP_TYPE_HF,
+	QPNP_TYPE_FTS2,
+	QPNP_TYPE_FTS2p5,
+	QPNP_TYPE_FTS426,
+	QPNP_TYPE_ULT_HF,
+	QPNP_TYPE_HFS430,
+};
+
+enum qpnp_regulator_type {
+	QPNP_HF_TYPE		= 0x03,
+	QPNP_FTS2_TYPE		= 0x1C,
+	QPNP_FTS2p5_TYPE	= 0x1C,
+	QPNP_FTS426_TYPE	= 0x1C,
+	QPNP_ULT_HF_TYPE	= 0x22,
+};
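+
+/*
+ * FTS2, FTS2.5 and FTS426 all report the same TYPE register value (0x1C);
+ * they are told apart by the SUBTYPE register values below.
+ */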
+
+enum qpnp_regulator_subtype {
+	QPNP_FTS2_SUBTYPE	= 0x08,
+	QPNP_HF_SUBTYPE		= 0x08,
+	QPNP_FTS2p5_SUBTYPE	= 0x09,
+	QPNP_FTS426_SUBTYPE	= 0x0A,
+	QPNP_ULT_HF_SUBTYPE	= 0x0D,
+	QPNP_HFS430_SUBTYPE	= 0x0A,
+};
+
+enum qpnp_logical_mode {
+	QPNP_LOGICAL_MODE_AUTO,
+	QPNP_LOGICAL_MODE_PWM,
+};
+
+static const struct voltage_range fts2_range0 = {0, 350000, 1275000,  5000};
+static const struct voltage_range fts2_range1 = {0, 700000, 2040000, 10000};
+static const struct voltage_range fts2p5_range0
+					 = { 80000, 350000, 1355000,  5000};
+static const struct voltage_range fts2p5_range1
+					 = {160000, 700000, 2200000, 10000};
+static const struct voltage_range fts426_range = {0, 320000, 1352000, 4000};
+static const struct voltage_range hfs430_range = {0, 320000, 2040000, 8000};
+static const struct voltage_range ult_hf_range0 = {375000, 375000, 1562500,
+								12500};
+static const struct voltage_range ult_hf_range1 = {750000, 750000, 1525000,
+								25000};
+static const struct voltage_range hf_range0 = {375000, 375000, 1562500, 12500};
+static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000,
+								25000};
+
+#define QPNP_SMPS_REG_TYPE		0x04
+#define QPNP_SMPS_REG_SUBTYPE		0x05
+#define QPNP_SMPS_REG_VOLTAGE_RANGE	0x40
+#define QPNP_SMPS_REG_VOLTAGE_SETPOINT	0x41
+#define QPNP_SMPS_REG_MODE		0x45
+#define QPNP_SMPS_REG_STEP_CTRL		0x61
+#define QPNP_SMPS_REG_UL_LL_CTRL	0x68
+
+/* FTS426/HFS430 voltage control registers */
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_LB	0x40
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_UB	0x41
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_LB	0x42
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_UB	0x43
+
+/* HF voltage limit registers */
+#define QPNP_HF_REG_VOLTAGE_ULS		0x69
+#define QPNP_HF_REG_VOLTAGE_LLS		0x6B
+
+/* FTS voltage limit registers */
+#define QPNP_FTS_REG_VOLTAGE_ULS_VALID	0x6A
+#define QPNP_FTS_REG_VOLTAGE_LLS_VALID	0x6C
+
+/* FTS426/HFS430 voltage limit registers */
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_LB	0x68
+#define QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_UB	0x69
+
+/* Common regulator UL & LL limits control register layout */
+#define QPNP_COMMON_UL_EN_MASK		0x80
+#define QPNP_COMMON_LL_EN_MASK		0x40
+
+#define QPNP_SMPS_MODE_PWM		0x80
+#define QPNP_SMPS_MODE_AUTO		0x40
+#define QPNP_FTS426_HFS430_MODE_PWM	0x07
+#define QPNP_FTS426_HFS430_MODE_AUTO	0x06
+
+#define QPNP_SMPS_STEP_CTRL_STEP_MASK	0x18
+#define QPNP_SMPS_STEP_CTRL_STEP_SHIFT	3
+#define QPNP_SMPS_STEP_CTRL_DELAY_MASK	0x07
+#define QPNP_SMPS_STEP_CTRL_DELAY_SHIFT	0
+#define QPNP_FTS426_HFS430_STEP_CTRL_DELAY_MASK		0x03
+#define QPNP_FTS426_HFS430_STEP_CTRL_DELAY_SHIFT	0
+
+/* Clock rates in kHz of the regulator reference clocks. */
+#define QPNP_SMPS_CLOCK_RATE		19200
+#define QPNP_FTS426_CLOCK_RATE		4800
+#define QPNP_HFS430_CLOCK_RATE		1600
+
+/* Time to delay in us to ensure that a mode change has completed. */
+#define QPNP_FTS2_MODE_CHANGE_DELAY	50
+
+/* Minimum time in us that it takes to complete a single SPMI write. */
+#define QPNP_SPMI_WRITE_MIN_DELAY	8
+
+/* Minimum voltage stepper delay for each step. */
+#define QPNP_FTS2_STEP_DELAY		8
+#define QPNP_HF_STEP_DELAY		20
+#define QPNP_FTS426_HFS430_STEP_DELAY	2
+
+/* Arbitrarily large max step size used to avoid possible numerical overflow */
+#define SPM_REGULATOR_MAX_STEP_UV	10000000
+
+/*
+ * The ratio QPNP_FTS2_STEP_MARGIN_NUM/QPNP_FTS2_STEP_MARGIN_DEN is used to
+ * adjust the step rate in order to account for oscillator variance.
+ */
+#define QPNP_FTS2_STEP_MARGIN_NUM	4
+#define QPNP_FTS2_STEP_MARGIN_DEN	5
+#define QPNP_FTS426_HFS430_STEP_MARGIN_NUM	10
+#define QPNP_FTS426_HFS430_STEP_MARGIN_DEN	11
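+/*
+ * e.g. assuming the margin is applied as a NUM/DEN multiplier (the step
+ * rate computation is outside this hunk), an FTS2 step rate is derated to
+ * 4/5 (80%) of its nominal value, FTS426/HFS430 to 10/11.
+ */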
+
+/*
+ * Settling delay for FTS2.5
+ * Warm-up=20uS, 0-10% & 90-100% non-linear V-ramp delay = 50uS
+ */
+#define FTS2P5_SETTLING_DELAY_US	70
+
+/* VSET value to decide the range of ULT SMPS */
+#define ULT_SMPS_RANGE_SPLIT 0x60
+
+struct spm_vreg {
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	const struct voltage_range	*range;
+	int				uV;
+	int				last_set_uV;
+	unsigned int			vlevel;
+	unsigned int			last_set_vlevel;
+	u32				max_step_uV;
+	bool				online;
+	u16				spmi_base_addr;
+	enum qpnp_logical_mode		init_mode;
+	enum qpnp_logical_mode		mode;
+	int				step_rate;
+	enum qpnp_regulator_uniq_type	regulator_type;
+	u32				cpu_num;
+	bool				bypass_spm;
+	struct regulator_desc		avs_rdesc;
+	struct regulator_dev		*avs_rdev;
+	int				avs_min_uV;
+	int				avs_max_uV;
+	bool				avs_enabled;
+	u32				recal_cluster_mask;
+};
+
+static inline bool spm_regulator_using_avs(struct spm_vreg *vreg)
+{
+	return vreg->avs_rdev && !vreg->bypass_spm;
+}
+
+static int spm_regulator_uv_to_vlevel(struct spm_vreg *vreg, int uV)
+{
+	int vlevel;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		return roundup(uV, vreg->range->step_uV) / 1000;
+
+	vlevel = DIV_ROUND_UP(uV - vreg->range->min_uV, vreg->range->step_uV);
+
+	/* Fix VSET for ULT HF Buck */
+	if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+	    && vreg->range == &ult_hf_range1) {
+		vlevel &= 0x1F;
+		vlevel |= ULT_SMPS_RANGE_SPLIT;
+	}
+
+	return vlevel;
+}
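+
+/*
+ * Worked conversions for the helper above (inputs illustrative):
+ * - FTS426 (4000 uV steps): 608000 uV rounds to 608000, giving vlevel 608
+ *   (i.e. the set point in mV).
+ * - ULT HF range 1 (min 750000 uV, 25000 uV steps): 1000000 uV ->
+ *   DIV_ROUND_UP(250000, 25000) = 10 = 0x0A, then ORed with
+ *   ULT_SMPS_RANGE_SPLIT to give 0x6A.
+ */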
+
+static int spm_regulator_vlevel_to_uv(struct spm_vreg *vreg, int vlevel)
+{
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		return vlevel * 1000;
+	/*
+	 * Calculate ULT HF buck VSET based on range:
+	 * In case of range 0: VSET is a 7 bit value.
+	 * In case of range 1: VSET is a 5 bit value.
+	 */
+	if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+	    && vreg->range == &ult_hf_range1)
+		vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+	return vlevel * vreg->range->step_uV + vreg->range->min_uV;
+}
+
+static unsigned int spm_regulator_vlevel_to_selector(struct spm_vreg *vreg,
+						 unsigned int vlevel)
+{
+	/* Fix VSET for ULT HF Buck */
+	if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+	    && vreg->range == &ult_hf_range1)
+		vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+	if (vreg->regulator_type == QPNP_TYPE_HFS430)
+		vlevel = spm_regulator_vlevel_to_uv(vreg, vlevel)
+				/ vreg->range->step_uV;
+
+	return vlevel - (vreg->range->set_point_min_uV - vreg->range->min_uV)
+				/ vreg->range->step_uV;
+}
+
+static int qpnp_smps_read_voltage(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 val[2] = {0};
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430) {
+		rc = regmap_bulk_read(vreg->regmap,
+				vreg->spmi_base_addr
+				+ QPNP_FTS426_HFS430_REG_VOLTAGE_VALID_LB,
+				val, 2);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint registers, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		vreg->last_set_vlevel = ((unsigned int)val[1] << 8) | val[0];
+	} else {
+		rc = regmap_bulk_read(vreg->regmap,
+			vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+				val, 1);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage setpoint register, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+		vreg->last_set_vlevel = val[0];
+	}
+
+	vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg,
+						vreg->last_set_vlevel);
+	return rc;
+}
+
+static int qpnp_smps_write_voltage(struct spm_vreg *vreg, unsigned int vlevel)
+{
+	int rc = 0;
+	u8 reg[2];
+
+	/* Set voltage control registers via SPMI. */
+	reg[0] = vlevel & 0xFF;
+	reg[1] = (vlevel >> 8) & 0xFF;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430) {
+		rc = regmap_bulk_write(vreg->regmap,
+			  vreg->spmi_base_addr
+			  + QPNP_FTS426_HFS430_REG_VOLTAGE_LB,
+			  reg, 2);
+	} else {
+		rc = regmap_write(vreg->regmap,
+			  vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+			  reg[0]);
+	}
+
+	if (rc)
+		pr_err("%s: regmap_write failed, rc=%d\n",
+			vreg->rdesc.name, rc);
+
+	return rc;
+}
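+
+/*
+ * e.g. (illustrative) a vlevel of 608 (0x260) is split as reg[0] = 0x60
+ * into the LB register and reg[1] = 0x02 into the UB register on
+ * FTS426/HFS430 parts; other types write only reg[0].
+ */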
+
+static inline enum qpnp_logical_mode qpnp_regval_to_mode(struct spm_vreg *vreg,
+							u8 regval)
+{
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		return (regval == QPNP_FTS426_HFS430_MODE_PWM)
+			? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
+	else
+		return (regval & QPNP_SMPS_MODE_PWM)
+			? QPNP_LOGICAL_MODE_PWM : QPNP_LOGICAL_MODE_AUTO;
+}
+
+static inline u8 qpnp_mode_to_regval(struct spm_vreg *vreg,
+					enum qpnp_logical_mode mode)
+{
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		return (mode == QPNP_LOGICAL_MODE_PWM)
+			? QPNP_FTS426_HFS430_MODE_PWM
+			: QPNP_FTS426_HFS430_MODE_AUTO;
+	else
+		return (mode == QPNP_LOGICAL_MODE_PWM)
+			? QPNP_SMPS_MODE_PWM : QPNP_SMPS_MODE_AUTO;
+}
+
+static int qpnp_smps_set_mode(struct spm_vreg *vreg, u8 mode)
+{
+	int rc;
+
+	rc = regmap_write(vreg->regmap,
+			  vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+			  qpnp_mode_to_regval(vreg, mode));
+	if (rc)
+		dev_err(&vreg->pdev->dev,
+			"%s: could not write to mode register, rc=%d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+static int spm_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int vlevel, rc;
+
+	if (spm_regulator_using_avs(vreg)) {
+		vlevel = msm_spm_get_vdd(vreg->cpu_num);
+
+		if (vlevel < 0) {
+			pr_debug("%s: msm_spm_get_vdd failed, rc=%d; falling back on SPMI read\n",
+				vreg->rdesc.name, vlevel);
+
+			rc = qpnp_smps_read_voltage(vreg);
+			if (rc) {
+				pr_err("%s: voltage read failed, rc=%d\n",
+				       vreg->rdesc.name, rc);
+				return rc;
+			}
+
+			return vreg->last_set_uV;
+		}
+
+		vreg->last_set_vlevel = vlevel;
+		vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+		return vreg->last_set_uV;
+	} else {
+		return vreg->uV;
+	}
+}
+
+static int spm_regulator_write_voltage(struct spm_vreg *vreg, int uV)
+{
+	unsigned int vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+	bool spm_failed = false;
+	int rc = 0;
+	u32 slew_delay;
+
+	if (likely(!vreg->bypass_spm)) {
+		/* Set voltage control register via SPM. */
+		rc = msm_spm_set_vdd(vreg->cpu_num, vlevel);
+		if (rc) {
+			pr_debug("%s: msm_spm_set_vdd failed, rc=%d; falling back on SPMI write\n",
+				vreg->rdesc.name, rc);
+			spm_failed = true;
+		}
+	}
+
+	if (unlikely(vreg->bypass_spm || spm_failed)) {
+		rc = qpnp_smps_write_voltage(vreg, vlevel);
+		if (rc) {
+			pr_err("%s: voltage write failed, rc=%d\n",
+				vreg->rdesc.name, rc);
+			return rc;
+		}
+	}
+
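+	/*
+	 * step_rate is in uV/us, so the slew wait is the voltage delta
+	 * divided by the rate, rounded up.  E.g. (illustrative numbers) a
+	 * 50000 uV rise at step_rate = 10000 uV/us needs
+	 * DIV_ROUND_UP(50000, 10000) = 5 us, plus a fixed settling delay
+	 * on FTS2p5 bucks.
+	 */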
+	if (uV > vreg->last_set_uV) {
+		/* Wait for voltage stepping to complete. */
+		slew_delay = DIV_ROUND_UP(uV - vreg->last_set_uV,
+					vreg->step_rate);
+		if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
+			slew_delay += FTS2P5_SETTLING_DELAY_US;
+		udelay(slew_delay);
+	} else if (vreg->regulator_type == QPNP_TYPE_FTS2p5) {
+		/* add the ramp-down delay */
+		slew_delay = DIV_ROUND_UP(vreg->last_set_uV - uV,
+				vreg->step_rate) + FTS2P5_SETTLING_DELAY_US;
+		udelay(slew_delay);
+	}
+
+	vreg->last_set_uV = uV;
+	vreg->last_set_vlevel = vlevel;
+
+	return rc;
+}
+
+static int spm_regulator_recalibrate(struct spm_vreg *vreg)
+{
+	struct arm_smccc_res res;
+
+	if (!vreg->recal_cluster_mask)
+		return 0;
+
+	arm_smccc_smc(0xC4000020, vreg->recal_cluster_mask,
+		2, 0, 0, 0, 0, 0, &res);
+	if (res.a0)
+		pr_err("%s: recalibration failed, rc=%ld\n", vreg->rdesc.name,
+			res.a0);
+
+	return res.a0;
+}
+
+static int _spm_regulator_set_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	bool pwm_required;
+	int rc = 0;
+	int uV;
+
+	rc = spm_regulator_get_voltage(rdev);
+	if (rc < 0)
+		return rc;
+
+	if (vreg->vlevel == vreg->last_set_vlevel)
+		return 0;
+
+	pwm_required = (vreg->regulator_type == QPNP_TYPE_FTS2)
+			&& (vreg->init_mode != QPNP_LOGICAL_MODE_PWM)
+			&& vreg->uV > vreg->last_set_uV;
+
+	if (pwm_required) {
+		/* Switch to PWM mode so that voltage ramping is fast. */
+		rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_PWM);
+		if (rc)
+			return rc;
+	}
+
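+	/*
+	 * Step toward the requested voltage in chunks of at most
+	 * max_step_uV.  For example (illustrative numbers only): with
+	 * last_set_uV = 800000, a target of 900000 and max_step_uV = 40000,
+	 * the loop writes 840000, 880000 and finally 900000.
+	 */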
+	do {
+		uV = vreg->uV > vreg->last_set_uV
+		    ? min(vreg->uV, vreg->last_set_uV + (int)vreg->max_step_uV)
+		    : max(vreg->uV, vreg->last_set_uV - (int)vreg->max_step_uV);
+
+		rc = spm_regulator_write_voltage(vreg, uV);
+		if (rc)
+			return rc;
+	} while (vreg->last_set_uV != vreg->uV);
+
+	if (pwm_required) {
+		/* Wait for mode transition to complete. */
+		udelay(QPNP_FTS2_MODE_CHANGE_DELAY - QPNP_SPMI_WRITE_MIN_DELAY);
+		/* Switch to AUTO mode so that power consumption is lowered. */
+		rc = qpnp_smps_set_mode(vreg, QPNP_LOGICAL_MODE_AUTO);
+		if (rc)
+			return rc;
+	}
+
+	rc = spm_regulator_recalibrate(vreg);
+
+	return rc;
+}
+
+static int spm_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned int *selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	const struct voltage_range *range = vreg->range;
+	int uV = min_uV;
+	unsigned int vlevel;
+
+	if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+		uV = range->set_point_min_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+	uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+	if (uV > max_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	*selector = spm_regulator_vlevel_to_selector(vreg, vlevel);
+	vreg->vlevel = vlevel;
+	vreg->uV = uV;
+
+	if (!vreg->online)
+		return 0;
+
+	return _spm_regulator_set_voltage(rdev);
+}
+
+static int spm_regulator_list_voltage(struct regulator_dev *rdev,
+					unsigned int selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	if (selector >= vreg->rdesc.n_voltages)
+		return 0;
+
+	return selector * vreg->range->step_uV + vreg->range->set_point_min_uV;
+}
+
+static int spm_regulator_enable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = _spm_regulator_set_voltage(rdev);
+
+	if (!rc)
+		vreg->online = true;
+
+	return rc;
+}
+
+static int spm_regulator_disable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	vreg->online = false;
+
+	return 0;
+}
+
+static int spm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->online;
+}
+
+static unsigned int spm_regulator_get_mode(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->mode == QPNP_LOGICAL_MODE_PWM
+			? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int spm_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	/*
+	 * Map REGULATOR_MODE_NORMAL to PWM mode and REGULATOR_MODE_IDLE to
+	 * init_mode.  This ensures that the regulator always stays in PWM mode
+	 * in the case that qcom,mode has been specified as "pwm" in device
+	 * tree.
+	 */
+	vreg->mode = (mode == REGULATOR_MODE_NORMAL) ? QPNP_LOGICAL_MODE_PWM
+						     : vreg->init_mode;
+
+	return qpnp_smps_set_mode(vreg, vreg->mode);
+}
+
+static struct regulator_ops spm_regulator_ops = {
+	.get_voltage	= spm_regulator_get_voltage,
+	.set_voltage	= spm_regulator_set_voltage,
+	.list_voltage	= spm_regulator_list_voltage,
+	.get_mode	= spm_regulator_get_mode,
+	.set_mode	= spm_regulator_set_mode,
+	.enable		= spm_regulator_enable,
+	.disable	= spm_regulator_disable,
+	.is_enabled	= spm_regulator_is_enabled,
+};
+
+static int spm_regulator_avs_set_voltage(struct regulator_dev *rdev, int min_uV,
+					int max_uV, unsigned int *selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	const struct voltage_range *range = vreg->range;
+	unsigned int vlevel_min, vlevel_max;
+	int uV, avs_min_uV, avs_max_uV, rc;
+
+	uV = min_uV;
+
+	if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+		uV = range->set_point_min_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->avs_rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel_min = spm_regulator_uv_to_vlevel(vreg, uV);
+	avs_min_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_min);
+
+	if (avs_min_uV > max_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->avs_rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	uV = max_uV;
+
+	if (uV > range->max_uV && min_uV <= range->max_uV)
+		uV = range->max_uV;
+
+	if (uV < range->set_point_min_uV || uV > range->max_uV) {
+		pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			vreg->avs_rdesc.name, min_uV, max_uV,
+			range->set_point_min_uV, range->max_uV);
+		return -EINVAL;
+	}
+
+	vlevel_max = spm_regulator_uv_to_vlevel(vreg, uV);
+	avs_max_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_max);
+
+	if (avs_max_uV < min_uV) {
+		pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+			vreg->avs_rdesc.name, min_uV, max_uV);
+		return -EINVAL;
+	}
+
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_set_limit(vreg->cpu_num, vlevel_min,
+						vlevel_max);
+		if (rc) {
+			pr_err("%s: AVS limit setting failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	*selector = spm_regulator_vlevel_to_selector(vreg, vlevel_min);
+	vreg->avs_min_uV = avs_min_uV;
+	vreg->avs_max_uV = avs_max_uV;
+
+	return 0;
+}
+
+static int spm_regulator_avs_get_voltage(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->avs_min_uV;
+}
+
+static int spm_regulator_avs_enable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_enable(vreg->cpu_num);
+		if (rc) {
+			pr_err("%s: AVS enable failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	vreg->avs_enabled = true;
+
+	return 0;
+}
+
+static int spm_regulator_avs_disable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	if (likely(!vreg->bypass_spm)) {
+		rc = msm_spm_avs_disable(vreg->cpu_num);
+		if (rc) {
+			pr_err("%s: AVS disable failed, rc=%d\n",
+				vreg->avs_rdesc.name, rc);
+			return rc;
+		}
+	}
+
+	vreg->avs_enabled = false;
+
+	return 0;
+}
+
+static int spm_regulator_avs_is_enabled(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->avs_enabled;
+}
+
+static struct regulator_ops spm_regulator_avs_ops = {
+	.get_voltage	= spm_regulator_avs_get_voltage,
+	.set_voltage	= spm_regulator_avs_set_voltage,
+	.list_voltage	= spm_regulator_list_voltage,
+	.enable		= spm_regulator_avs_enable,
+	.disable	= spm_regulator_avs_disable,
+	.is_enabled	= spm_regulator_avs_is_enabled,
+};
+
+static int qpnp_smps_check_type(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 type[2];
+
+	rc = regmap_bulk_read(vreg->regmap,
+			      vreg->spmi_base_addr + QPNP_SMPS_REG_TYPE,
+			      type,
+			      2);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read type register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (type[0] == QPNP_FTS2_TYPE && type[1] == QPNP_FTS2_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS2;
+	} else if (type[0] == QPNP_FTS2p5_TYPE
+					&& type[1] == QPNP_FTS2p5_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS2p5;
+	} else if (type[0] == QPNP_FTS426_TYPE
+					&& type[1] == QPNP_FTS426_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_FTS426;
+	} else if (type[0] == QPNP_HF_TYPE
+					&& type[1] == QPNP_HFS430_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_HFS430;
+	} else if (type[0] == QPNP_ULT_HF_TYPE
+					&& type[1] == QPNP_ULT_HF_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_ULT_HF;
+	} else if (type[0] == QPNP_HF_TYPE
+					&& type[1] == QPNP_HF_SUBTYPE) {
+		vreg->regulator_type = QPNP_TYPE_HF;
+	} else {
+		dev_err(&vreg->pdev->dev,
+			"%s: invalid type=0x%02X, subtype=0x%02X register pair\n",
+			 __func__, type[0], type[1]);
+		return -ENODEV;
+	}
+
+	return rc;
+}
+
+static int qpnp_smps_init_range(struct spm_vreg *vreg,
+	const struct voltage_range *range0, const struct voltage_range *range1)
+{
+	int rc;
+	u8 reg = 0;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_RANGE,
+			 &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read voltage range register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	if (reg == 0x00) {
+		vreg->range = range0;
+	} else if (reg == 0x01) {
+		vreg->range = range1;
+	} else {
+		dev_err(&vreg->pdev->dev, "%s: voltage range=%d is invalid\n",
+			__func__, reg);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int qpnp_ult_hf_init_range(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 reg = 0;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+			 &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read voltage range register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
+	vreg->range = (reg < ULT_SMPS_RANGE_SPLIT) ? &ult_hf_range0 :
+							&ult_hf_range1;
+	return rc;
+}
+
+static int qpnp_smps_init_voltage(struct spm_vreg *vreg)
+{
+	int rc;
+
+	rc = qpnp_smps_read_voltage(vreg);
+	if (rc) {
+		pr_err("%s: voltage read failed, rc=%d\n", vreg->rdesc.name,
+			rc);
+		return rc;
+	}
+
+	vreg->vlevel = vreg->last_set_vlevel;
+	vreg->uV = vreg->last_set_uV;
+
+	/* Initialize SAW voltage control register */
+	if (!vreg->bypass_spm) {
+		rc = msm_spm_set_vdd(vreg->cpu_num, vreg->vlevel);
+		if (rc)
+			pr_err("%s: msm_spm_set_vdd failed, rc=%d\n",
+			       vreg->rdesc.name, rc);
+	}
+
+	return 0;
+}
+
+static int qpnp_smps_init_mode(struct spm_vreg *vreg)
+{
+	const char *mode_name;
+	int rc;
+	uint val;
+
+	rc = of_property_read_string(vreg->pdev->dev.of_node, "qcom,mode",
+					&mode_name);
+	if (!rc) {
+		if (strcmp("pwm", mode_name) == 0) {
+			vreg->init_mode = QPNP_LOGICAL_MODE_PWM;
+		} else if ((strcmp("auto", mode_name) == 0) &&
+				(vreg->regulator_type != QPNP_TYPE_ULT_HF)) {
+			vreg->init_mode = QPNP_LOGICAL_MODE_AUTO;
+		} else {
+			dev_err(&vreg->pdev->dev,
+				"%s: unknown regulator mode: %s\n",
+				__func__, mode_name);
+			return -EINVAL;
+		}
+
+		rc = qpnp_smps_set_mode(vreg, vreg->init_mode);
+		if (rc)
+			return rc;
+	} else {
+		rc = regmap_read(vreg->regmap,
+				 vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+				 &val);
+		if (rc) {
+			dev_err(&vreg->pdev->dev,
+				"%s: could not read mode register, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+		vreg->init_mode = qpnp_regval_to_mode(vreg, val);
+	}
+
+	vreg->mode = vreg->init_mode;
+
+	return rc;
+}
+
+static int qpnp_smps_init_step_rate(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 reg = 0;
+	int step = 0, delay;
+	uint val;
+
+	rc = regmap_read(vreg->regmap,
+			 vreg->spmi_base_addr + QPNP_SMPS_REG_STEP_CTRL, &val);
+	if (rc) {
+		dev_err(&vreg->pdev->dev,
+			"%s: could not read stepping control register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+	reg = (u8)val;
+
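+	/*
+	 * Summary of the arithmetic below (a restatement for readability,
+	 * assuming the *_CLOCK_RATE constants are expressed in kHz):
+	 *   step_rate [uV/us] = clock [kHz] * step_uV * 2^step
+	 *                       / (1000 * (base_delay << delay))
+	 * and the result is then derated by the type-specific step margin
+	 * ratio.
+	 */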
+	/* ULT, FTS426, and HFS430 bucks do not support step control */
+	if (vreg->regulator_type != QPNP_TYPE_ULT_HF
+	    && vreg->regulator_type != QPNP_TYPE_FTS426
+	    && vreg->regulator_type != QPNP_TYPE_HFS430)
+		step = (reg & QPNP_SMPS_STEP_CTRL_STEP_MASK)
+			>> QPNP_SMPS_STEP_CTRL_STEP_SHIFT;
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+		|| vreg->regulator_type == QPNP_TYPE_HFS430) {
+		delay = (reg & QPNP_FTS426_HFS430_STEP_CTRL_DELAY_MASK)
+			>> QPNP_FTS426_HFS430_STEP_CTRL_DELAY_SHIFT;
+
+		/* step_rate has units of uV/us. */
+		vreg->step_rate = ((vreg->regulator_type == QPNP_TYPE_FTS426)
+					? QPNP_FTS426_CLOCK_RATE
+					: QPNP_HFS430_CLOCK_RATE)
+					* vreg->range->step_uV;
+	} else {
+		delay = (reg & QPNP_SMPS_STEP_CTRL_DELAY_MASK)
+			>> QPNP_SMPS_STEP_CTRL_DELAY_SHIFT;
+
+		/* step_rate has units of uV/us. */
+		vreg->step_rate = QPNP_SMPS_CLOCK_RATE * vreg->range->step_uV
+					* (1 << step);
+	}
+
+	if ((vreg->regulator_type == QPNP_TYPE_ULT_HF)
+			|| (vreg->regulator_type == QPNP_TYPE_HF))
+		vreg->step_rate /= 1000 * (QPNP_HF_STEP_DELAY << delay);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS426
+			|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		vreg->step_rate /= 1000 * (QPNP_FTS426_HFS430_STEP_DELAY
+						<< delay);
+	else
+		vreg->step_rate /= 1000 * (QPNP_FTS2_STEP_DELAY << delay);
+
+	if (vreg->regulator_type == QPNP_TYPE_FTS426
+			|| vreg->regulator_type == QPNP_TYPE_HFS430)
+		vreg->step_rate = vreg->step_rate
+					* QPNP_FTS426_HFS430_STEP_MARGIN_NUM
+					/ QPNP_FTS426_HFS430_STEP_MARGIN_DEN;
+	else
+		vreg->step_rate = vreg->step_rate * QPNP_FTS2_STEP_MARGIN_NUM
+					/ QPNP_FTS2_STEP_MARGIN_DEN;
+
+	/* Ensure that the stepping rate is greater than 0. */
+	vreg->step_rate = max(vreg->step_rate, 1);
+
+	return rc;
+}
+
+static int qpnp_smps_check_constraints(struct spm_vreg *vreg,
+					struct regulator_init_data *init_data)
+{
+	int rc = 0, limit_min_uV, limit_max_uV;
+	u16 ul_reg, ll_reg;
+	u8 reg[2];
+
+	limit_min_uV = 0;
+	limit_max_uV = INT_MAX;
+
+	ul_reg = QPNP_FTS_REG_VOLTAGE_ULS_VALID;
+	ll_reg = QPNP_FTS_REG_VOLTAGE_LLS_VALID;
+
+	switch (vreg->regulator_type) {
+	case QPNP_TYPE_HF:
+		ul_reg = QPNP_HF_REG_VOLTAGE_ULS;
+		ll_reg = QPNP_HF_REG_VOLTAGE_LLS;
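+		/* fall through */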
+	case QPNP_TYPE_FTS2:
+	case QPNP_TYPE_FTS2p5:
+		rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+					+ QPNP_SMPS_REG_UL_LL_CTRL, reg, 1);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: UL_LL register read failed, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		if (reg[0] & QPNP_COMMON_UL_EN_MASK) {
+			rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+						+ ul_reg, &reg[1], 1);
+			if (rc) {
+				dev_err(&vreg->pdev->dev, "%s: ULS register read failed, rc=%d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			limit_max_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
+		}
+
+		if (reg[0] & QPNP_COMMON_LL_EN_MASK) {
+			rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+						+ ll_reg, &reg[1], 1);
+			if (rc) {
+				dev_err(&vreg->pdev->dev, "%s: LLS register read failed, rc=%d\n",
+					__func__, rc);
+				return rc;
+			}
+
+			limit_min_uV = spm_regulator_vlevel_to_uv(vreg, reg[1]);
+		}
+
+		break;
+	case QPNP_TYPE_FTS426:
+	case QPNP_TYPE_HFS430:
+		rc = regmap_bulk_read(vreg->regmap, vreg->spmi_base_addr
+					+ QPNP_FTS426_HFS430_REG_VOLTAGE_ULS_LB,
+					reg, 2);
+		if (rc) {
+			dev_err(&vreg->pdev->dev, "%s: could not read voltage limit registers, rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		limit_max_uV = spm_regulator_vlevel_to_uv(vreg,
+					((unsigned int)reg[1] << 8) | reg[0]);
+		break;
+	case QPNP_TYPE_ULT_HF:
+		/* no HW voltage limit configuration */
+		break;
+	}
+
+	if (init_data->constraints.min_uV < limit_min_uV
+	    || init_data->constraints.max_uV > limit_max_uV) {
+		dev_err(&vreg->pdev->dev, "regulator min/max(%d/%d) constraints do not fit within HW configured min/max(%d/%d) constraints\n",
+			init_data->constraints.min_uV,
+			init_data->constraints.max_uV, limit_min_uV,
+			limit_max_uV);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static bool spm_regulator_using_range0(struct spm_vreg *vreg)
+{
+	return vreg->range == &fts2_range0 || vreg->range == &fts2p5_range0
+		|| vreg->range == &ult_hf_range0 || vreg->range == &hf_range0
+		|| vreg->range == &fts426_range;
+}
+
+/* Register a regulator to enable/disable AVS and set AVS min/max limits. */
+static int spm_regulator_avs_register(struct spm_vreg *vreg,
+				struct device *dev, struct device_node *node)
+{
+	struct regulator_config reg_config = {};
+	struct device_node *avs_node = NULL;
+	struct device_node *child_node;
+	struct regulator_init_data *init_data;
+	int rc;
+
+	/*
+	 * Find the first available child node (if any).  It corresponds to an
+	 * AVS limits regulator.
+	 */
+	for_each_available_child_of_node(node, child_node) {
+		avs_node = child_node;
+		break;
+	}
+
+	if (!avs_node)
+		return 0;
+
+	init_data = of_get_regulator_init_data(dev, avs_node, &vreg->avs_rdesc);
+	if (!init_data) {
+		dev_err(dev, "%s: unable to allocate memory\n", __func__);
+		return -ENOMEM;
+	}
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+						| REGULATOR_CHANGE_VOLTAGE;
+
+	if (!init_data->constraints.name) {
+		dev_err(dev, "%s: AVS node is missing regulator name\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	vreg->avs_rdesc.name	= init_data->constraints.name;
+	vreg->avs_rdesc.type	= REGULATOR_VOLTAGE;
+	vreg->avs_rdesc.owner	= THIS_MODULE;
+	vreg->avs_rdesc.ops	= &spm_regulator_avs_ops;
+	vreg->avs_rdesc.n_voltages
+		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
+			/ vreg->range->step_uV + 1;
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = avs_node;
+
+	vreg->avs_rdev = regulator_register(&vreg->avs_rdesc, &reg_config);
+	if (IS_ERR(vreg->avs_rdev)) {
+		rc = PTR_ERR(vreg->avs_rdev);
+		dev_err(dev, "%s: AVS regulator_register failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	if (vreg->bypass_spm)
+		pr_debug("%s: SPM bypassed so AVS regulator calls are no-ops\n",
+			vreg->avs_rdesc.name);
+
+	return 0;
+}
+
+static int spm_regulator_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct device_node *node = pdev->dev.of_node;
+	struct regulator_init_data *init_data;
+	struct spm_vreg *vreg;
+	unsigned int base;
+	bool bypass_spm;
+	int rc;
+
+	if (!node) {
+		dev_err(&pdev->dev, "%s: device node missing\n", __func__);
+		return -ENODEV;
+	}
+
+	bypass_spm = of_property_read_bool(node, "qcom,bypass-spm");
+	if (!bypass_spm) {
+		rc = msm_spm_probe_done();
+		if (rc) {
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"%s: spm unavailable, rc=%d\n",
+					__func__, rc);
+			return rc;
+		}
+	}
+
+	vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg)
+		return -ENOMEM;
+
+	vreg->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!vreg->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+	vreg->pdev = pdev;
+	vreg->bypass_spm = bypass_spm;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't find reg in node = %s rc = %d\n",
+			pdev->dev.of_node->full_name, rc);
+		return rc;
+	}
+	vreg->spmi_base_addr = base;
+
+	rc = qpnp_smps_check_type(vreg);
+	if (rc)
+		return rc;
+
+	/* Specify CPU 0 as default in order to handle shared regulator case. */
+	vreg->cpu_num = 0;
+	of_property_read_u32(vreg->pdev->dev.of_node, "qcom,cpu-num",
+						&vreg->cpu_num);
+
+	of_property_read_u32(vreg->pdev->dev.of_node, "qcom,recal-mask",
+						&vreg->recal_cluster_mask);
+
+	/*
+	 * The regulator must be initialized to range 0 or range 1 during
+	 * PMIC power on sequence.  Once it is set, it cannot be changed
+	 * dynamically.
+	 */
+	if (vreg->regulator_type == QPNP_TYPE_FTS2)
+		rc = qpnp_smps_init_range(vreg, &fts2_range0, &fts2_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
+		rc = qpnp_smps_init_range(vreg, &fts2p5_range0, &fts2p5_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_FTS426)
+		vreg->range = &fts426_range;
+	else if (vreg->regulator_type == QPNP_TYPE_HFS430)
+		vreg->range = &hfs430_range;
+	else if (vreg->regulator_type == QPNP_TYPE_HF)
+		rc = qpnp_smps_init_range(vreg, &hf_range0, &hf_range1);
+	else if (vreg->regulator_type == QPNP_TYPE_ULT_HF)
+		rc = qpnp_ult_hf_init_range(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_voltage(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_mode(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_step_rate(vreg);
+	if (rc)
+		return rc;
+
+	init_data = of_get_regulator_init_data(&pdev->dev, node, &vreg->rdesc);
+	if (!init_data) {
+		dev_err(&pdev->dev, "%s: unable to allocate memory\n",
+				__func__);
+		return -ENOMEM;
+	}
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+			| REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE;
+	init_data->constraints.valid_modes_mask
+				= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+
+	if (!init_data->constraints.name) {
+		dev_err(&pdev->dev, "%s: node is missing regulator name\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	rc = qpnp_smps_check_constraints(vreg, init_data);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: regulator constraints check failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	vreg->rdesc.name	= init_data->constraints.name;
+	vreg->rdesc.type	= REGULATOR_VOLTAGE;
+	vreg->rdesc.owner	= THIS_MODULE;
+	vreg->rdesc.ops		= &spm_regulator_ops;
+	vreg->rdesc.n_voltages
+		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
+			/ vreg->range->step_uV + 1;
+
+	vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+	of_property_read_u32(vreg->pdev->dev.of_node,
+				"qcom,max-voltage-step", &vreg->max_step_uV);
+
+	if (vreg->max_step_uV > SPM_REGULATOR_MAX_STEP_UV)
+		vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+
+	vreg->max_step_uV = rounddown(vreg->max_step_uV, vreg->range->step_uV);
+	pr_debug("%s: max single voltage step size=%u uV\n",
+		vreg->rdesc.name, vreg->max_step_uV);
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = node;
+	vreg->rdev = regulator_register(&vreg->rdesc, &reg_config);
+
+	if (IS_ERR(vreg->rdev)) {
+		rc = PTR_ERR(vreg->rdev);
+		dev_err(&pdev->dev, "%s: regulator_register failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = spm_regulator_avs_register(vreg, &pdev->dev, node);
+	if (rc) {
+		regulator_unregister(vreg->rdev);
+		return rc;
+	}
+
+	dev_set_drvdata(&pdev->dev, vreg);
+
+	pr_info("name=%s, range=%s, voltage=%d uV, mode=%s, step rate=%d uV/us\n",
+		vreg->rdesc.name,
+		spm_regulator_using_range0(vreg) ? "LV" : "MV",
+		vreg->uV,
+		vreg->init_mode == QPNP_LOGICAL_MODE_PWM ? "PWM" :
+		   (vreg->init_mode == QPNP_LOGICAL_MODE_AUTO ? "AUTO" : "PFM"),
+		vreg->step_rate);
+
+	return rc;
+}
+
+static int spm_regulator_remove(struct platform_device *pdev)
+{
+	struct spm_vreg *vreg = dev_get_drvdata(&pdev->dev);
+
+	if (vreg->avs_rdev)
+		regulator_unregister(vreg->avs_rdev);
+	regulator_unregister(vreg->rdev);
+
+	return 0;
+}
+
+static const struct of_device_id spm_regulator_match_table[] = {
+	{ .compatible = SPM_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+static const struct platform_device_id spm_regulator_id[] = {
+	{ SPM_REGULATOR_DRIVER_NAME, 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(platform, spm_regulator_id);
+
+static struct platform_driver spm_regulator_driver = {
+	.driver = {
+		.name		= SPM_REGULATOR_DRIVER_NAME,
+		.of_match_table = spm_regulator_match_table,
+	},
+	.probe		= spm_regulator_probe,
+	.remove		= spm_regulator_remove,
+	.id_table	= spm_regulator_id,
+};
+
+/**
+ * spm_regulator_init() - register the platform driver for spm-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init spm_regulator_init(void)
+{
+	static bool has_registered;
+
+	if (has_registered)
+		return 0;
+
+	has_registered = true;
+
+	return platform_driver_register(&spm_regulator_driver);
+}
+EXPORT_SYMBOL(spm_regulator_init);
+
+static void __exit spm_regulator_exit(void)
+{
+	platform_driver_unregister(&spm_regulator_driver);
+}
+
+arch_initcall(spm_regulator_init);
+module_exit(spm_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPM regulator driver");
+MODULE_ALIAS("platform:spm-regulator");
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 4e127b2..56b7e0e 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -137,6 +137,7 @@
 	struct qcom_glink_pipe *tx_pipe;
 
 	int irq;
+	char irqname[GLINK_NAME_SIZE];
 
 	struct kthread_worker kworker;
 	struct task_struct *task;
@@ -2023,11 +2024,13 @@
 	if (ret)
 		dev_err(dev, "failed to register early notif %d\n", ret);
 
+	snprintf(glink->irqname, sizeof(glink->irqname),
+		 "glink-native-%s", glink->name);
+
 	irq = of_irq_get(dev->of_node, 0);
 	ret = devm_request_irq(dev, irq,
 			       qcom_glink_native_intr,
 			       IRQF_NO_SUSPEND | IRQF_SHARED,
-			       "glink-native", glink);
+			       glink->irqname, glink);
 	if (ret) {
 		dev_err(dev, "failed to request IRQ\n");
 		goto unregister;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 98c3c24..9efd8af 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -274,6 +274,22 @@
 	  Say M here if you want to include support for the Qualcomm RPM as a
 	  module. This will build a module called "qcom-smd-rpm".
 
+config MSM_SPM
+	bool "Driver support for SPM and AVS wrapper hardware"
+	help
+	  Enables support for the SPM and AVS wrapper hardware on MSMs. The
+	  SPM hardware is used to manage processor power during sleep. The
+	  driver allows the SPM to be configured for the different low power
+	  modes of both the core and L2.
+
+config MSM_L2_SPM
+	bool "SPM support for L2 cache"
+	help
+	  Enable SPM driver support for the L2 cache. Some MSM chipsets allow
+	  control of the L2 cache low power mode with a Subsystem Power
+	  Manager. Enabling this driver allows the L2 SPM to be configured
+	  for low power modes on supported chipsets.
+
 config QCOM_SCM
 	bool "Secure Channel Manager (SCM) support"
 	default n
@@ -327,6 +343,15 @@
 	  Client driver for the WCNSS_CTRL SMD channel, used to download nv
 	  firmware to a newly booted WCNSS chip.
 
+config MSM_PIL_MSS_QDSP6V5
+	tristate "MSS QDSP6v5 (Hexagon) Boot Support"
+	depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+	help
+	  Support for booting and shutting down QDSP6v5 (Hexagon) processors
+	  in modem subsystems. If you would like to make or receive phone
+	  calls then say Y here.
+	  If unsure, say N.
+
 config SETUP_SSR_NOTIF_TIMEOUTS
 	bool "Set timeouts on SSR sysmon notifications and notifier callbacks"
 	help
@@ -873,6 +898,15 @@
 	  An offline CPU is considered as a reserved CPU since this OS can't use
 	  it.
 
+config QTI_HW_KEY_MANAGER
+	tristate "Enable QTI Hardware Key Manager for storage encryption"
+	default n
+	help
+	  Say 'Y' to enable the hardware key manager driver used to operate
+	  and access the key manager hardware block. The driver interfaces
+	  with the HWKM hardware to perform key operations from the kernel,
+	  which are used for storage encryption.
+
 source "drivers/soc/qcom/icnss2/Kconfig"
 
 config ICNSS
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 4856a43..62a34a5 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -16,6 +16,7 @@
 qcom_rpmh-y			+= rpmh.o
 obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
 obj-$(CONFIG_QCOM_SMEM) +=	smem.o
+obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o
 obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
 obj-$(CONFIG_QCOM_SMP2P)	+= smp2p.o
 obj-$(CONFIG_QCOM_SMSM)	+= smsm.o
@@ -45,6 +46,7 @@
 obj-$(CONFIG_MSM_SERVICE_LOCATOR) += service-locator.o
 obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o
 obj-$(CONFIG_MSM_SYSMON_QMI_COMM) += sysmon-qmi.o
+obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
 obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
 obj-$(CONFIG_MEM_SHARE_QMI_SERVICE)		+= memshare/
 obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
@@ -94,11 +96,14 @@
 obj-$(CONFIG_MSM_RPM_SMD)   +=  rpm-smd-debug.o
 endif
 obj-$(CONFIG_QCOM_CDSP_RM) += cdsprm.o
-obj-$(CONFIG_ICNSS) += icnss.o
-obj-$(CONFIG_ICNSS_QMI) += icnss_qmi.o wlan_firmware_service_v01.o
+obj-$(CONFIG_ICNSS) += msm_icnss.o
+msm_icnss-y := icnss.o
+msm_icnss-$(CONFIG_ICNSS_QMI) += icnss_qmi.o wlan_firmware_service_v01.o
 obj-$(CONFIG_RMNET_CTL) += rmnet_ctl/
 obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o
 obj-$(CONFIG_QTI_L2_REUSE) += l2_reuse.o
 obj-$(CONFIG_ICNSS2) += icnss2/
 obj-$(CONFIG_QTI_CRYPTO_COMMON) += crypto-qti-common.o
 obj-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o
+obj-$(CONFIG_QTI_HW_KEY_MANAGER) += hwkm_qti.o
+hwkm_qti-y += hwkm.o
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 864bd65..0ee43a8 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -92,14 +92,6 @@
 static bool eud_ready;
 static struct platform_device *eud_private;
 
-static int check_eud_mode_mgr2(struct eud_chip *chip)
-{
-	u32 val;
-
-	val = scm_io_read(chip->eud_mode_mgr2_phys_base);
-	return val & BIT(0);
-}
-
 static void enable_eud(struct platform_device *pdev)
 {
 	struct eud_chip *priv = platform_get_drvdata(pdev);
@@ -113,7 +105,7 @@
 			priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
 
 	/* Enable secure eud if supported */
-	if (priv->secure_eud_en && !check_eud_mode_mgr2(priv)) {
+	if (priv->secure_eud_en) {
 		ret = scm_io_write(priv->eud_mode_mgr2_phys_base +
 				   EUD_REG_EUD_EN2, EUD_ENABLE_CMD);
 		if (ret)
@@ -572,9 +564,6 @@
 		}
 
 		chip->eud_mode_mgr2_phys_base = res->start;
-
-		if (check_eud_mode_mgr2(chip))
-			enable = 1;
 	}
 
 	chip->need_phy_clk_vote = of_property_read_bool(pdev->dev.of_node,
diff --git a/drivers/soc/qcom/hwkm.c b/drivers/soc/qcom/hwkm.c
new file mode 100644
index 0000000..af19d18
--- /dev/null
+++ b/drivers/soc/qcom/hwkm.c
@@ -0,0 +1,1214 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QTI hardware key manager driver.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/bitops.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <linux/iommu.h>
+
+#include <linux/hwkm.h>
+#include "hwkmregs.h"
+#include "hwkm_serialize.h"
+
+#define BYTES_TO_WORDS(bytes) (((bytes) + 3) / 4)
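+/* e.g. BYTES_TO_WORDS(5) == 2: rounds a byte count up to whole 32-bit words */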
+
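+/* Appends len bytes at cmd_ptr and advances the cursor (cmd_ptr is modified) */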
+#define WRITE_TO_KDF_PACKET(cmd_ptr, src, len)	\
+	do {					\
+		memcpy(cmd_ptr, src, len);	\
+		cmd_ptr += len;			\
+	} while (0)
+
+#define ASYNC_CMD_HANDLING false
+
+// Maximum number of times to poll
+#define MAX_RETRIES 20000
+
+static int retries;
+#define WAIT_UNTIL(cond)			\
+for (retries = 0; !(cond) && (retries < MAX_RETRIES); retries++)
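+/*
+ * WAIT_UNTIL() busy-polls with no sleep and simply falls through after
+ * MAX_RETRIES attempts; the callers below re-check the condition afterwards
+ * to detect the timeout case.
+ */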
+
+#define ICEMEM_SLAVE_TPKEY_VAL	0x192
+#define ICEMEM_SLAVE_TPKEY_SLOT	0x92
+#define KM_MASTER_TPKEY_SLOT	10
+
+struct hwkm_clk_info {
+	struct list_head list;
+	struct clk *clk;
+	const char *name;
+	u32 max_freq;
+	u32 min_freq;
+	u32 curr_freq;
+	bool enabled;
+};
+
+struct hwkm_device {
+	struct device *dev;
+	void __iomem *km_base;
+	void __iomem *ice_base;
+	struct resource *km_res;
+	struct resource *ice_res;
+	struct list_head clk_list_head;
+	bool is_hwkm_clk_available;
+	bool is_hwkm_enabled;
+};
+
+static struct hwkm_device *km_device;
+
+#define qti_hwkm_readl(hwkm, reg, dest)				\
+	(((dest) == KM_MASTER) ?				\
+	(readl_relaxed((hwkm)->km_base + (reg))) :		\
+	(readl_relaxed((hwkm)->ice_base + (reg))))
+#define qti_hwkm_writel(hwkm, val, reg, dest)			\
+	(((dest) == KM_MASTER) ?				\
+	(writel_relaxed((val), (hwkm)->km_base + (reg))) :	\
+	(writel_relaxed((val), (hwkm)->ice_base + (reg))))
+#define qti_hwkm_setb(hwkm, reg, nr, dest) do {		\
+	u32 val = qti_hwkm_readl(hwkm, reg, dest);		\
+	val |= (0x1 << nr);					\
+	qti_hwkm_writel(hwkm, val, reg, dest);			\
+} while (0)
+#define qti_hwkm_clearb(hwkm, reg, nr, dest) do {		\
+	u32 val = qti_hwkm_readl(hwkm, reg, dest);		\
+	val &= ~(0x1 << nr);					\
+	qti_hwkm_writel(hwkm, val, reg, dest);			\
+} while (0)
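+
+/*
+ * The relaxed MMIO accessors above impose no ordering; the transaction
+ * routines below therefore issue explicit wmb() barriers after each
+ * control-register update.
+ */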
+
+static inline bool qti_hwkm_testb(struct hwkm_device *hwkm, u32 reg, u8 nr,
+				  enum hwkm_destination dest)
+{
+	u32 val = qti_hwkm_readl(hwkm, reg, dest);
+
+	val = (val >> nr) & 0x1;
+	if (val == 0)
+		return false;
+	return true;
+}
+
+static inline unsigned int qti_hwkm_get_reg_data(struct hwkm_device *dev,
+						 u32 reg, u32 offset, u32 mask,
+						 enum hwkm_destination dest)
+{
+	u32 val = 0;
+
+	val = qti_hwkm_readl(dev, reg, dest);
+	return ((val & mask) >> offset);
+}
+
+/**
+ * @brief Send a command packet to the HWKM Master instance as described
+ *        in section 3.2.5.1 of Key Manager HPG
+ *        - Clear CMD FIFO
+ *        - Clear Error Status Register
+ *        - Write CMD_ENABLE = 1
+ *        - for word in cmd_packet:
+ *          - poll until CMD_FIFO_AVAILABLE_SPACE > 0.
+ *            Timeout error after MAX_RETRIES polls.
+ *          - write word to CMD register
+ *        - for word in rsp_packet:
+ *          - poll until RSP_FIFO_AVAILABLE_DATA > 0.
+ *            Timeout error after MAX_RETRIES polls.
+ *          - read word from RSP register
+ *        - Verify CMD_DONE == 1
+ *        - Clear CMD_DONE
+ *
+ * @return HWKM_SUCCESS if successful. HWKM error code otherwise.
+ */
+
+static int qti_hwkm_master_transaction(struct hwkm_device *dev,
+				       const uint32_t *cmd_packet,
+				       size_t cmd_words,
+				       uint32_t *rsp_packet,
+				       size_t rsp_words)
+{
+	int i = 0;
+	int err = 0;
+
+	// Clear CMD FIFO
+	qti_hwkm_setb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+	qti_hwkm_clearb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+
+	// Clear previous CMD errors
+	qti_hwkm_writel(dev, 0x0, QTI_HWKM_MASTER_RG_BANK2_BANKN_ESR,
+			KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+
+	// Enable command
+	qti_hwkm_setb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL, CMD_ENABLE_BIT,
+			KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+
+	if (qti_hwkm_testb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, KM_MASTER)) {
+		pr_err("%s: CMD_FIFO_CLEAR_BIT still set\n", __func__);
+		err = -1;
+		return err;
+	}
+
+	for (i = 0; i < cmd_words; i++) {
+		WAIT_UNTIL(qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
+			CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
+			KM_MASTER) > 0);
+		if (qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
+			CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
+			KM_MASTER) == 0) {
+			pr_err("%s: cmd fifo space not available\n", __func__);
+			err = -1;
+			return err;
+		}
+		qti_hwkm_writel(dev, cmd_packet[i],
+				QTI_HWKM_MASTER_RG_BANK2_CMD_0, KM_MASTER);
+		/* Write memory barrier */
+		wmb();
+	}
+
+	for (i = 0; i < rsp_words; i++) {
+		WAIT_UNTIL(qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
+			RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
+			KM_MASTER) > 0);
+		if (qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS,
+			RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
+			KM_MASTER) == 0) {
+			pr_err("%s: rsp fifo data not available\n", __func__);
+			err = -1;
+			return err;
+		}
+		rsp_packet[i] = qti_hwkm_readl(dev,
+				QTI_HWKM_MASTER_RG_BANK2_RSP_0, KM_MASTER);
+	}
+
+	if (!qti_hwkm_testb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS,
+			CMD_DONE_BIT, KM_MASTER)) {
+		pr_err("%s: CMD_DONE_BIT not set\n", __func__);
+		err = -1;
+		return err;
+	}
+
+	// Clear CMD_DONE status bit
+	qti_hwkm_setb(dev, QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS,
+			CMD_DONE_BIT, KM_MASTER);
+	/* Write memory barrier */
+	wmb();
+
+	return err;
+}
+
+/**
+ * @brief Send a command packet to the HWKM ICE slave instance as described in
+ *        section 3.2.5.1 of Key Manager HPG
+ *        - Clear CMD FIFO
+ *        - Clear Error Status Register
+ *        - Write CMD_ENABLE = 1
+ *        - for word in cmd_packet:
+ *          - poll until CMD_FIFO_AVAILABLE_SPACE > 0.
+ *            Timeout error after MAX_RETRIES polls.
+ *          - write word to CMD register
+ *        - for word in rsp_packet:
+ *          - poll until RSP_FIFO_AVAILABLE_DATA > 0.
+ *            Timeout error after MAX_RETRIES polls.
+ *          - read word from RSP register
+ *        - Verify CMD_DONE == 1
+ *        - Clear CMD_DONE
+ *
+ * @return HWKM_SUCCESS if successful. HWKM error code otherwise.
+ */
+
+static int qti_hwkm_ice_transaction(struct hwkm_device *dev,
+				    const uint32_t *cmd_packet,
+				    size_t cmd_words,
+				    uint32_t *rsp_packet,
+				    size_t rsp_words)
+{
+	int i = 0;
+	int err = 0;
+
+	// Clear CMD FIFO
+	qti_hwkm_setb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+	qti_hwkm_clearb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	// Clear previous CMD errors
+	qti_hwkm_writel(dev, 0x0, QTI_HWKM_ICE_RG_BANK0_BANKN_ESR,
+			ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	// Enable command
+	qti_hwkm_setb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL, CMD_ENABLE_BIT,
+			ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	if (qti_hwkm_testb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_CTL,
+			CMD_FIFO_CLEAR_BIT, ICEMEM_SLAVE)) {
+		pr_err("%s: CMD_FIFO_CLEAR_BIT still set\n", __func__);
+		err = -1;
+		return err;
+	}
+
+	for (i = 0; i < cmd_words; i++) {
+		WAIT_UNTIL(qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
+			CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
+			ICEMEM_SLAVE) > 0);
+		if (qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
+			CMD_FIFO_AVAILABLE_SPACE, CMD_FIFO_AVAILABLE_SPACE_MASK,
+			ICEMEM_SLAVE) == 0) {
+			pr_err("%s: cmd fifo space not available\n", __func__);
+			err = -1;
+			return err;
+		}
+		qti_hwkm_writel(dev, cmd_packet[i],
+				QTI_HWKM_ICE_RG_BANK0_CMD_0, ICEMEM_SLAVE);
+		/* Write memory barrier */
+		wmb();
+	}
+
+	for (i = 0; i < rsp_words; i++) {
+		WAIT_UNTIL(qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
+			RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
+			ICEMEM_SLAVE) > 0);
+		if (qti_hwkm_get_reg_data(dev,
+			QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS,
+			RSP_FIFO_AVAILABLE_DATA, RSP_FIFO_AVAILABLE_DATA_MASK,
+			ICEMEM_SLAVE) == 0) {
+			pr_err("%s: rsp fifo data not available\n", __func__);
+			err = -1;
+			return err;
+		}
+		rsp_packet[i] = qti_hwkm_readl(dev,
+				QTI_HWKM_ICE_RG_BANK0_RSP_0, ICEMEM_SLAVE);
+	}
+
+	if (!qti_hwkm_testb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
+			CMD_DONE_BIT, ICEMEM_SLAVE)) {
+		pr_err("%s: CMD_DONE_BIT not set\n", __func__);
+		err = -1;
+		return err;
+	}
+
+	// Clear CMD_DONE status bit
+	qti_hwkm_setb(dev, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
+			CMD_DONE_BIT, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	return err;
+}
+
+/*
+ * @brief Send a command packet to the selected KM instance and read
+ *        the response
+ *
+ * @param dest            [in]  Destination KM instance
+ * @param cmd_packet      [in]  pointer to start of command packet
+ * @param cmd_words       [in]  words in the command packet
+ * @param rsp_packet      [out] pointer to start of response packet
+ * @param rsp_words       [in]  words in the response buffer
+ *
+ * @return HWKM_SUCCESS if successful. HWKM error code otherwise.
+ */
+
+static int qti_hwkm_run_transaction(enum hwkm_destination dest,
+				    const uint32_t *cmd_packet,
+				    size_t cmd_words,
+				    uint32_t *rsp_packet,
+				    size_t rsp_words)
+{
+	int status = 0;
+
+	if (cmd_packet == NULL || rsp_packet == NULL) {
+		status = -1;
+		return status;
+	}
+
+	switch (dest) {
+	case KM_MASTER:
+		status = qti_hwkm_master_transaction(km_device,
+					cmd_packet, cmd_words,
+					rsp_packet, rsp_words);
+		break;
+	case ICEMEM_SLAVE:
+		status = qti_hwkm_ice_transaction(km_device,
+					cmd_packet, cmd_words,
+					rsp_packet, rsp_words);
+		break;
+	default:
+		status = -2;
+		break;
+	}
+
+	return status;
+}
+
+static void serialize_policy(struct hwkm_serialized_policy *out,
+			     const struct hwkm_key_policy *policy)
+{
+	memset(out, 0, sizeof(struct hwkm_serialized_policy));
+	out->wrap_with_tpkey = policy->wrap_with_tpk_allowed;
+	out->hw_destination = policy->hw_destination;
+	out->security_level = policy->security_lvl;
+	out->swap_export_allowed = policy->swap_export_allowed;
+	out->wrap_export_allowed = policy->wrap_export_allowed;
+	out->key_type = policy->key_type;
+	out->kdf_depth = policy->kdf_depth;
+	out->encrypt_allowed = policy->enc_allowed;
+	out->decrypt_allowed = policy->dec_allowed;
+	out->alg_allowed = policy->alg_allowed;
+	out->key_management_by_tz_secure_allowed = policy->km_by_tz_allowed;
+	out->key_management_by_nonsecure_allowed = policy->km_by_nsec_allowed;
+	out->key_management_by_modem_allowed = policy->km_by_modem_allowed;
+	out->key_management_by_spu_allowed = policy->km_by_spu_allowed;
+}
+
+static void serialize_kdf_bsve(struct hwkm_kdf_bsve *out,
+			       const struct hwkm_bsve *bsve, u8 mks)
+{
+	memset(out, 0, sizeof(struct hwkm_kdf_bsve));
+	out->mks = mks;
+	out->key_policy_version_en = bsve->km_key_policy_ver_en;
+	out->apps_secure_en = bsve->km_apps_secure_en;
+	out->msa_secure_en = bsve->km_msa_secure_en;
+	out->lcm_fuse_row_en = bsve->km_lcm_fuse_en;
+	out->boot_stage_otp_en = bsve->km_boot_stage_otp_en;
+	out->swc_en = bsve->km_swc_en;
+	out->fuse_region_sha_digest_en = bsve->km_fuse_region_sha_digest_en;
+	out->child_key_policy_en = bsve->km_child_key_policy_en;
+	out->mks_en = bsve->km_mks_en;
+}
+
+static void deserialize_policy(struct hwkm_key_policy *out,
+			       const struct hwkm_serialized_policy *policy)
+{
+	memset(out, 0, sizeof(struct hwkm_key_policy));
+	out->wrap_with_tpk_allowed = policy->wrap_with_tpkey;
+	out->hw_destination = policy->hw_destination;
+	out->security_lvl = policy->security_level;
+	out->swap_export_allowed = policy->swap_export_allowed;
+	out->wrap_export_allowed = policy->wrap_export_allowed;
+	out->key_type = policy->key_type;
+	out->kdf_depth = policy->kdf_depth;
+	out->enc_allowed = policy->encrypt_allowed;
+	out->dec_allowed = policy->decrypt_allowed;
+	out->alg_allowed = policy->alg_allowed;
+	out->km_by_tz_allowed = policy->key_management_by_tz_secure_allowed;
+	out->km_by_nsec_allowed = policy->key_management_by_nonsecure_allowed;
+	out->km_by_modem_allowed = policy->key_management_by_modem_allowed;
+	out->km_by_spu_allowed = policy->key_management_by_spu_allowed;
+}
+
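+/*
+ * In-place byte reversal using the XOR-swap idiom (no temporary).  The
+ * left < right guard also avoids the self-XOR that would zero the middle
+ * byte for odd lengths; the routine assumes keylen >= 1 (callers here
+ * always pass HWKM_MAX_KEY_SIZE).
+ */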
+static void reverse_key(u8 *key, size_t keylen)
+{
+	size_t left = 0;
+	size_t right = 0;
+
+	for (left = 0, right = keylen - 1; left < right; left++, right--) {
+		key[left] ^= key[right];
+		key[right] ^= key[left];
+		key[left] ^= key[right];
+	}
+}
+
+/*
+ * Command packet format (word indices):
+ * CMD[0]    = Operation info (OP, IRQ_EN, DKS, LEN)
+ * CMD[1:17] = Wrapped Key Blob
+ * CMD[18]   = CRC (disabled)
+ *
+ * Response packet format (word indices):
+ * RSP[0]    = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1]    = Error status
+ */
+
+static int qti_handle_key_unwrap_import(const struct hwkm_cmd *cmd_in,
+					struct hwkm_rsp *rsp_in)
+{
+	int status = 0;
+	u32 cmd[UNWRAP_IMPORT_CMD_WORDS] = {0};
+	u32 rsp[UNWRAP_IMPORT_RSP_WORDS] = {0};
+	struct hwkm_operation_info operation = {
+		.op = KEY_UNWRAP_IMPORT,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->unwrap.dks,
+		.slot2_desc = cmd_in->unwrap.kwk,
+		.len = UNWRAP_IMPORT_CMD_WORDS
+	};
+
+	pr_debug("%s: KEY_UNWRAP_IMPORT start\n", __func__);
+
+	memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
+	memcpy(cmd + COMMAND_WRAPPED_KEY_IDX, cmd_in->unwrap.wkb,
+			cmd_in->unwrap.sz);
+
+	status = qti_hwkm_run_transaction(ICEMEM_SLAVE, cmd,
+			UNWRAP_IMPORT_CMD_WORDS, rsp, UNWRAP_IMPORT_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		return status;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status) {
+		pr_err("%s: KEY_UNWRAP_IMPORT error status 0x%x\n", __func__,
+								rsp_in->status);
+		return rsp_in->status;
+	}
+
+	return status;
+}
+
+/*
+ * Command packet format (word indices):
+ * CMD[0] = Operation info (OP, IRQ_EN, DKS, DK, LEN)
+ * CMD[1] = CRC (disabled)
+ *
+ * Response packet format (word indices):
+ * RSP[0] = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1] = Error status
+ */
+
+static int qti_handle_keyslot_clear(const struct hwkm_cmd *cmd_in,
+				    struct hwkm_rsp *rsp_in)
+{
+	int status = 0;
+	u32 cmd[KEYSLOT_CLEAR_CMD_WORDS] = {0};
+	u32 rsp[KEYSLOT_CLEAR_RSP_WORDS] = {0};
+	struct hwkm_operation_info operation = {
+		.op = KEY_SLOT_CLEAR,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->clear.dks,
+		.op_flag = cmd_in->clear.is_double_key,
+		.len = KEYSLOT_CLEAR_CMD_WORDS
+	};
+
+	pr_debug("%s: KEY_SLOT_CLEAR start\n", __func__);
+
+	memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
+
+	status = qti_hwkm_run_transaction(ICEMEM_SLAVE, cmd,
+				KEYSLOT_CLEAR_CMD_WORDS, rsp,
+				KEYSLOT_CLEAR_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		return status;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status) {
+		pr_err("%s: KEYSLOT_CLEAR error status 0x%x\n",
+				__func__, rsp_in->status);
+		return rsp_in->status;
+	}
+
+	return status;
+}
+
+/*
+ * NOTE: The command packet can vary in length. If BE = 0, the last 2 indices
+ * for the BSVE are skipped. Similarly, if Software Context Length (SCL) < 16,
+ * only SCL words are written to the packet. The CRC word is after the last
+ * word of the SWC. The LEN field of this command does not include the SCL
+ * (unlike other commands where the LEN field is the length of the entire
+ * packet). The HW will expect SCL + LEN words to be sent.
+ *
+ * Command packet format (word indices):
+ * CMD[0]    = Operation info (OP, IRQ_EN, DKS, KDK, BE, SCL, LEN)
+ * CMD[1:2]  = Policy
+ * CMD[3]    = BSVE[0] if BE = 1, 0 if BE = 0
+ * CMD[4:5]  = BSVE[1:2] if BE = 1, skipped if BE = 0
+ * CMD[6:21] = Software Context, only writing the number of words in SCL
+ * CMD[22]   = CRC
+ *
+ * Response packet format (word indices):
+ * RSP[0]    = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1]    = Error status
+ */
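+
+/*
+ * Sizing example (illustrative): a 16-byte software context gives SCL =
+ * BYTES_TO_WORDS(16) = 4.  With BE = 1, LEN covers only the operation,
+ * policy and BSVE words, and the hardware then consumes LEN + SCL =
+ * LEN + 4 words in total from the command FIFO.
+ */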
+
+static int qti_handle_system_kdf(const struct hwkm_cmd *cmd_in,
+				 struct hwkm_rsp *rsp_in)
+{
+	int status = 0;
+	u32 cmd[SYSTEM_KDF_CMD_MAX_WORDS] = {0};
+	u32 rsp[SYSTEM_KDF_RSP_WORDS] = {0};
+	u8 *cmd_ptr = (u8 *) cmd;
+	struct hwkm_serialized_policy policy;
+	struct hwkm_operation_info operation = {
+		.op = SYSTEM_KDF,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->kdf.dks,
+		.slot2_desc = cmd_in->kdf.kdk,
+		.op_flag = cmd_in->kdf.bsve.enabled,
+		.context_len = BYTES_TO_WORDS(cmd_in->kdf.sz),
+		.len = SYSTEM_KDF_CMD_MIN_WORDS +
+			(cmd_in->kdf.bsve.enabled ? BSVE_WORDS : 1)
+	};
+
+	pr_debug("%s: SYSTEM_KDF start\n", __func__);
+
+	serialize_policy(&policy, &cmd_in->kdf.policy);
+
+	WRITE_TO_KDF_PACKET(cmd_ptr, &operation, OPERATION_INFO_LENGTH);
+	WRITE_TO_KDF_PACKET(cmd_ptr, &policy, KEY_POLICY_LENGTH);
+
+	if (cmd_in->kdf.bsve.enabled) {
+		struct hwkm_kdf_bsve bsve;
+
+		serialize_kdf_bsve(&bsve, &cmd_in->kdf.bsve, cmd_in->kdf.mks);
+		WRITE_TO_KDF_PACKET(cmd_ptr, &bsve, MAX_BSVE_LENGTH);
+	} else {
+		// Skip the remaining 3 bytes of the current word
+		cmd_ptr += 3 * (sizeof(u8));
+	}
+
+	WRITE_TO_KDF_PACKET(cmd_ptr, cmd_in->kdf.ctx, cmd_in->kdf.sz);
+
+	status = qti_hwkm_run_transaction(ICEMEM_SLAVE, cmd,
+				operation.len + operation.context_len,
+				rsp, SYSTEM_KDF_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		return status;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status) {
+		pr_err("%s: SYSTEM_KDF error status 0x%x\n", __func__,
+					rsp_in->status);
+		return rsp_in->status;
+	}
+
+	return status;
+}
+
+/*
+ * Command packet format (word indices):
+ * CMD[0] = Operation info (OP, IRQ_EN, SKS, LEN)
+ * CMD[1] = CRC (disabled)
+ *
+ * Response packet format (word indices):
+ * RSP[0] = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1] = Error status
+ */
+
+static int qti_handle_set_tpkey(const struct hwkm_cmd *cmd_in,
+				struct hwkm_rsp *rsp_in)
+{
+	int status = 0;
+	u32 cmd[SET_TPKEY_CMD_WORDS] = {0};
+	u32 rsp[SET_TPKEY_RSP_WORDS] = {0};
+	struct hwkm_operation_info operation = {
+		.op = SET_TPKEY,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->set_tpkey.sks,
+		.len = SET_TPKEY_CMD_WORDS
+	};
+
+	pr_debug("%s: SET_TPKEY start\n", __func__);
+
+	memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
+
+	status = qti_hwkm_run_transaction(KM_MASTER, cmd,
+			SET_TPKEY_CMD_WORDS, rsp, SET_TPKEY_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		return status;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status) {
+		pr_err("%s: SET_TPKEY error status 0x%x\n", __func__,
+					rsp_in->status);
+		return rsp_in->status;
+	}
+
+	return status;
+}
+
+/**
+ * NOTE: To anyone maintaining or porting this code wondering why the key
+ * is reversed in the command packet: the plaintext key value is expected by
+ * the HW in reverse byte order.
+ *       See section 1.8.2.2 of the HWKM CPAS for more details
+ *       Mapping of key to CE key read order:
+ *       Key[255:224] -> CRYPTO0_CRYPTO_ENCR_KEY0
+ *       Key[223:192] -> CRYPTO0_CRYPTO_ENCR_KEY1
+ *       ...
+ *       Key[63:32]   -> CRYPTO0_CRYPTO_ENCR_KEY6
+ *       Key[31:0]    -> CRYPTO0_CRYPTO_ENCR_KEY7
+ *       In this notation Key[31:0] is the least significant word of the key
+ *       If the key length is less than 256 bits, the key is filled in from
+ *       higher index to lower
+ *       For example, for a 128 bit key, Key[255:128] would have the key,
+ *       Key[127:0] would be all 0
+ *       This means that CMD[3:6] is all 0, CMD[7:10] has the key value.
+ *
+ * Command packet format (word indices):
+ * CMD[0]    = Operation info (OP, IRQ_EN, DKS/SKS, WE, LEN)
+ * CMD[1:2]  = Policy (0 if we == 0)
+ * CMD[3:10] = Write key value (0 if we == 0)
+ * CMD[11]   = CRC (disabled)
+ *
+ * Response packet format (word indices):
+ * RSP[0]    = Operation info (OP, IRQ_EN, LEN)
+ * RSP[1]    = Error status
+ * RSP[2:3]  = Policy (0 if we == 1)
+ * RSP[4:11] = Read key value (0 if we == 1)
+ */
+
+static int qti_handle_keyslot_rdwr(const struct hwkm_cmd *cmd_in,
+				   struct hwkm_rsp *rsp_in)
+{
+	int status = 0;
+	u32 cmd[KEYSLOT_RDWR_CMD_WORDS] = {0};
+	u32 rsp[KEYSLOT_RDWR_RSP_WORDS] = {0};
+	struct hwkm_serialized_policy policy;
+	struct hwkm_operation_info operation = {
+		.op = KEY_SLOT_RDWR,
+		.irq_en = ASYNC_CMD_HANDLING,
+		.slot1_desc = cmd_in->rdwr.slot,
+		.op_flag = cmd_in->rdwr.is_write,
+		.len = KEYSLOT_RDWR_CMD_WORDS
+	};
+
+	pr_debug("%s: KEY_SLOT_RDWR start\n", __func__);
+	memcpy(cmd, &operation, OPERATION_INFO_LENGTH);
+
+	if (cmd_in->rdwr.is_write) {
+		serialize_policy(&policy, &cmd_in->rdwr.policy);
+		memcpy(cmd + COMMAND_KEY_POLICY_IDX, &policy,
+				KEY_POLICY_LENGTH);
+		memcpy(cmd + COMMAND_KEY_VALUE_IDX, cmd_in->rdwr.key,
+				cmd_in->rdwr.sz);
+		// Need to reverse the key because the HW expects it in reverse
+		// byte order
+		reverse_key((u8 *) (cmd + COMMAND_KEY_VALUE_IDX),
+				HWKM_MAX_KEY_SIZE);
+	}
+
+	status = qti_hwkm_run_transaction(ICEMEM_SLAVE, cmd,
+			KEYSLOT_RDWR_CMD_WORDS, rsp, KEYSLOT_RDWR_RSP_WORDS);
+	if (status) {
+		pr_err("%s: Error running transaction %d\n", __func__, status);
+		return status;
+	}
+
+	rsp_in->status = rsp[RESPONSE_ERR_IDX];
+	if (rsp_in->status) {
+		pr_err("%s: KEY_SLOT_RDWR error status 0x%x\n",
+				__func__, rsp_in->status);
+		return rsp_in->status;
+	}
+
+	if (!cmd_in->rdwr.is_write &&
+			(rsp_in->status == 0)) {
+		memcpy(&policy, rsp + RESPONSE_KEY_POLICY_IDX,
+						KEY_POLICY_LENGTH);
+		memcpy(rsp_in->rdwr.key,
+			rsp + RESPONSE_KEY_VALUE_IDX, RESPONSE_KEY_LENGTH);
+		// Need to reverse the key because the HW returns it in
+		// reverse byte order
+		reverse_key(rsp_in->rdwr.key, HWKM_MAX_KEY_SIZE);
+		deserialize_policy(&rsp_in->rdwr.policy, &policy);
+	}
+
+	// Clear cmd and rsp buffers, since they may contain plaintext keys.
+	// Use memzero_explicit() so the compiler cannot optimize the wipes
+	// away.
+	memzero_explicit(cmd, sizeof(cmd));
+	memzero_explicit(rsp, sizeof(rsp));
+
+	return status;
+}
+
+static int qti_hwkm_parse_clock_info(struct platform_device *pdev,
+				     struct hwkm_device *hwkm_dev)
+{
+	int ret = -1, cnt, i, len;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	char *name;
+	struct hwkm_clk_info *clki;
+	u32 *clkfreq = NULL;
+
+	if (!np)
+		goto out;
+
+	cnt = of_property_count_strings(np, "clock-names");
+	if (cnt <= 0) {
+		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+				__func__);
+		ret = cnt;
+		goto out;
+	}
+
+	if (!of_get_property(np, "qcom,op-freq-hz", &len)) {
+		dev_info(dev, "qcom,op-freq-hz property not specified\n");
+		goto out;
+	}
+
+	len = len / sizeof(*clkfreq);
+	if (len != cnt)
+		goto out;
+
+	clkfreq = devm_kzalloc(dev, len * sizeof(*clkfreq), GFP_KERNEL);
+	if (!clkfreq) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = of_property_read_u32_array(np, "qcom,op-freq-hz", clkfreq, len);
+
+	INIT_LIST_HEAD(&hwkm_dev->clk_list_head);
+
+	for (i = 0; i < cnt; i++) {
+		ret = of_property_read_string_index(np,
+			"clock-names", i, (const char **)&name);
+		if (ret)
+			goto out;
+
+		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+		if (!clki) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		clki->max_freq = clkfreq[i];
+		clki->name = kstrdup(name, GFP_KERNEL);
+		list_add_tail(&clki->list, &hwkm_dev->clk_list_head);
+	}
+out:
+	return ret;
+}
+
+static int qti_hwkm_init_clocks(struct hwkm_device *hwkm_dev)
+{
+	int ret = -EINVAL;
+	struct hwkm_clk_info *clki = NULL;
+	struct device *dev = hwkm_dev->dev;
+	struct list_head *head = &hwkm_dev->clk_list_head;
+
+	if (!hwkm_dev->is_hwkm_clk_available)
+		return 0;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s: HWKM clock list null/empty\n", __func__);
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		clki->clk = devm_clk_get(dev, clki->name);
+		if (IS_ERR(clki->clk)) {
+			ret = PTR_ERR(clki->clk);
+			dev_err(dev, "%s: %s clk get failed, %d\n",
+					__func__, clki->name, ret);
+			goto out;
+		}
+
+		ret = 0;
+		if (clki->max_freq) {
+			ret = clk_set_rate(clki->clk, clki->max_freq);
+			if (ret) {
+				dev_err(dev,
+				"%s: %s clk set rate(%dHz) failed, %d\n",
+				__func__, clki->name, clki->max_freq, ret);
+				goto out;
+			}
+			clki->curr_freq = clki->max_freq;
+			dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+		}
+	}
+out:
+	return ret;
+}
+
+static int qti_hwkm_enable_disable_clocks(struct hwkm_device *hwkm_dev,
+					  bool enable)
+{
+	int ret = 0;
+	struct hwkm_clk_info *clki = NULL;
+	struct device *dev = hwkm_dev->dev;
+	struct list_head *head = &hwkm_dev->clk_list_head;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s: HWKM clock list null/empty\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!hwkm_dev->is_hwkm_clk_available) {
+		dev_err(dev, "%s: HWKM clock not available\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		if (enable)
+			ret = clk_prepare_enable(clki->clk);
+		else
+			clk_disable_unprepare(clki->clk);
+
+		if (ret) {
+			dev_err(dev, "Unable to %s HWKM clock\n",
+				enable ? "enable" : "disable");
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+int qti_hwkm_clocks(bool on)
+{
+	int ret = 0;
+
+	ret = qti_hwkm_enable_disable_clocks(km_device, on);
+	if (ret) {
+		pr_err("%s:%pK Could not enable/disable clocks\n",
+				__func__, km_device);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qti_hwkm_clocks);
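+
+/*
+ * Illustrative pairing only: clients are expected to bracket HWKM
+ * register access with qti_hwkm_clocks(true) and qti_hwkm_clocks(false),
+ * checking the return value of the enable call.
+ */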
+
+static int qti_hwkm_get_device_tree_data(struct platform_device *pdev,
+					 struct hwkm_device *hwkm_dev)
+{
+	struct device *dev = &pdev->dev;
+	int ret = 0;
+
+	hwkm_dev->km_res = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "km_master");
+	hwkm_dev->ice_res = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "ice_slave");
+	if (!hwkm_dev->km_res || !hwkm_dev->ice_res) {
+		pr_err("%s: Missing HWKM memory resource\n", __func__);
+		return -ENOMEM;
+	}
+
+	hwkm_dev->km_base = devm_ioremap_resource(dev, hwkm_dev->km_res);
+	if (IS_ERR(hwkm_dev->km_base)) {
+		ret = PTR_ERR(hwkm_dev->km_base);
+		pr_err("%s: Error = %d mapping HWKM memory\n", __func__, ret);
+		goto out;
+	}
+
+	hwkm_dev->ice_base = devm_ioremap_resource(dev, hwkm_dev->ice_res);
+	if (IS_ERR(hwkm_dev->ice_base)) {
+		ret = PTR_ERR(hwkm_dev->ice_base);
+		pr_err("%s: Error = %d mapping ICE memory\n", __func__, ret);
+		goto out;
+	}
+
+	hwkm_dev->is_hwkm_clk_available = of_property_read_bool(
+				dev->of_node, "qcom,enable-hwkm-clk");
+
+	if (hwkm_dev->is_hwkm_clk_available) {
+		ret = qti_hwkm_parse_clock_info(pdev, hwkm_dev);
+		if (ret) {
+			pr_err("%s: qti_hwkm_parse_clock_info failed (%d)\n",
+				__func__, ret);
+			goto out;
+		}
+	}
+
+out:
+	return ret;
+}
+
+int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd, struct hwkm_rsp *rsp)
+{
+	switch (cmd->op) {
+	case SYSTEM_KDF:
+		return qti_handle_system_kdf(cmd, rsp);
+	case KEY_UNWRAP_IMPORT:
+		return qti_handle_key_unwrap_import(cmd, rsp);
+	case KEY_SLOT_CLEAR:
+		return qti_handle_keyslot_clear(cmd, rsp);
+	case KEY_SLOT_RDWR:
+		return qti_handle_keyslot_rdwr(cmd, rsp);
+	case SET_TPKEY:
+		return qti_handle_set_tpkey(cmd, rsp);
+	case NIST_KEYGEN:
+	case KEY_WRAP_EXPORT:
+	case QFPROM_KEY_RDWR: /* used for HW initialization only */
+	default:
+		return -EINVAL;
+	}
+}
+EXPORT_SYMBOL(qti_hwkm_handle_cmd);
+
+static void qti_hwkm_configure_slot_access(struct hwkm_device *dev)
+{
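+	/*
+	 * Each BBAC bit is assumed to gate one key slot; writing all-ones
+	 * to the bank 0 bitmaps opens every slot they cover to HLOS (the
+	 * exact bit-to-slot mapping comes from the SWI, not this driver).
+	 */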
+	qti_hwkm_writel(dev, 0xffffffff,
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_0, ICEMEM_SLAVE);
+	qti_hwkm_writel(dev, 0xffffffff,
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_1, ICEMEM_SLAVE);
+	qti_hwkm_writel(dev, 0xffffffff,
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_2, ICEMEM_SLAVE);
+	qti_hwkm_writel(dev, 0xffffffff,
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_3, ICEMEM_SLAVE);
+	qti_hwkm_writel(dev, 0xffffffff,
+		QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_4, ICEMEM_SLAVE);
+}
+
+static int qti_hwkm_check_bist_status(struct hwkm_device *hwkm_dev)
+{
+	if (!qti_hwkm_testb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
+		BIST_DONE, ICEMEM_SLAVE)) {
+		pr_err("%s: Error with BIST_DONE\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!qti_hwkm_testb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
+		CRYPTO_LIB_BIST_DONE, ICEMEM_SLAVE)) {
+		pr_err("%s: Error with CRYPTO_LIB_BIST_DONE\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!qti_hwkm_testb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
+		BOOT_CMD_LIST1_DONE, ICEMEM_SLAVE)) {
+		pr_err("%s: Error with BOOT_CMD_LIST1_DONE\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!qti_hwkm_testb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
+		BOOT_CMD_LIST0_DONE, ICEMEM_SLAVE)) {
+		pr_err("%s: Error with BOOT_CMD_LIST0_DONE\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!qti_hwkm_testb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
+		KT_CLEAR_DONE, ICEMEM_SLAVE)) {
+		pr_err("%s: Error with KT_CLEAR_DONE\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qti_hwkm_ice_init_sequence(struct hwkm_device *hwkm_dev)
+{
+	int ret = 0;
+
+	/* Put ICE in standard mode */
+	qti_hwkm_writel(hwkm_dev, 0x7, QTI_HWKM_ICE_RG_TZ_KM_CTL, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	ret = qti_hwkm_check_bist_status(hwkm_dev);
+	if (ret) {
+		pr_err("%s: Error in BIST initialization %d\n", __func__, ret);
+		return ret;
+	}
+
+	/* Disable CRC checks */
+	qti_hwkm_clearb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_KM_CTL, CRC_CHECK_EN,
+			ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	/* Configure key slots to be accessed by HLOS */
+	qti_hwkm_configure_slot_access(hwkm_dev);
+	/* Write memory barrier */
+	wmb();
+
+	/* Clear RSP_FIFO_FULL bit */
+	qti_hwkm_setb(hwkm_dev, QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
+			RSP_FIFO_FULL, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+
+	return ret;
+}
+
+static void qti_hwkm_enable_slave_receive_mode(struct hwkm_device *hwkm_dev)
+{
+	qti_hwkm_clearb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL,
+			TPKEY_EN, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+	qti_hwkm_writel(hwkm_dev, ICEMEM_SLAVE_TPKEY_VAL,
+			QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+}
+
+static void qti_hwkm_disable_slave_receive_mode(struct hwkm_device *hwkm_dev)
+{
+	qti_hwkm_clearb(hwkm_dev, QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL,
+			TPKEY_EN, ICEMEM_SLAVE);
+	/* Write memory barrier */
+	wmb();
+}
+
+static void qti_hwkm_check_tpkey_status(struct hwkm_device *hwkm_dev)
+{
+	int val = 0;
+
+	val = qti_hwkm_readl(hwkm_dev, QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_STATUS,
+			ICEMEM_SLAVE);
+
+	pr_debug("%s: Tpkey receive status 0x%x\n", __func__, val);
+}
+
+static int qti_hwkm_set_tpkey(void)
+{
+	int ret = 0;
+	struct hwkm_cmd cmd;
+	struct hwkm_rsp rsp;
+
+	cmd.op = SET_TPKEY;
+	cmd.set_tpkey.sks = KM_MASTER_TPKEY_SLOT;
+
+	qti_hwkm_enable_slave_receive_mode(km_device);
+	ret = qti_hwkm_handle_cmd(&cmd, &rsp);
+	if (ret) {
+		pr_err("%s: Error running command %d\n", __func__, ret);
+		return ret;
+	}
+
+	qti_hwkm_check_tpkey_status(km_device);
+	qti_hwkm_disable_slave_receive_mode(km_device);
+
+	return 0;
+}
+
+int qti_hwkm_init(void)
+{
+	int ret = 0;
+
+	ret = qti_hwkm_ice_init_sequence(km_device);
+	if (ret) {
+		pr_err("%s: Error in ICE init sequence %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = qti_hwkm_set_tpkey();
+	if (ret) {
+		pr_err("%s: Error setting ICE to receive %d\n", __func__, ret);
+		return ret;
+	}
+	/* Write memory barrier */
+	wmb();
+	return ret;
+}
+EXPORT_SYMBOL(qti_hwkm_init);
+
+static int qti_hwkm_probe(struct platform_device *pdev)
+{
+	struct hwkm_device *hwkm_dev;
+	int ret = 0;
+
+	pr_debug("%s %d: HWKM probe start\n", __func__, __LINE__);
+	if (!pdev) {
+		pr_err("%s: Invalid platform_device passed\n", __func__);
+		return -EINVAL;
+	}
+
+	hwkm_dev = kzalloc(sizeof(struct hwkm_device), GFP_KERNEL);
+	if (!hwkm_dev) {
+		ret = -ENOMEM;
+		pr_err("%s: Error %d allocating memory for HWKM device\n",
+			__func__, ret);
+		goto err_hwkm_dev;
+	}
+
+	hwkm_dev->dev = &pdev->dev;
+	if (!hwkm_dev->dev) {
+		ret = -EINVAL;
+		pr_err("%s: Invalid device passed in platform_device\n",
+			__func__);
+		goto err_hwkm_dev;
+	}
+
+	if (pdev->dev.of_node) {
+		ret = qti_hwkm_get_device_tree_data(pdev, hwkm_dev);
+	} else {
+		ret = -EINVAL;
+		pr_err("%s: HWKM device node not found\n", __func__);
+	}
+	if (ret)
+		goto err_hwkm_dev;
+
+	ret = qti_hwkm_init_clocks(hwkm_dev);
+	if (ret) {
+		pr_err("%s: Error initializing clocks %d\n", __func__, ret);
+		goto err_hwkm_dev;
+	}
+
+	hwkm_dev->is_hwkm_enabled = true;
+	km_device = hwkm_dev;
+	platform_set_drvdata(pdev, hwkm_dev);
+
+	pr_debug("%s %d: HWKM probe ends\n", __func__, __LINE__);
+	return ret;
+
+err_hwkm_dev:
+	km_device = NULL;
+	kfree(hwkm_dev);
+	return ret;
+}
+
+static int qti_hwkm_remove(struct platform_device *pdev)
+{
+	kfree(km_device);
+	km_device = NULL;
+	return 0;
+}
+
+static const struct of_device_id qti_hwkm_match[] = {
+	{ .compatible = "qcom,hwkm"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, qti_hwkm_match);
+
+static struct platform_driver qti_hwkm_driver = {
+	.probe		= qti_hwkm_probe,
+	.remove		= qti_hwkm_remove,
+	.driver		= {
+		.name	= "qti_hwkm",
+		.of_match_table	= qti_hwkm_match,
+	},
+};
+module_platform_driver(qti_hwkm_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Hardware Key Manager driver");
diff --git a/drivers/soc/qcom/hwkm_serialize.h b/drivers/soc/qcom/hwkm_serialize.h
new file mode 100644
index 0000000..2d73ff5
--- /dev/null
+++ b/drivers/soc/qcom/hwkm_serialize.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __HWKM_SERIALIZE_H_
+#define __HWKM_SERIALIZE_H_
+
+#include <linux/types.h>
+
+#include <linux/hwkm.h>
+
+/* Command lengths (words) */
+#define NIST_KEYGEN_CMD_WORDS 4
+#define SYSTEM_KDF_CMD_MIN_WORDS 4
+#define SYSTEM_KDF_CMD_MAX_WORDS 29
+#define KEYSLOT_CLEAR_CMD_WORDS 2
+#define UNWRAP_IMPORT_CMD_WORDS 19
+#define WRAP_EXPORT_CMD_WORDS 5
+#define SET_TPKEY_CMD_WORDS 2
+#define KEYSLOT_RDWR_CMD_WORDS 12
+#define QFPROM_RDWR_CMD_WORDS 2
+
+/* Response lengths (words) */
+#define NIST_KEYGEN_RSP_WORDS 2
+#define SYSTEM_KDF_RSP_WORDS 2
+#define KEYSLOT_CLEAR_RSP_WORDS 2
+#define UNWRAP_IMPORT_RSP_WORDS 2
+#define WRAP_EXPORT_RSP_WORDS 19
+#define SET_TPKEY_RSP_WORDS 2
+#define KEYSLOT_RDWR_RSP_WORDS 12
+#define QFPROM_RDWR_RSP_WORDS 2
+
+/* Field lengths (words) */
+#define OPERATION_INFO_WORDS 1
+#define KEY_POLICY_WORDS 2
+#define BSVE_WORDS 3
+#define MAX_SWC_WORDS 16
+#define RESPONSE_KEY_WORDS 8
+#define KEY_BLOB_WORDS 17
+
+/* Field lengths (bytes) */
+#define OPERATION_INFO_LENGTH (OPERATION_INFO_WORDS * sizeof(uint32_t))
+#define KEY_POLICY_LENGTH (KEY_POLICY_WORDS * sizeof(uint32_t))
+#define MAX_BSVE_LENGTH (BSVE_WORDS * sizeof(uint32_t))
+#define MAX_SWC_LENGTH (MAX_SWC_WORDS * sizeof(uint32_t))
+#define RESPONSE_KEY_LENGTH (RESPONSE_KEY_WORDS * sizeof(uint32_t))
+#define KEY_BLOB_LENGTH (KEY_BLOB_WORDS * sizeof(uint32_t))
+
+/* Command indices */
+#define COMMAND_KEY_POLICY_IDX 1
+#define COMMAND_KEY_VALUE_IDX 3
+#define COMMAND_WRAPPED_KEY_IDX 1
+#define COMMAND_KEY_WRAP_BSVE_IDX 1
+
+/* Response indices */
+#define RESPONSE_ERR_IDX 1
+#define RESPONSE_KEY_POLICY_IDX 2
+#define RESPONSE_KEY_VALUE_IDX 4
+#define RESPONSE_WRAPPED_KEY_IDX 2
+
+struct hwkm_serialized_policy {
+	unsigned dbg_qfprom_key_rd_iv_sel:1;		// [0]
+	unsigned reserved0:1;				// [1]
+	unsigned wrap_with_tpkey:1;			// [2]
+	unsigned hw_destination:4;			// [3:6]
+	unsigned reserved1:1;				// [7]
+	unsigned propagate_sec_level_to_child_keys:1;	// [8]
+	unsigned security_level:2;			// [9:10]
+	unsigned swap_export_allowed:1;			// [11]
+	unsigned wrap_export_allowed:1;			// [12]
+	unsigned key_type:3;				// [13:15]
+	unsigned kdf_depth:8;				// [16:23]
+	unsigned decrypt_allowed:1;			// [24]
+	unsigned encrypt_allowed:1;			// [25]
+	unsigned alg_allowed:6;				// [26:31]
+	unsigned key_management_by_tz_secure_allowed:1;	// [32]
+	unsigned key_management_by_nonsecure_allowed:1;	// [33]
+	unsigned key_management_by_modem_allowed:1;	// [34]
+	unsigned key_management_by_spu_allowed:1;	// [35]
+	unsigned reserved2:28;				// [36:63]
+} __packed;
+
+struct hwkm_kdf_bsve {
+	unsigned mks:8;				// [0:7]
+	unsigned key_policy_version_en:1;	// [8]
+	unsigned apps_secure_en:1;		// [9]
+	unsigned msa_secure_en:1;		// [10]
+	unsigned lcm_fuse_row_en:1;		// [11]
+	unsigned boot_stage_otp_en:1;		// [12]
+	unsigned swc_en:1;			// [13]
+	u64 fuse_region_sha_digest_en:64;	// [14:77]
+	unsigned child_key_policy_en:1;		// [78]
+	unsigned mks_en:1;			// [79]
+	unsigned reserved:16;			// [80:95]
+} __packed;
+
+struct hwkm_wrapping_bsve {
+	unsigned key_policy_version_en:1;      // [0]
+	unsigned apps_secure_en:1;             // [1]
+	unsigned msa_secure_en:1;              // [2]
+	unsigned lcm_fuse_row_en:1;            // [3]
+	unsigned boot_stage_otp_en:1;          // [4]
+	unsigned swc_en:1;                     // [5]
+	u64 fuse_region_sha_digest_en:64; // [6:69]
+	unsigned child_key_policy_en:1;        // [70]
+	unsigned mks_en:1;                     // [71]
+	unsigned reserved:24;                  // [72:95]
+} __packed;
+
+struct hwkm_operation_info {
+	unsigned op:4;		// [0:3]
+	unsigned irq_en:1;	// [4]
+	unsigned slot1_desc:8;	// [5:12]
+	unsigned slot2_desc:8;	// [13:20]
+	unsigned op_flag:1;	// [21]
+	unsigned context_len:5;	// [22:26]
+	unsigned len:5;		// [27:31]
+} __packed;
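+
+/*
+ * Illustrative packing example: hwkm_operation_info occupies exactly one
+ * 32-bit command word (CMD[0]). A keyslot read of slot 10 with len 12
+ * (illustrative values) would place op in bits [0:3], slot1_desc = 10 in
+ * bits [5:12], op_flag = 0 in bit [21] and len = 12 in bits [27:31].
+ */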
+
+#endif /* __HWKM_SERIALIZE_H_ */
diff --git a/drivers/soc/qcom/hwkmregs.h b/drivers/soc/qcom/hwkmregs.h
new file mode 100644
index 0000000..552e489
--- /dev/null
+++ b/drivers/soc/qcom/hwkmregs.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _QTI_HARDWARE_KEY_MANAGER_REGS_H_
+#define _QTI_HARDWARE_KEY_MANAGER_REGS_H_
+
+#define HWKM_VERSION_STEP_REV_MASK		0xFFFF
+#define HWKM_VERSION_STEP_REV			0 /* bit 15-0 */
+#define HWKM_VERSION_MAJOR_REV_MASK		0xFF000000
+#define HWKM_VERSION_MAJOR_REV			24 /* bit 31-24 */
+#define HWKM_VERSION_MINOR_REV_MASK		0xFF0000
+#define HWKM_VERSION_MINOR_REV			16 /* bit 23-16 */
+
+/* QTI HWKM master registers from SWI */
+/* QTI HWKM master shared registers */
+#define QTI_HWKM_MASTER_RG_IPCAT_VERSION		0x0000
+#define QTI_HWKM_MASTER_RG_KEY_POLICY_VERSION		0x0004
+#define QTI_HWKM_MASTER_RG_SHARED_STATUS		0x0008
+#define QTI_HWKM_MASTER_RG_KEYTABLE_SIZE		0x000C
+
+/* QTI HWKM master register bank 2 */
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_CTL		0x4000
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_STATUS		0x4004
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_STATUS	0x4008
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_MASK		0x400C
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_ESR		0x4010
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_ESR_IRQ_MASK	0x4014
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_ESYNR		0x4018
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_0			0x401C
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_1			0x4020
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_2			0x4024
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_3			0x4028
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_4			0x402C
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_5			0x4030
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_6			0x4034
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_7			0x4038
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_8			0x403C
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_9			0x4040
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_10			0x4044
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_11			0x4048
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_12			0x404C
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_13			0x4050
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_14			0x4054
+#define QTI_HWKM_MASTER_RG_BANK2_CMD_15			0x4058
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_0			0x405C
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_1			0x4060
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_2			0x4064
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_3			0x4068
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_4			0x406C
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_5			0x4070
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_6			0x4074
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_7			0x4078
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_8			0x407C
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_9			0x4080
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_10			0x4084
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_11			0x4088
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_12			0x408C
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_13			0x4090
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_14			0x4094
+#define QTI_HWKM_MASTER_RG_BANK2_RSP_15			0x4098
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_IRQ_ROUTING	0x409C
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_0		0x40A0
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_1		0x40A4
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_2		0x40A8
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_3		0x40AC
+#define QTI_HWKM_MASTER_RG_BANK2_BANKN_BBAC_4		0x40B0
+
+/* QTI HWKM master register bank 3 */
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_CTL		0x5000
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_STATUS		0x5004
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_IRQ_STATUS	0x5008
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_IRQ_MASK		0x500C
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_ESR		0x5010
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_ESR_IRQ_MASK	0x5014
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_ESYNR		0x5018
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_0			0x501C
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_1			0x5020
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_2			0x5024
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_3			0x5028
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_4			0x502C
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_5			0x5030
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_6			0x5034
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_7			0x5038
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_8			0x503C
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_9			0x5040
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_10			0x5044
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_11			0x5048
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_12			0x504C
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_13			0x5050
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_14			0x5054
+#define QTI_HWKM_MASTER_RG_BANK3_CMD_15			0x5058
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_0			0x505C
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_1			0x5060
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_2			0x5064
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_3			0x5068
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_4			0x506C
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_5			0x5070
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_6			0x5074
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_7			0x5078
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_8			0x507C
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_9			0x5080
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_10			0x5084
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_11			0x5088
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_12			0x508C
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_13			0x5090
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_14			0x5094
+#define QTI_HWKM_MASTER_RG_BANK3_RSP_15			0x5098
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_IRQ_ROUTING	0x509C
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_0		0x50A0
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_1		0x50A4
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_2		0x50A8
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_3		0x50AC
+#define QTI_HWKM_MASTER_RG_BANK3_BANKN_BBAC_4		0x50B0
+
+/* QTI HWKM access control registers for Bank 2 */
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_0	0x8000
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_1	0x8004
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_2	0x8008
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_3	0x800C
+#define QTI_HWKM_MASTER_RG_BANK2_AC_BANKN_BBAC_4	0x8010
+
+/* QTI HWKM access control registers for Bank 3 */
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_0	0x9000
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_1	0x9004
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_2	0x9008
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_3	0x900C
+#define QTI_HWKM_MASTER_RG_BANK3_AC_BANKN_BBAC_4	0x9010
+
+/* QTI HWKM ICE slave config and status registers */
+#define QTI_HWKM_ICE_RG_TZ_KM_CTL			0x1000
+#define QTI_HWKM_ICE_RG_TZ_KM_STATUS			0x1004
+#define QTI_HWKM_ICE_RG_TZ_KM_STATUS_IRQ_MASK		0x1008
+#define QTI_HWKM_ICE_RG_TZ_KM_BOOT_STAGE_OTP		0x100C
+#define QTI_HWKM_ICE_RG_TZ_KM_DEBUG_CTL			0x1010
+#define QTI_HWKM_ICE_RG_TZ_KM_DEBUG_WRITE		0x1014
+#define QTI_HWKM_ICE_RG_TZ_KM_DEBUG_READ		0x1018
+#define QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL		0x101C
+#define QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_STATUS		0x1020
+#define QTI_HWKM_ICE_RG_TZ_KM_COMMON_IRQ_ROUTING	0x1024
+
+/* QTI HWKM ICE slave registers from SWI */
+/* QTI HWKM ICE slave shared registers */
+#define QTI_HWKM_ICE_RG_IPCAT_VERSION			0x0000
+#define QTI_HWKM_ICE_RG_KEY_POLICY_VERSION		0x0004
+#define QTI_HWKM_ICE_RG_SHARED_STATUS			0x0008
+#define QTI_HWKM_ICE_RG_KEYTABLE_SIZE			0x000C
+
+/* QTI HWKM ICE slave register bank 0 */
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_CTL			0x2000
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS		0x2004
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS		0x2008
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_MASK		0x200C
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_ESR			0x2010
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_ESR_IRQ_MASK	0x2014
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_ESYNR		0x2018
+#define QTI_HWKM_ICE_RG_BANK0_CMD_0			0x201C
+#define QTI_HWKM_ICE_RG_BANK0_CMD_1			0x2020
+#define QTI_HWKM_ICE_RG_BANK0_CMD_2			0x2024
+#define QTI_HWKM_ICE_RG_BANK0_CMD_3			0x2028
+#define QTI_HWKM_ICE_RG_BANK0_CMD_4			0x202C
+#define QTI_HWKM_ICE_RG_BANK0_CMD_5			0x2030
+#define QTI_HWKM_ICE_RG_BANK0_CMD_6			0x2034
+#define QTI_HWKM_ICE_RG_BANK0_CMD_7			0x2038
+#define QTI_HWKM_ICE_RG_BANK0_CMD_8			0x203C
+#define QTI_HWKM_ICE_RG_BANK0_CMD_9			0x2040
+#define QTI_HWKM_ICE_RG_BANK0_CMD_10			0x2044
+#define QTI_HWKM_ICE_RG_BANK0_CMD_11			0x2048
+#define QTI_HWKM_ICE_RG_BANK0_CMD_12			0x204C
+#define QTI_HWKM_ICE_RG_BANK0_CMD_13			0x2050
+#define QTI_HWKM_ICE_RG_BANK0_CMD_14			0x2054
+#define QTI_HWKM_ICE_RG_BANK0_CMD_15			0x2058
+#define QTI_HWKM_ICE_RG_BANK0_RSP_0			0x205C
+#define QTI_HWKM_ICE_RG_BANK0_RSP_1			0x2060
+#define QTI_HWKM_ICE_RG_BANK0_RSP_2			0x2064
+#define QTI_HWKM_ICE_RG_BANK0_RSP_3			0x2068
+#define QTI_HWKM_ICE_RG_BANK0_RSP_4			0x206C
+#define QTI_HWKM_ICE_RG_BANK0_RSP_5			0x2070
+#define QTI_HWKM_ICE_RG_BANK0_RSP_6			0x2074
+#define QTI_HWKM_ICE_RG_BANK0_RSP_7			0x2078
+#define QTI_HWKM_ICE_RG_BANK0_RSP_8			0x207C
+#define QTI_HWKM_ICE_RG_BANK0_RSP_9			0x2080
+#define QTI_HWKM_ICE_RG_BANK0_RSP_10			0x2084
+#define QTI_HWKM_ICE_RG_BANK0_RSP_11			0x2088
+#define QTI_HWKM_ICE_RG_BANK0_RSP_12			0x208C
+#define QTI_HWKM_ICE_RG_BANK0_RSP_13			0x2090
+#define QTI_HWKM_ICE_RG_BANK0_RSP_14			0x2094
+#define QTI_HWKM_ICE_RG_BANK0_RSP_15			0x2098
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_ROUTING		0x209C
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_0		0x20A0
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_1		0x20A4
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_2		0x20A8
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_3		0x20AC
+#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_4		0x20B0
+
+/* QTI HWKM access control registers for Bank 0 */
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_0		0x5000
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_1		0x5004
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_2		0x5008
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_3		0x500C
+#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_4		0x5010
+
+/* QTI HWKM ICE slave config reg vals */
+
+/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_KM_CTL */
+#define CRC_CHECK_EN				0
+#define KEYTABLE_HW_WR_ACCESS_EN		1
+#define KEYTABLE_HW_RD_ACCESS_EN		2
+#define BOOT_INIT0_DISABLE			3
+#define BOOT_INIT1_DISABLE			4
+#define ICE_LEGACY_MODE_EN_OTP			5
+
+/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_KM_STATUS */
+#define KT_CLEAR_DONE				0
+#define BOOT_CMD_LIST0_DONE			1
+#define BOOT_CMD_LIST1_DONE			2
+#define KEYTABLE_KEY_POLICY			3
+#define KEYTABLE_INTEGRITY_ERROR		4
+#define KEYTABLE_KEY_SLOT_ERROR			5
+#define KEYTABLE_KEY_SLOT_NOT_EVEN_ERROR	6
+#define KEYTABLE_KEY_SLOT_OUT_OF_RANGE		7
+#define KEYTABLE_KEY_SIZE_ERROR			8
+#define KEYTABLE_OPERATION_ERROR		9
+#define LAST_ACTIVITY_BANK			10
+#define CRYPTO_LIB_BIST_ERROR			13
+#define CRYPTO_LIB_BIST_DONE			14
+#define BIST_ERROR				15
+#define BIST_DONE				16
+#define LAST_ACTIVITY_BANK_MASK			0x1c00
+
+/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_TPKEY_RECEIVE_CTL */
+#define TPKEY_EN				8
+
+/* QTI HWKM Bank status & control reg vals */
+
+/* HWKM_MASTER_CFG_KM_BANKN_CTL */
+#define CMD_ENABLE_BIT				0
+#define CMD_FIFO_CLEAR_BIT			1
+
+/* HWKM_MASTER_CFG_KM_BANKN_STATUS */
+#define CURRENT_CMD_REMAINING_LENGTH		0
+#define MOST_RECENT_OPCODE			5
+#define RSP_FIFO_AVAILABLE_DATA			9
+#define CMD_FIFO_AVAILABLE_SPACE		14
+#define ICE_LEGACY_MODE_BIT			19
+#define CMD_FIFO_AVAILABLE_SPACE_MASK		0x7c000
+#define RSP_FIFO_AVAILABLE_DATA_MASK		0x3e00
+#define MOST_RECENT_OPCODE_MASK			0x1e0
+#define CURRENT_CMD_REMAINING_LENGTH_MASK	0x1f
+
+/* HWKM_MASTER_CFG_KM_BANKN_IRQ_STATUS */
+#define ARB_GRAN_WINNER				0
+#define CMD_DONE_BIT				1
+#define RSP_FIFO_NOT_EMPTY			2
+#define RSP_FIFO_FULL				3
+#define RSP_FIFO_UNDERFLOW			4
+#define CMD_FIFO_UNDERFLOW			5
+
+#endif /* _QTI_HARDWARE_KEY_MANAGER_REGS_H_ */
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 3e3a41b..addf18e 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -846,6 +846,11 @@
 	clear_bit(ICNSS_FW_DOWN, &penv->state);
 	icnss_ignore_fw_timeout(false);
 
+	if (test_bit(ICNSS_WLFW_CONNECTED, &penv->state)) {
+		icnss_pr_err("QMI Server already in Connected State\n");
+		ICNSS_ASSERT(0);
+	}
+
 	ret = icnss_connect_to_fw_server(penv, data);
 	if (ret)
 		goto fail;
diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c
index b869497..2cea0df 100644
--- a/drivers/soc/qcom/icnss2/main.c
+++ b/drivers/soc/qcom/icnss2/main.c
@@ -177,6 +177,20 @@
 	return "UNKNOWN";
 };
 
+char *icnss_soc_wake_event_to_str(enum icnss_soc_wake_event_type type)
+{
+	switch (type) {
+	case ICNSS_SOC_WAKE_REQUEST_EVENT:
+		return "SOC_WAKE_REQUEST";
+	case ICNSS_SOC_WAKE_RELEASE_EVENT:
+		return "SOC_WAKE_RELEASE";
+	case ICNSS_SOC_WAKE_EVENT_MAX:
+		return "SOC_EVENT_MAX";
+	}
+
+	return "UNKNOWN";
+}
+
 int icnss_driver_event_post(struct icnss_priv *priv,
 			    enum icnss_driver_event_type type,
 			    u32 flags, void *data)
@@ -249,6 +263,78 @@
 	return ret;
 }
 
+int icnss_soc_wake_event_post(struct icnss_priv *priv,
+			      enum icnss_soc_wake_event_type type,
+			      u32 flags, void *data)
+{
+	struct icnss_soc_wake_event *event;
+	unsigned long irq_flags;
+	gfp_t gfp = GFP_KERNEL;
+	int ret = 0;
+
+	if (!priv)
+		return -ENODEV;
+
+	icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
+		     icnss_soc_wake_event_to_str(type), type, current->comm,
+		     flags, priv->state);
+
+	if (type >= ICNSS_SOC_WAKE_EVENT_MAX) {
+		icnss_pr_err("Invalid Event type: %d, can't post\n", type);
+		return -EINVAL;
+	}
+
+	if (in_interrupt() || irqs_disabled())
+		gfp = GFP_ATOMIC;
+
+	event = kzalloc(sizeof(*event), gfp);
+	if (!event)
+		return -ENOMEM;
+
+	icnss_pm_stay_awake(priv);
+
+	event->type = type;
+	event->data = data;
+	init_completion(&event->complete);
+	event->ret = ICNSS_EVENT_PENDING;
+	event->sync = !!(flags & ICNSS_EVENT_SYNC);
+
+	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
+	list_add_tail(&event->list, &priv->soc_wake_msg_list);
+	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
+
+	priv->stats.soc_wake_events[type].posted++;
+	queue_work(priv->soc_wake_wq, &priv->soc_wake_msg_work);
+
+	if (!(flags & ICNSS_EVENT_SYNC))
+		goto out;
+
+	if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
+		wait_for_completion(&event->complete);
+	else
+		ret = wait_for_completion_interruptible(&event->complete);
+
+	icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+		     icnss_soc_wake_event_to_str(type), type, priv->state, ret,
+		     event->ret);
+
+	spin_lock_irqsave(&priv->soc_wake_msg_lock, irq_flags);
+	if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
+		event->sync = false;
+		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
+		ret = -EINTR;
+		goto out;
+	}
+	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, irq_flags);
+
+	ret = event->ret;
+	kfree(event);
+
+out:
+	icnss_pm_relax(priv);
+	return ret;
+}
+
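+/*
+ * Illustrative only: callers post SOC wake events instead of sending QMI
+ * messages directly, e.g.
+ *
+ *	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_REQUEST_EVENT,
+ *				  ICNSS_EVENT_SYNC, NULL);
+ *
+ * which queues the event on soc_wake_msg_list and, because
+ * ICNSS_EVENT_SYNC is set, blocks until the worker completes it.
+ */
+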
 bool icnss_is_fw_ready(void)
 {
 	if (!penv)
@@ -888,6 +974,41 @@
 	return ret;
 }
 
+static int icnss_event_soc_wake_request(struct icnss_priv *priv, void *data)
+{
+	int ret = 0;
+
+	if (!priv)
+		return -ENODEV;
+
+	ret = wlfw_send_soc_wake_msg(priv, QMI_WLFW_WAKE_REQUEST_V01);
+	if (!ret)
+		atomic_inc(&priv->soc_wake_ref_count);
+
+	return ret;
+}
+
+static int icnss_event_soc_wake_release(struct icnss_priv *priv, void *data)
+{
+	int ret = 0;
+	int count = 0;
+
+	if (!priv)
+		return -ENODEV;
+
+	count = atomic_dec_return(&priv->soc_wake_ref_count);
+
+	if (count) {
+		icnss_pr_dbg("Skipping wake release, pending ref count: %d\n",
+			     count);
+		return 0;
+	}
+
+	ret = wlfw_send_soc_wake_msg(priv, QMI_WLFW_WAKE_RELEASE_V01);
+
+	return ret;
+}
+
 static int icnss_driver_event_register_driver(struct icnss_priv *priv,
 							 void *data)
 {
@@ -1225,6 +1346,68 @@
 	icnss_pm_relax(priv);
 }
 
+static void icnss_soc_wake_msg_work(struct work_struct *work)
+{
+	struct icnss_priv *priv =
+		container_of(work, struct icnss_priv, soc_wake_msg_work);
+	struct icnss_soc_wake_event *event;
+	unsigned long flags;
+	int ret;
+
+	icnss_pm_stay_awake(priv);
+
+	spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+
+	while (!list_empty(&priv->soc_wake_msg_list)) {
+		event = list_first_entry(&priv->soc_wake_msg_list,
+					 struct icnss_soc_wake_event, list);
+		list_del(&event->list);
+		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
+
+		icnss_pr_dbg("Processing event: %s%s(%d), state: 0x%lx\n",
+			     icnss_soc_wake_event_to_str(event->type),
+			     event->sync ? "-sync" : "", event->type,
+			     priv->state);
+
+		switch (event->type) {
+		case ICNSS_SOC_WAKE_REQUEST_EVENT:
+			ret = icnss_event_soc_wake_request(priv,
+							   event->data);
+			break;
+		case ICNSS_SOC_WAKE_RELEASE_EVENT:
+			ret = icnss_event_soc_wake_release(priv,
+							   event->data);
+			break;
+		default:
+			icnss_pr_err("Invalid Event type: %d", event->type);
+			kfree(event);
+			continue;
+		}
+
+		priv->stats.soc_wake_events[event->type].processed++;
+
+		icnss_pr_dbg("Event Processed: %s%s(%d), ret: %d, state: 0x%lx\n",
+			     icnss_soc_wake_event_to_str(event->type),
+			     event->sync ? "-sync" : "", event->type, ret,
+			     priv->state);
+
+		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+		if (event->sync) {
+			event->ret = ret;
+			complete(&event->complete);
+			continue;
+		}
+		spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
+
+		kfree(event);
+
+		spin_lock_irqsave(&priv->soc_wake_msg_lock, flags);
+	}
+	spin_unlock_irqrestore(&priv->soc_wake_msg_lock, flags);
+
+	icnss_pm_relax(priv);
+}
+
 static int icnss_msa0_ramdump(struct icnss_priv *priv)
 {
 	struct ramdump_segment segment;
@@ -1963,6 +2146,71 @@
 }
 EXPORT_SYMBOL(icnss_set_fw_log_mode);
 
+int icnss_force_wake_request(struct device *dev)
+{
+	struct icnss_priv *priv;
+	int count = 0;
+
+	if (!dev)
+		return -ENODEV;
+
+	priv = dev_get_drvdata(dev);
+
+	if (!priv) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("Calling SOC Wake request\n");
+
+	if (atomic_read(&priv->soc_wake_ref_count)) {
+		count = atomic_inc_return(&priv->soc_wake_ref_count);
+		icnss_pr_dbg("SOC already awake, Ref count: %d\n", count);
+		return 0;
+	}
+
+	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_REQUEST_EVENT,
+				  0, NULL);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_force_wake_request);
+
+int icnss_force_wake_release(struct device *dev)
+{
+	struct icnss_priv *priv;
+
+	if (!dev)
+		return -ENODEV;
+
+	priv = dev_get_drvdata(dev);
+
+	if (!priv) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	icnss_pr_dbg("Calling SOC Wake release\n");
+
+	icnss_soc_wake_event_post(priv, ICNSS_SOC_WAKE_RELEASE_EVENT,
+				  0, NULL);
+
+	return 0;
+}
+EXPORT_SYMBOL(icnss_force_wake_release);
+
+int icnss_is_device_awake(struct device *dev)
+{
+	struct icnss_priv *priv;
+
+	if (!dev)
+		return -ENODEV;
+
+	priv = dev_get_drvdata(dev);
+
+	if (!priv) {
+		icnss_pr_err("Platform driver not initialized\n");
+		return -EINVAL;
+	}
+
+	return atomic_read(&priv->soc_wake_ref_count);
+}
+EXPORT_SYMBOL(icnss_is_device_awake);
+
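+/*
+ * Illustrative only: the force-wake API is reference counted, so callers
+ * are expected to pair the calls, e.g.
+ *
+ *	icnss_force_wake_request(dev);
+ *	... access hardware ...
+ *	icnss_force_wake_release(dev);
+ *
+ * icnss_is_device_awake(dev) returns the current reference count.
+ */
+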
 int icnss_athdiag_read(struct device *dev, uint32_t offset,
 		       uint32_t mem_type, uint32_t data_len,
 		       uint8_t *output)
@@ -2656,6 +2904,7 @@
 
 	spin_lock_init(&priv->event_lock);
 	spin_lock_init(&priv->on_off_lock);
+	spin_lock_init(&priv->soc_wake_msg_lock);
 	mutex_init(&priv->dev_lock);
 
 	priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
@@ -2668,10 +2917,21 @@
 	INIT_WORK(&priv->event_work, icnss_driver_event_work);
 	INIT_LIST_HEAD(&priv->event_list);
 
+	priv->soc_wake_wq = alloc_workqueue("icnss_soc_wake_event",
+					    WQ_UNBOUND, 1);
+	if (!priv->soc_wake_wq) {
+		icnss_pr_err("Soc wake Workqueue creation failed\n");
+		ret = -ENOMEM;
+		goto out_destroy_wq;
+	}
+
+	INIT_WORK(&priv->soc_wake_msg_work, icnss_soc_wake_msg_work);
+	INIT_LIST_HEAD(&priv->soc_wake_msg_list);
+
 	ret = icnss_register_fw_service(priv);
 	if (ret < 0) {
 		icnss_pr_err("fw service registration failed: %d\n", ret);
-		goto out_destroy_wq;
+		goto out_destroy_soc_wq;
 	}
 
 	icnss_enable_recovery(priv);
@@ -2697,6 +2957,8 @@
 
 	return 0;
 
+out_destroy_soc_wq:
+	destroy_workqueue(priv->soc_wake_wq);
 out_destroy_wq:
 	destroy_workqueue(priv->event_wq);
 smmu_cleanup:
@@ -2733,6 +2995,9 @@
 	if (priv->event_wq)
 		destroy_workqueue(priv->event_wq);
 
+	if (priv->soc_wake_wq)
+		destroy_workqueue(priv->soc_wake_wq);
+
 	priv->iommu_domain = NULL;
 
 	icnss_hw_power_off(priv);
diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h
index cd5d6dd..44efede 100644
--- a/drivers/soc/qcom/icnss2/main.h
+++ b/drivers/soc/qcom/icnss2/main.h
@@ -55,6 +55,12 @@
 	ICNSS_DRIVER_EVENT_MAX,
 };
 
+enum icnss_soc_wake_event_type {
+	ICNSS_SOC_WAKE_REQUEST_EVENT,
+	ICNSS_SOC_WAKE_RELEASE_EVENT,
+	ICNSS_SOC_WAKE_EVENT_MAX,
+};
+
 struct icnss_event_server_arrive_data {
 	unsigned int node;
 	unsigned int port;
@@ -74,6 +80,15 @@
 	void *data;
 };
 
+struct icnss_soc_wake_event {
+	struct list_head list;
+	enum icnss_soc_wake_event_type type;
+	bool sync;
+	struct completion complete;
+	int ret;
+	void *data;
+};
+
 enum icnss_driver_state {
 	ICNSS_WLFW_CONNECTED,
 	ICNSS_POWER_ON,
@@ -150,6 +165,11 @@
 	} events[ICNSS_DRIVER_EVENT_MAX];
 
 	struct {
+		u32 posted;
+		u32 processed;
+	} soc_wake_events[ICNSS_SOC_WAKE_EVENT_MAX];
+
+	struct {
 		uint32_t request;
 		uint32_t free;
 		uint32_t enable;
@@ -210,6 +230,9 @@
 	u32 exit_power_save_req;
 	u32 exit_power_save_resp;
 	u32 exit_power_save_err;
+	u32 soc_wake_req;
+	u32 soc_wake_resp;
+	u32 soc_wake_err;
 };
 
 #define WLFW_MAX_TIMESTAMP_LEN 32
@@ -282,10 +305,14 @@
 	size_t smmu_iova_ipa_len;
 	struct qmi_handle qmi;
 	struct list_head event_list;
+	struct list_head soc_wake_msg_list;
 	spinlock_t event_lock;
+	spinlock_t soc_wake_msg_lock;
 	struct work_struct event_work;
 	struct work_struct fw_recv_msg_work;
+	struct work_struct soc_wake_msg_work;
 	struct workqueue_struct *event_wq;
+	struct workqueue_struct *soc_wake_wq;
 	phys_addr_t msa_pa;
 	phys_addr_t msi_addr_pa;
 	dma_addr_t msi_addr_iova;
@@ -342,6 +369,7 @@
 	struct icnss_fw_mem qdss_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
 	void *get_info_cb_ctx;
 	int (*get_info_cb)(void *ctx, void *event, int event_len);
+	atomic_t soc_wake_ref_count;
 };
 
 struct icnss_reg_info {
@@ -358,5 +386,9 @@
 			    u32 flags, void *data);
 void icnss_allow_recursive_recovery(struct device *dev);
 void icnss_disallow_recursive_recovery(struct device *dev);
+char *icnss_soc_wake_event_to_str(enum icnss_soc_wake_event_type type);
+int icnss_soc_wake_event_post(struct icnss_priv *priv,
+			      enum icnss_soc_wake_event_type type,
+			      u32 flags, void *data);
 #endif
 
diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c
index 3a96131..225afb1 100644
--- a/drivers/soc/qcom/icnss2/qmi.c
+++ b/drivers/soc/qcom/icnss2/qmi.c
@@ -413,6 +413,82 @@
 	return ret;
 }
 
+int wlfw_send_soc_wake_msg(struct icnss_priv *priv,
+			   enum wlfw_soc_wake_enum_v01 type)
+{
+	int ret;
+	struct wlfw_soc_wake_req_msg_v01 *req;
+	struct wlfw_soc_wake_resp_msg_v01 *resp;
+	struct qmi_txn txn;
+
+	if (!priv)
+		return -ENODEV;
+
+	if (test_bit(ICNSS_FW_DOWN, &priv->state))
+		return -EINVAL;
+
+	icnss_pr_dbg("Sending soc wake msg, type: 0x%x\n",
+		     type);
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+	req->wake_valid = 1;
+	req->wake = type;
+
+	priv->stats.soc_wake_req++;
+
+	ret = qmi_txn_init(&priv->qmi, &txn,
+			   wlfw_soc_wake_resp_msg_v01_ei, resp);
+
+	if (ret < 0) {
+		icnss_pr_err("Fail to init txn for wake msg resp %d\n",
+			     ret);
+		goto out;
+	}
+
+	ret = qmi_send_request(&priv->qmi, NULL, &txn,
+			       QMI_WLFW_SOC_WAKE_REQ_V01,
+			       WLFW_SOC_WAKE_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_soc_wake_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		icnss_pr_err("Fail to send soc wake msg %d\n", ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, priv->ctrl_params.qmi_timeout);
+	if (ret < 0) {
+		icnss_qmi_fatal_err("SOC wake timed out with ret %d\n",
+				    ret);
+		goto out;
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		icnss_qmi_fatal_err(
+			"SOC wake request rejected,result:%d error:%d\n",
+			resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+		goto out;
+	}
+
+	priv->stats.soc_wake_resp++;
+
+	kfree(resp);
+	kfree(req);
+	return 0;
+
+out:
+	kfree(req);
+	kfree(resp);
+	priv->stats.soc_wake_err++;
+	return ret;
+}
+
 int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv)
 {
 	int ret;
@@ -2196,7 +2272,7 @@
 	if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
 		return -EINVAL;
 
-	if (test_bit(ICNSS_FW_DOWN, &priv->state))
+	if (test_bit(ICNSS_FW_DOWN, &plat_priv->state))
 		return -EINVAL;
 
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
diff --git a/drivers/soc/qcom/icnss2/qmi.h b/drivers/soc/qcom/icnss2/qmi.h
index c4c42ce..f4c1d2b 100644
--- a/drivers/soc/qcom/icnss2/qmi.h
+++ b/drivers/soc/qcom/icnss2/qmi.h
@@ -139,6 +139,12 @@
 {
 	return 0;
 }
+
+static inline int wlfw_send_soc_wake_msg(struct icnss_priv *priv,
+					 enum wlfw_soc_wake_enum_v01 type)
+{
+	return 0;
+}
 #else
 int wlfw_ind_register_send_sync_msg(struct icnss_priv *priv);
 int icnss_connect_to_fw_server(struct icnss_priv *priv, void *data);
@@ -177,6 +183,8 @@
 int wlfw_exit_power_save_send_msg(struct icnss_priv *priv);
 int icnss_wlfw_get_info_send_sync(struct icnss_priv *priv, int type,
 				  void *cmd, int cmd_len);
+int wlfw_send_soc_wake_msg(struct icnss_priv *priv,
+			   enum wlfw_soc_wake_enum_v01 type);
 #endif
 
 #endif /* __ICNSS_QMI_H__*/
diff --git a/drivers/soc/qcom/l2_reuse.c b/drivers/soc/qcom/l2_reuse.c
index e20b49a..bd96d38 100644
--- a/drivers/soc/qcom/l2_reuse.c
+++ b/drivers/soc/qcom/l2_reuse.c
@@ -13,7 +13,7 @@
 
 #define L2_REUSE_SMC_ID 0x00200090C
 
-static bool l2_reuse_enable = true;
+static bool l2_reuse_enable;
 static struct kobject *l2_reuse_kobj;
 
 static ssize_t sysfs_show(struct kobject *kobj,
@@ -38,12 +38,12 @@
 	return count;
 }
 
-struct kobj_attribute l2_reuse_attr = __ATTR(l2_reuse_enable, 0660,
+struct kobj_attribute l2_reuse_attr = __ATTR(extended_cache_enable, 0660,
 		sysfs_show, sysfs_store);
 
 static int __init l2_reuse_driver_init(void)
 {
-	l2_reuse_kobj = kobject_create_and_add("l2_reuse_enable", power_kobj);
+	l2_reuse_kobj = kobject_create_and_add("l2_reuse", power_kobj);
 
 	if (!l2_reuse_kobj) {
 		pr_info("kobj creation for l2_reuse failed\n");
diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c
index 2a7b7fd..cbea060 100644
--- a/drivers/soc/qcom/memshare/msm_memshare.c
+++ b/drivers/soc/qcom/memshare/msm_memshare.c
@@ -308,24 +308,35 @@
 
 	case SUBSYS_BEFORE_SHUTDOWN:
 		bootup_request++;
+		dev_info(memsh_drv->dev,
+		"memshare: SUBSYS_BEFORE_SHUTDOWN: bootup_request:%d\n",
+		bootup_request);
 		for (i = 0; i < MAX_CLIENTS; i++)
 			memblock[i].alloc_request = 0;
 		break;
 
 	case SUBSYS_RAMDUMP_NOTIFICATION:
 		ramdump_event = true;
+		dev_info(memsh_drv->dev,
+		"memshare: SUBSYS_RAMDUMP_NOTIFICATION: ramdump_event:%d\n",
+		ramdump_event);
 		break;
 
 	case SUBSYS_BEFORE_POWERUP:
 		if (_cmd) {
 			notifdata = (struct notif_data *) _cmd;
+			dev_info(memsh_drv->dev,
+			"memshare: SUBSYS_BEFORE_POWERUP: enable_ramdump: %d, ramdump_event: %d\n",
+			notifdata->enable_ramdump, ramdump_event);
 		} else {
 			ramdump_event = false;
+			dev_info(memsh_drv->dev,
+			"memshare: SUBSYS_BEFORE_POWERUP: ramdump_event: %d\n",
+			ramdump_event);
 			break;
 		}
 
 		if (notifdata->enable_ramdump && ramdump_event) {
-			dev_info(memsh_child->dev, "memshare: Ramdump collection is enabled\n");
 			ret = mem_share_do_ramdump();
 			if (ret)
 				dev_err(memsh_child->dev, "memshare: Ramdump collection failed\n");
@@ -334,7 +345,7 @@
 		break;
 
 	case SUBSYS_AFTER_POWERUP:
-		dev_dbg(memsh_child->dev, "memshare: Modem has booted up\n");
+		dev_info(memsh_drv->dev, "memshare: SUBSYS_AFTER_POWERUP: Modem has booted up\n");
 		for (i = 0; i < MAX_CLIENTS; i++) {
 			size = memblock[i].size;
 			if (memblock[i].free_memory > 0 &&
@@ -398,8 +409,9 @@
 	default:
 		break;
 	}
-
 	mutex_unlock(&memsh_drv->mem_share);
+	dev_info(memsh_drv->dev,
+	"memshare: notifier_cb processed for code: %d\n", code);
 	return NOTIFY_DONE;
 }
 
diff --git a/drivers/soc/qcom/msm-spm.c b/drivers/soc/qcom/msm-spm.c
new file mode 100644
index 0000000..357c0d3
--- /dev/null
+++ b/drivers/soc/qcom/msm-spm.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2011-2017, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "spm_driver.h"
+
+#define MSM_SPM_PMIC_STATE_IDLE  0
+
+enum {
+	MSM_SPM_DEBUG_SHADOW = 1U << 0,
+	MSM_SPM_DEBUG_VCTL = 1U << 1,
+};
+
+static int msm_spm_debug_mask;
+module_param_named(
+	debug_mask, msm_spm_debug_mask, int, 0664
+);
+
+struct saw2_data {
+	const char *ver_name;
+	uint32_t major;
+	uint32_t minor;
+	uint32_t *spm_reg_offset_ptr;
+};
+
+static uint32_t msm_spm_reg_offsets_saw2_v2_1[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW_SECURE]		= 0x00,
+	[MSM_SPM_REG_SAW_ID]			= 0x04,
+	[MSM_SPM_REG_SAW_CFG]			= 0x08,
+	[MSM_SPM_REG_SAW_SPM_STS]		= 0x0C,
+	[MSM_SPM_REG_SAW_AVS_STS]		= 0x10,
+	[MSM_SPM_REG_SAW_PMIC_STS]		= 0x14,
+	[MSM_SPM_REG_SAW_RST]			= 0x18,
+	[MSM_SPM_REG_SAW_VCTL]			= 0x1C,
+	[MSM_SPM_REG_SAW_AVS_CTL]		= 0x20,
+	[MSM_SPM_REG_SAW_AVS_LIMIT]		= 0x24,
+	[MSM_SPM_REG_SAW_AVS_DLY]		= 0x28,
+	[MSM_SPM_REG_SAW_AVS_HYSTERESIS]	= 0x2C,
+	[MSM_SPM_REG_SAW_SPM_CTL]		= 0x30,
+	[MSM_SPM_REG_SAW_SPM_DLY]		= 0x34,
+	[MSM_SPM_REG_SAW_PMIC_DATA_0]		= 0x40,
+	[MSM_SPM_REG_SAW_PMIC_DATA_1]		= 0x44,
+	[MSM_SPM_REG_SAW_PMIC_DATA_2]		= 0x48,
+	[MSM_SPM_REG_SAW_PMIC_DATA_3]		= 0x4C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_4]		= 0x50,
+	[MSM_SPM_REG_SAW_PMIC_DATA_5]		= 0x54,
+	[MSM_SPM_REG_SAW_PMIC_DATA_6]		= 0x58,
+	[MSM_SPM_REG_SAW_PMIC_DATA_7]		= 0x5C,
+	[MSM_SPM_REG_SAW_SEQ_ENTRY]		= 0x80,
+	[MSM_SPM_REG_SAW_VERSION]		= 0xFD0,
+};
+
+static uint32_t msm_spm_reg_offsets_saw2_v3_0[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW_SECURE]		= 0x00,
+	[MSM_SPM_REG_SAW_ID]			= 0x04,
+	[MSM_SPM_REG_SAW_CFG]			= 0x08,
+	[MSM_SPM_REG_SAW_SPM_STS]		= 0x0C,
+	[MSM_SPM_REG_SAW_AVS_STS]		= 0x10,
+	[MSM_SPM_REG_SAW_PMIC_STS]		= 0x14,
+	[MSM_SPM_REG_SAW_RST]			= 0x18,
+	[MSM_SPM_REG_SAW_VCTL]			= 0x1C,
+	[MSM_SPM_REG_SAW_AVS_CTL]		= 0x20,
+	[MSM_SPM_REG_SAW_AVS_LIMIT]		= 0x24,
+	[MSM_SPM_REG_SAW_AVS_DLY]		= 0x28,
+	[MSM_SPM_REG_SAW_AVS_HYSTERESIS]	= 0x2C,
+	[MSM_SPM_REG_SAW_SPM_CTL]		= 0x30,
+	[MSM_SPM_REG_SAW_SPM_DLY]		= 0x34,
+	[MSM_SPM_REG_SAW_STS2]			= 0x38,
+	[MSM_SPM_REG_SAW_PMIC_DATA_0]		= 0x40,
+	[MSM_SPM_REG_SAW_PMIC_DATA_1]		= 0x44,
+	[MSM_SPM_REG_SAW_PMIC_DATA_2]		= 0x48,
+	[MSM_SPM_REG_SAW_PMIC_DATA_3]		= 0x4C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_4]		= 0x50,
+	[MSM_SPM_REG_SAW_PMIC_DATA_5]		= 0x54,
+	[MSM_SPM_REG_SAW_PMIC_DATA_6]		= 0x58,
+	[MSM_SPM_REG_SAW_PMIC_DATA_7]		= 0x5C,
+	[MSM_SPM_REG_SAW_SEQ_ENTRY]		= 0x400,
+	[MSM_SPM_REG_SAW_VERSION]		= 0xFD0,
+};
+
+static uint32_t msm_spm_reg_offsets_saw2_v4_1[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW_SECURE]		= 0xC00,
+	[MSM_SPM_REG_SAW_ID]			= 0xC04,
+	[MSM_SPM_REG_SAW_STS2]			= 0xC10,
+	[MSM_SPM_REG_SAW_SPM_STS]		= 0xC0C,
+	[MSM_SPM_REG_SAW_AVS_STS]		= 0xC14,
+	[MSM_SPM_REG_SAW_PMIC_STS]		= 0xC18,
+	[MSM_SPM_REG_SAW_RST]			= 0xC1C,
+	[MSM_SPM_REG_SAW_VCTL]			= 0x900,
+	[MSM_SPM_REG_SAW_AVS_CTL]		= 0x904,
+	[MSM_SPM_REG_SAW_AVS_LIMIT]		= 0x908,
+	[MSM_SPM_REG_SAW_AVS_DLY]		= 0x90C,
+	[MSM_SPM_REG_SAW_SPM_CTL]		= 0x0,
+	[MSM_SPM_REG_SAW_SPM_DLY]		= 0x4,
+	[MSM_SPM_REG_SAW_CFG]			= 0x0C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_0]		= 0x40,
+	[MSM_SPM_REG_SAW_PMIC_DATA_1]		= 0x44,
+	[MSM_SPM_REG_SAW_PMIC_DATA_2]		= 0x48,
+	[MSM_SPM_REG_SAW_PMIC_DATA_3]		= 0x4C,
+	[MSM_SPM_REG_SAW_PMIC_DATA_4]		= 0x50,
+	[MSM_SPM_REG_SAW_PMIC_DATA_5]		= 0x54,
+	[MSM_SPM_REG_SAW_SEQ_ENTRY]		= 0x400,
+	[MSM_SPM_REG_SAW_VERSION]		= 0xFD0,
+};
+
+static struct saw2_data saw2_info[] = {
+	[0] = {
+		"SAW_v2.1",
+		0x2,
+		0x1,
+		msm_spm_reg_offsets_saw2_v2_1,
+	},
+	[1] = {
+		"SAW_v2.3",
+		0x3,
+		0x0,
+		msm_spm_reg_offsets_saw2_v3_0,
+	},
+	[2] = {
+		"SAW_v3.0",
+		0x1,
+		0x0,
+		msm_spm_reg_offsets_saw2_v3_0,
+	},
+	[3] = {
+		"SAW_v4.0",
+		0x4,
+		0x1,
+		msm_spm_reg_offsets_saw2_v4_1,
+	},
+};
+
+static uint32_t num_pmic_data;
+
+static void msm_spm_drv_flush_shadow(struct msm_spm_driver_data *dev,
+		unsigned int reg_index)
+{
+	if (!dev)
+		return;
+
+	__raw_writel(dev->reg_shadow[reg_index],
+		dev->reg_base_addr + dev->reg_offsets[reg_index]);
+}
+
+static void msm_spm_drv_load_shadow(struct msm_spm_driver_data *dev,
+		unsigned int reg_index)
+{
+	dev->reg_shadow[reg_index] =
+		__raw_readl(dev->reg_base_addr +
+				dev->reg_offsets[reg_index]);
+}
+
+static inline int msm_spm_drv_get_num_spm_entry(
+		struct msm_spm_driver_data *dev)
+{
+	if (!dev)
+		return -ENODEV;
+
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 24) & 0xFF;
+}
+
+static inline void msm_spm_drv_set_start_addr(
+		struct msm_spm_driver_data *dev, uint32_t ctl)
+{
+	dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] = ctl;
+}
+
+static inline bool msm_spm_pmic_arb_present(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 2) & 0x1;
+}
+
+static inline void msm_spm_drv_set_vctl2(struct msm_spm_driver_data *dev,
+				uint32_t vlevel, uint32_t vctl_port)
+{
+	unsigned int pmic_data = 0;
+
+	pmic_data |= vlevel;
+	pmic_data |= (vctl_port & 0x7) << 16;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] |= pmic_data;
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_PMIC_DATA_3);
+}
+
+static inline uint32_t msm_spm_drv_get_num_pmic_data(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
+	mb(); /* Ensure we flush */
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 4) & 0x7;
+}
+
+static inline uint32_t msm_spm_drv_get_sts_pmic_state(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] >> 16) &
+				0x03;
+}
+
+uint32_t msm_spm_drv_get_sts_curr_pmic_data(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
+	return dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0x300FF;
+}
+
+static inline void msm_spm_drv_get_saw2_ver(struct msm_spm_driver_data *dev,
+		uint32_t *major, uint32_t *minor)
+{
+	uint32_t val = 0;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_VERSION] =
+			__raw_readl(dev->reg_base_addr + dev->ver_reg);
+
+	val = dev->reg_shadow[MSM_SPM_REG_SAW_VERSION];
+
+	*major = (val >> 28) & 0xF;
+	*minor = (val >> 16) & 0xFFF;
+}
+
+inline int msm_spm_drv_set_spm_enable(
+		struct msm_spm_driver_data *dev, bool enable)
+{
+	uint32_t value = enable ? 0x01 : 0x00;
+
+	if (!dev)
+		return -EINVAL;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] & 0x01) ^ value) {
+
+		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] &= ~0x1;
+		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= value;
+
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
+		wmb(); /* Ensure we flush */
+	}
+	return 0;
+}
+
+int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev)
+{
+	if (!dev)
+		return -EINVAL;
+
+	return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x01;
+}
+
+int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev,
+		 bool enable)
+{
+	uint32_t value = enable ? 0x1 : 0x0;
+
+	if (!dev)
+		return -EINVAL;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x1) ^ value) {
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x1;
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value;
+
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	}
+
+	return 0;
+}
+
+int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev,
+		uint32_t min_lvl, uint32_t max_lvl)
+{
+	uint32_t value = (max_lvl & 0xff) << 16 | (min_lvl & 0xff);
+
+	if (!dev)
+		return -EINVAL;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_LIMIT] = value;
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_LIMIT);
+
+	return 0;
+}
+
+static int msm_spm_drv_avs_irq_mask(enum msm_spm_avs_irq irq)
+{
+	switch (irq) {
+	case MSM_SPM_AVS_IRQ_MIN:
+		return BIT(1);
+	case MSM_SPM_AVS_IRQ_MAX:
+		return BIT(2);
+	default:
+		return -EINVAL;
+	}
+}
+
+int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq, bool enable)
+{
+	int mask = msm_spm_drv_avs_irq_mask(irq);
+	uint32_t value;
+
+	if (!dev)
+		return -EINVAL;
+	else if (mask < 0)
+		return mask;
+
+	value = enable ? mask : 0;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) ^ value) {
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask;
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	}
+
+	return 0;
+}
+
+int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq)
+{
+	int mask = msm_spm_drv_avs_irq_mask(irq);
+
+	if (!dev)
+		return -EINVAL;
+	else if (mask < 0)
+		return mask;
+
+	if (dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) {
+		/*
+		 * The interrupt status is cleared by disabling and then
+		 * re-enabling the interrupt.
+		 */
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= mask;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	}
+
+	return 0;
+}
+
+void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev)
+{
+	int i;
+	int num_spm_entry;
+
+	if (!dev) {
+		__WARN();
+		return;
+	}
+
+	num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);
+
+	for (i = 0; i < num_spm_entry; i++) {
+		__raw_writel(dev->reg_seq_entry_shadow[i],
+			dev->reg_base_addr
+			+ dev->reg_offsets[MSM_SPM_REG_SAW_SEQ_ENTRY]
+			+ 4 * i);
+	}
+	mb(); /* Ensure we flush */
+}
+
+void dump_regs(struct msm_spm_driver_data *dev, int cpu)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS);
+	mb(); /* Ensure we flush */
+	pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_STS: 0x%x\n", cpu,
+			dev->reg_shadow[MSM_SPM_REG_SAW_SPM_STS]);
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
+	mb(); /* Ensure we flush */
+	pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_CTL: 0x%x\n", cpu,
+			dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL]);
+}
+
+int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
+		uint8_t *cmd, uint32_t *offset)
+{
+	uint32_t cmd_w;
+	uint32_t offset_w = *offset / 4;
+	uint8_t last_cmd;
+
+	if (!cmd)
+		return -EINVAL;
+
+	while (1) {
+		int i;
+
+		cmd_w = 0;
+		last_cmd = 0;
+		cmd_w = dev->reg_seq_entry_shadow[offset_w];
+
+		for (i = (*offset % 4); i < 4; i++) {
+			last_cmd = *(cmd++);
+			cmd_w |= last_cmd << (i * 8);
+			(*offset)++;
+			if (last_cmd == 0x0f)
+				break;
+		}
+
+		dev->reg_seq_entry_shadow[offset_w++] = cmd_w;
+		if (last_cmd == 0x0f)
+			break;
+	}
+
+	return 0;
+}
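+
+/*
+ * Example of the packing above (illustrative bytes, assuming the shadow
+ * word starts out zeroed): with *offset == 0, the command stream
+ * 0x20 0x03 0x0f is packed little-endian into word 0 as 0x000f0320; the
+ * loop stops at the 0x0f end-of-program marker and *offset advances to 3.
+ */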
+
+int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
+		uint32_t ctl)
+{
+
+	/*
+	 * SPM is configured to reset the start address to zero after the
+	 * end of the program.
+	 */
+	if (!dev)
+		return -EINVAL;
+
+	msm_spm_drv_set_start_addr(dev, ctl);
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
+	wmb(); /* Ensure we flush */
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) {
+		int i;
+
+		for (i = 0; i < MSM_SPM_REG_NR; i++)
+			pr_info("%s: reg %02x = 0x%08x\n", __func__,
+				dev->reg_offsets[i], dev->reg_shadow[i]);
+	}
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS);
+
+	return 0;
+}
+
+uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
+	return dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0xFF;
+}
+
+#ifdef CONFIG_MSM_AVS_HW
+static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & BIT(0);
+}
+
+static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~BIT(27);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+}
+
+static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev)
+{
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= BIT(27);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+}
+
+static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
+		unsigned int vlevel)
+{
+	vlevel &= 0x3f;
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x7efc00;
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= ((vlevel - 4) << 10);
+	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= (vlevel << 17);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
+}
+
+#else
+static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
+{
+	return false;
+}
+
+static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev) { }
+
+static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev) { }
+
+static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
+		unsigned int vlevel)
+{
+}
+#endif
+
+static inline int msm_spm_drv_validate_data(struct msm_spm_driver_data *dev,
+					unsigned int vlevel, int vctl_port)
+{
+	int timeout_us = dev->vctl_timeout_us;
+	uint32_t new_level;
+
+	/* Confirm the voltage we set was what hardware sent and
+	 * FSM is idle.
+	 */
+	do {
+		udelay(1);
+		new_level = msm_spm_drv_get_sts_curr_pmic_data(dev);
+
+		/*
+		 * VCTL_PORT has to be 0 for vlevel to be updated.
+		 * If the port is not 0, check only PMIC_STATE.
+		 */
+
+		if (((new_level & 0x30000) == MSM_SPM_PMIC_STATE_IDLE) &&
+				(vctl_port || ((new_level & 0xFF) == vlevel)))
+			break;
+	} while (--timeout_us);
+
+	if (!timeout_us) {
+		pr_err("Wrong level %#x\n", new_level);
+		return -EIO;
+	}
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
+		pr_info("%s: done, remaining timeout %u us\n",
+			__func__, timeout_us);
+
+	return 0;
+}
+
+int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, unsigned int vlevel)
+{
+	uint32_t vlevel_set = vlevel;
+	bool avs_enabled;
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	avs_enabled = msm_spm_drv_is_avs_enabled(dev);
+
+	if (!msm_spm_pmic_arb_present(dev))
+		return -ENODEV;
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
+		pr_info("%s: requesting vlevel %#x\n", __func__, vlevel);
+
+	if (avs_enabled)
+		msm_spm_drv_disable_avs(dev);
+
+	if (dev->vctl_port_ub >= 0) {
+		/*
+		 * VCTL can only send an 8-bit voltage level at a time.
+		 * Send the lower 8 bits first; the vlevel change takes
+		 * effect when the upper 8 bits are sent.
+		 */
+		vlevel = vlevel_set & 0xFF;
+	}
+
+	/* Kick the state machine back to idle */
+	dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1;
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST);
+
+	msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port);
+
+	ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port);
+	if (ret)
+		goto set_vdd_bail;
+
+	if (dev->vctl_port_ub >= 0) {
+		/* Send upper 8bit of voltage level */
+		vlevel = (vlevel_set >> 8) & 0xFF;
+
+		/* Kick the state machine back to idle */
+		dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1;
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST);
+
+		/*
+		 * Steps for sending to a VCTL port other than 0:
+		 * - write the VCTL register with the PMIC data and
+		 *   address index
+		 * - perform a system barrier
+		 * - wait for 1us
+		 * - read the PMIC_STS register to confirm the operation
+		 *   completed
+		 */
+		msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port_ub);
+
+		mb(); /* To make sure data is sent before checking status */
+
+		ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port_ub);
+		if (ret)
+			goto set_vdd_bail;
+	}
+
+	/* Set AVS min/max */
+	if (avs_enabled) {
+		msm_spm_drv_set_avs_vlevel(dev, vlevel_set);
+		msm_spm_drv_enable_avs(dev);
+	}
+
+	return ret;
+
+set_vdd_bail:
+	if (avs_enabled)
+		msm_spm_drv_enable_avs(dev);
+
+	pr_err("%s: failed %#x vlevel setting in timeout %uus\n",
+			__func__, vlevel_set, dev->vctl_timeout_us);
+	return -EIO;
+}
+
+static int msm_spm_drv_get_pmic_port(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port)
+{
+	int index = -1;
+
+	switch (port) {
+	case MSM_SPM_PMIC_VCTL_PORT:
+		index = dev->vctl_port;
+		break;
+	case MSM_SPM_PMIC_PHASE_PORT:
+		index = dev->phase_port;
+		break;
+	case MSM_SPM_PMIC_PFM_PORT:
+		index = dev->pfm_port;
+		break;
+	default:
+		break;
+	}
+
+	return index;
+}
+
+int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port, unsigned int data)
+{
+	unsigned int pmic_data = 0;
+	unsigned int timeout_us = 0;
+	int index = 0;
+
+	if (!msm_spm_pmic_arb_present(dev))
+		return -ENODEV;
+
+	index = msm_spm_drv_get_pmic_port(dev, port);
+	if (index < 0)
+		return -ENODEV;
+
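+	/*
+	 * SAW_VCTL takes the PMIC data in bits [7:0] and the port index
+	 * in bits [18:16].
+	 */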
+	pmic_data |= data & 0xFF;
+	pmic_data |= (index & 0x7) << 16;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data;
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);
+	mb(); /* Ensure we flush */
+
+	timeout_us = dev->vctl_timeout_us;
+	/*
+	 * Confirm the PMIC data we set is what the hardware sent by
+	 * checking the PMIC FSM state.
+	 * We cannot read back sts_pmic_data and compare it against the
+	 * value, as we do for set_vdd, since PMIC_STS is only updated
+	 * for SAW_VCTL writes with port index 0.
+	 */
+	do {
+		if (msm_spm_drv_get_sts_pmic_state(dev) ==
+				MSM_SPM_PMIC_STATE_IDLE)
+			break;
+		udelay(1);
+	} while (--timeout_us);
+
+	if (!timeout_us) {
+		pr_err("%s: timed out after %u us, data %d\n",
+				__func__, dev->vctl_timeout_us, data);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq_write)
+{
+	int i;
+
+	if (seq_write)
+		msm_spm_drv_flush_seq_entry(dev);
+
+	for (i = 0; i < MSM_SPM_REG_SAW_PMIC_DATA_0 + num_pmic_data; i++)
+		msm_spm_drv_load_shadow(dev, i);
+
+	for (i = MSM_SPM_REG_NR_INITIALIZE + 1; i < MSM_SPM_REG_NR; i++)
+		msm_spm_drv_load_shadow(dev, i);
+}
+
+int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data)
+{
+	int i;
+	bool found = false;
+
+	dev->ver_reg = data->ver_reg;
+	dev->reg_base_addr = data->reg_base_addr;
+	msm_spm_drv_get_saw2_ver(dev, &dev->major, &dev->minor);
+	for (i = 0; i < ARRAY_SIZE(saw2_info); i++)
+		if (dev->major == saw2_info[i].major &&
+			dev->minor == saw2_info[i].minor) {
+			pr_debug("%s: Version found\n",
+					saw2_info[i].ver_name);
+			dev->reg_offsets = saw2_info[i].spm_reg_offset_ptr;
+			found = true;
+			break;
+		}
+
+	if (!found) {
+		pr_err("%s: No SAW version found\n", __func__);
+		WARN_ON(!found);
+	}
+	return 0;
+}
+
+void msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id,
+		int val)
+{
+	dev->reg_shadow[id] = val;
+	msm_spm_drv_flush_shadow(dev, id);
+	/* Complete the above writes before other accesses */
+	mb();
+}
+
+int msm_spm_drv_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data)
+{
+	int num_spm_entry;
+
+	if (!dev || !data)
+		return -ENODEV;
+
+	dev->vctl_port = data->vctl_port;
+	dev->vctl_port_ub = data->vctl_port_ub;
+	dev->phase_port = data->phase_port;
+	dev->pfm_port = data->pfm_port;
+	dev->reg_base_addr = data->reg_base_addr;
+	memcpy(dev->reg_shadow, data->reg_init_values,
+			sizeof(data->reg_init_values));
+
+	dev->vctl_timeout_us = data->vctl_timeout_us;
+
+	if (!num_pmic_data)
+		num_pmic_data = msm_spm_drv_get_num_pmic_data(dev);
+
+	num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);
+
+	dev->reg_seq_entry_shadow =
+		kcalloc(num_spm_entry, sizeof(*dev->reg_seq_entry_shadow),
+				GFP_KERNEL);
+
+	if (!dev->reg_seq_entry_shadow)
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 6fa278f..1df009f 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -1305,7 +1305,7 @@
 	 * Fall back to serial loading of blobs if the
 	 * workqueue creation failed during module init.
 	 */
-	if (pil_wq) {
+	if (pil_wq && !desc->sequential_loading) {
 		ret = pil_load_segs(desc);
 		if (ret)
 			goto err_deinit_image;
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index c83b038..29fa4b6 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -63,6 +63,7 @@
 	bool shutdown_fail;
 	bool modem_ssr;
 	bool clear_fw_region;
+	bool sequential_loading;
 	u32 subsys_vmid;
 	bool signal_aop;
 	struct mbox_client cl;
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
new file mode 100644
index 0000000..9cff905
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+#include <trace/events/trace_msm_pil_event.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+/* Q6 Register Offsets */
+#define QDSP6SS_RST_EVB			0x010
+#define QDSP6SS_DBG_CFG			0x018
+#define QDSP6SS_NMI_CFG			0x40
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE		0x180
+#define MSS_MODEM_HALT_BASE		0x200
+#define MSS_NC_HALT_BASE		0x280
+
+/* RMB Status Register Values */
+#define STATUS_PBL_SUCCESS		0x1
+#define STATUS_XPU_UNLOCKED		0x1
+#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE			0x00
+#define RMB_PBL_STATUS			0x04
+#define RMB_MBA_COMMAND			0x08
+#define RMB_MBA_STATUS			0x0C
+#define RMB_PMI_META_DATA		0x10
+#define RMB_PMI_CODE_START		0x14
+#define RMB_PMI_CODE_LENGTH		0x18
+#define RMB_PROTOCOL_VERSION		0x1C
+#define RMB_MBA_DEBUG_INFORMATION	0x20
+
+#define POLL_INTERVAL_US		50
+
+#define CMD_META_DATA_READY		0x1
+#define CMD_LOAD_READY			0x2
+#define CMD_PILFAIL_NFY_MBA		0xffffdead
+
+#define STATUS_META_DATA_AUTH_SUCCESS	0x3
+#define STATUS_AUTH_COMPLETE		0x4
+#define STATUS_MBA_UNLOCKED		0x6
+
+/* External BHS */
+#define EXTERNAL_BHS_ON			BIT(0)
+#define EXTERNAL_BHS_STATUS		BIT(4)
+#define BHS_TIMEOUT_US			50
+
+#define MSS_RESTART_PARAM_ID		0x2
+#define MSS_RESTART_ID			0xA
+
+#define MSS_MAGIC			0xAABADEAD
+
+/* Timeout value for MBA boot when minidump is enabled */
+#define MBA_ENCRYPTION_TIMEOUT	3000
+
+enum scm_cmd {
+	PAS_MEM_SETUP_CMD = 2,
+};
+
+static int pbl_mba_boot_timeout_ms = 1000;
+module_param(pbl_mba_boot_timeout_ms, int, 0644);
+
+static int modem_auth_timeout_ms = 10000;
+module_param(modem_auth_timeout_ms, int, 0644);
+
+/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
+static uint modem_trigger_panic;
+module_param(modem_trigger_panic, uint, 0644);
+
+/* To set the modem debug cookie in DBG_CFG register for debugging */
+static uint modem_dbg_cfg;
+module_param(modem_dbg_cfg, uint, 0644);
+
+static void modem_log_rmb_regs(void __iomem *base)
+{
+	pr_err("RMB_MBA_IMAGE: %08x\n", readl_relaxed(base + RMB_MBA_IMAGE));
+	pr_err("RMB_PBL_STATUS: %08x\n", readl_relaxed(base + RMB_PBL_STATUS));
+	pr_err("RMB_MBA_COMMAND: %08x\n",
+				readl_relaxed(base + RMB_MBA_COMMAND));
+	pr_err("RMB_MBA_STATUS: %08x\n", readl_relaxed(base + RMB_MBA_STATUS));
+	pr_err("RMB_PMI_META_DATA: %08x\n",
+				readl_relaxed(base + RMB_PMI_META_DATA));
+	pr_err("RMB_PMI_CODE_START: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_START));
+	pr_err("RMB_PMI_CODE_LENGTH: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_LENGTH));
+	pr_err("RMB_PROTOCOL_VERSION: %08x\n",
+				readl_relaxed(base + RMB_PROTOCOL_VERSION));
+	pr_err("RMB_MBA_DEBUG_INFORMATION: %08x\n",
+			readl_relaxed(base + RMB_MBA_DEBUG_INFORMATION));
+
+	if (modem_trigger_panic == MSS_MAGIC)
+		panic("%s: System ramdump is needed!!!\n", __func__);
+}
+
+static int pil_mss_power_up(struct q6v5_data *drv)
+{
+	int ret = 0;
+	u32 regval;
+
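+	/*
+	 * If the CX rail is gated by an external BHS, turn it on and
+	 * poll until the status bit confirms it is up.
+	 */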
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval |= EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+
+		ret = readl_poll_timeout(drv->cxrail_bhs, regval,
+			regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
+	}
+
+	return ret;
+}
+
+static int pil_mss_power_down(struct q6v5_data *drv)
+{
+	u32 regval;
+
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval &= ~EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+	}
+
+	return 0;
+}
+
+static int pil_mss_enable_clks(struct q6v5_data *drv)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drv->ahb_clk);
+	if (ret)
+		goto err_ahb_clk;
+	ret = clk_prepare_enable(drv->axi_clk);
+	if (ret)
+		goto err_axi_clk;
+	ret = clk_prepare_enable(drv->rom_clk);
+	if (ret)
+		goto err_rom_clk;
+	ret = clk_prepare_enable(drv->gpll0_mss_clk);
+	if (ret)
+		goto err_gpll0_mss_clk;
+	ret = clk_prepare_enable(drv->snoc_axi_clk);
+	if (ret)
+		goto err_snoc_axi_clk;
+	ret = clk_prepare_enable(drv->mnoc_axi_clk);
+	if (ret)
+		goto err_mnoc_axi_clk;
+	return 0;
+	/*
+	 * Unwind in reverse order: each label releases the clocks that
+	 * were enabled before the one that failed.
+	 */
+err_mnoc_axi_clk:
+	clk_disable_unprepare(drv->snoc_axi_clk);
+err_snoc_axi_clk:
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+err_gpll0_mss_clk:
+	clk_disable_unprepare(drv->rom_clk);
+err_rom_clk:
+	clk_disable_unprepare(drv->axi_clk);
+err_axi_clk:
+	clk_disable_unprepare(drv->ahb_clk);
+err_ahb_clk:
+	return ret;
+}
+
+static void pil_mss_disable_clks(struct q6v5_data *drv)
+{
+	clk_disable_unprepare(drv->mnoc_axi_clk);
+	clk_disable_unprepare(drv->snoc_axi_clk);
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+	clk_disable_unprepare(drv->rom_clk);
+	clk_disable_unprepare(drv->axi_clk);
+	if (!drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+}
+
+static void pil_mss_pdc_sync(struct q6v5_data *drv, bool pdc_sync)
+{
+	u32 val = 0;
+	u32 mss_pdc_mask = BIT(drv->mss_pdc_offset);
+
+	if (drv->pdc_sync) {
+		val = readl_relaxed(drv->pdc_sync);
+		if (pdc_sync)
+			val |= mss_pdc_mask;
+		else
+			val &= ~mss_pdc_mask;
+		writel_relaxed(val, drv->pdc_sync);
+		/* Ensure PDC is written before next write */
+		wmb();
+		udelay(2);
+	}
+}
+
+static void pil_mss_alt_reset(struct q6v5_data *drv, u32 val)
+{
+	if (drv->alt_reset) {
+		writel_relaxed(val, drv->alt_reset);
+		/* Ensure alt reset is written before restart reg */
+		wmb();
+		udelay(2);
+	}
+}
+
+static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
+{
+	int ret = 0;
+	int scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = mss_restart;
+	desc.args[1] = 0;
+	desc.arginfo = SCM_ARGS(2);
+
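+	/*
+	 * A secure restart register must be written through the
+	 * TrustZone SCM interface; otherwise poke it directly.
+	 */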
+	if (drv->restart_reg && !drv->restart_reg_sec) {
+		writel_relaxed(mss_restart, drv->restart_reg);
+		/* Ensure physical address access is done before returning.*/
+		mb();
+		udelay(2);
+	} else if (drv->restart_reg_sec) {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+					MSS_RESTART_ID), &desc);
+		scm_ret = desc.ret[0];
+		if (ret || scm_ret)
+			pr_err("Secure MSS restart failed\n");
+	}
+
+	return ret;
+}
+
+int pil_mss_assert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
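+	/*
+	 * Ordering matters: raise PDC sync and the alternate reset
+	 * before asserting the MSS restart; the deassert path walks
+	 * the same steps in reverse.
+	 */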
+	pil_mss_pdc_sync(drv, true);
+	pil_mss_alt_reset(drv, 1);
+	if (drv->reset_clk) {
+		pil_mss_disable_clks(drv);
+		if (drv->ahb_clk_vote)
+			clk_disable_unprepare(drv->ahb_clk);
+	}
+
+	ret = pil_mss_restart_reg(drv, 1);
+
+	return ret;
+}
+
+int pil_mss_deassert_resets(struct q6v5_data *drv)
+{
+	int ret = 0;
+
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		return ret;
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+
+	if (drv->reset_clk)
+		pil_mss_enable_clks(drv);
+	pil_mss_alt_reset(drv, 0);
+	pil_mss_pdc_sync(drv, false);
+
+	return ret;
+}
+
+static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
+{
+	struct device *dev = drv->desc.dev;
+	int ret;
+	u32 status;
+	u64 val;
+
+	if (of_property_read_bool(dev->of_node, "qcom,minidump-id"))
+		pbl_mba_boot_timeout_ms = MBA_ENCRYPTION_TIMEOUT;
+
+	val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	/* Wait for PBL completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
+				 status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_PBL_SUCCESS) {
+		dev_err(dev, "PBL returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Wait for MBA completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_XPU_UNLOCKED &&
+	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
+		dev_err(dev, "MBA returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int pil_mss_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+
+	if (drv->axi_halt_base) {
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_Q6_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_MODEM_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_NC_HALT_BASE);
+	}
+
+	if (drv->axi_halt_q6)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
+	if (drv->axi_halt_mss)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
+	if (drv->axi_halt_nc)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);
+
+	/*
+	 * Software workaround to avoid high MX current during LPASS/MSS
+	 * restart.
+	 */
+	if (drv->mx_spike_wa && drv->ahb_clk_vote) {
+		ret = clk_prepare_enable(drv->ahb_clk);
+		if (!ret)
+			assert_clamps(pil);
+		else
+			dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
+									ret);
+	}
+
+	pil_mss_pdc_sync(drv, true);
+	/* Wait 6 32kHz sleep cycles for PDC SYNC true */
+	udelay(200);
+	pil_mss_restart_reg(drv, 1);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_restart_reg(drv, 0);
+	/* Wait 6 32kHz sleep cycles for reset false */
+	udelay(200);
+	pil_mss_pdc_sync(drv, false);
+
+	if (drv->is_booted) {
+		pil_mss_disable_clks(drv);
+		pil_mss_power_down(drv);
+		drv->is_booted = false;
+	}
+
+	return ret;
+}
+
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	if (err_path) {
+		writel_relaxed(CMD_PILFAIL_NFY_MBA,
+				drv->rmb_base + RMB_MBA_COMMAND);
+		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status == STATUS_MBA_UNLOCKED || status < 0,
+				POLL_INTERVAL_US, val);
+		if (ret)
+			dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
+									ret);
+		else if (status < 0)
+			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
+						status);
+	}
+
+	ret = pil_mss_shutdown(pil);
+
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	/*
+	 * If a failure prevented the MBA and DP memory from being
+	 * reclaimed earlier, free it here.
+	 */
+	if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+	}
+
+	return ret;
+}
+
+int pil_mss_deinit_image(struct pil_desc *pil)
+{
+	return __pil_mss_deinit_image(pil, true);
+}
+
+int pil_mss_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
+									ret);
+		return ret;
+	}
+
+	ret = regulator_enable(drv->vreg_mx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+		return ret;
+	}
+
+	if (drv->vreg) {
+		ret = of_property_read_u32(pil->dev->of_node, "vdd_mss-uV",
+								&uv);
+		if (ret) {
+			dev_err(pil->dev,
+				"missing vdd_mss-uV property(rc:%d)\n", ret);
+			goto out;
+		}
+
+		ret = regulator_set_voltage(drv->vreg, uv,
+						INT_MAX);
+		if (ret) {
+			dev_err(pil->dev, "Failed to set vreg voltage(rc:%d)\n",
+									ret);
+			goto out;
+		}
+
+		ret = regulator_set_load(drv->vreg, 100000);
+		if (ret < 0) {
+			dev_err(pil->dev, "Failed to set vreg mode(rc:%d)\n",
+									ret);
+			goto out;
+		}
+		ret = regulator_enable(drv->vreg);
+		if (ret) {
+			dev_err(pil->dev, "Failed to enable vreg(rc:%d)\n",
+				ret);
+			regulator_set_voltage(drv->vreg, 0, INT_MAX);
+			goto out;
+		}
+	}
+
+	ret = pil_q6v5_make_proxy_votes(pil);
+	if (ret && drv->vreg) {
+		regulator_disable(drv->vreg);
+		regulator_set_voltage(drv->vreg, 0, INT_MAX);
+	}
+out:
+	if (ret) {
+		regulator_disable(drv->vreg_mx);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+	}
+
+	return ret;
+}
+
+void pil_mss_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	pil_q6v5_remove_proxy_votes(pil);
+	regulator_disable(drv->vreg_mx);
+	regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+	if (drv->vreg) {
+		regulator_disable(drv->vreg);
+		regulator_set_voltage(drv->vreg, 0, INT_MAX);
+	}
+}
+
+static int pil_mss_mem_setup(struct pil_desc *pil,
+					phys_addr_t addr, size_t size)
+{
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	u32 scm_ret = 0;
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (!md->subsys_desc.pil_mss_memsetup)
+		return 0;
+
+	desc.args[0] = md->pas_id;
+	desc.args[1] = addr;
+	desc.args[2] = size;
+	desc.arginfo = SCM_ARGS(3);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+			&desc);
+	scm_ret = desc.ret[0];
+
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_mss_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	phys_addr_t start_addr = pil_get_entry_addr(pil);
+	u32 debug_val = 0;
+	int ret;
+
+	trace_pil_func(__func__);
+	if (drv->mba_dp_phys)
+		start_addr = drv->mba_dp_phys;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_power_up(drv);
+	if (ret)
+		goto err_power;
+
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		goto err_clks;
+
+	if (!pil->minidump_ss || !pil->modem_ssr) {
+		/* Save state of modem debug register before full reset */
+		debug_val = readl_relaxed(drv->reg_base + QDSP6SS_DBG_CFG);
+	}
+
+	/* Assert reset to subsystem */
+	pil_mss_assert_resets(drv);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_deassert_resets(drv);
+	if (ret)
+		goto err_restart;
+
+	if (!pil->minidump_ss || !pil->modem_ssr) {
+		writel_relaxed(debug_val, drv->reg_base + QDSP6SS_DBG_CFG);
+		if (modem_dbg_cfg)
+			writel_relaxed(modem_dbg_cfg,
+				drv->reg_base + QDSP6SS_DBG_CFG);
+	}
+
+	/* Program Image Address */
+	if (drv->self_auth) {
+		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
+		/*
+		 * Ensure write to RMB base occurs before reset
+		 * is released.
+		 */
+		mb();
+	} else {
+		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
+				drv->reg_base + QDSP6SS_RST_EVB);
+	}
+
+	/* Program DP Address */
+	if (drv->dp_size) {
+		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
+			       RMB_PMI_CODE_START);
+		writel_relaxed(drv->dp_size, drv->rmb_base +
+			       RMB_PMI_CODE_LENGTH);
+	} else {
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+	}
+	/* Make sure RMB regs are written before bringing modem out of reset */
+	mb();
+
+	ret = pil_q6v5_reset(pil);
+	if (ret)
+		goto err_q6v5_reset;
+
+	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
+	if (drv->self_auth) {
+		ret = pil_msa_wait_for_mba_ready(drv);
+		if (ret)
+			goto err_q6v5_reset;
+	}
+
+	dev_info(pil->dev, "MBA boot done\n");
+	drv->is_booted = true;
+
+	return 0;
+
+err_q6v5_reset:
+	modem_log_rmb_regs(drv->rmb_base);
+err_restart:
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+err_clks:
+	pil_mss_power_down(drv);
+err_power:
+	return ret;
+}
+
+int pil_mss_reset_load_mba(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	const struct firmware *fw = NULL, *dp_fw = NULL;
+	char fw_name_legacy[10] = "mba.b00";
+	char fw_name[10] = "mba.mbn";
+	char *dp_name = "msadp";
+	char *fw_name_p;
+	void *mba_dp_virt;
+	dma_addr_t mba_dp_phys, mba_dp_phys_end;
+	int ret;
+	const u8 *data;
+	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
+
+	trace_pil_func(__func__);
+	if (drv->mba_dp_virt && md->mba_mem_dev_fixed)
+		goto mss_reset;
+	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
+	ret = request_firmware(&fw, fw_name_p, pil->dev);
+	if (ret) {
+		dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
+						fw_name_p, ret);
+		return ret;
+	}
+
+	data = fw ? fw->data : NULL;
+	if (!data) {
+		dev_err(pil->dev, "MBA data is NULL\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
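+	/*
+	 * Reserve 1MB for the MBA image; the debug policy (DP), if
+	 * present, is appended at the 1MB boundary below.
+	 */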
+	drv->mba_dp_size = SZ_1M;
+
+	arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);
+
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+	md->attrs_dma = 0;
+	md->attrs_dma |= DMA_ATTR_SKIP_ZEROING;
+	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;
+
+	ret = request_firmware(&dp_fw, dp_name, pil->dev);
+	if (ret) {
+		dev_warn(pil->dev, "Debug policy not present - %s. Continue.\n",
+						dp_name);
+	} else {
+		if (!dp_fw || !dp_fw->data) {
+			dev_err(pil->dev, "Invalid DP firmware\n");
+			ret = -ENOMEM;
+			goto err_invalid_fw;
+		}
+		drv->dp_size = dp_fw->size;
+		drv->mba_dp_size += drv->dp_size;
+		drv->mba_dp_size = ALIGN(drv->mba_dp_size, SZ_4K);
+	}
+
+	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
+				   GFP_KERNEL, md->attrs_dma);
+	if (!mba_dp_virt) {
+		dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
+				 __func__, drv->mba_dp_size);
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* Make sure there are no mappings in PKMAP and fixmap */
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	drv->mba_dp_phys = mba_dp_phys;
+	drv->mba_dp_virt = mba_dp_virt;
+	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;
+
+	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
+					&mba_dp_phys, &mba_dp_phys_end);
+
+	/* Load the MBA image into memory */
+	if (fw->size <= SZ_1M) {
+		/* Only copy the MBA image if it fits in the 1MB region */
+		memcpy(mba_dp_virt, data, fw->size);
+	} else {
+		dev_err(pil->dev, "%s: MBA image size %zu exceeds the 1MB region\n",
+			__func__, fw->size);
+		ret = -EINVAL;
+		goto err_mba_data;
+	}
+	/* Ensure memcpy of the MBA memory is done before loading the DP */
+	wmb();
+
+	/* Load the DP image into memory */
+	if (drv->mba_dp_size > SZ_1M) {
+		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
+		/* Ensure memcpy is done before powering up modem */
+		wmb();
+	}
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+		if (ret) {
+			pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
+									ret);
+			goto err_mba_data;
+		}
+	}
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+	dp_fw = NULL;
+	fw = NULL;
+
+mss_reset:
+	ret = pil_mss_reset(pil);
+	if (ret) {
+		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
+		goto err_mss_reset;
+	}
+
+	return 0;
+
+err_mss_reset:
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+err_mba_data:
+	dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
+				drv->mba_dp_phys, md->attrs_dma);
+err_invalid_fw:
+	if (dp_fw)
+		release_firmware(dp_fw);
+	if (fw)
+		release_firmware(fw);
+	drv->mba_dp_virt = NULL;
+	return ret;
+}
+
+int pil_mss_debug_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 encryption_status;
+	int ret;
+
+	if (!pil->minidump_ss)
+		return 0;
+
+	encryption_status = pil->minidump_ss->encryption_status;
+
+	if (pil->minidump_ss->md_ss_enable_status != MD_SS_ENABLED ||
+	    encryption_status == MD_SS_ENCR_NOTREQ)
+		return 0;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		return ret;
+
+	if (pil->minidump_ss) {
+		writel_relaxed(0x1, drv->reg_base + QDSP6SS_NMI_CFG);
+		/* Let write complete before proceeding */
+		mb();
+		udelay(2);
+	}
+	/* Assert reset to subsystem */
+	pil_mss_restart_reg(drv, 1);
+	/* Wait 6 32kHz sleep cycles for reset */
+	udelay(200);
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		goto err_restart;
+	/* Let write complete before proceeding */
+	mb();
+	udelay(200);
+	ret = pil_q6v5_reset(pil);
+	/*
+	 * Wait long enough for the debug reset sequence (minidump
+	 * encryption) to complete before returning.
+	 */
+	pr_info("Minidump: waiting encryption to complete\n");
+	msleep(13000);
+	if (pil->minidump_ss) {
+		writel_relaxed(0x2, drv->reg_base + QDSP6SS_NMI_CFG);
+		/* Let write complete before proceeding */
+		mb();
+		udelay(200);
+	}
+	if (ret)
+		goto err_restart;
+	return 0;
+err_restart:
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+	return ret;
+}
+
+static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
+				  size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	void *mdata_virt;
+	dma_addr_t mdata_phys;
+	s32 status;
+	int ret;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	unsigned long attrs = 0;
+
+	trace_pil_func(__func__);
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	attrs |= DMA_ATTR_SKIP_ZEROING;
+	attrs |= DMA_ATTR_STRONGLY_ORDERED;
+	/* Make metadata physically contiguous and 4K aligned. */
+	mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys,
+					GFP_KERNEL, attrs);
+	if (!mdata_virt) {
+		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memcpy(mdata_virt, metadata, size);
+	/* wmb() ensures copy completes prior to starting authentication. */
+	wmb();
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
+							ALIGN(size, SZ_4K));
+		if (ret) {
+			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
+									ret);
+			dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
+									attrs);
+			goto fail;
+		}
+	}
+
+	/* Initialize length counter to 0 */
+	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Pass address of meta-data to the MBA and perform authentication */
+	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
+	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+			status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
+			POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
+								ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for headers\n",
+				status);
+		ret = -EINVAL;
+	}
+
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
+
+	dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, attrs);
+
+	if (!ret)
+		return ret;
+
+fail:
+	modem_log_rmb_regs(drv->rmb_base);
+	if (drv->q6) {
+		pil_mss_shutdown(pil);
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+			drv->q6->mba_dp_virt = NULL;
+		}
+	}
+	return ret;
+}
+
+static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
+				const u8 *metadata, size_t size)
+{
+	int ret;
+
+	ret = pil_mss_reset_load_mba(pil);
+	if (ret)
+		return ret;
+
+	return pil_msa_auth_modem_mdt(pil, metadata, size);
+}
+
+static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
+				   size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	s32 status;
+	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
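+	/*
+	 * The MBA authenticates the image as it streams in: the first
+	 * blob programs the load address and issues LOAD_READY; every
+	 * subsequent blob only advances the length counter.
+	 */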
+	/* Begin image authentication */
+	if (img_length == 0) {
+		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	}
+	/* Increment length counter */
+	img_length += size;
+	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
+	if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d\n", status);
+		modem_log_rmb_regs(drv->rmb_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pil_msa_mba_auth(struct pil_desc *pil)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+
+	/* Wait for all segments to be authenticated or an error to occur */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+		status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
+									ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for image\n", status);
+		ret = -EINVAL;
+	}
+
+	if (drv->q6) {
+		if (drv->q6->mba_dp_virt && !drv->mba_mem_dev_fixed) {
+			/* Reclaim MBA and DP (if allocated) memory. */
+			if (pil->subsys_vmid > 0)
+				pil_assign_mem_to_linux(pil,
+					drv->q6->mba_dp_phys,
+					drv->q6->mba_dp_size);
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+
+			drv->q6->mba_dp_virt = NULL;
+		}
+	}
+	if (ret)
+		modem_log_rmb_regs(drv->rmb_base);
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	return ret;
+}
+
+/*
+ * To be used only if self-auth is disabled, or if the
+ * MBA image is loaded as segments and not in init_image.
+ */
+struct pil_reset_ops pil_msa_mss_ops = {
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.auth_and_reset = pil_mss_reset,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if self-auth is enabled and the MBA is to be loaded
+ * in init_image and the modem headers are also to be authenticated
+ * in init_image. Modem segments authenticated in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_mss_ops_selfauth = {
+	.init_image = pil_msa_mss_reset_mba_load_auth_mdt,
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.mem_setup = pil_mss_mem_setup,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+	.deinit_image = pil_mss_deinit_image,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if the modem headers are to be authenticated
+ * in init_image, and the modem segments in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_femto_mba_ops = {
+	.init_image = pil_msa_auth_modem_mdt,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+};
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
new file mode 100644
index 0000000..0310234
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_PIL_MSA_H
+#define __MSM_PIL_MSA_H
+
+#include <soc/qcom/subsystem_restart.h>
+
+#include "peripheral-loader.h"
+
+struct modem_data {
+	struct q6v5_data *q6;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	void *ramdump_dev;
+	void *minidump_dev;
+	bool crash_shutdown;
+	u32 pas_id;
+	bool ignore_errors;
+	struct completion stop_ack;
+	void __iomem *rmb_base;
+	struct clk *xo;
+	struct pil_desc desc;
+	struct device mba_mem_dev;
+	struct device *mba_mem_dev_fixed;
+	unsigned long attrs_dma;
+};
+
+extern struct pil_reset_ops pil_msa_mss_ops;
+extern struct pil_reset_ops pil_msa_mss_ops_selfauth;
+extern struct pil_reset_ops pil_msa_femto_mba_ops;
+
+int pil_mss_reset_load_mba(struct pil_desc *pil);
+int pil_mss_make_proxy_votes(struct pil_desc *pil);
+void pil_mss_remove_proxy_votes(struct pil_desc *pil);
+int pil_mss_shutdown(struct pil_desc *pil);
+int pil_mss_deinit_image(struct pil_desc *pil);
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path);
+int pil_mss_assert_resets(struct q6v5_data *drv);
+int pil_mss_deassert_resets(struct q6v5_data *drv);
+int pil_mss_debug_reset(struct pil_desc *pil);
+#endif
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
new file mode 100644
index 0000000..db48b1a
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+#define PROXY_TIMEOUT_MS	10000
+#define MAX_SSR_REASON_LEN	256U
+#define STOP_ACK_TIMEOUT_MS	1000
+
+#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
+
+static void log_modem_sfr(struct modem_data *drv)
+{
+	size_t size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+
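+	/* An smem_id of -1 means the DT did not provide qcom,smem-id */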
+	if (drv->q6->smem_id == -1)
+		return;
+
+	smem_reason = qcom_smem_get(QCOM_SMEM_HOST_ANY, drv->q6->smem_id,
+								&size);
+	if (IS_ERR(smem_reason) || !size) {
+		pr_err("modem SFR: (unknown, qcom_smem_get failed).\n");
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("modem SFR: (unknown, empty string found).\n");
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, (size_t)MAX_SSR_REASON_LEN));
+	pr_err("modem subsystem failure reason: %s.\n", reason);
+}
+
+static void restart_modem(struct modem_data *drv)
+{
+	log_modem_sfr(drv);
+	drv->ignore_errors = true;
+	subsystem_restart_dev(drv->subsys);
+}
+
+static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	/* Ignore if we're the one that set the force-stop bit */
+	if (drv->crash_shutdown)
+		return IRQ_HANDLED;
+
+	pr_err("Fatal error on the modem.\n");
+	subsys_set_crash_status(drv->subsys, CRASH_STATUS_ERR_FATAL);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t modem_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received stop ack interrupt from modem\n");
+	complete(&drv->stop_ack);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t modem_shutdown_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received stop shutdown interrupt from modem\n");
+	complete_shutdown_ack(drv->subsys);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t modem_ramdump_disable_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received ramdump disable interrupt from modem\n");
+	drv->subsys_desc.ramdump_disable = 1;
+	return IRQ_HANDLED;
+}
+
+static int modem_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	unsigned long ret;
+
+	if (subsys->is_not_loadable)
+		return 0;
+
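+	/*
+	 * Ask the modem to stop via the SMP2P force-stop bit and give
+	 * it a bounded window to ack before shutting it down anyway.
+	 */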
+	if (!subsys_get_crash_status(drv->subsys) && force_stop &&
+	    subsys->force_stop_bit) {
+		qcom_smem_state_update_bits(subsys->state,
+				BIT(subsys->force_stop_bit), 1);
+		ret = wait_for_completion_timeout(&drv->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from modem.\n");
+		qcom_smem_state_update_bits(subsys->state,
+				BIT(subsys->force_stop_bit), 0);
+	}
+
+	if (drv->subsys_desc.ramdump_disable_irq) {
+		pr_warn("Ramdump disable value is %d\n",
+			drv->subsys_desc.ramdump_disable);
+	}
+
+	pil_shutdown(&drv->q6->desc);
+
+	return 0;
+}
+
+static int modem_powerup(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	if (subsys->is_not_loadable)
+		return 0;
+	/*
+	 * At this time, the modem is shut down. Therefore this function cannot
+	 * run concurrently with the watchdog bite error handler, making it safe
+	 * to unset the flag below.
+	 */
+	reinit_completion(&drv->stop_ack);
+	drv->subsys_desc.ramdump_disable = 0;
+	drv->ignore_errors = false;
+	drv->q6->desc.fw_name = subsys->fw_name;
+	return pil_boot(&drv->q6->desc);
+}
+
+static void modem_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	drv->crash_shutdown = true;
+	if (!subsys_get_crash_status(drv->subsys) &&
+		subsys->force_stop_bit) {
+		qcom_smem_state_update_bits(subsys->state,
+				BIT(subsys->force_stop_bit), 1);
+		msleep(STOP_ACK_TIMEOUT_MS);
+	}
+}
+
+static int modem_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	int ret;
+
+	if (!enable)
+		return 0;
+
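+	/*
+	 * Run the debug reset first so an encrypted minidump can be
+	 * captured, then bring the MBA back up before collecting the
+	 * full ramdump.
+	 */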
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_debug_reset(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_reset_load_mba(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_do_ramdump(&drv->q6->desc,
+			drv->ramdump_dev, drv->minidump_dev);
+	if (ret < 0)
+		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
+
+	ret = __pil_mss_deinit_image(&drv->q6->desc, false);
+	if (ret < 0)
+		pr_err("Unable to free up resources (rc = %d).\n", ret);
+
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	return ret;
+}
+
+static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	if (drv->ignore_errors)
+		return IRQ_HANDLED;
+
+	pr_err("Watchdog bite received from modem software!\n");
+	if (drv->subsys_desc.system_debug)
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(drv->subsys, CRASH_STATUS_WDOG_BITE);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+static int pil_subsys_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+
+	drv->subsys_desc.name = "modem";
+	drv->subsys_desc.dev = &pdev->dev;
+	drv->subsys_desc.owner = THIS_MODULE;
+	drv->subsys_desc.shutdown = modem_shutdown;
+	drv->subsys_desc.powerup = modem_powerup;
+	drv->subsys_desc.ramdump = modem_ramdump;
+	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
+	drv->subsys_desc.err_fatal_handler = modem_err_fatal_intr_handler;
+	drv->subsys_desc.stop_ack_handler = modem_stop_ack_intr_handler;
+	drv->subsys_desc.wdog_bite_handler = modem_wdog_bite_intr_handler;
+	drv->subsys_desc.ramdump_disable_handler =
+					modem_ramdump_disable_intr_handler;
+	drv->subsys_desc.shutdown_ack_handler = modem_shutdown_ack_intr_handler;
+
+	if (IS_ERR_OR_NULL(drv->q6)) {
+		ret = drv->q6 ? PTR_ERR(drv->q6) : -EINVAL;
+		dev_err(&pdev->dev, "PIL q6 data is invalid: %pK (%d)\n",
+			drv->q6, ret);
+		goto err_subsys;
+	}
+
+	drv->q6->desc.modem_ssr = false;
+	drv->q6->desc.signal_aop = of_property_read_bool(pdev->dev.of_node,
+						"qcom,signal-aop");
+	if (drv->q6->desc.signal_aop) {
+		drv->q6->desc.cl.dev = &pdev->dev;
+		drv->q6->desc.cl.tx_block = true;
+		drv->q6->desc.cl.tx_tout = 1000;
+		drv->q6->desc.cl.knows_txdone = false;
+		drv->q6->desc.mbox = mbox_request_channel(&drv->q6->desc.cl, 0);
+		if (IS_ERR(drv->q6->desc.mbox)) {
+			ret = PTR_ERR(drv->q6->desc.mbox);
+			dev_err(&pdev->dev, "Failed to get mailbox channel %pK %d\n",
+				drv->q6->desc.mbox, ret);
+			goto err_subsys;
+		}
+	}
+
+	drv->subsys = subsys_register(&drv->subsys_desc);
+	if (IS_ERR(drv->subsys)) {
+		ret = PTR_ERR(drv->subsys);
+		goto err_subsys;
+	}
+
+	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
+	if (!drv->ramdump_dev) {
+		pr_err("%s: Unable to create a modem ramdump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_ramdump;
+	}
+	drv->minidump_dev = create_ramdump_device("md_modem", &pdev->dev);
+	if (!drv->minidump_dev) {
+		pr_err("%s: Unable to create a modem minidump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_minidump;
+	}
+
+	return 0;
+
+err_minidump:
+	destroy_ramdump_device(drv->ramdump_dev);
+err_ramdump:
+	subsys_unregister(drv->subsys);
+err_subsys:
+	return ret;
+}
+
+static int pil_mss_loadable_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	struct q6v5_data *q6;
+	struct pil_desc *q6_desc;
+	struct resource *res;
+	struct property *prop;
+	int ret;
+
+	q6 = pil_q6v5_init(pdev);
+	if (IS_ERR_OR_NULL(q6))
+		return PTR_ERR(q6);
+	drv->q6 = q6;
+	drv->xo = q6->xo;
+
+	q6_desc = &q6->desc;
+	q6_desc->owner = THIS_MODULE;
+	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
+
+	q6_desc->ops = &pil_msa_mss_ops;
+
+	q6_desc->sequential_loading = of_property_read_bool(pdev->dev.of_node,
+						"qcom,sequential-fw-load");
+	q6->reset_clk = of_property_read_bool(pdev->dev.of_node,
+							"qcom,reset-clk");
+	q6->self_auth = of_property_read_bool(pdev->dev.of_node,
+							"qcom,pil-self-auth");
+	if (q6->self_auth) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						    "rmb_base");
+		q6->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(q6->rmb_base))
+			return PTR_ERR(q6->rmb_base);
+		drv->rmb_base = q6->rmb_base;
+		q6_desc->ops = &pil_msa_mss_ops_selfauth;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
+	if (!res) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"restart_reg_sec");
+		if (!res) {
+			dev_err(&pdev->dev, "No restart register defined\n");
+			return -ENOMEM;
+		}
+		q6->restart_reg_sec = true;
+	}
+
+	q6->restart_reg = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	if (!q6->restart_reg)
+		return -ENOMEM;
+
+	q6->pdc_sync = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pdc_sync");
+	if (res) {
+		q6->pdc_sync = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+		if (of_property_read_u32(pdev->dev.of_node,
+			"qcom,mss_pdc_offset", &q6->mss_pdc_offset)) {
+			dev_err(&pdev->dev,
+				"Offset for MSS PDC not specified\n");
+			return -EINVAL;
+		}
+	}
+
+	q6->alt_reset = NULL;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "alt_reset");
+	if (res) {
+		q6->alt_reset = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	}
+
+	q6->vreg = NULL;
+
+	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
+	if (prop) {
+		q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
+		if (IS_ERR(q6->vreg))
+			return PTR_ERR(q6->vreg);
+	}
+
+	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(q6->vreg_mx))
+		return PTR_ERR(q6->vreg_mx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_mx-uV", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_mx-uV property\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		"cxrail_bhs_reg");
+	if (res)
+		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
+					  resource_size(res));
+
+	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(q6->ahb_clk))
+		return PTR_ERR(q6->ahb_clk);
+
+	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(q6->axi_clk))
+		return PTR_ERR(q6->axi_clk);
+
+	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
+	if (IS_ERR(q6->rom_clk))
+		return PTR_ERR(q6->rom_clk);
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,pas-id", &drv->pas_id);
+	if (ret)
+		dev_info(&pdev->dev, "No pas_id found.\n");
+
+	drv->subsys_desc.pil_mss_memsetup =
+	of_property_read_bool(pdev->dev.of_node, "qcom,pil-mss-memsetup");
+
+	/* Optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "gpll0_mss_clk") >= 0)
+		q6->gpll0_mss_clk = devm_clk_get(&pdev->dev, "gpll0_mss_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "snoc_axi_clk") >= 0)
+		q6->snoc_axi_clk = devm_clk_get(&pdev->dev, "snoc_axi_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "mnoc_axi_clk") >= 0)
+		q6->mnoc_axi_clk = devm_clk_get(&pdev->dev, "mnoc_axi_clk");
+
+	/* Defaulting smem_id to be not present */
+	q6->smem_id = -1;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,smem-id", NULL)) {
+		ret = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
+					   &q6->smem_id);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to get the smem_id(ret:%d)\n",
+				ret);
+			return ret;
+		}
+	}
+
+	ret = pil_desc_init(q6_desc);
+
+	return ret;
+}
+
+static int pil_mss_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+	int ret, is_not_loadable;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	is_not_loadable = of_property_read_bool(pdev->dev.of_node,
+							"qcom,is-not-loadable");
+	if (is_not_loadable) {
+		drv->subsys_desc.is_not_loadable = 1;
+	} else {
+		ret = pil_mss_loadable_init(drv, pdev);
+		if (ret)
+			return ret;
+	}
+	init_completion(&drv->stop_ack);
+
+	/* Probe the MBA mem device if present */
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret)
+		return ret;
+
+	return pil_subsys_init(drv, pdev);
+}
+
+static int pil_mss_driver_exit(struct platform_device *pdev)
+{
+	struct modem_data *drv = platform_get_drvdata(pdev);
+
+	subsys_unregister(drv->subsys);
+	destroy_ramdump_device(drv->ramdump_dev);
+	destroy_ramdump_device(drv->minidump_dev);
+	pil_desc_release(&drv->q6->desc);
+	return 0;
+}
+
+static int pil_mba_mem_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+
+	if (!pdev->dev.parent) {
+		pr_err("No parent found.\n");
+		return -EINVAL;
+	}
+	drv = dev_get_drvdata(pdev->dev.parent);
+	drv->mba_mem_dev_fixed = &pdev->dev;
+	return 0;
+}
+
+static const struct of_device_id mba_mem_match_table[] = {
+	{ .compatible = "qcom,pil-mba-mem" },
+	{}
+};
+
+static struct platform_driver pil_mba_mem_driver = {
+	.probe = pil_mba_mem_driver_probe,
+	.driver = {
+		.name = "pil-mba-mem",
+		.of_match_table = mba_mem_match_table,
+	},
+};
+
+static const struct of_device_id mss_match_table[] = {
+	{ .compatible = "qcom,pil-q6v5-mss" },
+	{ .compatible = "qcom,pil-q6v55-mss" },
+	{ .compatible = "qcom,pil-q6v56-mss" },
+	{}
+};
+
+static struct platform_driver pil_mss_driver = {
+	.probe = pil_mss_driver_probe,
+	.remove = pil_mss_driver_exit,
+	.driver = {
+		.name = "pil-q6v5-mss",
+		.of_match_table = mss_match_table,
+	},
+};
+
+static int __init pil_mss_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&pil_mba_mem_driver);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&pil_mss_driver);
+	if (ret)
+		platform_driver_unregister(&pil_mba_mem_driver);
+	return ret;
+}
+module_init(pil_mss_init);
+
+static void __exit pil_mss_exit(void)
+{
+	platform_driver_unregister(&pil_mss_driver);
+	platform_driver_unregister(&pil_mba_mem_driver);
+}
+module_exit(pil_mss_exit);
+
+MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
new file mode 100644
index 0000000..208e327
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -0,0 +1,848 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <trace/events/trace_msm_pil_event.h>
+
+#include "peripheral-loader.h"
+#include "pil-msa.h"
+#include "pil-q6v5.h"
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET			0x014
+#define QDSP6SS_GFMUX_CTL		0x020
+#define QDSP6SS_PWR_CTL			0x030
+#define QDSP6V6SS_MEM_PWR_CTL		0x034
+#define QDSP6SS_BHS_STATUS		0x078
+#define QDSP6SS_MEM_PWR_CTL		0x0B0
+#define QDSP6SS_STRAP_ACC		0x110
+#define QDSP6V62SS_BHS_STATUS		0x0C4
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ			0x0
+#define AXI_HALTACK			0x4
+#define AXI_IDLE			0x8
+
+#define HALT_ACK_TIMEOUT_US		100000
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE			BIT(0)
+#define Q6SS_CORE_ARES			BIT(1)
+#define Q6SS_BUS_ARES_ENA		BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENA			BIT(1)
+#define Q6SS_CLK_SRC_SEL_C		BIT(3)
+#define Q6SS_CLK_SRC_SEL_FIELD		0xC
+#define Q6SS_CLK_SRC_SWITCH_CLK_OVR	BIT(8)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
+#define Q6SS_ETB_SLP_NRET_N		BIT(17)
+#define Q6SS_L2DATA_STBY_N		BIT(18)
+#define Q6SS_SLP_RET_N			BIT(19)
+#define Q6SS_CLAMP_IO			BIT(20)
+#define QDSS_BHS_ON			BIT(21)
+#define QDSS_LDO_BYP			BIT(22)
+
+/* QDSP6v55 parameters */
+#define QDSP6v55_LDO_ON                 BIT(26)
+#define QDSP6v55_LDO_BYP                BIT(25)
+#define QDSP6v55_BHS_ON                 BIT(24)
+#define QDSP6v55_CLAMP_WL               BIT(21)
+#define QDSP6v55_CLAMP_QMC_MEM          BIT(22)
+#define L1IU_SLP_NRET_N                 BIT(15)
+#define L1DU_SLP_NRET_N                 BIT(14)
+#define L2PLRU_SLP_NRET_N               BIT(13)
+#define QDSP6v55_BHS_EN_REST_ACK        BIT(0)
+
+#define HALT_CHECK_MAX_LOOPS            (200)
+#define BHS_CHECK_MAX_LOOPS             (200)
+#define QDSP6SS_XO_CBCR                 (0x0038)
+
+/* QDSP6v65 parameters */
+#define QDSP6SS_BOOT_CORE_START		(0x400)
+#define QDSP6SS_BOOT_CMD		(0x404)
+#define MSS_STATUS			(0x40)
+#define QDSP6SS_SLEEP			(0x3C)
+#define SLEEP_CHECK_MAX_LOOPS		(200)
+#define BOOT_FSM_TIMEOUT		(10000)
+
+#define QDSP6SS_ACC_OVERRIDE_VAL	0x20
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+								ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(drv->xo);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for XO(rc:%d)\n", ret);
+		goto out;
+	}
+
+	ret = clk_prepare_enable(drv->pnoc_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for pnoc(rc:%d)\n", ret);
+		goto err_pnoc_vote;
+	}
+
+	ret = clk_prepare_enable(drv->qdss_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for qdss(rc:%d)\n", ret);
+		goto err_qdss_vote;
+	}
+
+	ret = clk_prepare_enable(drv->prng_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for prng(rc:%d)\n", ret);
+		goto err_prng_vote;
+	}
+
+	ret = clk_prepare_enable(drv->axis2_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for axis2(rc:%d)\n", ret);
+		goto err_axis2_vote;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_cx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vdd_cx voltage(rc:%d)\n",
+								ret);
+		goto err_cx_voltage;
+	}
+
+	ret = regulator_set_load(drv->vreg_cx, 100000);
+	if (ret < 0) {
+		dev_err(pil->dev, "Failed to set vdd_cx mode(rc:%d)\n", ret);
+		goto err_cx_mode;
+	}
+
+	ret = regulator_enable(drv->vreg_cx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for vdd_cx(rc:%d)\n", ret);
+		goto err_cx_enable;
+	}
+
+	if (drv->vreg_pll) {
+		ret = regulator_enable(drv->vreg_pll);
+		if (ret) {
+			dev_err(pil->dev, "Failed to vote for vdd_pll(rc:%d)\n",
+									ret);
+			goto err_vreg_pll;
+		}
+	}
+
+	return 0;
+
+err_vreg_pll:
+	regulator_disable(drv->vreg_cx);
+err_cx_enable:
+	regulator_set_load(drv->vreg_cx, 0);
+err_cx_mode:
+	regulator_set_voltage(drv->vreg_cx, 0, INT_MAX);
+err_cx_voltage:
+	clk_disable_unprepare(drv->axis2_clk);
+err_axis2_vote:
+	clk_disable_unprepare(drv->prng_clk);
+err_prng_vote:
+	clk_disable_unprepare(drv->qdss_clk);
+err_qdss_vote:
+	clk_disable_unprepare(drv->pnoc_clk);
+err_pnoc_vote:
+	clk_disable_unprepare(drv->xo);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(pil_q6v5_make_proxy_votes);
+
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv, ret = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+									ret);
+		return;
+	}
+
+	if (drv->vreg_pll) {
+		regulator_disable(drv->vreg_pll);
+		regulator_set_load(drv->vreg_pll, 0);
+	}
+	regulator_disable(drv->vreg_cx);
+	regulator_set_load(drv->vreg_cx, 0);
+	regulator_set_voltage(drv->vreg_cx, 0, INT_MAX);
+	clk_disable_unprepare(drv->xo);
+	clk_disable_unprepare(drv->pnoc_clk);
+	clk_disable_unprepare(drv->qdss_clk);
+	clk_disable_unprepare(drv->prng_clk);
+	clk_disable_unprepare(drv->axis2_clk);
+}
+EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
+
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
+{
+	int ret;
+	u32 status;
+
+	/* Assert halt request */
+	writel_relaxed(1, halt_base + AXI_HALTREQ);
+
+	/* Wait for halt */
+	ret = readl_poll_timeout(halt_base + AXI_HALTACK,
+		status, status != 0, 50, HALT_ACK_TIMEOUT_US);
+	if (ret)
+		dev_warn(pil->dev, "Port %pK halt timeout\n", halt_base);
+	else if (!readl_relaxed(halt_base + AXI_IDLE))
+		dev_warn(pil->dev, "Port %pK halt failed\n", halt_base);
+
+	/* Clear halt request (port will remain halted until reset) */
+	writel_relaxed(0, halt_base + AXI_HALTREQ);
+}
+EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
+
+void assert_clamps(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/*
+	 * Assert QDSP6 I/O clamp, memory wordline clamp, and compiler memory
+	 * clamp as a software workaround to avoid high MX current during
+	 * LPASS/MSS restart.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= (Q6SS_CLAMP_IO | QDSP6v55_CLAMP_WL |
+			QDSP6v55_CLAMP_QMC_MEM);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	/* Make sure asserting clamps is done before MSS restart */
+	mb();
+}
+
+static void __pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/* Turn off core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val &= ~Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Clamp IO */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Turn off Q6 memories */
+	val &= ~(Q6SS_L2DATA_SLP_NRET_N_0 | Q6SS_L2DATA_SLP_NRET_N_1 |
+		 Q6SS_L2DATA_SLP_NRET_N_2 | Q6SS_SLP_RET_N |
+		 Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
+		 Q6SS_L2DATA_STBY_N);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Assert Q6 resets */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Kill power at block headswitch */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSS_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+}
+
+void pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	if (drv->qdsp6v55) {
+		/* Subsystem driver expected to halt bus and assert reset */
+		return;
+	}
+	__pil_q6v5_shutdown(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_shutdown);
+
+static int __pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Ensure physical memory access is done */
+	mb();
+	udelay(1);
+
+	/*
+	 * Turn on memories. L2 banks should be done individually
+	 * to minimize inrush current.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+	       Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_2;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_1;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_0;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_CORE_ARES;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+
+	/* Need a different clock source for v5.2.0 */
+	if (drv->qdsp6v5_2_0) {
+		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
+		val |= Q6SS_CLK_SRC_SEL_C;
+	}
+
+	/* force clock on during source switch */
+	if (drv->qdsp6v56)
+		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
+
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Start core execution */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_STOP_CORE;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	return 0;
+}
+
+static int q6v55_branch_clk_enable(struct q6v5_data *drv)
+{
+	u32 val, count;
+	void __iomem *cbcr_reg = drv->reg_base + QDSP6SS_XO_CBCR;
+
+	val = readl_relaxed(cbcr_reg);
+	val |= 0x1;
+	writel_relaxed(val, cbcr_reg);
+
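+	/*
+	 * CLK_OFF is bit 31 in the standard CBCR layout; it clears once
+	 * the branch clock is actually running, which is what the poll
+	 * below waits for.
+	 */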
+	for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+		val = readl_relaxed(cbcr_reg);
+		if (!(val & BIT(31)))
+			return 0;
+		udelay(1);
+	}
+
+	dev_err(drv->desc.dev, "Failed to enable xo branch clock.\n");
+	return -EINVAL;
+}
+
+static int __pil_q6v65_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val, count;
+	int ret;
+
+	val = readl_relaxed(drv->reg_base + QDSP6SS_SLEEP);
+	val |= 0x1;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_SLEEP);
+	for (count = SLEEP_CHECK_MAX_LOOPS; count > 0; count--) {
+		val = readl_relaxed(drv->reg_base + QDSP6SS_SLEEP);
+		if (!(val & BIT(31)))
+			break;
+		udelay(1);
+	}
+
+	if (!count) {
+		dev_err(drv->desc.dev, "Sleep clock did not come on in time\n");
+		return -ETIMEDOUT;
+	}
+
+	/* De-assert QDSP6 stop core */
+	writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CORE_START);
+	/* De-assert stop core before starting boot FSM */
+	mb();
+	/* Trigger boot FSM */
+	writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CMD);
+
+	/* Wait for boot FSM to complete */
+	ret = readl_poll_timeout(drv->rmb_base + MSS_STATUS, val,
+			(val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
+
+	if (ret) {
+		dev_err(drv->desc.dev, "Boot FSM failed to complete.\n");
+		/* Reset the modem so that boot FSM is in reset state */
+		pil_mss_assert_resets(drv);
+		/* Wait 6 32kHz sleep cycles for reset */
+		udelay(200);
+		pil_mss_deassert_resets(drv);
+	}
+
+	return ret;
+}
+
+static int __pil_q6v55_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+	int i;
+
+	trace_pil_func(__func__);
+	/* Override the ACC value if required */
+	if (drv->override_acc)
+		writel_relaxed(QDSP6SS_ACC_OVERRIDE_VAL,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Override the ACC value with input value */
+	if (!of_property_read_u32(pil->dev->of_node, "qcom,override-acc-1",
+				&drv->override_acc_1))
+		writel_relaxed(drv->override_acc_1,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* BHS requires the XO CBCR to be enabled */
+	i = q6v55_branch_clk_enable(drv);
+	if (i)
+		return i;
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSP6v55_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Ensure physical memory access is done */
+	mb();
+	udelay(1);
+
+	if (drv->qdsp6v62_1_2 || drv->qdsp6v62_1_5 || drv->qdsp6v62_1_4) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6V62SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (drv->qdsp6v61_1_1) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	/* Put LDO in bypass mode */
+	val |= QDSP6v55_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
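+	/*
+	 * The memory power-up order below differs per QDSP6 revision; each
+	 * branch raises the memory power-control bits one at a time to
+	 * limit inrush current on the freshly enabled headswitch.
+	 */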
+	if (drv->qdsp6v56_1_3) {
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2 and ETB memories 1 at a time */
+		for (i = 17; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_5 || drv->qdsp6v56_1_8
+					|| drv->qdsp6v56_1_10) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			val |= readl_relaxed(drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_8_inrush_current) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 6; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+
+		for (i = 0 ; i <= 5 ; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v61_1_1 || drv->qdsp6v62_1_2 ||
+			drv->qdsp6v62_1_4 || drv->qdsp6v62_1_5) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base +
+				QDSP6V6SS_MEM_PWR_CTL);
+
+		if (drv->qdsp6v62_1_4 || drv->qdsp6v62_1_5)
+			i = 29;
+		else
+			i = 28;
+
+		for (; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+					QDSP6V6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else {
+		/* Turn on memories. */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= 0xFFF00;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L2 banks 1 at a time */
+		for (i = 0; i <= 7; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+		}
+	}
+
+	/* Remove word line clamp */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSP6v55_CLAMP_WL;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~(Q6SS_CORE_ARES | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	return 0;
+}
+
+int pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	if (drv->qdsp6v65_1_0)
+		return __pil_q6v65_reset(pil);
+	else if (drv->qdsp6v55)
+		return __pil_q6v55_reset(pil);
+	else
+		return __pil_q6v5_reset(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_reset);
+
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
+{
+	struct q6v5_data *drv;
+	struct resource *res;
+	struct pil_desc *desc;
+	struct property *prop;
+	int ret, vdd_pll;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return ERR_PTR(-ENOMEM);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(drv->reg_base))
+		return drv->reg_base;
+
+	desc = &drv->desc;
+	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &desc->name);
+	if (ret)
+		return ERR_PTR(ret);
+
+	desc->clear_fw_region = false;
+	desc->dev = &pdev->dev;
+
+	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
+						   "qcom,pil-femto-modem");
+
+	if (drv->qdsp6v5_2_0)
+		return drv;
+
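+	/*
+	 * Halt registers come from one of two devicetree layouts (an
+	 * illustrative sketch; names taken from the lookups below): either
+	 * a combined "halt_base", or the split "halt_q6"/"halt_modem"/
+	 * "halt_nc" trio.
+	 */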
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+	if (res) {
+		drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (!drv->axi_halt_base) {
+			dev_err(&pdev->dev, "Failed to map axi_halt_base.\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if (!drv->axi_halt_base) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_q6");
+		if (res) {
+			drv->axi_halt_q6 = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_q6) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_q6.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_modem");
+		if (res) {
+			drv->axi_halt_mss = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_mss) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_mss.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_nc");
+		if (res) {
+			drv->axi_halt_nc = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_nc) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_nc.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+	}
+
+	if (!(drv->axi_halt_base || (drv->axi_halt_q6 && drv->axi_halt_mss
+					&& drv->axi_halt_nc))) {
+		dev_err(&pdev->dev, "halt bases for Q6 are not defined.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v55-mss");
+	drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v56-mss");
+
+	drv->qdsp6v56_1_3 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-3");
+	drv->qdsp6v56_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-5");
+
+	drv->qdsp6v56_1_8 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8");
+	drv->qdsp6v56_1_10 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-10");
+
+	drv->qdsp6v56_1_8_inrush_current = of_property_read_bool(
+						pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8-inrush-current");
+
+	drv->qdsp6v61_1_1 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v61-1-1");
+
+	drv->qdsp6v62_1_2 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-2");
+
+	drv->qdsp6v62_1_4 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-4");
+
+	drv->qdsp6v62_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-5");
+
+	drv->qdsp6v65_1_0 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v65-1-0");
+
+	drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mba-image-is-not-elf");
+
+	drv->override_acc = of_property_read_bool(pdev->dev.of_node,
+						"qcom,override-acc");
+
+	drv->ahb_clk_vote = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ahb-clk-vote");
+	drv->mx_spike_wa = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mx-spike-wa");
+
+	drv->xo = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(drv->xo))
+		return ERR_CAST(drv->xo);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pnoc-clk-vote")) {
+		drv->pnoc_clk = devm_clk_get(&pdev->dev, "pnoc_clk");
+		if (IS_ERR(drv->pnoc_clk))
+			return ERR_CAST(drv->pnoc_clk);
+	} else {
+		drv->pnoc_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "qdss_clk") >= 0) {
+		drv->qdss_clk = devm_clk_get(&pdev->dev, "qdss_clk");
+		if (IS_ERR(drv->qdss_clk))
+			return ERR_CAST(drv->qdss_clk);
+	} else {
+		drv->qdss_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "prng_clk") >= 0) {
+		drv->prng_clk = devm_clk_get(&pdev->dev, "prng_clk");
+		if (IS_ERR(drv->prng_clk))
+			return ERR_CAST(drv->prng_clk);
+	} else {
+		drv->prng_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "axis2_clk") >= 0) {
+		drv->axis2_clk = devm_clk_get(&pdev->dev, "axis2_clk");
+		if (IS_ERR(drv->axis2_clk))
+			return ERR_CAST(drv->axis2_clk);
+	} else {
+		drv->axis2_clk = NULL;
+	}
+
+	drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(drv->vreg_cx))
+		return ERR_CAST(drv->vreg_cx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_cx-voltage", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_cx-voltage property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
+		&vdd_pll);
+	if (!ret) {
+		drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
+		if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
+			ret = regulator_set_voltage(drv->vreg_pll, vdd_pll,
+							vdd_pll);
+			if (ret) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll voltage(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+
+			ret = regulator_set_load(drv->vreg_pll, 10000);
+			if (ret < 0) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll mode(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+		} else {
+			drv->vreg_pll = NULL;
+		}
+	}
+
+	return drv;
+}
+EXPORT_SYMBOL(pil_q6v5_init);
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
new file mode 100644
index 0000000..01b1cef
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PIL_Q6V5_H
+#define __MSM_PIL_Q6V5_H
+
+#include "peripheral-loader.h"
+
+struct regulator;
+struct clk;
+struct pil_device;
+struct platform_device;
+
+struct q6v5_data {
+	void __iomem *reg_base;
+	void __iomem *rmb_base;
+	void __iomem *cxrail_bhs;  /* External BHS register */
+	struct clk *xo;		   /* XO clock source */
+	struct clk *pnoc_clk;	   /* PNOC bus clock source */
+	struct clk *ahb_clk;	   /* PIL access to registers */
+	struct clk *axi_clk;	   /* CPU access to memory */
+	struct clk *core_clk;	   /* CPU core */
+	struct clk *reg_clk;	   /* CPU access registers */
+	struct clk *gpll0_mss_clk; /* GPLL0 to MSS connection */
+	struct clk *rom_clk;	   /* Boot ROM */
+	struct clk *snoc_axi_clk;
+	struct clk *mnoc_axi_clk;
+	struct clk *qdss_clk;
+	struct clk *prng_clk;
+	struct clk *axis2_clk;
+	void __iomem *axi_halt_base; /* q6, mss and nc halt bases
+				      * share the same 4K page
+				      */
+	void __iomem *axi_halt_q6;
+	void __iomem *axi_halt_mss;
+	void __iomem *axi_halt_nc;
+	void __iomem *restart_reg;
+	void __iomem *pdc_sync;
+	void __iomem *alt_reset;
+	struct regulator *vreg;
+	struct regulator *vreg_cx;
+	struct regulator *vreg_mx;
+	struct regulator *vreg_pll;
+	bool is_booted;
+	struct pil_desc desc;
+	bool self_auth;
+	phys_addr_t mba_dp_phys;
+	void *mba_dp_virt;
+	size_t mba_dp_size;
+	size_t dp_size;
+	bool qdsp6v55;
+	bool qdsp6v5_2_0;
+	bool qdsp6v56;
+	bool qdsp6v56_1_3;
+	bool qdsp6v56_1_5;
+	bool qdsp6v56_1_8;
+	bool qdsp6v56_1_8_inrush_current;
+	bool qdsp6v56_1_10;
+	bool qdsp6v61_1_1;
+	bool qdsp6v62_1_2;
+	bool qdsp6v62_1_4;
+	bool qdsp6v62_1_5;
+	bool qdsp6v65_1_0;
+	bool non_elf_image;
+	bool restart_reg_sec;
+	bool override_acc;
+	int override_acc_1;
+	int mss_pdc_offset;
+	int smem_id;
+	bool ahb_clk_vote;
+	bool mx_spike_wa;
+	bool reset_clk;
+};
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base);
+void pil_q6v5_shutdown(struct pil_desc *pil);
+int pil_q6v5_reset(struct pil_desc *pil);
+void assert_clamps(struct pil_desc *pil);
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 33b4fbd..2233010 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -597,7 +597,6 @@
 {
 	int ret = 0;
 	struct spcom_user_create_channel_command *cmd = cmd_buf;
-	const size_t maxlen = sizeof(cmd->ch_name);
 
 	if (cmd_size != sizeof(*cmd)) {
 		spcom_pr_err("cmd_size [%d] , expected [%d]\n",
@@ -605,11 +604,6 @@
 		return -EINVAL;
 	}
 
-	if (strnlen(cmd->ch_name, maxlen) == maxlen) {
-		spcom_pr_err("channel name is not NULL terminated\n");
-		return -EINVAL;
-	}
-
 	ret = spcom_create_channel_chardev(cmd->ch_name, cmd->is_sharable);
 
 	return ret;
@@ -2002,6 +1996,12 @@
 	void *priv;
 	struct cdev *cdev;
 
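+	/* Reject a missing or unterminated channel name before any use */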
+	if (!name || strnlen(name, SPCOM_CHANNEL_NAME_SIZE) ==
+			SPCOM_CHANNEL_NAME_SIZE) {
+		spcom_pr_err("invalid channel name\n");
+		return -EINVAL;
+	}
+
 	spcom_pr_dbg("creating channel [%s]\n", name);
 	mutex_lock(&spcom_dev->create_channel_lock);
 
@@ -2040,7 +2040,12 @@
 
 	devt = spcom_dev->device_no + atomic_read(&spcom_dev->chdev_count);
 	priv = ch;
-	dev = device_create(cls, parent, devt, priv, name);
+
+	/*
+	 * Pass the channel name as a format argument rather than as the
+	 * format string itself, so a name containing format specifiers
+	 * cannot be interpreted by device_create().
+	 */
+	dev = device_create(cls, parent, devt, priv, "%s", name);
 	if (IS_ERR(dev)) {
 		spcom_pr_err("device_create failed\n");
 		ret = -ENODEV;
diff --git a/drivers/soc/qcom/spm_devices.c b/drivers/soc/qcom/spm_devices.c
new file mode 100644
index 0000000..b1f15f2
--- /dev/null
+++ b/drivers/soc/qcom/spm_devices.c
@@ -0,0 +1,1011 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2011-2020  The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <soc/qcom/spm.h>
+#include "spm_driver.h"
+
+#define VDD_DEFAULT 0xDEADF00D
+#define SLP_CMD_BIT 17
+#define PC_MODE_BIT 16
+#define RET_MODE_BIT 15
+#define EVENT_SYNC_BIT 24
+#define ISAR_BIT 3
+#define SPM_EN_BIT 0
+
+struct msm_spm_power_modes {
+	uint32_t mode;
+	uint32_t ctl;
+};
+
+struct msm_spm_device {
+	struct list_head list;
+	bool initialized;
+	const char *name;
+	struct msm_spm_driver_data reg_data;
+	struct msm_spm_power_modes *modes;
+	uint32_t num_modes;
+	uint32_t cpu_vdd;
+	struct cpumask mask;
+	void __iomem *q2s_reg;
+	bool qchannel_ignore;
+	bool allow_rpm_hs;
+	bool use_spm_clk_gating;
+	bool use_qchannel_for_wfi;
+	void __iomem *flush_base_addr;
+	void __iomem *slpreq_base_addr;
+};
+
+struct msm_spm_vdd_info {
+	struct msm_spm_device *vctl_dev;
+	uint32_t vlevel;
+	int err;
+};
+
+static LIST_HEAD(spm_list);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
+static DEFINE_PER_CPU(struct msm_spm_device *, cpu_vctl_device);
+
+static void msm_spm_smp_set_vdd(void *data)
+{
+	struct msm_spm_vdd_info *info = (struct msm_spm_vdd_info *)data;
+	struct msm_spm_device *dev = info->vctl_dev;
+
+	dev->cpu_vdd = info->vlevel;
+	info->err = msm_spm_drv_set_vdd(&dev->reg_data, info->vlevel);
+}
+
+/**
+ * msm_spm_probe_done(): Verify and return the status of the CPU and L2
+ * probes.
+ *
+ * Return: 0 if all SPM devices have been probed, -EPROBE_DEFER if any
+ * device has not yet probed, or the error number of a failed probe.
+ */
+int msm_spm_probe_done(void)
+{
+	struct msm_spm_device *dev;
+	int cpu;
+	int ret = 0;
+
+	for_each_possible_cpu(cpu) {
+		dev = per_cpu(cpu_vctl_device, cpu);
+		if (!dev)
+			return -EPROBE_DEFER;
+
+		ret = PTR_ERR_OR_ZERO(dev);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_spm_probe_done);
+
+void msm_spm_dump_regs(unsigned int cpu)
+{
+	dump_regs(&per_cpu(msm_cpu_spm_device, cpu).reg_data, cpu);
+}
+
+/**
+ * msm_spm_set_vdd(): Set core voltage
+ * @cpu: core id
+ * @vlevel: Encoded PMIC data.
+ *
+ * Return: 0 on success or -(ERRNO) on failure.
+ */
+int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
+{
+	struct msm_spm_vdd_info info;
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+	int ret;
+
+	if (!dev)
+		return -EPROBE_DEFER;
+
+	ret = PTR_ERR_OR_ZERO(dev);
+	if (ret)
+		return ret;
+
+	info.vctl_dev = dev;
+	info.vlevel = vlevel;
+
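+	/*
+	 * The VCTL write must execute on a CPU served by this SPM
+	 * instance, so route it via IPI to any CPU in the device's mask.
+	 */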
+	ret = smp_call_function_any(&dev->mask, msm_spm_smp_set_vdd, &info,
+					true);
+	if (ret)
+		return ret;
+
+	return info.err;
+}
+EXPORT_SYMBOL(msm_spm_set_vdd);
+
+/**
+ * msm_spm_get_vdd(): Get core voltage
+ * @cpu: core id
+ *
+ * Return: encoded PMIC data, -EPROBE_DEFER if the device has not probed,
+ * or -EINVAL on failure.
+ */
+int msm_spm_get_vdd(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -EPROBE_DEFER;
+
+	return msm_spm_drv_get_vdd(&dev->reg_data) ? : -EINVAL;
+}
+EXPORT_SYMBOL(msm_spm_get_vdd);
+
+static void msm_spm_config_q2s(struct msm_spm_device *dev, unsigned int mode)
+{
+	uint32_t spm_legacy_mode = 0;
+	uint32_t qchannel_ignore = 0;
+	uint32_t val = 0;
+
+	if (!dev->q2s_reg)
+		return;
+
+	switch (mode) {
+	case MSM_SPM_MODE_DISABLED:
+	case MSM_SPM_MODE_CLOCK_GATING:
+		qchannel_ignore = !dev->use_qchannel_for_wfi;
+		spm_legacy_mode = 0;
+		break;
+	case MSM_SPM_MODE_RETENTION:
+		qchannel_ignore = 0;
+		spm_legacy_mode = 0;
+		break;
+	case MSM_SPM_MODE_GDHS:
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+		qchannel_ignore = dev->qchannel_ignore;
+		spm_legacy_mode = 1;
+		break;
+	default:
+		break;
+	}
+
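+	/*
+	 * Q2S register layout (also documented at the boot-time rail init
+	 * below): bit[2] = spm_legacy_mode, bit[1] = qchannel_ignore.
+	 */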
+	val = spm_legacy_mode << 2 | qchannel_ignore << 1;
+	__raw_writel(val, dev->q2s_reg);
+	mb(); /* Ensure flush */
+}
+
+static void msm_spm_config_hw_flush(struct msm_spm_device *dev,
+		unsigned int mode)
+{
+	uint32_t val = 0;
+
+	if (!dev->flush_base_addr)
+		return;
+
+	switch (mode) {
+	case MSM_SPM_MODE_FASTPC:
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+		val = BIT(0);
+		break;
+	default:
+		break;
+	}
+
+	__raw_writel(val, dev->flush_base_addr);
+}
+
+static void msm_spm_config_slpreq(struct msm_spm_device *dev,
+		unsigned int mode)
+{
+	uint32_t val = 0;
+
+	if (!dev->slpreq_base_addr)
+		return;
+
+	switch (mode) {
+	case MSM_SPM_MODE_FASTPC:
+	case MSM_SPM_MODE_GDHS:
+	case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE:
+	case MSM_SPM_MODE_POWER_COLLAPSE:
+		val = BIT(4);
+		break;
+	default:
+		break;
+	}
+
+	val = (__raw_readl(dev->slpreq_base_addr) & ~BIT(4)) | val;
+	__raw_writel(val, dev->slpreq_base_addr);
+}
+
+static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm, bool set_spm_enable)
+{
+	uint32_t i;
+	int ret = -EINVAL;
+	uint32_t ctl = 0;
+
+	if (!dev) {
+		pr_err("dev is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!dev->initialized)
+		return -ENXIO;
+
+	if (!dev->num_modes)
+		return 0;
+
+	if (mode == MSM_SPM_MODE_DISABLED && set_spm_enable) {
+		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
+	} else {
+		if (set_spm_enable)
+			ret = msm_spm_drv_set_spm_enable(&dev->reg_data, true);
+		for (i = 0; i < dev->num_modes; i++) {
+			if (dev->modes[i].mode != mode)
+				continue;
+
+			ctl = dev->modes[i].ctl;
+			if (!dev->allow_rpm_hs && notify_rpm)
+				ctl &= ~BIT(SLP_CMD_BIT);
+
+			break;
+		}
+		ret = msm_spm_drv_set_low_power_mode(&dev->reg_data, ctl);
+	}
+
+	msm_spm_config_q2s(dev, mode);
+	msm_spm_config_hw_flush(dev, mode);
+	msm_spm_config_slpreq(dev, mode);
+
+	return ret;
+}
+
+static int msm_spm_dev_init(struct msm_spm_device *dev,
+		struct msm_spm_platform_data *data)
+{
+	int i, ret = -ENOMEM;
+	uint32_t offset = 0;
+
+	dev->cpu_vdd = VDD_DEFAULT;
+	dev->num_modes = data->num_modes;
+	dev->modes = kmalloc_array(
+			dev->num_modes, sizeof(struct msm_spm_power_modes),
+			GFP_KERNEL);
+
+	if (!dev->modes)
+		goto spm_failed_malloc;
+
+	ret = msm_spm_drv_init(&dev->reg_data, data);
+
+	if (ret)
+		goto spm_failed_init;
+
+	for (i = 0; i < dev->num_modes; i++) {
+
+		/*
+		 * Default offset is 0 and gets updated as we write more
+		 * sequences into SPM.
+		 */
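+		/* The sequence start address is packed into ctl bits [12:4]. */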
+		dev->modes[i].ctl = data->modes[i].ctl | ((offset & 0x1FF)
+						<< 4);
+		ret = msm_spm_drv_write_seq_data(&dev->reg_data,
+						data->modes[i].cmd, &offset);
+		if (ret < 0)
+			goto spm_failed_init;
+
+		dev->modes[i].mode = data->modes[i].mode;
+	}
+
+	msm_spm_drv_reinit(&dev->reg_data, dev->num_modes != 0);
+
+	dev->initialized = true;
+
+	return 0;
+
+spm_failed_init:
+	kfree(dev->modes);
+spm_failed_malloc:
+	return ret;
+}
+
+/**
+ * msm_spm_turn_on_cpu_rail(): Power on the CPU rail before turning on a core
+ * @vctl_node: The SPM node that controls the voltage for the CPU
+ * @val: The value to be set on the rail
+ * @cpu: The CPU whose rail is being powered on
+ * @vctl_offset: Offset of the voltage control register within the SPM block
+ */
+int msm_spm_turn_on_cpu_rail(struct device_node *vctl_node,
+		unsigned int val, int cpu, int vctl_offset)
+{
+	uint32_t timeout = 2000; /* delay for voltage to settle on the core */
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+	void __iomem *base;
+
+	base = of_iomap(vctl_node, 1);
+	if (base) {
+		/*
+		 * Program Q2S to disable SPM legacy mode and ignore Q2S
+		 * channel requests.
+		 * bit[1] = qchannel_ignore = 1
+		 * bit[2] = spm_legacy_mode = 0
+		 */
+		writel_relaxed(0x2, base);
+		mb(); /* Ensure flush */
+		iounmap(base);
+	}
+
+	base = of_iomap(vctl_node, 0);
+	if (!base)
+		return -ENOMEM;
+
+	if (dev && (dev->cpu_vdd != VDD_DEFAULT)) {
+		iounmap(base);
+		return 0;
+	}
+
+	/* Set the CPU supply regulator voltage */
+	val = (val & 0xFF);
+	writel_relaxed(val, base + vctl_offset);
+	mb(); /* Ensure flush */
+	udelay(timeout);
+
+	/* Enable the CPU supply regulator */
+	val = 0x30080;
+	writel_relaxed(val, base + vctl_offset);
+	mb(); /* Ensure flush */
+	udelay(timeout);
+
+	iounmap(base);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_spm_turn_on_cpu_rail);
+
+void msm_spm_reinit(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		msm_spm_drv_reinit(
+			&per_cpu(msm_cpu_spm_device.reg_data, cpu), true);
+}
+EXPORT_SYMBOL(msm_spm_reinit);
+
+/**
+ * msm_spm_is_mode_avail() - Check whether a mode is available for the cpu
+ * @mode: SPM LPM mode to be selected
+ *
+ * Should only be used to decide on a mode before the lpm driver is probed.
+ */
+bool msm_spm_is_mode_avail(unsigned int mode)
+{
+	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
+	int i;
+
+	for (i = 0; i < dev->num_modes; i++) {
+		if (dev->modes[i].mode == mode)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * msm_spm_is_avs_enabled() - Return whether AVS is enabled
+ * @cpu: specifies which cpu's avs should be read
+ *
+ * Returns errno in case of failure, otherwise 1 if AVS is enabled and
+ * 0 if it is not.
+ */
+int msm_spm_is_avs_enabled(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_get_avs_enable(&dev->reg_data);
+}
+EXPORT_SYMBOL(msm_spm_is_avs_enabled);
+
+/**
+ * msm_spm_avs_enable() - Enables AVS on the SAW that controls this cpu's
+ *			  voltage.
+ * @cpu: specifies which cpu's avs should be enabled
+ *
+ * Returns errno in case of failure or 0 if successful
+ */
+int msm_spm_avs_enable(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_enable(&dev->reg_data, true);
+}
+EXPORT_SYMBOL(msm_spm_avs_enable);
+
+/**
+ * msm_spm_avs_disable() - Disables AVS on the SAW that controls this cpu's
+ *			   voltage.
+ * @cpu: specifies which cpu's avs should be enabled
+ *
+ * Returns errno in case of failure or 0 if successful
+ */
+int msm_spm_avs_disable(unsigned int cpu)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_enable(&dev->reg_data, false);
+}
+EXPORT_SYMBOL(msm_spm_avs_disable);
+
+/**
+ * msm_spm_avs_set_limit() - Set maximum and minimum AVS limits on the
+ *			     SAW that controls this cpu's voltage.
+ * @cpu: specify which cpu's avs should be configured
+ * @min_lvl: specifies the minimum PMIC output voltage control register
+ *		value that may be sent to the PMIC
+ * @max_lvl: specifies the maximum PMIC output voltage control register
+ *		value that may be sent to the PMIC
+ * Returns errno in case of failure or 0 if successful
+ */
+int msm_spm_avs_set_limit(unsigned int cpu,
+		uint32_t min_lvl, uint32_t max_lvl)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_limit(&dev->reg_data, min_lvl, max_lvl);
+}
+EXPORT_SYMBOL(msm_spm_avs_set_limit);
+
+/**
+ * msm_spm_avs_enable_irq() - Enable an AVS interrupt
+ * @cpu: specifies which CPU's AVS should be configured
+ * @irq: specifies which interrupt to enable
+ *
+ * Returns errno in case of failure or 0 if successful.
+ */
+int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, true);
+}
+EXPORT_SYMBOL(msm_spm_avs_enable_irq);
+
+/**
+ * msm_spm_avs_disable_irq() - Disable an AVS interrupt
+ * @cpu: specifies which CPU's AVS should be configured
+ * @irq: specifies which interrupt to disable
+ *
+ * Returns errno in case of failure or 0 if successful.
+ */
+int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, false);
+}
+EXPORT_SYMBOL(msm_spm_avs_disable_irq);
+
+/**
+ * msm_spm_avs_clear_irq() - Clear a latched AVS interrupt
+ * @cpu: specifies which CPU's AVS should be configured
+ * @irq: specifies which interrupt to clear
+ *
+ * Returns errno in case of failure or 0 if successful.
+ */
+int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_avs_clear_irq(&dev->reg_data, irq);
+}
+EXPORT_SYMBOL(msm_spm_avs_clear_irq);
+
+/**
+ * msm_spm_set_low_power_mode() - Configure SPM start address for low power mode
+ * @mode: SPM LPM mode to enter
+ * @notify_rpm: Notify RPM in this mode
+ */
+int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
+
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true);
+}
+EXPORT_SYMBOL(msm_spm_set_low_power_mode);
+
+void msm_spm_set_rpm_hs(bool allow_rpm_hs)
+{
+	struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device);
+
+	dev->allow_rpm_hs = allow_rpm_hs;
+}
+EXPORT_SYMBOL(msm_spm_set_rpm_hs);
+
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, false);
+}
+
+/**
+ * msm_spm_init(): Board initialization function
+ * @data: platform specific SPM register configuration data
+ * @nr_devs: Number of SPM devices being initialized
+ */
+int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
+{
+	unsigned int cpu;
+	int ret = 0;
+
+	if ((nr_devs < num_possible_cpus()) || !data)
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);
+
+		ret = msm_spm_dev_init(dev, &data[cpu]);
+		if (ret < 0) {
+			pr_warn("%s():failed CPU:%u ret:%d\n", __func__,
+					cpu, ret);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+struct msm_spm_device *msm_spm_get_device_by_name(const char *name)
+{
+	struct list_head *list;
+
+	list_for_each(list, &spm_list) {
+		struct msm_spm_device *dev
+			= list_entry(list, typeof(*dev), list);
+		if (dev->name && !strcmp(dev->name, name))
+			return dev;
+	}
+	return ERR_PTR(-ENODEV);
+}
+
+int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true);
+}
+#ifdef CONFIG_MSM_L2_SPM
+
+/**
+ * msm_spm_apcs_set_phase(): Set number of SMPS phases.
+ * @cpu: cpu which is requesting the change in number of phases.
+ * @phase_cnt: Number of phases to be set active
+ */
+int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_pmic_data(&dev->reg_data,
+			MSM_SPM_PMIC_PHASE_PORT, phase_cnt);
+}
+EXPORT_SYMBOL(msm_spm_apcs_set_phase);
+
+/**
+ * msm_spm_enable_fts_lpm(): Enable FTS to switch to low power
+ *			     when the cores are in low power modes
+ * @cpu: cpu that is entering low power mode.
+ * @mode: The mode configuration for FTS
+ */
+int msm_spm_enable_fts_lpm(int cpu, uint32_t mode)
+{
+	struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu);
+
+	if (!dev)
+		return -ENXIO;
+
+	return msm_spm_drv_set_pmic_data(&dev->reg_data,
+			MSM_SPM_PMIC_PFM_PORT, mode);
+}
+EXPORT_SYMBOL(msm_spm_enable_fts_lpm);
+
+#endif
+
+static int get_cpu_id(struct device_node *node)
+{
+	struct device_node *cpu_node;
+	u32 cpu;
+	char *key = "qcom,cpu";
+
+	cpu_node = of_parse_phandle(node, key, 0);
+	if (cpu_node) {
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == cpu_node)
+				return cpu;
+		}
+	} else {
+		return num_possible_cpus();
+	}
+
+	return -EINVAL;
+}
+
+static struct msm_spm_device *msm_spm_get_device(struct platform_device *pdev)
+{
+	struct msm_spm_device *dev = NULL;
+	const char *val = NULL;
+	char *key = "qcom,name";
+	int cpu = get_cpu_id(pdev->dev.of_node);
+
+	if ((cpu >= 0) && cpu < num_possible_cpus())
+		dev = &per_cpu(msm_cpu_spm_device, cpu);
+	else if (cpu == num_possible_cpus())
+		dev = devm_kzalloc(&pdev->dev, sizeof(struct msm_spm_device),
+					GFP_KERNEL);
+
+	if (!dev)
+		return NULL;
+
+	if (of_property_read_string(pdev->dev.of_node, key, &val)) {
+		pr_err("%s(): Cannot find a required node key:%s\n",
+				__func__, key);
+		return NULL;
+	}
+	dev->name = val;
+	list_add(&dev->list, &spm_list);
+
+	return dev;
+}
+
+static void get_cpumask(struct device_node *node, struct cpumask *mask)
+{
+	unsigned int c;
+	int idx = 0;
+	struct device_node *cpu_node;
+	char *key = "qcom,cpu-vctl-list";
+
+	cpu_node = of_parse_phandle(node, key, idx++);
+	while (cpu_node) {
+		for_each_possible_cpu(c) {
+			if (of_get_cpu_node(c, NULL) == cpu_node)
+				cpumask_set_cpu(c, mask);
+		}
+		cpu_node = of_parse_phandle(node, key, idx++);
+	}
+}
+
+static int msm_spm_dev_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int cpu = 0;
+	int i = 0;
+	struct device_node *node = pdev->dev.of_node;
+	struct device_node *n = NULL;
+	struct msm_spm_platform_data spm_data;
+	char *key = NULL;
+	uint32_t val = 0;
+	struct msm_spm_seq_entry modes[MSM_SPM_MODE_NR];
+	int len = 0;
+	struct msm_spm_device *dev = NULL;
+	struct resource *res = NULL;
+	uint32_t mode_count = 0;
+
+	struct spm_of {
+		char *key;
+		uint32_t id;
+	};
+
+	struct spm_of spm_of_data[] = {
+		{"qcom,saw2-cfg", MSM_SPM_REG_SAW_CFG},
+		{"qcom,saw2-avs-ctl", MSM_SPM_REG_SAW_AVS_CTL},
+		{"qcom,saw2-avs-hysteresis", MSM_SPM_REG_SAW_AVS_HYSTERESIS},
+		{"qcom,saw2-avs-limit", MSM_SPM_REG_SAW_AVS_LIMIT},
+		{"qcom,saw2-avs-dly", MSM_SPM_REG_SAW_AVS_DLY},
+		{"qcom,saw2-spm-dly", MSM_SPM_REG_SAW_SPM_DLY},
+		{"qcom,saw2-spm-ctl", MSM_SPM_REG_SAW_SPM_CTL},
+		{"qcom,saw2-pmic-data0", MSM_SPM_REG_SAW_PMIC_DATA_0},
+		{"qcom,saw2-pmic-data1", MSM_SPM_REG_SAW_PMIC_DATA_1},
+		{"qcom,saw2-pmic-data2", MSM_SPM_REG_SAW_PMIC_DATA_2},
+		{"qcom,saw2-pmic-data3", MSM_SPM_REG_SAW_PMIC_DATA_3},
+		{"qcom,saw2-pmic-data4", MSM_SPM_REG_SAW_PMIC_DATA_4},
+		{"qcom,saw2-pmic-data5", MSM_SPM_REG_SAW_PMIC_DATA_5},
+		{"qcom,saw2-pmic-data6", MSM_SPM_REG_SAW_PMIC_DATA_6},
+		{"qcom,saw2-pmic-data7", MSM_SPM_REG_SAW_PMIC_DATA_7},
+	};
+
+	struct mode_of {
+		char *key;
+		uint32_t id;
+	};
+
+	struct mode_of mode_of_data[] = {
+		{"qcom,saw2-spm-cmd-wfi", MSM_SPM_MODE_CLOCK_GATING},
+		{"qcom,saw2-spm-cmd-ret", MSM_SPM_MODE_RETENTION},
+		{"qcom,saw2-spm-cmd-gdhs", MSM_SPM_MODE_GDHS},
+		{"qcom,saw2-spm-cmd-spc",
+				MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE},
+		{"qcom,saw2-spm-cmd-pc", MSM_SPM_MODE_POWER_COLLAPSE},
+		{"qcom,saw2-spm-cmd-fpc", MSM_SPM_MODE_FASTPC},
+	};
+
+	dev = msm_spm_get_device(pdev);
+	if (!dev) {
+		/*
+		 * With partial-goods support some CPUs might not be
+		 * available, in which case this is not an error.
+		 */
+		return 0;
+	}
+	get_cpumask(node, &dev->mask);
+
+	memset(&spm_data, 0, sizeof(struct msm_spm_platform_data));
+	memset(&modes, 0,
+		(MSM_SPM_MODE_NR - 2) * sizeof(struct msm_spm_seq_entry));
+
+	key = "qcom,saw2-ver-reg";
+	ret = of_property_read_u32(node, key, &val);
+	if (ret)
+		goto fail;
+	spm_data.ver_reg = val;
+
+	key = "qcom,vctl-timeout-us";
+	ret = of_property_read_u32(node, key, &val);
+	if (!ret)
+		spm_data.vctl_timeout_us = val;
+
+	/* SAW start address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	spm_data.reg_base_addr = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!spm_data.reg_base_addr) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	spm_data.vctl_port = -1;
+	spm_data.vctl_port_ub = -1;
+	spm_data.phase_port = -1;
+	spm_data.pfm_port = -1;
+
+	key = "qcom,vctl-port";
+	of_property_read_u32(node, key, &spm_data.vctl_port);
+
+	key = "qcom,vctl-port-ub";
+	of_property_read_u32(node, key, &spm_data.vctl_port_ub);
+
+	key = "qcom,phase-port";
+	of_property_read_u32(node, key, &spm_data.phase_port);
+
+	key = "qcom,pfm-port";
+	of_property_read_u32(node, key, &spm_data.pfm_port);
+
+	/* Q2S (QChannel-2-SPM) register */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "q2s");
+	if (res) {
+		dev->q2s_reg = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+		if (!dev->q2s_reg) {
+			pr_err("%s(): Unable to iomap Q2S register\n",
+					__func__);
+			ret = -EADDRNOTAVAIL;
+			goto fail;
+		}
+	}
+
+	key = "qcom,use-qchannel-for-pc";
+	dev->qchannel_ignore = !of_property_read_bool(node, key);
+
+	key = "qcom,use-spm-clock-gating";
+	dev->use_spm_clk_gating = of_property_read_bool(node, key);
+
+	key = "qcom,use-qchannel-for-wfi";
+	dev->use_qchannel_for_wfi = of_property_read_bool(node, key);
+
+	/* HW flush address */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hw-flush");
+	if (res) {
+		dev->flush_base_addr = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(dev->flush_base_addr)) {
+			ret = PTR_ERR(dev->flush_base_addr);
+			pr_err("%s(): Unable to iomap hw flush register %d\n",
+					__func__, ret);
+			goto fail;
+		}
+	}
+
+	/* Sleep req address */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slpreq");
+	if (res) {
+		dev->slpreq_base_addr = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+		if (!dev->slpreq_base_addr) {
+			ret = -ENOMEM;
+			pr_err("%s(): Unable to iomap slpreq register\n",
+					__func__);
+			ret = -EADDRNOTAVAIL;
+			goto fail;
+		}
+	}
+
+	/*
+	 * At system boot, cpus and or clusters can remain in reset. CCI SPM
+	 * will not be triggered unless SPM_LEGACY_MODE bit is set for the
+	 * cluster in reset. Initialize q2s registers and set the
+	 * SPM_LEGACY_MODE bit.
+	 */
+	msm_spm_config_q2s(dev, MSM_SPM_MODE_POWER_COLLAPSE);
+	msm_spm_drv_reg_init(&dev->reg_data, &spm_data);
+
+	for (i = 0; i < ARRAY_SIZE(spm_of_data); i++) {
+		ret = of_property_read_u32(node, spm_of_data[i].key, &val);
+		if (ret)
+			continue;
+		msm_spm_drv_upd_reg_shadow(&dev->reg_data, spm_of_data[i].id,
+				val);
+	}
+
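+	/*
+	 * Each child node describes one low power mode. An illustrative
+	 * sketch (label from mode_of_data above; node name and sequence
+	 * bytes are made up):
+	 *
+	 *	spm-wfi {
+	 *		qcom,label = "qcom,saw2-spm-cmd-wfi";
+	 *		qcom,sequence = [03 0b 0f];
+	 *		qcom,spm_en;
+	 *	};
+	 */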
+	for_each_child_of_node(node, n) {
+		const char *name;
+		bool bit_set;
+		int sync;
+
+		if (!n->name)
+			continue;
+
+		ret = of_property_read_string(n, "qcom,label", &name);
+		if (ret)
+			continue;
+
+		for (i = 0; i < ARRAY_SIZE(mode_of_data); i++)
+			if (!strcmp(name, mode_of_data[i].key))
+				break;
+
+		if (i == ARRAY_SIZE(mode_of_data)) {
+			pr_err("Mode name invalid %s\n", name);
+			break;
+		}
+
+		modes[mode_count].mode = mode_of_data[i].id;
+		modes[mode_count].cmd =
+			(uint8_t *)of_get_property(n, "qcom,sequence", &len);
+		if (!modes[mode_count].cmd) {
+			pr_err("cmd is empty\n");
+			continue;
+		}
+
+		bit_set = of_property_read_bool(n, "qcom,pc_mode");
+		modes[mode_count].ctl |= bit_set ? BIT(PC_MODE_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,ret_mode");
+		modes[mode_count].ctl |= bit_set ? BIT(RET_MODE_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,slp_cmd_mode");
+		modes[mode_count].ctl |= bit_set ? BIT(SLP_CMD_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,isar");
+		modes[mode_count].ctl |= bit_set ? BIT(ISAR_BIT) : 0;
+
+		bit_set = of_property_read_bool(n, "qcom,spm_en");
+		modes[mode_count].ctl |= bit_set ? BIT(SPM_EN_BIT) : 0;
+
+		ret = of_property_read_u32(n, "qcom,event_sync", &sync);
+		if (!ret)
+			modes[mode_count].ctl |= sync << EVENT_SYNC_BIT;
+
+		mode_count++;
+	}
+
+	spm_data.modes = modes;
+	spm_data.num_modes = mode_count;
+
+	key = "qcom,supports-rpm-hs";
+	dev->allow_rpm_hs = of_property_read_bool(pdev->dev.of_node, key);
+
+	ret = msm_spm_dev_init(dev, &spm_data);
+	if (ret)
+		pr_err("SPM modes programming is not available from HLOS\n");
+
+	platform_set_drvdata(pdev, dev);
+
+	for_each_cpu(cpu, &dev->mask)
+		per_cpu(cpu_vctl_device, cpu) = dev;
+
+	if (!spm_data.num_modes)
+		return 0;
+
+	cpu = get_cpu_id(pdev->dev.of_node);
+
+	/*
+	 * For CPUs that are online, the SPM has to be programmed for
+	 * clock gating mode so that they can use the SPM to enter these
+	 * low power modes.
+	 */
+	get_online_cpus();
+	if ((cpu >= 0) && (cpu < num_possible_cpus()) && (cpu_online(cpu)))
+		msm_spm_config_low_power_mode(dev, MSM_SPM_MODE_CLOCK_GATING,
+				false);
+	put_online_cpus();
+	return ret;
+
+fail:
+	cpu = get_cpu_id(pdev->dev.of_node);
+	if (dev && (cpu >= num_possible_cpus() || (cpu < 0))) {
+		for_each_cpu(cpu, &dev->mask)
+			per_cpu(cpu_vctl_device, cpu) = ERR_PTR(ret);
+	}
+
+	pr_err("%s: CPU%d SPM device probe failed: %d\n", __func__, cpu, ret);
+
+	return ret;
+}
+
+static int msm_spm_dev_remove(struct platform_device *pdev)
+{
+	struct msm_spm_device *dev = platform_get_drvdata(pdev);
+
+	if (dev)
+		list_del(&dev->list);
+	return 0;
+}
+
+static const struct of_device_id msm_spm_match_table[] = {
+	{.compatible = "qcom,spm-v2"},
+	{},
+};
+
+static struct platform_driver msm_spm_device_driver = {
+	.probe = msm_spm_dev_probe,
+	.remove = msm_spm_dev_remove,
+	.driver = {
+		.name = "spm-v2",
+		.of_match_table = msm_spm_match_table,
+	},
+};
+
+/**
+ * msm_spm_device_init(): Device tree initialization function
+ */
+int __init msm_spm_device_init(void)
+{
+	static bool registered;
+
+	if (registered)
+		return 0;
+	registered = true;
+	return platform_driver_register(&msm_spm_device_driver);
+}
+arch_initcall(msm_spm_device_init);
diff --git a/drivers/soc/qcom/spm_driver.h b/drivers/soc/qcom/spm_driver.h
new file mode 100644
index 0000000..f362ecd
--- /dev/null
+++ b/drivers/soc/qcom/spm_driver.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2011-2017, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
+#define __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
+
+#include <soc/qcom/spm.h>
+
+enum {
+	MSM_SPM_REG_SAW_CFG,
+	MSM_SPM_REG_SAW_AVS_CTL,
+	MSM_SPM_REG_SAW_AVS_HYSTERESIS,
+	MSM_SPM_REG_SAW_SPM_CTL,
+	MSM_SPM_REG_SAW_PMIC_DLY,
+	MSM_SPM_REG_SAW_AVS_LIMIT,
+	MSM_SPM_REG_SAW_AVS_DLY,
+	MSM_SPM_REG_SAW_SPM_DLY,
+	MSM_SPM_REG_SAW_PMIC_DATA_0,
+	MSM_SPM_REG_SAW_PMIC_DATA_1,
+	MSM_SPM_REG_SAW_PMIC_DATA_2,
+	MSM_SPM_REG_SAW_PMIC_DATA_3,
+	MSM_SPM_REG_SAW_PMIC_DATA_4,
+	MSM_SPM_REG_SAW_PMIC_DATA_5,
+	MSM_SPM_REG_SAW_PMIC_DATA_6,
+	MSM_SPM_REG_SAW_PMIC_DATA_7,
+	MSM_SPM_REG_SAW_RST,
+
+	MSM_SPM_REG_NR_INITIALIZE = MSM_SPM_REG_SAW_RST,
+
+	MSM_SPM_REG_SAW_ID,
+	MSM_SPM_REG_SAW_SECURE,
+	MSM_SPM_REG_SAW_STS0,
+	MSM_SPM_REG_SAW_STS1,
+	MSM_SPM_REG_SAW_STS2,
+	MSM_SPM_REG_SAW_VCTL,
+	MSM_SPM_REG_SAW_SEQ_ENTRY,
+	MSM_SPM_REG_SAW_SPM_STS,
+	MSM_SPM_REG_SAW_AVS_STS,
+	MSM_SPM_REG_SAW_PMIC_STS,
+	MSM_SPM_REG_SAW_VERSION,
+
+	MSM_SPM_REG_NR,
+};
+
+struct msm_spm_seq_entry {
+	uint32_t mode;
+	uint8_t *cmd;
+	uint32_t ctl;
+};
+
+struct msm_spm_platform_data {
+	void __iomem *reg_base_addr;
+	uint32_t reg_init_values[MSM_SPM_REG_NR_INITIALIZE];
+
+	uint32_t ver_reg;
+	uint32_t vctl_port;
+	int vctl_port_ub;
+	uint32_t phase_port;
+	uint32_t pfm_port;
+
+	uint8_t awake_vlevel;
+	uint32_t vctl_timeout_us;
+	uint32_t avs_timeout_us;
+
+	uint32_t num_modes;
+	struct msm_spm_seq_entry *modes;
+};
+
+enum msm_spm_pmic_port {
+	MSM_SPM_PMIC_VCTL_PORT,
+	MSM_SPM_PMIC_PHASE_PORT,
+	MSM_SPM_PMIC_PFM_PORT,
+};
+
+struct msm_spm_driver_data {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t ver_reg;
+	uint32_t vctl_port;
+	int vctl_port_ub;
+	uint32_t phase_port;
+	uint32_t pfm_port;
+	void __iomem *reg_base_addr;
+	uint32_t vctl_timeout_us;
+	uint32_t avs_timeout_us;
+	uint32_t reg_shadow[MSM_SPM_REG_NR];
+	uint32_t *reg_seq_entry_shadow;
+	uint32_t *reg_offsets;
+};
+
+int msm_spm_drv_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data);
+int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data);
+void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq);
+int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
+		uint32_t ctl);
+int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev,
+		unsigned int vlevel);
+void dump_regs(struct msm_spm_driver_data *dev, int cpu);
+uint32_t msm_spm_drv_get_sts_curr_pmic_data(
+		struct msm_spm_driver_data *dev);
+int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
+		uint8_t *cmd, uint32_t *offset);
+void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev);
+int msm_spm_drv_set_spm_enable(struct msm_spm_driver_data *dev,
+		bool enable);
+int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port, unsigned int data);
+
+int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev,
+		 uint32_t min_lvl, uint32_t max_lvl);
+
+int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev,
+		 bool enable);
+int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev);
+
+int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq, bool enable);
+int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev,
+		enum msm_spm_avs_irq irq);
+
+void msm_spm_reinit(void);
+int msm_spm_init(struct msm_spm_platform_data *data, int nr_devs);
+void msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id,
+		int val);
+uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev);
+#endif
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8f7d544..3c2bd4c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -108,7 +108,7 @@
 obj-$(CONFIG_SPI_XLP)			+= spi-xlp.o
 obj-$(CONFIG_SPI_XTENSA_XTFPGA)		+= spi-xtensa-xtfpga.o
 obj-$(CONFIG_SPI_ZYNQMP_GQSPI)		+= spi-zynqmp-gqspi.o
-
+obj-$(CONFIG_SPI_QUP)			+= spi_qsd.o
 # SPI slave protocol handlers
 obj-$(CONFIG_SPI_SLAVE_TIME)		+= spi-slave-time.o
 obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL)	+= spi-slave-system-control.o
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 00bfed4..63556d1 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -1108,12 +1108,30 @@
 				"Failed to cancel/abort m_cmd\n");
 	}
 	if (mas->cur_xfer_mode == SE_DMA) {
-		if (xfer->tx_buf)
+		if (xfer->tx_buf) {
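+			/*
+			 * Reset the TX DMA FSM after the cancel/abort above;
+			 * xfer_done signals once the reset takes effect.
+			 */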
+			reinit_completion(&mas->xfer_done);
+			writel_relaxed(1, mas->base + SE_DMA_TX_FSM_RST);
+			timeout = wait_for_completion_timeout(
+					&mas->xfer_done, HZ);
+			if (!timeout)
+				dev_err(mas->dev, "DMA TX RESET failed\n");
 			geni_se_tx_dma_unprep(mas->wrapper_dev,
-					xfer->tx_dma, xfer->len);
-		if (xfer->rx_buf)
+				xfer->tx_dma, xfer->len);
+		}
+		if (xfer->rx_buf) {
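+			/* Same FSM reset handshake for the RX path */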
+			reinit_completion(&mas->xfer_done);
+			writel_relaxed(1, mas->base + SE_DMA_RX_FSM_RST);
+			timeout = wait_for_completion_timeout(
+					&mas->xfer_done, HZ);
+			if (!timeout)
+				dev_err(mas->dev, "DMA RX RESET failed\n");
 			geni_se_rx_dma_unprep(mas->wrapper_dev,
-					xfer->rx_dma, xfer->len);
+				xfer->rx_dma, xfer->len);
+		}
 	}
 
 }
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
new file mode 100644
index 0000000..aa3e69c
--- /dev/null
+++ b/drivers/spi/spi_qsd.c
@@ -0,0 +1,2768 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2008-2018, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SPI driver for Qualcomm Technologies, Inc. MSM platforms
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/qcom-spi.h>
+#include <linux/msm-sps.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "spi_qsd.h"
+
+#define SPI_MAX_BYTES_PER_WORD			(4)
+
+static int msm_spi_pm_resume_runtime(struct device *device);
+static int msm_spi_pm_suspend_runtime(struct device *device);
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
+static int get_local_resources(struct msm_spi *dd);
+static void put_local_resources(struct msm_spi *dd);
+
+static inline int msm_spi_configure_gsbi(struct msm_spi *dd,
+					struct platform_device *pdev)
+{
+	struct resource *resource;
+	unsigned long   gsbi_mem_phys_addr;
+	size_t          gsbi_mem_size;
+	void __iomem    *gsbi_base;
+
+	resource  = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!resource)
+		return 0;
+
+	gsbi_mem_phys_addr = resource->start;
+	gsbi_mem_size = resource_size(resource);
+	if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr,
+					gsbi_mem_size, SPI_DRV_NAME))
+		return -ENXIO;
+
+	gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr,
+					gsbi_mem_size);
+	if (!gsbi_base)
+		return -ENXIO;
+
+	/* Set GSBI to SPI mode */
+	writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG);
+
+	return 0;
+}
+
+static inline void msm_spi_register_init(struct msm_spi *dd)
+{
+	writel_relaxed(0x00000001, dd->base + SPI_SW_RESET);
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL);
+	writel_relaxed(0x00000000, dd->base + SPI_CONFIG);
+	writel_relaxed(0x00000000, dd->base + SPI_IO_MODES);
+	if (dd->qup_ver)
+		writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK);
+}
+
+static int msm_spi_pinctrl_init(struct msm_spi *dd)
+{
+	dd->pinctrl = devm_pinctrl_get(dd->dev);
+	if (IS_ERR_OR_NULL(dd->pinctrl)) {
+		dev_err(dd->dev, "Failed to get pin ctrl\n");
+		return PTR_ERR(dd->pinctrl);
+	}
+	dd->pins_active = pinctrl_lookup_state(dd->pinctrl,
+				SPI_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(dd->pins_active)) {
+		dev_err(dd->dev, "Failed to lookup pinctrl default state\n");
+		return PTR_ERR(dd->pins_active);
+	}
+
+	dd->pins_sleep = pinctrl_lookup_state(dd->pinctrl,
+				SPI_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(dd->pins_sleep)) {
+		dev_err(dd->dev, "Failed to lookup pinctrl sleep state\n");
+		return PTR_ERR(dd->pins_sleep);
+	}
+
+	return 0;
+}
+
+static inline int msm_spi_request_gpios(struct msm_spi *dd)
+{
+	int i = 0;
+	int result = 0;
+
+	if (!dd->pdata->use_pinctrl) {
+		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+			if (dd->spi_gpios[i] >= 0) {
+				result = gpio_request(dd->spi_gpios[i],
+						spi_rsrcs[i]);
+				if (result) {
+					dev_err(dd->dev,
+					"error %d gpio_request for pin %d\n",
+					result, dd->spi_gpios[i]);
+					goto error;
+				}
+			}
+		}
+	} else {
+		result = pinctrl_select_state(dd->pinctrl, dd->pins_active);
+		if (result) {
+			dev_err(dd->dev, "%s: Can not set %s pins\n",
+			__func__, SPI_PINCTRL_STATE_DEFAULT);
+			goto error;
+		}
+	}
+	return 0;
+error:
+	if (!dd->pdata->use_pinctrl) {
+		for (; --i >= 0;) {
+			if (dd->spi_gpios[i] >= 0)
+				gpio_free(dd->spi_gpios[i]);
+		}
+	}
+	return result;
+}
+
+static inline void msm_spi_free_gpios(struct msm_spi *dd)
+{
+	int i;
+	int result = 0;
+
+	if (!dd->pdata->use_pinctrl) {
+		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+			if (dd->spi_gpios[i] >= 0)
+				gpio_free(dd->spi_gpios[i]);
+		}
+
+		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
+			if (dd->cs_gpios[i].valid) {
+				gpio_free(dd->cs_gpios[i].gpio_num);
+				dd->cs_gpios[i].valid = false;
+			}
+		}
+	} else {
+		result = pinctrl_select_state(dd->pinctrl, dd->pins_sleep);
+		if (result)
+			dev_err(dd->dev, "%s: Can not set %s pins\n",
+			__func__, SPI_PINCTRL_STATE_SLEEP);
+	}
+}
+
+static inline int msm_spi_request_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+	int rc;
+
+	cs_num = dd->spi->chip_select;
+	if (!(dd->spi->mode & SPI_LOOP)) {
+		if (!dd->pdata->use_pinctrl) {
+			if ((!(dd->cs_gpios[cs_num].valid)) &&
+				(dd->cs_gpios[cs_num].gpio_num >= 0)) {
+				rc = gpio_request(dd->cs_gpios[cs_num].gpio_num,
+					spi_cs_rsrcs[cs_num]);
+
+				if (rc) {
+					dev_err(dd->dev,
+					"gpio_request for pin %d failed,error %d\n",
+					dd->cs_gpios[cs_num].gpio_num, rc);
+					return rc;
+				}
+				dd->cs_gpios[cs_num].valid = true;
+			}
+		}
+	}
+	return 0;
+}
+
+static inline void msm_spi_free_cs_gpio(struct msm_spi *dd)
+{
+	int cs_num;
+
+	cs_num = dd->spi->chip_select;
+	if (!dd->pdata->use_pinctrl) {
+		if (dd->cs_gpios[cs_num].valid) {
+			gpio_free(dd->cs_gpios[cs_num].gpio_num);
+			dd->cs_gpios[cs_num].valid = false;
+		}
+	}
+}
+
+/**
+ * msm_spi_clk_max_rate: find the nearest lower supported rate for a clock
+ * @clk: the clock for which to find the nearest lower rate
+ * @rate: desired clock frequency in Hz
+ * @return nearest lower rate, or a negative error value
+ *
+ * The public clock API only provides clk_round_rate(), which is a ceiling
+ * function. This function is the matching floor function, implemented as a
+ * binary search on top of the ceiling function.
+ */
+static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate)
+{
+	long lowest_available, nearest_low, step_size, cur;
+	long step_direction = -1;
+	long guess = rate;
+	int  max_steps = 10;
+
+	cur =  clk_round_rate(clk, rate);
+	if (cur == rate)
+		return rate;
+
+	/* if we got here then: cur > rate */
+	lowest_available =  clk_round_rate(clk, 0);
+	if (lowest_available > rate)
+		return -EINVAL;
+
+	step_size = (rate - lowest_available) >> 1;
+	nearest_low = lowest_available;
+
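+	/*
+	 * Example (hypothetical rates): for rate = 48 MHz with supported
+	 * rates {19.2, 25, 50} MHz, clk_round_rate(48 MHz) returns 50 MHz,
+	 * so the search below steps downward and settles on 25 MHz, the
+	 * fastest rate that does not exceed the request.
+	 */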
+	while (max_steps-- && step_size) {
+		guess += step_size * step_direction;
+
+		cur =  clk_round_rate(clk, guess);
+
+		if ((cur < rate) && (cur > nearest_low))
+			nearest_low = cur;
+
+		/*
+		 * if we stepped too far, then start stepping in the other
+		 * direction with half the step size
+		 */
+		if (((cur > rate) && (step_direction > 0))
+		 || ((cur < rate) && (step_direction < 0))) {
+			step_direction = -step_direction;
+			step_size >>= 1;
+		}
+	}
+	return nearest_low;
+}
+
+static void msm_spi_clock_set(struct msm_spi *dd, int speed)
+{
+	long rate;
+	int rc;
+
+	rate = msm_spi_clk_max_rate(dd->clk, speed);
+	if (rate < 0) {
+		dev_err(dd->dev,
+		"%s: no match found for requested clock frequency:%d\n",
+			__func__, speed);
+		return;
+	}
+
+	rc = clk_set_rate(dd->clk, rate);
+	if (!rc)
+		dd->clock_speed = rate;
+}
+
+static void msm_spi_clk_path_vote(struct msm_spi *dd, u32 rate)
+{
+	if (dd->bus_cl_hdl) {
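+		/* vote instantaneous bandwidth (ib) only; the average
+		 * bandwidth vote stays at zero
+		 */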
+		u64 ib = rate * dd->pdata->bus_width;
+
+		msm_bus_scale_update_bw(dd->bus_cl_hdl, 0, ib);
+	}
+}
+
+static void msm_spi_clk_path_teardown(struct msm_spi *dd)
+{
+	msm_spi_clk_path_vote(dd, 0);
+
+	if (dd->bus_cl_hdl) {
+		msm_bus_scale_unregister(dd->bus_cl_hdl);
+		dd->bus_cl_hdl = NULL;
+	}
+}
+
+/**
+ * msm_spi_clk_path_postponed_register: register with bus scaling once the
+ * bus scaling driver has probed
+ *
+ * @return zero on success
+ *
+ * Workaround: the SPI driver may be probed before the bus scaling driver.
+ * Calling msm_bus_scale_register() will fail if the bus scaling driver is
+ * not ready yet, so this function must be called from a later context
+ * rather than from probe. It may also be called more than once before
+ * registration succeeds, in which case only one error message is logged.
+ * At boot time all clocks are on, so earlier SPI transactions should
+ * succeed regardless.
+ */
+static int msm_spi_clk_path_postponed_register(struct msm_spi *dd)
+{
+	int ret = 0;
+
+	dd->bus_cl_hdl = msm_bus_scale_register(dd->pdata->master_id,
+						MSM_BUS_SLAVE_EBI_CH0,
+						(char *)dev_name(dd->dev),
+						false);
+
+	if (IS_ERR_OR_NULL(dd->bus_cl_hdl)) {
+		ret = (dd->bus_cl_hdl ? PTR_ERR(dd->bus_cl_hdl) : -EAGAIN);
+		dev_err(dd->dev, "Failed bus registration Err %d\n", ret);
+	}
+
+	return ret;
+}
+
+static void msm_spi_clk_path_init(struct msm_spi *dd)
+{
+	/*
+	 * Bail out if path voting is disabled (master_id == 0) or if the
+	 * client is already registered (bus_cl_hdl != NULL)
+	 */
+	if (!dd->pdata->master_id || dd->bus_cl_hdl)
+		return;
+
+	/* on failure try again later */
+	if (msm_spi_clk_path_postponed_register(dd))
+		return;
+
+}
+
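+/*
+ * Decode the FIFO geometry advertised in SPI_IO_MODES: the block size is
+ * encoded as a word count (1, 4 or 8 words of 4 bytes) and the FIFO size
+ * as a multiple of that block. For example, block = 2 and mult = 1 decode
+ * to a 32-byte block and a FIFO of 8 * 4 = 32 words.
+ */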
+static int msm_spi_calculate_size(int *fifo_size,
+				  int *block_size,
+				  int block,
+				  int mult)
+{
+	int words;
+
+	switch (block) {
+	case 0:
+		words = 1; /* 4 bytes */
+		break;
+	case 1:
+		words = 4; /* 16 bytes */
+		break;
+	case 2:
+		words = 8; /* 32 bytes */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (mult) {
+	case 0:
+		*fifo_size = words * 2;
+		break;
+	case 1:
+		*fifo_size = words * 4;
+		break;
+	case 2:
+		*fifo_size = words * 8;
+		break;
+	case 3:
+		*fifo_size = words * 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*block_size = words * sizeof(u32); /* in bytes */
+	return 0;
+}
+
+static void msm_spi_calculate_fifo_size(struct msm_spi *dd)
+{
+	u32 spi_iom;
+	int block;
+	int mult;
+
+	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+
+	block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT;
+	mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT;
+	if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size,
+				   block, mult)) {
+		goto fifo_size_err;
+	}
+
+	block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT;
+	mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT;
+	if (msm_spi_calculate_size(&dd->output_fifo_size,
+				   &dd->output_block_size, block, mult)) {
+		goto fifo_size_err;
+	}
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+		/* DM mode is not available for this block size */
+		if (dd->input_block_size == 4 || dd->output_block_size == 4)
+			dd->use_dma = false;
+
+		if (dd->use_dma) {
+			dd->input_burst_size = max(dd->input_block_size,
+						DM_BURST_SIZE);
+			dd->output_burst_size = max(dd->output_block_size,
+						DM_BURST_SIZE);
+		}
+	}
+
+	return;
+
+fifo_size_err:
+	dd->use_dma = false;
+	pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom);
+}
+
+static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
+{
+	u32   data_in;
+	int   i;
+	int   shift;
+	int   read_bytes = (dd->pack_words ?
+				SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
+
+	data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
+	if (dd->read_buf) {
+		for (i = 0; (i < read_bytes) &&
+			     dd->rx_bytes_remaining; i++) {
+			/* The data format depends on bytes_per_word:
+			 * 4 bytes: 0x12345678
+			 * 3 bytes: 0x00123456
+			 * 2 bytes: 0x00001234
+			 * 1 byte : 0x00000012
+			 */
+			shift = BITS_PER_BYTE * i;
+			*dd->read_buf++ = (data_in & (0xFF << shift)) >> shift;
+			dd->rx_bytes_remaining--;
+		}
+	} else {
+		if (dd->rx_bytes_remaining >= read_bytes)
+			dd->rx_bytes_remaining -= read_bytes;
+		else
+			dd->rx_bytes_remaining = 0;
+	}
+
+	dd->read_xfr_cnt++;
+}
+
+static inline bool msm_spi_is_valid_state(struct msm_spi *dd)
+{
+	u32 spi_op = readl_relaxed(dd->base + SPI_STATE);
+
+	return spi_op & SPI_OP_STATE_VALID;
+}
+
+static inline void msm_spi_udelay(unsigned int delay_usecs)
+{
+	/*
+	 * For smaller values of delay, context switch time
+	 * would negate the usage of usleep
+	 */
+	if (delay_usecs > 20)
+		usleep_range(delay_usecs, delay_usecs + 1);
+	else if (delay_usecs)
+		udelay(delay_usecs);
+}
+
+static inline int msm_spi_wait_valid(struct msm_spi *dd)
+{
+	unsigned int delay = 0;
+	unsigned long timeout = 0;
+
+	if (dd->clock_speed == 0)
+		return -EINVAL;
+	/*
+	 * Based on the SPI clock speed, sufficient time
+	 * should be given for the SPI state transition
+	 * to occur
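+	 * (ten SPI clock periods; at a 1 MHz clock this is 10 us)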
+	 */
+	delay = (10 * USEC_PER_SEC) / dd->clock_speed;
+	/*
+	 * For small delay values, the default timeout would
+	 * be one jiffy
+	 */
+	if (delay < SPI_DELAY_THRESHOLD)
+		delay = SPI_DELAY_THRESHOLD;
+
+	/* Adding one to round off to the nearest jiffy */
+	timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1;
+	while (!msm_spi_is_valid_state(dd)) {
+		if (time_after(jiffies, timeout)) {
+			if (!msm_spi_is_valid_state(dd)) {
+				dev_err(dd->dev, "Invalid SPI operational state\n");
+				return -ETIMEDOUT;
+			}
+			return 0;
+		}
+		msm_spi_udelay(delay);
+	}
+	return 0;
+}
+
+static inline int msm_spi_set_state(struct msm_spi *dd,
+				    enum msm_spi_state state)
+{
+	enum msm_spi_state cur_state;
+
+	if (msm_spi_wait_valid(dd))
+		return -EIO;
+	cur_state = readl_relaxed(dd->base + SPI_STATE);
+	/* Per spec:
+	 * For PAUSE_STATE to RESET_STATE, two writes of (10) are required
+	 */
+	if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) &&
+			(state == SPI_OP_STATE_RESET)) {
+		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+		writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE);
+	} else {
+		writel_relaxed((cur_state & ~SPI_OP_STATE) | state,
+		       dd->base + SPI_STATE);
+	}
+	if (msm_spi_wait_valid(dd))
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags
+ */
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
+{
+	*config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT);
+
+	if (n != (*config & SPI_CFG_N))
+		*config = (*config & ~SPI_CFG_N) | n;
+
+	if (dd->tx_mode == SPI_BAM_MODE) {
+		if (dd->read_buf == NULL)
+			*config |= SPI_NO_INPUT;
+		if (dd->write_buf == NULL)
+			*config |= SPI_NO_OUTPUT;
+	}
+}
+
+/**
+ * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that
+ * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags
+ * @return calculated value for SPI_CONFIG
+ */
+static u32
+msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode)
+{
+	if (mode & SPI_LOOP)
+		spi_config |= SPI_CFG_LOOPBACK;
+	else
+		spi_config &= ~SPI_CFG_LOOPBACK;
+
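+	/*
+	 * With CPHA = 0 (SPI modes 0 and 2) data is sampled on the leading
+	 * clock edge, so the controller must latch input before it drives
+	 * output; with CPHA = 1 output is shifted first.
+	 */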
+	if (mode & SPI_CPHA)
+		spi_config &= ~SPI_CFG_INPUT_FIRST;
+	else
+		spi_config |= SPI_CFG_INPUT_FIRST;
+
+	return spi_config;
+}
+
+/**
+ * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the
+ * next transfer
+ */
+static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
+{
+	u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+
+	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+					spi_config, dd->spi->mode);
+
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE)
+		/* flags removed from SPI_CONFIG in QUP version-2 */
+		msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1);
+
+	/*
+	 * HS_MODE improves signal stability for spi-clk high rates
+	 * but is invalid in LOOPBACK mode.
+	 */
+	if ((dd->clock_speed >= SPI_HS_MIN_RATE) &&
+	   !(dd->spi->mode & SPI_LOOP))
+		spi_config |= SPI_CFG_HS_MODE;
+	else
+		spi_config &= ~SPI_CFG_HS_MODE;
+
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+}
+
+/**
+ * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and the write count for
+ * FIFO mode; set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for BAM and
+ * DMOV modes.
+ * @n_words: the number of reads/writes of size N
+ */
+static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
+{
+	/*
+	 * For FIFO mode:
+	 *   - Set the MX_OUTPUT_COUNT/MX_INPUT_COUNT registers to 0
+	 *   - Set the READ/WRITE_COUNT registers to 0 (infinite mode)
+	 *     or num bytes (finite mode) if less than fifo worth of data.
+	 * For Block mode:
+	 *  - Set the MX_OUTPUT/MX_INPUT_COUNT registers to num xfer bytes.
+	 *  - Set the READ/WRITE_COUNT registers to 0.
+	 */
+	if (dd->tx_mode != SPI_BAM_MODE) {
+		if (dd->tx_mode == SPI_FIFO_MODE) {
+			if (n_words <= dd->input_fifo_size)
+				msm_spi_set_write_count(dd, n_words);
+			else
+				msm_spi_set_write_count(dd, 0);
+			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+		} else
+			writel_relaxed(n_words, dd->base + SPI_MX_OUTPUT_COUNT);
+
+		if (dd->rx_mode == SPI_FIFO_MODE) {
+			if (n_words <= dd->input_fifo_size)
+				writel_relaxed(n_words,
+						dd->base + SPI_MX_READ_COUNT);
+			else
+				writel_relaxed(0,
+						dd->base + SPI_MX_READ_COUNT);
+			writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
+		} else
+			writel_relaxed(n_words, dd->base + SPI_MX_INPUT_COUNT);
+	} else {
+		/* must be zero for BAM and DMOV */
+		writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+		msm_spi_set_write_count(dd, 0);
+
+		/*
+		 * For DMA transfers, both QUP_MX_INPUT_COUNT and
+		 * QUP_MX_OUTPUT_COUNT must be zero in all cases but one.
+		 * That case is a non-balanced transfer when there is
+		 * only a read_buf.
+		 */
+		if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+			if (dd->write_buf)
+				writel_relaxed(0,
+						dd->base + SPI_MX_INPUT_COUNT);
+			else
+				writel_relaxed(n_words,
+						dd->base + SPI_MX_INPUT_COUNT);
+
+			writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+		}
+	}
+}
+
+static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd,
+						struct msm_spi_bam_pipe  *pipe)
+{
+	int ret = sps_disconnect(pipe->handle);
+
+	if (ret) {
+		dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n",
+			__func__, pipe->name);
+		return ret;
+	}
+	return 0;
+}
+
+static int msm_spi_bam_pipe_connect(struct msm_spi *dd,
+		struct msm_spi_bam_pipe  *pipe, struct sps_connect *config)
+{
+	int ret;
+	struct sps_register_event event  = {
+		.mode      = SPS_TRIGGER_WAIT,
+		.options   = SPS_O_EOT,
+	};
+
+	if (pipe == &dd->bam.prod)
+		event.xfer_done = &dd->rx_transfer_complete;
+	else if (pipe == &dd->bam.cons)
+		event.xfer_done = &dd->tx_transfer_complete;
+
+	ret = sps_connect(pipe->handle, config);
+	if (ret) {
+		dev_err(dd->dev, "%s: sps_connect(%s:0x%pK):%d\n",
+				__func__, pipe->name, pipe->handle, ret);
+		return ret;
+	}
+
+	ret = sps_register_event(pipe->handle, &event);
+	if (ret) {
+		dev_err(dd->dev, "%s sps_register_event(hndl:0x%pK %s):%d\n",
+				__func__, pipe->handle, pipe->name, ret);
+		msm_spi_bam_pipe_disconnect(dd, pipe);
+		return ret;
+	}
+
+	pipe->teardown_required = true;
+	return 0;
+}
+
+static void msm_spi_bam_pipe_flush(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	struct sps_connect           config  = pipe->config;
+	int    ret;
+
+	ret = msm_spi_bam_pipe_disconnect(dd, pipe);
+	if (ret)
+		return;
+
+	ret = msm_spi_bam_pipe_connect(dd, pipe, &config);
+	if (ret)
+		return;
+}
+
+static void msm_spi_bam_flush(struct msm_spi *dd)
+{
+	dev_dbg(dd->dev, "%s flushing bam for recovery\n", __func__);
+
+	msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE);
+	msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE);
+}
+
+static int
+msm_spi_bam_process_rx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 prod_flags = 0;
+
+	rem_bc = dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
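+	 * (EOT raises a transfer-completion event; NWD, notify-when-done,
+	 * defers it until the peripheral has consumed the data)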
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		prod_flags = (dd->write_buf)
+			? 0 : (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+
+	/*
+	 * enqueue read buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.prod.handle,
+			dd->cur_rx_transfer->rx_dma
+				+ dd->bam.curr_rx_bytes_recvd,
+			data_xfr_size, dd, prod_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue producer BAM transfer\n",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_rx_bytes_recvd += data_xfr_size;
+	*bytes_to_send -= data_xfr_size;
+	dd->bam.bam_rx_len -= data_xfr_size;
+	return data_xfr_size;
+}
+
+static int
+msm_spi_bam_process_tx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt)
+{
+	int ret = 0;
+	u32 data_xfr_size = 0, rem_bc = 0;
+	u32 cons_flags = 0;
+
+	rem_bc = dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent;
+	data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send;
+
+	/*
+	 * set flags for last descriptor only
+	 */
+	if ((desc_cnt == 1)
+		|| (*bytes_to_send == data_xfr_size))
+		cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD;
+
+	/*
+	 * enqueue write buffer in BAM
+	 */
+	ret = sps_transfer_one(dd->bam.cons.handle,
+			dd->cur_tx_transfer->tx_dma
+				+ dd->bam.curr_tx_bytes_sent,
+			data_xfr_size, dd, cons_flags);
+	if (ret < 0) {
+		dev_err(dd->dev,
+		"%s: Failed to queue consumer BAM transfer\n",
+		__func__);
+		return ret;
+	}
+
+	dd->bam.curr_tx_bytes_sent	+= data_xfr_size;
+	*bytes_to_send	-= data_xfr_size;
+	dd->bam.bam_tx_len -= data_xfr_size;
+	return data_xfr_size;
+}
+
+/**
+ * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes
+ * using BAM.
+ *
+ * BAM can move at most SPI_MAX_TRFR_BTWN_RESETS bytes in a single
+ * transfer, and the QUP must pass through the reset state between
+ * transfers, so this function loops, issuing one BAM transfer at a time.
+ * @return zero on success
+ */
+static int
+msm_spi_bam_begin_transfer(struct msm_spi *dd)
+{
+	u32 tx_bytes_to_send = 0, rx_bytes_to_recv = 0;
+	u32 n_words_xfr;
+	s32 ret = 0;
+	u32 prod_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 cons_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1;
+	u32 byte_count = 0;
+
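+	/*
+	 * One descriptor slot is held back from each pipe (hence
+	 * SPI_BAM_MAX_DESC_NUM - 1) so the descriptor FIFO is never
+	 * completely filled; a full ring is indistinguishable from an
+	 * empty one.
+	 */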
+	rx_bytes_to_recv = min_t(u32, dd->bam.bam_rx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	tx_bytes_to_send = min_t(u32, dd->bam.bam_tx_len,
+				SPI_MAX_TRFR_BTWN_RESETS);
+	n_words_xfr = DIV_ROUND_UP(rx_bytes_to_recv,
+				dd->bytes_per_word);
+
+	msm_spi_set_mx_counts(dd, n_words_xfr);
+	ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN);
+	if (ret < 0) {
+		dev_err(dd->dev,
+			"%s: Failed to set QUP state to run\n",
+			__func__);
+		goto xfr_err;
+	}
+
+	while ((rx_bytes_to_recv + tx_bytes_to_send) &&
+		((cons_desc_cnt + prod_desc_cnt) > 0)) {
+		struct spi_transfer *t = NULL;
+
+		if (dd->read_buf && (prod_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_rx(dd, &rx_bytes_to_recv,
+							prod_desc_cnt);
+			if (ret < 0)
+				goto xfr_err;
+
+			if (!(dd->cur_rx_transfer->len
+				- dd->bam.curr_rx_bytes_recvd))
+				t = dd->cur_rx_transfer;
+			prod_desc_cnt--;
+		}
+
+		if (dd->write_buf && (cons_desc_cnt > 0)) {
+			ret = msm_spi_bam_process_tx(dd, &tx_bytes_to_send,
+							cons_desc_cnt);
+			if (ret < 0)
+				goto xfr_err;
+
+			if (!(dd->cur_tx_transfer->len
+				- dd->bam.curr_tx_bytes_sent))
+				t = dd->cur_tx_transfer;
+			cons_desc_cnt--;
+		}
+
+		byte_count += ret;
+	}
+
+	dd->tx_bytes_remaining -= min_t(u32, byte_count,
+						SPI_MAX_TRFR_BTWN_RESETS);
+	return 0;
+xfr_err:
+	return ret;
+}
+
+static int
+msm_spi_bam_next_transfer(struct msm_spi *dd)
+{
+	if (dd->tx_mode != SPI_BAM_MODE)
+		return 0;
+
+	if (dd->tx_bytes_remaining > 0) {
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RESET))
+			return 0;
+		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
+			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+				__func__);
+			return 0;
+		}
+		return 1;
+	}
+	return 0;
+}
+
+static int msm_spi_dma_send_next(struct msm_spi *dd)
+{
+	int ret = 0;
+
+	if (dd->tx_mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_next_transfer(dd);
+	return ret;
+}
+
+static inline void msm_spi_ack_transfer(struct msm_spi *dd)
+{
+	writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG |
+		       SPI_OP_MAX_OUTPUT_DONE_FLAG,
+		       dd->base + SPI_OPERATIONAL);
+	/* Ensure done flag was cleared before proceeding further */
+	mb();
+}
+
+/* Figure which irq occurred and call the relevant functions */
+static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
+{
+	u32 op, ret = IRQ_NONE;
+	struct msm_spi *dd = dev_id;
+
+	if (pm_runtime_suspended(dd->dev)) {
+		dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq);
+		return ret;
+	}
+	if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) ||
+	    readl_relaxed(dd->base + QUP_ERROR_FLAGS)) {
+		struct spi_master *master = dev_get_drvdata(dd->dev);
+
+		ret |= msm_spi_error_irq(irq, master);
+	}
+
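+	/*
+	 * The service flags in SPI_OPERATIONAL are write-one-to-clear;
+	 * writing back the value just read acknowledges all flags that
+	 * were raised.
+	 */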
+	op = readl_relaxed(dd->base + SPI_OPERATIONAL);
+	writel_relaxed(op, dd->base + SPI_OPERATIONAL);
+	/*
+	 * Ensure service flag was cleared before further
+	 * processing of interrupt.
+	 */
+	mb();
+	if (op & SPI_OP_INPUT_SERVICE_FLAG)
+		ret |= msm_spi_input_irq(irq, dev_id);
+
+	if (op & SPI_OP_OUTPUT_SERVICE_FLAG)
+		ret |= msm_spi_output_irq(irq, dev_id);
+
+	if (dd->tx_mode != SPI_BAM_MODE) {
+		if (!dd->rx_done) {
+			if (dd->rx_bytes_remaining == 0)
+				dd->rx_done = true;
+		}
+		if (!dd->tx_done) {
+			if (!dd->tx_bytes_remaining &&
+					(op & SPI_OP_IP_FIFO_NOT_EMPTY)) {
+				dd->tx_done = true;
+			}
+		}
+	}
+	if (dd->tx_done && dd->rx_done) {
+		msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+		dd->tx_done = false;
+		dd->rx_done = false;
+		complete(&dd->rx_transfer_complete);
+		complete(&dd->tx_transfer_complete);
+	}
+	return ret;
+}
+
+static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
+{
+	struct msm_spi	       *dd = dev_id;
+
+	dd->stat_rx++;
+
+	if (dd->rx_mode == SPI_MODE_NONE)
+		return IRQ_HANDLED;
+
+	if (dd->rx_mode == SPI_FIFO_MODE) {
+		while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
+			SPI_OP_IP_FIFO_NOT_EMPTY) &&
+			(dd->rx_bytes_remaining > 0)) {
+			msm_spi_read_word_from_fifo(dd);
+		}
+	} else if (dd->rx_mode == SPI_BLOCK_MODE) {
+		int count = 0;
+
+		while (dd->rx_bytes_remaining &&
+				(count < dd->input_block_size)) {
+			msm_spi_read_word_from_fifo(dd);
+			count += SPI_MAX_BYTES_PER_WORD;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
+{
+	u32    word;
+	u8     byte;
+	int    i;
+	int   write_bytes =
+		(dd->pack_words ? SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
+
+	word = 0;
+	if (dd->write_buf) {
+		for (i = 0; (i < write_bytes) &&
+			     dd->tx_bytes_remaining; i++) {
+			dd->tx_bytes_remaining--;
+			byte = *dd->write_buf++;
+			word |= (byte << (BITS_PER_BYTE * i));
+		}
+	} else
+		if (dd->tx_bytes_remaining > write_bytes)
+			dd->tx_bytes_remaining -= write_bytes;
+		else
+			dd->tx_bytes_remaining = 0;
+	dd->write_xfr_cnt++;
+
+	writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO);
+}
+
+static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
+{
+	int count = 0;
+
+	if (dd->tx_mode == SPI_FIFO_MODE) {
+		while ((dd->tx_bytes_remaining > 0) &&
+			(count < dd->input_fifo_size) &&
+		       !(readl_relaxed(dd->base + SPI_OPERATIONAL)
+						& SPI_OP_OUTPUT_FIFO_FULL)) {
+			msm_spi_write_word_to_fifo(dd);
+			count++;
+		}
+	}
+
+	if (dd->tx_mode == SPI_BLOCK_MODE) {
+		while (dd->tx_bytes_remaining &&
+				(count < dd->output_block_size)) {
+			msm_spi_write_word_to_fifo(dd);
+			count += SPI_MAX_BYTES_PER_WORD;
+		}
+	}
+}
+
+static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
+{
+	struct msm_spi	       *dd = dev_id;
+
+	dd->stat_tx++;
+
+	if (dd->tx_mode == SPI_MODE_NONE)
+		return IRQ_HANDLED;
+
+	/* Output FIFO is empty. Transmit any outstanding write data. */
+	if ((dd->tx_mode == SPI_FIFO_MODE) || (dd->tx_mode == SPI_BLOCK_MODE))
+		msm_spi_write_rmn_to_fifo(dd);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t msm_spi_error_irq(int irq, void *dev_id)
+{
+	struct spi_master	*master = dev_id;
+	struct msm_spi          *dd = spi_master_get_devdata(master);
+	u32                      spi_err;
+
+	spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS);
+	if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI output overrun error\n");
+	if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI input underrun error\n");
+	if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI output underrun error\n");
+	msm_spi_get_clk_err(dd, &spi_err);
+	if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI clock overrun error\n");
+	if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR)
+		dev_warn(master->dev.parent, "SPI clock underrun error\n");
+	msm_spi_clear_error_flags(dd);
+	msm_spi_ack_clk_err(dd);
+	/* Ensure clearing of QUP_ERROR_FLAGS was completed */
+	mb();
+	return IRQ_HANDLED;
+}
+
+static int msm_spi_bam_map_buffers(struct msm_spi *dd)
+{
+	int ret = -EINVAL;
+	struct device *dev;
+	struct spi_transfer *xfr;
+	void *tx_buf, *rx_buf;
+	u32 tx_len, rx_len;
+
+	dev = dd->dev;
+	xfr = dd->cur_transfer;
+
+	tx_buf = (void *)xfr->tx_buf;
+	rx_buf = xfr->rx_buf;
+	tx_len = rx_len = xfr->len;
+	if (tx_buf != NULL) {
+		xfr->tx_dma = dma_map_single(dev, tx_buf,
+						tx_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, xfr->tx_dma)) {
+			ret = -ENOMEM;
+			goto error;
+		}
+	}
+
+	if (rx_buf != NULL) {
+		xfr->rx_dma = dma_map_single(dev, rx_buf, rx_len,
+						DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, xfr->rx_dma)) {
+			if (tx_buf != NULL)
+				dma_unmap_single(dev,
+						xfr->tx_dma,
+						tx_len, DMA_TO_DEVICE);
+			ret = -ENOMEM;
+			goto error;
+		}
+	}
+
+	return 0;
+error:
+	msm_spi_dma_unmap_buffers(dd);
+	return ret;
+}
+
+static int msm_spi_dma_map_buffers(struct msm_spi *dd)
+{
+	int ret = 0;
+
+	if (dd->tx_mode == SPI_BAM_MODE)
+		ret = msm_spi_bam_map_buffers(dd);
+	return ret;
+}
+
+static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
+{
+	struct device *dev;
+	struct spi_transfer *xfr;
+	void *tx_buf, *rx_buf;
+	u32  tx_len, rx_len;
+
+	dev = dd->dev;
+	xfr = dd->cur_transfer;
+
+	tx_buf = (void *)xfr->tx_buf;
+	rx_buf = xfr->rx_buf;
+	tx_len = rx_len = xfr->len;
+	if (tx_buf != NULL)
+		dma_unmap_single(dev, xfr->tx_dma,
+				tx_len, DMA_TO_DEVICE);
+
+	if (rx_buf != NULL)
+		dma_unmap_single(dev, xfr->rx_dma,
+				rx_len, DMA_FROM_DEVICE);
+}
+
+static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
+{
+	if (dd->tx_mode == SPI_BAM_MODE)
+		msm_spi_bam_unmap_buffers(dd);
+}
+
+/**
+ * msm_spi_use_dma - decides whether to use Data-Mover or BAM for
+ * the given transfer
+ * @dd:       device
+ * @tr:       transfer
+ *
+ * Use DMA if:
+ * 1. It is supported by the hardware.
+ * 2. It is not disabled by platform data.
+ * 3. The transfer size is greater than 3 * block size.
+ * 4. The buffers are aligned to the cache line.
+ * 5. Bytes-per-word is 8, 16 or 32.
+ */
+static inline bool
+msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw)
+{
+	if (!dd->use_dma)
+		return false;
+
+	/* check constraints from platform data */
+	if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam)
+		return false;
+
+	if (dd->cur_msg_len < 3*dd->input_block_size)
+		return false;
+
+	if ((dd->qup_ver != SPI_QUP_VERSION_BFAM) &&
+		 !dd->read_len && !dd->write_len)
+		return false;
+
+	if (dd->qup_ver == SPI_QUP_VERSION_NONE) {
+		u32 cache_line = dma_get_cache_alignment();
+
+		if (tr->tx_buf) {
+			if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line))
+				return false;
+		}
+		if (tr->rx_buf) {
+			if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line))
+				return false;
+		}
+
+		if (tr->cs_change &&
+		   ((bpw != 8) && (bpw != 16) && (bpw != 32)))
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and
+ * prepares to process a transfer.
+ */
+static void
+msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
+{
+	if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
+		dd->tx_mode = SPI_BAM_MODE;
+		dd->rx_mode = SPI_BAM_MODE;
+	} else {
+		dd->rx_mode = SPI_FIFO_MODE;
+		dd->tx_mode = SPI_FIFO_MODE;
+		dd->read_len = dd->cur_transfer->len;
+		dd->write_len = dd->cur_transfer->len;
+	}
+}
+
+/**
+ * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a
+ * transfer
+ */
+static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
+{
+	u32 spi_iom;
+
+	spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
+	/* Set input and output transfer mode: FIFO, DMOV, or BAM */
+	spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
+	spi_iom = (spi_iom | (dd->tx_mode << OUTPUT_MODE_SHIFT));
+	spi_iom = (spi_iom | (dd->rx_mode << INPUT_MODE_SHIFT));
+
+	/* Always enable packing in BAM mode; in non-BAM mode enable it only
+	 * if bits-per-word is a multiple of 8 and the transfer length is a
+	 * multiple of 4 bytes.
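+	 * For example, a 24-byte transfer at 8 bits-per-word packs four
+	 * bytes into each 32-bit FIFO word.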
+	 */
+	if (dd->tx_mode == SPI_BAM_MODE ||
+		((dd->cur_msg_len % SPI_MAX_BYTES_PER_WORD == 0) &&
+		(dd->cur_transfer->bits_per_word) &&
+		(dd->cur_transfer->bits_per_word <= 32) &&
+		(dd->cur_transfer->bits_per_word % 8 == 0))) {
+		spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
+		dd->pack_words = true;
+	} else {
+		spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
+		spi_iom |= SPI_IO_M_OUTPUT_BIT_SHIFT_EN;
+		dd->pack_words = false;
+	}
+
+	writel_relaxed(spi_iom, dd->base + SPI_IO_MODES);
+}
+
+static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode)
+{
+	if (mode & SPI_CPOL)
+		spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH;
+	else
+		spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH;
+	return spi_ioc;
+}
+
+/**
+ * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the
+ * next transfer
+ * @return the new set value of SPI_IO_CONTROL
+ */
+static u32 msm_spi_set_spi_io_control(struct msm_spi *dd)
+{
+	u32 spi_ioc, spi_ioc_orig, chip_select;
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc_orig = spi_ioc;
+	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc
+						, dd->spi->mode);
+	/* Set chip-select */
+	chip_select = dd->spi->chip_select << 2;
+	if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select)
+		spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select;
+	if (!dd->cur_transfer->cs_change)
+		spi_ioc |= SPI_IO_C_MX_CS_MODE;
+
+	if (spi_ioc != spi_ioc_orig)
+		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+	/*
+	 * Ensure that the IO control mode register gets written
+	 * before proceeding with the transfer.
+	 */
+	mb();
+	return spi_ioc;
+}
+
+/**
+ * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process
+ * the next transfer
+ */
+static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
+{
+	/* Mask the INPUT and OUTPUT service flags to prevent IRQs on FIFO
+	 * status changes in BAM mode.
+	 */
+	u32 mask = (dd->tx_mode == SPI_BAM_MODE) ?
+		QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
+		: 0;
+	writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
+}
+
+static void get_transfer_length(struct msm_spi *dd)
+{
+	struct spi_transfer *xfer = dd->cur_transfer;
+
+	dd->cur_msg_len = 0;
+	dd->read_len = dd->write_len = 0;
+	dd->bam.bam_tx_len = dd->bam.bam_rx_len = 0;
+
+	if (xfer->tx_buf)
+		dd->bam.bam_tx_len = dd->write_len = xfer->len;
+	if (xfer->rx_buf)
+		dd->bam.bam_rx_len = dd->read_len = xfer->len;
+	dd->cur_msg_len = xfer->len;
+}
+
+static int msm_spi_process_transfer(struct msm_spi *dd)
+{
+	u8  bpw;
+	u32 max_speed;
+	u32 read_count;
+	u32 timeout;
+	u32 spi_ioc;
+	u32 int_loopback = 0;
+	int ret;
+	int status = 0;
+
+	get_transfer_length(dd);
+	dd->cur_tx_transfer = dd->cur_transfer;
+	dd->cur_rx_transfer = dd->cur_transfer;
+	dd->bam.curr_rx_bytes_recvd = dd->bam.curr_tx_bytes_sent = 0;
+	dd->write_xfr_cnt = dd->read_xfr_cnt = 0;
+	dd->tx_bytes_remaining = dd->cur_msg_len;
+	dd->rx_bytes_remaining = dd->cur_msg_len;
+	dd->read_buf           = dd->cur_transfer->rx_buf;
+	dd->write_buf          = dd->cur_transfer->tx_buf;
+	dd->tx_done = false;
+	dd->rx_done = false;
+	init_completion(&dd->tx_transfer_complete);
+	init_completion(&dd->rx_transfer_complete);
+	if (dd->cur_transfer->bits_per_word)
+		bpw = dd->cur_transfer->bits_per_word;
+	else
+		bpw = 8;
+	dd->bytes_per_word = (bpw + 7) / 8;
+
+	if (dd->cur_transfer->speed_hz)
+		max_speed = dd->cur_transfer->speed_hz;
+	else
+		max_speed = dd->spi->max_speed_hz;
+	if (!dd->clock_speed || max_speed != dd->clock_speed)
+		msm_spi_clock_set(dd, max_speed);
+
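+	/*
+	 * The ideal transfer time is len * 8 bits divided by the clock rate
+	 * in kHz, which yields milliseconds; allow ~100x that as a timeout.
+	 */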
+	timeout = 100 * msecs_to_jiffies(
+			DIV_ROUND_UP(dd->cur_msg_len * 8,
+			DIV_ROUND_UP(max_speed, MSEC_PER_SEC)));
+
+	read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word);
+	if (dd->spi->mode & SPI_LOOP)
+		int_loopback = 1;
+
+	ret = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	if (ret < 0) {
+		dev_err(dd->dev,
+			"%s: Error setting QUP to reset-state\n",
+			__func__);
+		return ret;
+	}
+
+	msm_spi_set_transfer_mode(dd, bpw, read_count);
+	msm_spi_set_mx_counts(dd, read_count);
+	if (dd->tx_mode == SPI_BAM_MODE) {
+		ret = msm_spi_dma_map_buffers(dd);
+		if (ret < 0) {
+			pr_err("%s(): Error Mapping DMA buffers\n", __func__);
+			dd->tx_mode = SPI_MODE_NONE;
+			dd->rx_mode = SPI_MODE_NONE;
+			return ret;
+		}
+	}
+	msm_spi_set_qup_io_modes(dd);
+	msm_spi_set_spi_config(dd, bpw);
+	msm_spi_set_qup_config(dd, bpw);
+	spi_ioc = msm_spi_set_spi_io_control(dd);
+	msm_spi_set_qup_op_mask(dd);
+
+	/* The output fifo interrupt handler will handle all writes after
+	 * the first. Restricting this to one write avoids contention
+	 * issues and race conditions between this thread and the int handler
+	 */
+	if (dd->tx_mode != SPI_BAM_MODE) {
+		if (msm_spi_prepare_for_write(dd))
+			goto transfer_end;
+		msm_spi_start_write(dd, read_count);
+	} else {
+		if ((msm_spi_bam_begin_transfer(dd)) < 0) {
+			dev_err(dd->dev, "%s: BAM transfer setup failed\n",
+				__func__);
+			status = -EIO;
+			goto transfer_end;
+		}
+	}
+
+	/*
+	 * In BAM mode, the state is already RUN at this point.
+	 * Only enter the RUN state after the first word is written into
+	 * the output FIFO. Otherwise, the output FIFO EMPTY interrupt
+	 * might fire before the first word is written resulting in a
+	 * possible race condition.
+	 */
+	if (dd->tx_mode != SPI_BAM_MODE)
+		if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
+			dev_warn(dd->dev,
+				"%s: Failed to set QUP to run-state. Mode:%d\n",
+				__func__, dd->tx_mode);
+			goto transfer_end;
+		}
+
+	/* Assume success, this might change later upon transaction result */
+	do {
+		if (dd->write_buf &&
+		    !wait_for_completion_timeout(&dd->tx_transfer_complete,
+		    timeout)) {
+			dev_err(dd->dev, "%s: SPI Tx transaction timeout\n",
+				__func__);
+			status = -EIO;
+			break;
+		}
+
+		if (dd->read_buf &&
+		    !wait_for_completion_timeout(&dd->rx_transfer_complete,
+		    timeout)) {
+			dev_err(dd->dev, "%s: SPI Rx transaction timeout\n",
+				__func__);
+			status = -EIO;
+			break;
+		}
+	} while (msm_spi_dma_send_next(dd));
+
+	msm_spi_udelay(dd->xfrs_delay_usec);
+
+transfer_end:
+	if ((dd->tx_mode == SPI_BAM_MODE) && status)
+		msm_spi_bam_flush(dd);
+	msm_spi_dma_unmap_buffers(dd);
+	dd->tx_mode = SPI_MODE_NONE;
+	dd->rx_mode = SPI_MODE_NONE;
+
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	if (!dd->cur_transfer->cs_change)
+		writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE,
+		       dd->base + SPI_IO_CONTROL);
+	return status;
+}
+
+static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
+{
+	struct msm_spi *dd = spi_master_get_devdata(spi->master);
+	u32 spi_ioc;
+	u32 spi_ioc_orig;
+	int rc = 0;
+
+	rc = pm_runtime_get_sync(dd->dev);
+	if (rc < 0) {
+		dev_err(dd->dev, "Failure during runtime get,rc=%d\n", rc);
+		return;
+	}
+
+	if (dd->pdata->is_shared) {
+		rc = get_local_resources(dd);
+		/* on failure, still drop the runtime PM reference taken above */
+		if (rc)
+			goto put_runtime;
+	}
+
+	msm_spi_clk_path_vote(dd, spi->max_speed_hz);
+
+	if (!(spi->mode & SPI_CS_HIGH))
+		set_flag = !set_flag;
+
+	/* Serve only under mutex lock as RT suspend may cause a race */
+	mutex_lock(&dd->core_lock);
+	if (dd->suspended) {
+		dev_err(dd->dev, "%s: SPI operational state=%d Invalid\n",
+			__func__, dd->suspended);
+		mutex_unlock(&dd->core_lock);
+		goto put_runtime;
+	}
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc_orig = spi_ioc;
+	if (set_flag)
+		spi_ioc |= SPI_IO_C_FORCE_CS;
+	else
+		spi_ioc &= ~SPI_IO_C_FORCE_CS;
+
+	if (spi_ioc != spi_ioc_orig)
+		writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+	if (dd->pdata->is_shared)
+		put_local_resources(dd);
+	mutex_unlock(&dd->core_lock);
+
+put_runtime:
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+}
+
+static void reset_core(struct msm_spi *dd)
+{
+	u32 spi_ioc;
+
+	msm_spi_register_init(dd);
+	/*
+	 * The SPI core generates a bogus input overrun error on some targets,
+	 * when a transition from run to reset state occurs and if the FIFO has
+	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
+	 * bit.
+	 */
+	msm_spi_enable_error_flags(dd);
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	spi_ioc |= SPI_IO_C_NO_TRI_STATE;
+	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+	/*
+	 * Ensure that the IO control is written to before returning.
+	 */
+	mb();
+	msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+}
+
+static void put_local_resources(struct msm_spi *dd)
+{
+	msm_spi_disable_irqs(dd);
+	clk_disable_unprepare(dd->clk);
+	dd->clock_speed = 0;
+	clk_disable_unprepare(dd->pclk);
+
+	/* Free  the spi clk, miso, mosi, cs gpio */
+	if (dd->pdata && dd->pdata->gpio_release)
+		dd->pdata->gpio_release();
+
+	msm_spi_free_gpios(dd);
+}
+
+static int get_local_resources(struct msm_spi *dd)
+{
+	int ret = -EINVAL;
+
+	/* Configure the spi clk, miso, mosi and cs gpio */
+	if (dd->pdata->gpio_config) {
+		ret = dd->pdata->gpio_config();
+		if (ret) {
+			dev_err(dd->dev,
+					"%s: error configuring GPIOs\n",
+					__func__);
+			return ret;
+		}
+	}
+
+	ret = msm_spi_request_gpios(dd);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(dd->clk);
+	if (ret)
+		goto clk0_err;
+	ret = clk_prepare_enable(dd->pclk);
+	if (ret)
+		goto clk1_err;
+	msm_spi_enable_irqs(dd);
+
+	return 0;
+
+clk1_err:
+	clk_disable_unprepare(dd->clk);
+clk0_err:
+	msm_spi_free_gpios(dd);
+	return ret;
+}
+
+/**
+ * msm_spi_transfer_one: process one SPI transfer at a time
+ * @master: SPI master controller reference
+ * @spi: the SPI slave device addressed by the transfer
+ * @xfer: the transfer to process
+ * @return zero on success or negative error value
+ */
+static int msm_spi_transfer_one(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
+{
+	struct msm_spi	*dd;
+	unsigned long        flags;
+	u32	status_error = 0;
+
+	dd = spi_master_get_devdata(master);
+
+	/* Check message parameters */
+	if (xfer->speed_hz > dd->pdata->max_clock_speed ||
+	    (xfer->bits_per_word &&
+	     (xfer->bits_per_word < 4 || xfer->bits_per_word > 32)) ||
+	    (xfer->tx_buf == NULL && xfer->rx_buf == NULL)) {
+		dev_err(dd->dev,
+			"Invalid transfer: %d Hz, %d bpw tx=%pK, rx=%pK\n",
+			xfer->speed_hz, xfer->bits_per_word,
+			xfer->tx_buf, xfer->rx_buf);
+		return -EINVAL;
+	}
+	dd->spi = spi;
+	dd->cur_transfer = xfer;
+
+	mutex_lock(&dd->core_lock);
+
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->transfer_pending = true;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+	/*
+	 * get local resources for each transfer to ensure we're in a good
+	 * state and not interfering with other EE's using this device
+	 */
+	if (dd->pdata->is_shared) {
+		if (get_local_resources(dd)) {
+			mutex_unlock(&dd->core_lock);
+			spi_finalize_current_message(master);
+			return -EINVAL;
+		}
+
+		reset_core(dd);
+		if (dd->use_dma) {
+			msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
+					&dd->bam.prod.config);
+			msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
+					&dd->bam.cons.config);
+		}
+	}
+
+	if (dd->suspended || !msm_spi_is_valid_state(dd)) {
+		dev_err(dd->dev, "%s: SPI operational state not valid\n",
+			__func__);
+		status_error = 1;
+	}
+
+	if (!status_error)
+		status_error =
+			msm_spi_process_transfer(dd);
+
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->transfer_pending = false;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+
+	/*
+	 * Put local resources prior to calling finalize to ensure the hw
+	 * is in a known state before notifying the calling thread (which is a
+	 * different context since we're running in the spi kthread here) to
+	 * prevent race conditions between us and any other EE's using this hw.
+	 */
+	if (dd->pdata->is_shared) {
+		if (dd->use_dma) {
+			msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
+			msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
+		}
+		put_local_resources(dd);
+	}
+	mutex_unlock(&dd->core_lock);
+	if (dd->suspended)
+		wake_up_interruptible(&dd->continue_suspend);
+	return status_error;
+}
+
+static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
+{
+	struct msm_spi	*dd = spi_master_get_devdata(master);
+	int resume_state = 0;
+
+	resume_state = pm_runtime_get_sync(dd->dev);
+	if (resume_state < 0)
+		goto spi_finalize;
+
+	/*
+	 * Counter-part of system-suspend when runtime-pm is not enabled.
+	 * This way, resume can be left empty and device will be put in
+	 * active mode only if client requests anything on the bus
+	 */
+	if (!pm_runtime_enabled(dd->dev))
+		resume_state = msm_spi_pm_resume_runtime(dd->dev);
+	if (resume_state < 0)
+		goto spi_finalize;
+	if (dd->suspended) {
+		resume_state = -EBUSY;
+		goto spi_finalize;
+	}
+	return 0;
+
+spi_finalize:
+	spi_finalize_current_message(master);
+	return resume_state;
+}
+
+static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
+{
+	struct msm_spi	*dd = spi_master_get_devdata(master);
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+	return 0;
+}
+
+static int msm_spi_setup(struct spi_device *spi)
+{
+	struct msm_spi	*dd;
+	int              rc = 0;
+	u32              spi_ioc;
+	u32              spi_config;
+	u32              mask;
+
+	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
+		dev_err(&spi->dev, "%s: invalid bits_per_word %d\n",
+			__func__, spi->bits_per_word);
+		return -EINVAL;
+	}
+	if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) {
+		dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n",
+			__func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1);
+		return -EINVAL;
+	}
+
+	dd = spi_master_get_devdata(spi->master);
+
+	rc = pm_runtime_get_sync(dd->dev);
+	if (rc < 0 && !dd->is_init_complete &&
+			pm_runtime_enabled(dd->dev)) {
+		pm_runtime_set_suspended(dd->dev);
+		pm_runtime_put_sync(dd->dev);
+		rc = 0;
+		goto err_setup_exit;
+	} else
+		rc = 0;
+
+	mutex_lock(&dd->core_lock);
+
+	/* Counter-part of system-suspend when runtime-pm is not enabled. */
+	if (!pm_runtime_enabled(dd->dev)) {
+		rc = msm_spi_pm_resume_runtime(dd->dev);
+		if (rc < 0 && !dd->is_init_complete) {
+			rc = 0;
+			mutex_unlock(&dd->core_lock);
+			goto err_setup_exit;
+		}
+	}
+
+	if (dd->suspended) {
+		rc = -EBUSY;
+		mutex_unlock(&dd->core_lock);
+		goto err_setup_exit;
+	}
+
+	if (dd->pdata->is_shared) {
+		rc = get_local_resources(dd);
+		if (rc)
+			goto no_resources;
+	}
+
+	spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL);
+	mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
+	if (spi->mode & SPI_CS_HIGH)
+		spi_ioc |= mask;
+	else
+		spi_ioc &= ~mask;
+	spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode);
+
+	writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
+
+	spi_config = readl_relaxed(dd->base + SPI_CONFIG);
+	spi_config = msm_spi_calc_spi_config_loopback_and_input_first(
+							spi_config, spi->mode);
+	writel_relaxed(spi_config, dd->base + SPI_CONFIG);
+
+	/* Ensure previous write completed before disabling the clocks */
+	mb();
+	if (dd->pdata->is_shared)
+		put_local_resources(dd);
+	/* Counter-part of system-resume when runtime-pm is not enabled. */
+	if (!pm_runtime_enabled(dd->dev))
+		msm_spi_pm_suspend_runtime(dd->dev);
+
+no_resources:
+	mutex_unlock(&dd->core_lock);
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+
+err_setup_exit:
+	return rc;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	struct msm_spi_debugfs_data *reg = (struct msm_spi_debugfs_data *)data;
+	struct msm_spi *dd = reg->dd;
+	int ret;
+
+	ret = pm_runtime_get_sync(dd->dev);
+	if (ret < 0)
+		return ret;
+
+	writel_relaxed(val, (dd->base + reg->offset));
+	/* Ensure the previous write completed. */
+	mb();
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+	return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	struct msm_spi_debugfs_data *reg = (struct msm_spi_debugfs_data *)data;
+	struct msm_spi *dd = reg->dd;
+	int ret;
+
+	ret = pm_runtime_get_sync(dd->dev);
+	if (ret < 0)
+		return ret;
+	*val = readl_relaxed(dd->base + reg->offset);
+	/* Ensure the previous read completed. */
+	mb();
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx\n");
+
+static void spi_debugfs_init(struct msm_spi *dd)
+{
+	char dir_name[20];
+
+	scnprintf(dir_name, sizeof(dir_name), "%s_dbg", dev_name(dd->dev));
+	dd->dent_spi = debugfs_create_dir(dir_name, NULL);
+	if (dd->dent_spi) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) {
+			dd->reg_data[i].offset = debugfs_spi_regs[i].offset;
+			dd->reg_data[i].dd = dd;
+			dd->debugfs_spi_regs[i] =
+			   debugfs_create_file(
+			       debugfs_spi_regs[i].name,
+			       debugfs_spi_regs[i].mode,
+			       dd->dent_spi, &dd->reg_data[i],
+			       &fops_iomem_x32);
+		}
+	}
+}
+
+static void spi_debugfs_exit(struct msm_spi *dd)
+{
+	if (dd->dent_spi) {
+		int i;
+
+		debugfs_remove_recursive(dd->dent_spi);
+		dd->dent_spi = NULL;
+		for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++)
+			dd->debugfs_spi_regs[i] = NULL;
+	}
+}
+#else
+static void spi_debugfs_init(struct msm_spi *dd) {}
+static void spi_debugfs_exit(struct msm_spi *dd) {}
+#endif
+
+/* ===Device attributes begin=== */
+static ssize_t stats_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct msm_spi *dd =  spi_master_get_devdata(master);
+
+	return scnprintf(buf, PAGE_SIZE,
+			"Device       %s\n"
+			"rx fifo_size = %d spi words\n"
+			"tx fifo_size = %d spi words\n"
+			"use_dma ?    %s\n"
+			"rx block size = %d bytes\n"
+			"tx block size = %d bytes\n"
+			"input burst size = %d bytes\n"
+			"output burst size = %d bytes\n"
+			"DMA configuration:\n"
+			"tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n"
+			"--statistics--\n"
+			"Rx isrs  = %d\n"
+			"Tx isrs  = %d\n"
+			"--debug--\n"
+			"NA yet\n",
+			dev_name(dev),
+			dd->input_fifo_size,
+			dd->output_fifo_size,
+			dd->use_dma ? "yes" : "no",
+			dd->input_block_size,
+			dd->output_block_size,
+			dd->input_burst_size,
+			dd->output_burst_size,
+			dd->tx_dma_chan,
+			dd->rx_dma_chan,
+			dd->tx_dma_crci,
+			dd->rx_dma_crci,
+			dd->stat_rx,
+			dd->stat_tx
+			);
+}
+
+/* Reset statistics on write */
+static ssize_t stats_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct msm_spi *dd = spi_master_get_devdata(master);
+
+	dd->stat_rx = 0;
+	dd->stat_tx = 0;
+	return count;
+}
+
+static DEVICE_ATTR_RW(stats);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_stats.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+/* ===Device attributes end=== */
+
+static void msm_spi_bam_pipe_teardown(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	if (!pipe->teardown_required)
+		return;
+
+	msm_spi_bam_pipe_disconnect(dd, pipe);
+	dma_free_coherent(dd->dev, pipe->config.desc.size,
+		pipe->config.desc.base, pipe->config.desc.phys_base);
+	sps_free_endpoint(pipe->handle);
+	pipe->handle = NULL;
+	pipe->teardown_required = false;
+}
+
+static int msm_spi_bam_pipe_init(struct msm_spi *dd,
+					enum msm_spi_pipe_direction pipe_dir)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ?
+					(&dd->bam.prod) : (&dd->bam.cons);
+	struct sps_connect *pipe_conf = &pipe->config;
+
+	pipe->name   = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? "cons" : "prod";
+	pipe->handle = NULL;
+	pipe_handle  = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n"
+								, __func__);
+		return -ENOMEM;
+	}
+
+	memset(pipe_conf, 0, sizeof(*pipe_conf));
+	rc = sps_get_config(pipe_handle, pipe_conf);
+	if (rc) {
+		dev_err(dd->dev, "%s: Failed to get BAM pipe config\n"
+			, __func__);
+		goto config_err;
+	}
+
+	if (pipe_dir == SPI_BAM_CONSUMER_PIPE) {
+		pipe_conf->source          = dd->bam.handle;
+		pipe_conf->destination     = SPS_DEV_HANDLE_MEM;
+		pipe_conf->mode            = SPS_MODE_SRC;
+		pipe_conf->src_pipe_index  =
+					dd->pdata->bam_producer_pipe_index;
+		pipe_conf->dest_pipe_index = 0;
+	} else {
+		pipe_conf->source          = SPS_DEV_HANDLE_MEM;
+		pipe_conf->destination     = dd->bam.handle;
+		pipe_conf->mode            = SPS_MODE_DEST;
+		pipe_conf->src_pipe_index  = 0;
+		pipe_conf->dest_pipe_index =
+					dd->pdata->bam_consumer_pipe_index;
+	}
+	pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE;
+	pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec);
+	pipe_conf->desc.base = dma_zalloc_coherent(dd->dev,
+				pipe_conf->desc.size,
+				&pipe_conf->desc.phys_base,
+				GFP_KERNEL);
+	if (!pipe_conf->desc.base) {
+		dev_err(dd->dev, "%s: Failed allocate BAM pipe memory\n"
+			, __func__);
+		rc = -ENOMEM;
+		goto config_err;
+	}
+	/* zero descriptor FIFO for convenient debugging of first descs */
+	memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size);
+
+	pipe->handle = pipe_handle;
+
+	return 0;
+
+config_err:
+	sps_free_endpoint(pipe_handle);
+
+	return rc;
+}
+
+static void msm_spi_bam_teardown(struct msm_spi *dd)
+{
+	msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE);
+	msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE);
+
+	if (dd->bam.deregister_required) {
+		sps_deregister_bam_device(dd->bam.handle);
+		dd->bam.deregister_required = false;
+	}
+}
+
+static int msm_spi_bam_init(struct msm_spi *dd)
+{
+	struct sps_bam_props bam_props = {0};
+	uintptr_t bam_handle;
+	int rc = 0;
+
+	rc = sps_phy2h(dd->bam.phys_addr, &bam_handle);
+	if (rc || !bam_handle) {
+		bam_props.phys_addr = dd->bam.phys_addr;
+		bam_props.virt_addr = dd->bam.base;
+		bam_props.irq       = dd->bam.irq;
+		bam_props.manage    = SPS_BAM_MGR_DEVICE_REMOTE;
+		bam_props.summing_threshold = 0x10;
+
+		rc = sps_register_bam_device(&bam_props, &bam_handle);
+		if (rc) {
+			dev_err(dd->dev,
+				"%s: Failed to register BAM device\n",
+				__func__);
+			return rc;
+		}
+		dd->bam.deregister_required = true;
+	}
+
+	dd->bam.handle = bam_handle;
+
+	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE);
+	if (rc) {
+		dev_err(dd->dev,
+			"%s: Failed to init producer BAM-pipe\n",
+			__func__);
+		goto bam_init_error;
+	}
+
+	rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE);
+	if (rc) {
+		dev_err(dd->dev,
+			"%s: Failed to init consumer BAM-pipe\n",
+			__func__);
+		goto bam_init_error;
+	}
+
+	return 0;
+
+bam_init_error:
+	msm_spi_bam_teardown(dd);
+	return rc;
+}
+
+enum msm_spi_dt_entry_status {
+	DT_REQ,  /* Required:  fail if missing */
+	DT_SGST, /* Suggested: warn if missing */
+	DT_OPT,  /* Optional:  don't warn if missing */
+};
+
+enum msm_spi_dt_entry_type {
+	DT_U32,
+	DT_GPIO,
+	DT_BOOL,
+};
+
+struct msm_spi_dt_to_pdata_map {
+	const char                  *dt_name;
+	void                        *ptr_data;
+	enum msm_spi_dt_entry_status status;
+	enum msm_spi_dt_entry_type   type;
+	int                          default_val;
+};
+
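+/*
+ * For example, the row {"spi-max-frequency", &pdata->max_clock_speed,
+ * DT_SGST, DT_U32, 0} reads a suggested u32 property: a warning is
+ * logged if it is missing and the value defaults to 0.
+ */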
+static int msm_spi_dt_to_pdata_populate(struct platform_device *pdev,
+					struct msm_spi_platform_data *pdata,
+					struct msm_spi_dt_to_pdata_map  *itr)
+{
+	int  ret, err = 0;
+	struct device_node *node = pdev->dev.of_node;
+
+	for (; itr->dt_name; ++itr) {
+		switch (itr->type) {
+		case DT_GPIO:
+			ret = of_get_named_gpio(node, itr->dt_name, 0);
+			if (ret >= 0) {
+				*((int *) itr->ptr_data) = ret;
+				ret = 0;
+			}
+			break;
+		case DT_U32:
+			ret = of_property_read_u32(node, itr->dt_name,
+							 (u32 *) itr->ptr_data);
+			break;
+		case DT_BOOL:
+			*((bool *) itr->ptr_data) =
+				of_property_read_bool(node, itr->dt_name);
+			ret = 0;
+			break;
+		default:
+			dev_err(&pdev->dev, "%d is an unknown DT entry type\n",
+								itr->type);
+			ret = -EBADE;
+		}
+
+		dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n",
+				ret, itr->dt_name, *((int *)itr->ptr_data));
+
+		if (ret) {
+			*((int *)itr->ptr_data) = itr->default_val;
+
+			if (itr->status < DT_OPT) {
+				dev_err(&pdev->dev, "Missing '%s' DT entry\n",
+								itr->dt_name);
+
+				/* cont on err to dump all missing entries */
+				if (itr->status == DT_REQ && !err)
+					err = ret;
+			}
+		}
+	}
+
+	return err;
+}
+
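+/*
+ * Illustrative device-tree node consumed by the parser below. The unit
+ * address and values are assumptions for the example; the property names
+ * match the map table in msm_spi_dt_to_pdata():
+ *
+ *	spi@f9923000 {
+ *		compatible = "qcom,spi-qup-v2";
+ *		spi-max-frequency = <19200000>;
+ *		qcom,use-bam;
+ *		qcom,bam-consumer-pipe-index = <12>;
+ *		qcom,bam-producer-pipe-index = <13>;
+ *	};
+ */
+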
+/**
+ * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree
+ */
+static struct msm_spi_platform_data *msm_spi_dt_to_pdata(
+			struct platform_device *pdev, struct msm_spi *dd)
+{
+	struct msm_spi_platform_data *pdata;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	if (pdata) {
+		struct msm_spi_dt_to_pdata_map map[] = {
+		{"spi-max-frequency",
+			&pdata->max_clock_speed,         DT_SGST, DT_U32,   0},
+		{"qcom,infinite-mode",
+			&pdata->infinite_mode,           DT_OPT,  DT_U32,   0},
+		{"qcom,master-id",
+			&pdata->master_id,               DT_SGST, DT_U32,   0},
+		{"qcom,bus-width",
+			&pdata->bus_width,               DT_OPT, DT_U32,   8},
+		{"qcom,ver-reg-exists",
+			&pdata->ver_reg_exists,          DT_OPT,  DT_BOOL,  0},
+		{"qcom,use-bam",
+			&pdata->use_bam,                 DT_OPT,  DT_BOOL,  0},
+		{"qcom,use-pinctrl",
+			&pdata->use_pinctrl,             DT_OPT,  DT_BOOL,  0},
+		{"qcom,bam-consumer-pipe-index",
+			&pdata->bam_consumer_pipe_index, DT_OPT,  DT_U32,   0},
+		{"qcom,bam-producer-pipe-index",
+			&pdata->bam_producer_pipe_index, DT_OPT,  DT_U32,   0},
+		{"qcom,gpio-clk",
+			&dd->spi_gpios[0],               DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-miso",
+			&dd->spi_gpios[1],               DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-mosi",
+			&dd->spi_gpios[2],               DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs0",
+			&dd->cs_gpios[0].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs1",
+			&dd->cs_gpios[1].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs2",
+			&dd->cs_gpios[2].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,gpio-cs3",
+			&dd->cs_gpios[3].gpio_num,       DT_OPT,  DT_GPIO, -1},
+		{"qcom,rt-priority",
+			&pdata->rt_priority,		 DT_OPT,  DT_BOOL,  0},
+		{"qcom,shared",
+			&pdata->is_shared,		 DT_OPT,  DT_BOOL,  0},
+		{NULL,  NULL,                            0,       0,        0},
+		};
+
+		if (msm_spi_dt_to_pdata_populate(pdev, pdata, map))
+			return NULL;
+	}
+
+	if (pdata->use_bam) {
+		if (!pdata->bam_consumer_pipe_index) {
+			dev_warn(&pdev->dev,
+			"missing qcom,bam-consumer-pipe-index entry in device-tree\n");
+			pdata->use_bam = false;
+		}
+
+		if (!pdata->bam_producer_pipe_index) {
+			dev_warn(&pdev->dev,
+			"missing qcom,bam-producer-pipe-index entry in device-tree\n");
+			pdata->use_bam = false;
+		}
+	}
+	return pdata;
+}
+
+static int msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd)
+{
+	u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER);
+
+	return (data >= QUP_HARDWARE_VER_2_1_1) ? SPI_QUP_VERSION_BFAM
+						: SPI_QUP_VERSION_NONE;
+}
+
+static int msm_spi_bam_get_resources(struct msm_spi *dd,
+	struct platform_device *pdev, struct spi_master *master)
+{
+	struct resource *resource;
+	size_t bam_mem_size;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"spi_bam_physical");
+	if (!resource) {
+		dev_warn(&pdev->dev,
+			"%s: Missing spi_bam_physical entry in DT\n",
+			__func__);
+		return -ENXIO;
+	}
+
+	dd->bam.phys_addr = resource->start;
+	bam_mem_size = resource_size(resource);
+	dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr,
+					bam_mem_size);
+	if (!dd->bam.base) {
+		dev_warn(&pdev->dev,
+			"%s: Failed to ioremap(spi_bam_physical)\n",
+			__func__);
+		return -ENXIO;
+	}
+
+	dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq");
+	if (dd->bam.irq < 0) {
+		dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	dd->dma_init = msm_spi_bam_init;
+	dd->dma_teardown = msm_spi_bam_teardown;
+	return 0;
+}
+
+static int init_resources(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi	  *dd;
+	int               rc = -ENXIO;
+	int               clk_enabled = 0;
+	int               pclk_enabled = 0;
+
+	dd = spi_master_get_devdata(master);
+
+	if (dd->pdata && dd->pdata->use_pinctrl) {
+		rc = msm_spi_pinctrl_init(dd);
+		if (rc) {
+			dev_err(&pdev->dev, "%s: pinctrl init failed\n",
+					 __func__);
+			return rc;
+		}
+	}
+
+	mutex_lock(&dd->core_lock);
+
+	dd->clk = clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(dd->clk)) {
+		dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__);
+		rc = PTR_ERR(dd->clk);
+		goto err_clk_get;
+	}
+
+	dd->pclk = clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(dd->pclk)) {
+		dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__);
+		rc = PTR_ERR(dd->pclk);
+		goto err_pclk_get;
+	}
+
+	if (dd->pdata && dd->pdata->max_clock_speed)
+		msm_spi_clock_set(dd, dd->pdata->max_clock_speed);
+
+	rc = clk_prepare_enable(dd->clk);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: unable to enable core_clk\n",
+			__func__);
+		goto err_clk_enable;
+	}
+
+	clk_enabled = 1;
+	rc = clk_prepare_enable(dd->pclk);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: unable to enable iface_clk\n",
+		__func__);
+		goto err_pclk_enable;
+	}
+
+	pclk_enabled = 1;
+
+	if (dd->pdata && dd->pdata->ver_reg_exists) {
+		enum msm_spi_qup_version ver =
+					msm_spi_get_qup_hw_ver(&pdev->dev, dd);
+		if (dd->qup_ver != ver)
+			dev_warn(&pdev->dev,
+			"%s: HW version different then assumed by probe\n",
+			__func__);
+	}
+
+	/* GSBI does not exist on B-family MSM chips */
+	if (dd->qup_ver != SPI_QUP_VERSION_BFAM) {
+		rc = msm_spi_configure_gsbi(dd, pdev);
+		if (rc)
+			goto err_config_gsbi;
+	}
+
+	msm_spi_calculate_fifo_size(dd);
+	if (dd->use_dma) {
+		rc = dd->dma_init(dd);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: failed to init DMA. Disabling DMA mode\n",
+				__func__);
+			dd->use_dma = false;
+		}
+	}
+
+	msm_spi_register_init(dd);
+	/*
+	 * The SPI core generates a bogus input overrun error on some targets,
+	 * when a transition from run to reset state occurs and if the FIFO has
+	 * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN
+	 * bit.
+	 */
+	msm_spi_enable_error_flags(dd);
+
+	writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL);
+	rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET);
+	if (rc)
+		goto err_spi_state;
+
+	clk_disable_unprepare(dd->clk);
+	clk_disable_unprepare(dd->pclk);
+	clk_enabled = 0;
+	pclk_enabled = 0;
+
+	dd->transfer_pending = false;
+	dd->tx_mode = SPI_MODE_NONE;
+	dd->rx_mode = SPI_MODE_NONE;
+
+	rc = msm_spi_request_irq(dd, pdev, master);
+	if (rc)
+		goto err_irq;
+
+	msm_spi_disable_irqs(dd);
+
+	mutex_unlock(&dd->core_lock);
+	return 0;
+
+err_irq:
+err_spi_state:
+	if (dd->use_dma && dd->dma_teardown)
+		dd->dma_teardown(dd);
+err_config_gsbi:
+	if (pclk_enabled)
+		clk_disable_unprepare(dd->pclk);
+err_pclk_enable:
+	if (clk_enabled)
+		clk_disable_unprepare(dd->clk);
+err_clk_enable:
+	clk_put(dd->pclk);
+err_pclk_get:
+	clk_put(dd->clk);
+err_clk_get:
+	mutex_unlock(&dd->core_lock);
+	return rc;
+}
+
+static int msm_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master      *master;
+	struct msm_spi	       *dd;
+	struct resource	       *resource;
+	int			i = 0;
+	int                     rc = -ENXIO;
+	struct msm_spi_platform_data *pdata;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi));
+	if (!master) {
+		rc = -ENOMEM;
+		dev_err(&pdev->dev, "master allocation failed\n");
+		goto err_probe_exit;
+	}
+
+	master->bus_num        = pdev->id;
+	master->mode_bits      = SPI_SUPPORTED_MODES;
+	master->num_chipselect = SPI_NUM_CHIPSELECTS;
+	master->set_cs	       = msm_spi_set_cs;
+	master->setup          = msm_spi_setup;
+	master->prepare_transfer_hardware = msm_spi_prepare_transfer_hardware;
+	master->transfer_one = msm_spi_transfer_one;
+	master->unprepare_transfer_hardware
+			= msm_spi_unprepare_transfer_hardware;
+
+	platform_set_drvdata(pdev, master);
+	dd = spi_master_get_devdata(master);
+
+	if (pdev->dev.of_node) {
+		dd->qup_ver = SPI_QUP_VERSION_BFAM;
+		master->dev.of_node = pdev->dev.of_node;
+		pdata = msm_spi_dt_to_pdata(pdev, dd);
+		if (!pdata) {
+			dev_err(&pdev->dev, "platform data allocation failed\n");
+			rc = -ENOMEM;
+			goto err_probe_exit;
+		}
+
+		rc = of_alias_get_id(pdev->dev.of_node, "spi");
+		if (rc < 0)
+			dev_warn(&pdev->dev,
+				"using default bus_num %d\n", pdev->id);
+		else
+			master->bus_num = pdev->id = rc;
+	} else {
+		pdata = pdev->dev.platform_data;
+		dd->qup_ver = SPI_QUP_VERSION_NONE;
+
+		for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) {
+			resource = platform_get_resource(pdev, IORESOURCE_IO,
+							i);
+			dd->spi_gpios[i] = resource ? resource->start : -1;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) {
+			resource = platform_get_resource(pdev, IORESOURCE_IO,
+						i + ARRAY_SIZE(spi_rsrcs));
+			dd->cs_gpios[i].gpio_num = resource ?
+							resource->start : -1;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i)
+		dd->cs_gpios[i].valid = false;
+
+	dd->pdata = pdata;
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		rc = -ENXIO;
+		goto err_probe_res;
+	}
+
+	dd->mem_phys_addr = resource->start;
+	dd->mem_size = resource_size(resource);
+	dd->dev = &pdev->dev;
+
+	if (pdata) {
+		master->rt = pdata->rt_priority;
+		if (pdata->dma_config) {
+			rc = pdata->dma_config();
+			if (rc) {
+				dev_warn(&pdev->dev,
+					"%s: DM mode not supported\n",
+					__func__);
+				dd->use_dma = false;
+				goto skip_dma_resources;
+			}
+		}
+		if (!dd->pdata->use_bam)
+			goto skip_dma_resources;
+
+		rc = msm_spi_bam_get_resources(dd, pdev, master);
+		if (rc) {
+			dev_warn(dd->dev,
+					"%s: Failed to get BAM resources\n",
+					__func__);
+			goto skip_dma_resources;
+		}
+		dd->use_dma = true;
+	}
+
+	spi_dma_mask(&pdev->dev);
+skip_dma_resources:
+
+	spin_lock_init(&dd->queue_lock);
+	mutex_init(&dd->core_lock);
+	init_waitqueue_head(&dd->continue_suspend);
+
+	if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr,
+					dd->mem_size, SPI_DRV_NAME)) {
+		rc = -ENXIO;
+		goto err_probe_reqmem;
+	}
+
+	dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size);
+	if (!dd->base) {
+		rc = -ENOMEM;
+		goto err_probe_reqmem;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	dd->suspended = true;
+	rc = spi_register_master(master);
+	if (rc)
+		goto err_probe_reg_master;
+
+	rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
+		goto err_attrs;
+	}
+	spi_debugfs_init(dd);
+
+	return 0;
+
+err_attrs:
+	spi_unregister_master(master);
+err_probe_reg_master:
+	pm_runtime_disable(&pdev->dev);
+err_probe_reqmem:
+err_probe_res:
+	spi_master_put(master);
+err_probe_exit:
+	return rc;
+}
+
+static int msm_spi_pm_suspend_runtime(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi	  *dd;
+	unsigned long	   flags;
+
+	dev_dbg(device, "pm_runtime: suspending...\n");
+	if (!master)
+		goto suspend_exit;
+	dd = spi_master_get_devdata(master);
+	if (!dd)
+		goto suspend_exit;
+
+	if (dd->suspended)
+		return 0;
+
+	/*
+	 * Make sure nothing is added to the queue while we're
+	 * suspending
+	 */
+	spin_lock_irqsave(&dd->queue_lock, flags);
+	dd->suspended = true;
+	spin_unlock_irqrestore(&dd->queue_lock, flags);
+
+	/* Wait for transactions to end, or time out */
+	wait_event_interruptible(dd->continue_suspend,
+		!dd->transfer_pending);
+
+	mutex_lock(&dd->core_lock);
+	if (dd->pdata && !dd->pdata->is_shared && dd->use_dma) {
+		msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod);
+		msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons);
+	}
+	if (dd->pdata && !dd->pdata->is_shared)
+		put_local_resources(dd);
+
+	if (dd->pdata)
+		msm_spi_clk_path_vote(dd, 0);
+	mutex_unlock(&dd->core_lock);
+
+suspend_exit:
+	return 0;
+}
+
+static int msm_spi_pm_resume_runtime(struct device *device)
+{
+	struct platform_device *pdev = to_platform_device(device);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi	  *dd;
+	int               ret = 0;
+
+	dev_dbg(device, "pm_runtime: resuming...\n");
+	if (!master)
+		goto resume_exit;
+	dd = spi_master_get_devdata(master);
+	if (!dd)
+		goto resume_exit;
+
+	if (!dd->suspended)
+		return 0;
+	if (!dd->is_init_complete) {
+		ret = init_resources(pdev);
+		if (ret != 0)
+			return ret;
+
+		dd->is_init_complete = true;
+	}
+	msm_spi_clk_path_init(dd);
+	msm_spi_clk_path_vote(dd, dd->pdata->max_clock_speed);
+
+	if (!dd->pdata->is_shared) {
+		ret = get_local_resources(dd);
+		if (ret)
+			return ret;
+	}
+	if (!dd->pdata->is_shared && dd->use_dma) {
+		msm_spi_bam_pipe_connect(dd, &dd->bam.prod,
+				&dd->bam.prod.config);
+		msm_spi_bam_pipe_connect(dd, &dd->bam.cons,
+				&dd->bam.cons.config);
+	}
+	dd->suspended = false;
+
+resume_exit:
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_spi_suspend(struct device *device)
+{
+	if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
+		struct platform_device *pdev = to_platform_device(device);
+		struct spi_master *master = platform_get_drvdata(pdev);
+		struct msm_spi   *dd;
+
+		dev_dbg(device, "system suspend\n");
+		if (!master)
+			goto suspend_exit;
+		dd = spi_master_get_devdata(master);
+		if (!dd)
+			goto suspend_exit;
+		msm_spi_pm_suspend_runtime(device);
+
+		/*
+		 * set the device's runtime PM status to 'suspended'
+		 */
+		pm_runtime_disable(device);
+		pm_runtime_set_suspended(device);
+		pm_runtime_enable(device);
+	}
+suspend_exit:
+	return 0;
+}
+
+static int msm_spi_resume(struct device *device)
+{
+	/*
+	 * Rely on runtime-PM to call resume in case it is enabled
+	 * Even if it's not enabled, rely on 1st client transaction to do
+	 * clock ON and gpio configuration
+	 */
+	dev_dbg(device, "system resume\n");
+	return 0;
+}
+#else
+#define msm_spi_suspend NULL
+#define msm_spi_resume NULL
+#endif
+
+static int msm_spi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct msm_spi    *dd = spi_master_get_devdata(master);
+
+	spi_debugfs_exit(dd);
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+
+	if (dd->dma_teardown)
+		dd->dma_teardown(dd);
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
+	clk_put(dd->clk);
+	clk_put(dd->pclk);
+	msm_spi_clk_path_teardown(dd);
+	platform_set_drvdata(pdev, NULL);
+	spi_unregister_master(master);
+	spi_master_put(master);
+
+	return 0;
+}
+
+static const struct of_device_id msm_spi_dt_match[] = {
+	{
+		.compatible = "qcom,spi-qup-v2",
+	},
+	{}
+};
+
+static const struct dev_pm_ops msm_spi_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume)
+	SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime,
+			msm_spi_pm_resume_runtime, NULL)
+};
+
+static struct platform_driver msm_spi_driver = {
+	.driver		= {
+		.name	= SPI_DRV_NAME,
+		.pm		= &msm_spi_dev_pm_ops,
+		.of_match_table = msm_spi_dt_match,
+	},
+	.probe		= msm_spi_probe,
+	.remove		= msm_spi_remove,
+};
+
+module_platform_driver(msm_spi_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:"SPI_DRV_NAME);
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
new file mode 100644
index 0000000..0ba8abd
--- /dev/null
+++ b/drivers/spi/spi_qsd.h
@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SPI_QSD_H
+#define _SPI_QSD_H
+
+#include <linux/pinctrl/consumer.h>
+#define SPI_DRV_NAME                  "spi_qsd"
+
+#if IS_ENABLED(CONFIG_SPI_QSD) || IS_ENABLED(CONFIG_SPI_QSD_MODULE)
+
+#define QSD_REG(x) (x)
+#define QUP_REG(x)
+
+#define SPI_FIFO_WORD_CNT             0x0048
+
+#else
+
+#define QSD_REG(x)
+#define QUP_REG(x) (x)
+
+#define QUP_CONFIG                    0x0000 /* N & NO_INPUT/NO_OUTPUT bits */
+#define QUP_ERROR_FLAGS_EN            0x030C
+#define QUP_ERR_MASK                  0x3
+#define SPI_OUTPUT_FIFO_WORD_CNT      0x010C
+#define SPI_INPUT_FIFO_WORD_CNT       0x0214
+#define QUP_MX_WRITE_COUNT            0x0150
+#define QUP_MX_WRITE_CNT_CURRENT      0x0154
+
+#define QUP_CONFIG_SPI_MODE           0x0100
+#endif
+
+#define GSBI_CTRL_REG                 0x0
+#define GSBI_SPI_CONFIG               0x30
+/* B-family only registers */
+#define QUP_HARDWARE_VER              0x0030
+#define QUP_HARDWARE_VER_2_1_1        0x20010001
+#define QUP_OPERATIONAL_MASK          0x0028
+#define QUP_OP_MASK_OUTPUT_SERVICE_FLAG 0x100
+#define QUP_OP_MASK_INPUT_SERVICE_FLAG  0x200
+
+#define QUP_ERROR_FLAGS               0x0308
+
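+/*
+ * Register offsets below are written as (QSD_REG(off) QUP_REG(off)).
+ * Exactly one of the two macros expands to its argument (the other to
+ * nothing), selected by CONFIG_SPI_QSD above. For example:
+ *	SPI_CONFIG -> (0x0000) on QSD targets
+ *	SPI_CONFIG -> (0x0300) on QUP targets
+ */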
+#define SPI_CONFIG                    (QSD_REG(0x0000) QUP_REG(0x0300))
+#define SPI_IO_CONTROL                (QSD_REG(0x0004) QUP_REG(0x0304))
+#define SPI_IO_MODES                  (QSD_REG(0x0008) QUP_REG(0x0008))
+#define SPI_SW_RESET                  (QSD_REG(0x000C) QUP_REG(0x000C))
+#define SPI_TIME_OUT_CURRENT          (QSD_REG(0x0014) QUP_REG(0x0014))
+#define SPI_MX_OUTPUT_COUNT           (QSD_REG(0x0018) QUP_REG(0x0100))
+#define SPI_MX_OUTPUT_CNT_CURRENT     (QSD_REG(0x001C) QUP_REG(0x0104))
+#define SPI_MX_INPUT_COUNT            (QSD_REG(0x0020) QUP_REG(0x0200))
+#define SPI_MX_INPUT_CNT_CURRENT      (QSD_REG(0x0024) QUP_REG(0x0204))
+#define SPI_MX_READ_COUNT             (QSD_REG(0x0028) QUP_REG(0x0208))
+#define SPI_MX_READ_CNT_CURRENT       (QSD_REG(0x002C) QUP_REG(0x020C))
+#define SPI_OPERATIONAL               (QSD_REG(0x0030) QUP_REG(0x0018))
+#define SPI_ERROR_FLAGS               (QSD_REG(0x0034) QUP_REG(0x001C))
+#define SPI_ERROR_FLAGS_EN            (QSD_REG(0x0038) QUP_REG(0x0020))
+#define SPI_DEASSERT_WAIT             (QSD_REG(0x003C) QUP_REG(0x0310))
+#define SPI_OUTPUT_DEBUG              (QSD_REG(0x0040) QUP_REG(0x0108))
+#define SPI_INPUT_DEBUG               (QSD_REG(0x0044) QUP_REG(0x0210))
+#define SPI_TEST_CTRL                 (QSD_REG(0x004C) QUP_REG(0x0024))
+#define SPI_OUTPUT_FIFO               (QSD_REG(0x0100) QUP_REG(0x0110))
+#define SPI_INPUT_FIFO                (QSD_REG(0x0200) QUP_REG(0x0218))
+#define SPI_STATE                     (QSD_REG(SPI_OPERATIONAL) QUP_REG(0x0004))
+
+/* QUP_CONFIG fields */
+#define SPI_CFG_N                     0x0000001F
+#define SPI_NO_INPUT                  0x00000080
+#define SPI_NO_OUTPUT                 0x00000040
+#define SPI_EN_EXT_OUT_FLAG           0x00010000
+
+/* SPI_CONFIG fields */
+#define SPI_CFG_LOOPBACK              0x00000100
+#define SPI_CFG_INPUT_FIRST           0x00000200
+#define SPI_CFG_HS_MODE               0x00000400
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS             0x00000800
+#define SPI_IO_C_CLK_IDLE_HIGH        0x00000400
+#define SPI_IO_C_MX_CS_MODE           0x00000100
+#define SPI_IO_C_CS_N_POLARITY        0x000000F0
+#define SPI_IO_C_CS_N_POLARITY_0      0x00000010
+#define SPI_IO_C_CS_SELECT            0x0000000C
+#define SPI_IO_C_TRISTATE_CS          0x00000002
+#define SPI_IO_C_NO_TRI_STATE         0x00000001
+
+/* SPI_IO_MODES fields */
+#define SPI_IO_M_OUTPUT_BIT_SHIFT_EN  (QSD_REG(0x00004000) QUP_REG(0x00010000))
+#define SPI_IO_M_PACK_EN              (QSD_REG(0x00002000) QUP_REG(0x00008000))
+#define SPI_IO_M_UNPACK_EN            (QSD_REG(0x00001000) QUP_REG(0x00004000))
+#define SPI_IO_M_INPUT_MODE           (QSD_REG(0x00000C00) QUP_REG(0x00003000))
+#define SPI_IO_M_OUTPUT_MODE          (QSD_REG(0x00000300) QUP_REG(0x00000C00))
+#define SPI_IO_M_INPUT_FIFO_SIZE      (QSD_REG(0x000000C0) QUP_REG(0x00000380))
+#define SPI_IO_M_INPUT_BLOCK_SIZE     (QSD_REG(0x00000030) QUP_REG(0x00000060))
+#define SPI_IO_M_OUTPUT_FIFO_SIZE     (QSD_REG(0x0000000C) QUP_REG(0x0000001C))
+#define SPI_IO_M_OUTPUT_BLOCK_SIZE    (QSD_REG(0x00000003) QUP_REG(0x00000003))
+
+#define INPUT_BLOCK_SZ_SHIFT          (QSD_REG(4)          QUP_REG(5))
+#define INPUT_FIFO_SZ_SHIFT           (QSD_REG(6)          QUP_REG(7))
+#define OUTPUT_BLOCK_SZ_SHIFT         (QSD_REG(0)          QUP_REG(0))
+#define OUTPUT_FIFO_SZ_SHIFT          (QSD_REG(2)          QUP_REG(2))
+#define OUTPUT_MODE_SHIFT             (QSD_REG(8)          QUP_REG(10))
+#define INPUT_MODE_SHIFT              (QSD_REG(10)         QUP_REG(12))
+
+/* SPI_OPERATIONAL fields */
+#define SPI_OP_IN_BLK_RD_REQ_FLAG     0x00002000
+#define SPI_OP_OUT_BLK_WR_REQ_FLAG    0x00001000
+#define SPI_OP_MAX_INPUT_DONE_FLAG    0x00000800
+#define SPI_OP_MAX_OUTPUT_DONE_FLAG   0x00000400
+#define SPI_OP_INPUT_SERVICE_FLAG     0x00000200
+#define SPI_OP_OUTPUT_SERVICE_FLAG    0x00000100
+#define SPI_OP_INPUT_FIFO_FULL        0x00000080
+#define SPI_OP_OUTPUT_FIFO_FULL       0x00000040
+#define SPI_OP_IP_FIFO_NOT_EMPTY      0x00000020
+#define SPI_OP_OP_FIFO_NOT_EMPTY      0x00000010
+#define SPI_OP_STATE_VALID            0x00000004
+#define SPI_OP_STATE                  0x00000003
+
+#define SPI_OP_STATE_CLEAR_BITS       0x2
+
+#define SPI_PINCTRL_STATE_DEFAULT "spi_default"
+#define SPI_PINCTRL_STATE_SLEEP "spi_sleep"
+
+enum msm_spi_state {
+	SPI_OP_STATE_RESET = 0x00000000,
+	SPI_OP_STATE_RUN   = 0x00000001,
+	SPI_OP_STATE_PAUSE = 0x00000003,
+};
+
+/* SPI_ERROR_FLAGS fields */
+#define SPI_ERR_OUTPUT_OVER_RUN_ERR   0x00000020
+#define SPI_ERR_INPUT_UNDER_RUN_ERR   0x00000010
+#define SPI_ERR_OUTPUT_UNDER_RUN_ERR  0x00000008
+#define SPI_ERR_INPUT_OVER_RUN_ERR    0x00000004
+#define SPI_ERR_CLK_OVER_RUN_ERR      0x00000002
+#define SPI_ERR_CLK_UNDER_RUN_ERR     0x00000001
+
+/* We don't allow transactions larger than 4K-64 or 64K-64 due to
+ * mx_input/output_cnt register size
+ */
+#define SPI_MAX_TRANSFERS             (QSD_REG(0xFC0) QUP_REG(0xFC0))
+#define SPI_MAX_LEN                   (SPI_MAX_TRANSFERS * dd->bytes_per_word)
+
+#define SPI_NUM_CHIPSELECTS           4
+#define SPI_SUPPORTED_MODES  (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP)
+
+/* high speed mode is when the bus rate is greater than 26 MHz */
+#define SPI_HS_MIN_RATE               (26000000)
+
+#define SPI_DELAY_THRESHOLD           1
+/* Default timeout is 10 milliseconds */
+#define SPI_DEFAULT_TIMEOUT           10
+/* 250 microseconds */
+#define SPI_TRYLOCK_DELAY             250
+
+/* Data Mover burst size */
+#define DM_BURST_SIZE                 16
+/* Data Mover commands should be aligned to 64 bit(8 bytes) */
+#define DM_BYTE_ALIGN                 8
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_LPAE)
+#define spi_dma_mask(dev)   (dma_set_mask((dev), DMA_BIT_MASK(36)))
+#else
+#define spi_dma_mask(dev)   (dma_set_mask((dev), DMA_BIT_MASK(32)))
+#endif
+
+enum msm_spi_qup_version {
+	SPI_QUP_VERSION_NONE    = 0x0,
+	SPI_QUP_VERSION_BFAM    = 0x2,
+};
+
+enum msm_spi_pipe_direction {
+	SPI_BAM_CONSUMER_PIPE   = 0x0,
+	SPI_BAM_PRODUCER_PIPE   = 0x1,
+};
+
+#define SPI_BAM_MAX_DESC_NUM      32
+#define SPI_MAX_TRFR_BTWN_RESETS  ((64 * 1024) - 16)  /* 64 KB - 16 bytes */
+
+enum msm_spi_clk_path_vec_idx {
+	MSM_SPI_CLK_PATH_SUSPEND_VEC = 0,
+	MSM_SPI_CLK_PATH_RESUME_VEC  = 1,
+};
+#define MSM_SPI_CLK_PATH_AVRG_BW(dd) (76800000)
+#define MSM_SPI_CLK_PATH_BRST_BW(dd) (76800000)
+
+static char const * const spi_rsrcs[] = {
+	"spi_clk",
+	"spi_miso",
+	"spi_mosi"
+};
+
+static char const * const spi_cs_rsrcs[] = {
+	"spi_cs",
+	"spi_cs1",
+	"spi_cs2",
+	"spi_cs3",
+};
+
+enum msm_spi_mode {
+	SPI_FIFO_MODE  = 0x0,  /* 00 */
+	SPI_BLOCK_MODE = 0x1,  /* 01 */
+	SPI_BAM_MODE   = 0x3,  /* 11 */
+	SPI_MODE_NONE  = 0xFF, /* invalid value */
+};
+
+/* Structure for SPI CS GPIOs */
+struct spi_cs_gpio {
+	int  gpio_num;
+	bool valid;
+};
+
+#ifdef CONFIG_DEBUG_FS
+struct msm_spi_debugfs_data {
+	int offset;
+	struct msm_spi *dd;
+};
+/* Used to create debugfs entries */
+static struct msm_spi_regs {
+	const char *name;
+	mode_t mode;
+	int offset;
+} debugfs_spi_regs[] = {
+	{"config",               0644,  SPI_CONFIG },
+	{"io_control",           0644,  SPI_IO_CONTROL },
+	{"io_modes",             0644,  SPI_IO_MODES },
+	{"sw_reset",             0200,  SPI_SW_RESET },
+	{"time_out_current",     0444,  SPI_TIME_OUT_CURRENT },
+	{"mx_output_count",      0644,  SPI_MX_OUTPUT_COUNT },
+	{"mx_output_cnt_current", 0444, SPI_MX_OUTPUT_CNT_CURRENT },
+	{"mx_input_count",       0644,  SPI_MX_INPUT_COUNT },
+	{"mx_input_cnt_current", 0444,  SPI_MX_INPUT_CNT_CURRENT },
+	{"mx_read_count",        0644,  SPI_MX_READ_COUNT },
+	{"mx_read_cnt_current",  0444,  SPI_MX_READ_CNT_CURRENT },
+	{"operational",          0644,  SPI_OPERATIONAL },
+	{"error_flags",          0644,  SPI_ERROR_FLAGS },
+	{"error_flags_en",       0644,  SPI_ERROR_FLAGS_EN },
+	{"deassert_wait",        0644,  SPI_DEASSERT_WAIT },
+	{"output_debug",         0444,  SPI_OUTPUT_DEBUG },
+	{"input_debug",          0444,  SPI_INPUT_DEBUG },
+	{"test_ctrl",            0644,  SPI_TEST_CTRL },
+	{"output_fifo",          0200,  SPI_OUTPUT_FIFO },
+	{"input_fifo",           0400,  SPI_INPUT_FIFO },
+	{"spi_state",            0644,  SPI_STATE },
+#if IS_ENABLED(CONFIG_SPI_QSD) || IS_ENABLED(CONFIG_SPI_QSD_MODULE)
+	{"fifo_word_cnt",        0444,  SPI_FIFO_WORD_CNT},
+#else
+	{"qup_config",           0644,  QUP_CONFIG},
+	{"qup_error_flags",      0644,  QUP_ERROR_FLAGS},
+	{"qup_error_flags_en",   0644,  QUP_ERROR_FLAGS_EN},
+	{"mx_write_cnt",         0644,  QUP_MX_WRITE_COUNT},
+	{"mx_write_cnt_current", 0444,  QUP_MX_WRITE_CNT_CURRENT},
+	{"output_fifo_word_cnt", 0444,  SPI_OUTPUT_FIFO_WORD_CNT},
+	{"input_fifo_word_cnt",  0444,  SPI_INPUT_FIFO_WORD_CNT},
+#endif
+};
+#endif
+
+struct msm_spi_bam_pipe {
+	const char              *name;
+	struct sps_pipe         *handle;
+	struct sps_connect       config;
+	bool                     teardown_required;
+};
+
+struct msm_spi_bam {
+	void __iomem            *base;
+	phys_addr_t              phys_addr;
+	uintptr_t                handle;
+	int                      irq;
+	struct msm_spi_bam_pipe  prod;
+	struct msm_spi_bam_pipe  cons;
+	bool                     deregister_required;
+	u32			 curr_rx_bytes_recvd;
+	u32			 curr_tx_bytes_sent;
+	u32			 bam_rx_len;
+	u32			 bam_tx_len;
+};
+
+struct msm_spi {
+	u8                      *read_buf;
+	const u8                *write_buf;
+	void __iomem            *base;
+	struct device           *dev;
+	spinlock_t               queue_lock;
+	struct mutex             core_lock;
+	struct spi_device       *spi;
+	struct spi_transfer     *cur_transfer;
+	struct completion        tx_transfer_complete;
+	struct completion        rx_transfer_complete;
+	struct clk              *clk;    /* core clock */
+	struct clk              *pclk;   /* interface clock */
+	struct msm_bus_client_handle *bus_cl_hdl;
+	unsigned long            mem_phys_addr;
+	size_t                   mem_size;
+	int                      input_fifo_size;
+	int                      output_fifo_size;
+	u32                      rx_bytes_remaining;
+	u32                      tx_bytes_remaining;
+	u32                      clock_speed;
+	int                      irq_in;
+	int                      read_xfr_cnt;
+	int                      write_xfr_cnt;
+	int                      write_len;
+	int                      read_len;
+#if IS_ENABLED(CONFIG_SPI_QSD) || IS_ENABLED(CONFIG_SPI_QSD_MODULE)
+	int                      irq_out;
+	int                      irq_err;
+#endif
+	int                      bytes_per_word;
+	bool                     suspended;
+	bool                     transfer_pending;
+	wait_queue_head_t        continue_suspend;
+	/* DMA data */
+	enum msm_spi_mode        tx_mode;
+	enum msm_spi_mode        rx_mode;
+	bool                     use_dma;
+	int                      tx_dma_chan;
+	int                      tx_dma_crci;
+	int                      rx_dma_chan;
+	int                      rx_dma_crci;
+	int                      (*dma_init)(struct msm_spi *dd);
+	void                     (*dma_teardown)(struct msm_spi *dd);
+	struct msm_spi_bam       bam;
+	int                      input_block_size;
+	int                      output_block_size;
+	int                      input_burst_size;
+	int                      output_burst_size;
+	atomic_t                 rx_irq_called;
+	atomic_t                 tx_irq_called;
+	/* Used to pad messages unaligned to block size */
+	u8                       *tx_padding;
+	dma_addr_t               tx_padding_dma;
+	u8                       *rx_padding;
+	dma_addr_t               rx_padding_dma;
+	u32                      tx_unaligned_len;
+	u32                      rx_unaligned_len;
+	/* DMA statistics */
+	int                      stat_rx;
+	int                      stat_tx;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dent_spi;
+	struct dentry *debugfs_spi_regs[ARRAY_SIZE(debugfs_spi_regs)];
+	struct msm_spi_debugfs_data reg_data[ARRAY_SIZE(debugfs_spi_regs)];
+#endif
+	struct msm_spi_platform_data *pdata; /* Platform data */
+	/* When set indicates multiple transfers in a single message */
+	bool                     rx_done;
+	bool                     tx_done;
+	u32                      cur_msg_len;
+	/* Used in FIFO mode to keep track of the transfer being processed */
+	struct spi_transfer     *cur_tx_transfer;
+	struct spi_transfer     *cur_rx_transfer;
+	/* Temporary buffer used for WR-WR or WR-RD transfers */
+	u8                      *temp_buf;
+	/* GPIO pin numbers for SPI clk, miso and mosi */
+	int                      spi_gpios[ARRAY_SIZE(spi_rsrcs)];
+	/* SPI CS GPIOs for each slave */
+	struct spi_cs_gpio       cs_gpios[ARRAY_SIZE(spi_cs_rsrcs)];
+	enum msm_spi_qup_version qup_ver;
+	int			 max_trfr_len;
+	u16			 xfrs_delay_usec;
+	struct pinctrl		*pinctrl;
+	struct pinctrl_state	*pins_active;
+	struct pinctrl_state	*pins_sleep;
+	bool			is_init_complete;
+	bool			pack_words;
+};
+
+/* Forward declaration */
+static irqreturn_t msm_spi_input_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_output_irq(int irq, void *dev_id);
+static irqreturn_t msm_spi_error_irq(int irq, void *dev_id);
+static inline int msm_spi_set_state(struct msm_spi *dd,
+				    enum msm_spi_state state);
+static void msm_spi_write_word_to_fifo(struct msm_spi *dd);
+static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd);
+static irqreturn_t msm_spi_qup_irq(int irq, void *dev_id);
+
+#if IS_ENABLED(CONFIG_SPI_QSD) || IS_ENABLED(CONFIG_SPI_QSD_MODULE)
+static inline void msm_spi_disable_irqs(struct msm_spi *dd)
+{
+	disable_irq(dd->irq_in);
+	disable_irq(dd->irq_out);
+	disable_irq(dd->irq_err);
+}
+
+static inline void msm_spi_enable_irqs(struct msm_spi *dd)
+{
+	enable_irq(dd->irq_in);
+	enable_irq(dd->irq_out);
+	enable_irq(dd->irq_err);
+}
+
+static inline int msm_spi_request_irq(struct msm_spi *dd,
+				struct platform_device *pdev,
+				struct spi_master *master)
+{
+	int rc;
+
+	dd->irq_in  = platform_get_irq(pdev, 0);
+	dd->irq_out = platform_get_irq(pdev, 1);
+	dd->irq_err = platform_get_irq(pdev, 2);
+	if ((dd->irq_in < 0) || (dd->irq_out < 0) || (dd->irq_err < 0))
+		return -EINVAL;
+
+	rc = devm_request_irq(dd->dev, dd->irq_in, msm_spi_input_irq,
+		IRQF_TRIGGER_RISING, pdev->name, dd);
+	if (rc)
+		goto error_irq;
+
+	rc = devm_request_irq(dd->dev, dd->irq_out, msm_spi_output_irq,
+		IRQF_TRIGGER_RISING, pdev->name, dd);
+	if (rc)
+		goto error_irq;
+
+	rc = devm_request_irq(dd->dev, dd->irq_err, msm_spi_error_irq,
+		IRQF_TRIGGER_RISING, pdev->name, master);
+	if (rc)
+		goto error_irq;
+
+error_irq:
+	return rc;
+}
+
+static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err) {}
+static inline void msm_spi_ack_clk_err(struct msm_spi *dd) {}
+static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw) {}
+
+static inline int  msm_spi_prepare_for_write(struct msm_spi *dd) { return 0; }
+static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
+{
+	msm_spi_write_word_to_fifo(dd);
+}
+static inline void msm_spi_set_write_count(struct msm_spi *dd, int val) {}
+
+static inline void msm_spi_complete(struct msm_spi *dd)
+{
+	complete(&dd->transfer_complete);
+}
+
+static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x0000007B, dd->base + SPI_ERROR_FLAGS_EN);
+}
+
+static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
+{
+	writel_relaxed(0x0000007F, dd->base + SPI_ERROR_FLAGS);
+}
+
+#else
+/* In QUP the same interrupt line is used for input, output and error*/
+static inline int msm_spi_request_irq(struct msm_spi *dd,
+				struct platform_device *pdev,
+				struct spi_master *master)
+{
+	dd->irq_in  = platform_get_irq(pdev, 0);
+	if (dd->irq_in < 0)
+		return -EINVAL;
+
+	return devm_request_irq(dd->dev, dd->irq_in, msm_spi_qup_irq,
+		IRQF_TRIGGER_HIGH, pdev->name, dd);
+}
+
+static inline void msm_spi_disable_irqs(struct msm_spi *dd)
+{
+	disable_irq(dd->irq_in);
+}
+
+static inline void msm_spi_enable_irqs(struct msm_spi *dd)
+{
+	enable_irq(dd->irq_in);
+}
+
+static inline void msm_spi_get_clk_err(struct msm_spi *dd, u32 *spi_err)
+{
+	*spi_err = readl_relaxed(dd->base + QUP_ERROR_FLAGS);
+}
+
+static inline void msm_spi_ack_clk_err(struct msm_spi *dd)
+{
+	writel_relaxed(QUP_ERR_MASK, dd->base + QUP_ERROR_FLAGS);
+}
+
+static inline void
+msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n);
+
+/**
+ * msm_spi_set_qup_config: set QUP_CONFIG to no_input, no_output, and N bits
+ */
+static inline void msm_spi_set_qup_config(struct msm_spi *dd, int bpw)
+{
+	u32 qup_config = readl_relaxed(dd->base + QUP_CONFIG);
+
+	msm_spi_set_bpw_and_no_io_flags(dd, &qup_config, bpw-1);
+	writel_relaxed(qup_config | QUP_CONFIG_SPI_MODE, dd->base + QUP_CONFIG);
+}
+
+static inline int msm_spi_prepare_for_write(struct msm_spi *dd)
+{
+	if (msm_spi_set_state(dd, SPI_OP_STATE_RUN))
+		return -EINVAL;
+	if (msm_spi_set_state(dd, SPI_OP_STATE_PAUSE))
+		return -EINVAL;
+	return 0;
+}
+
+static inline void msm_spi_start_write(struct msm_spi *dd, u32 read_count)
+{
+	msm_spi_write_rmn_to_fifo(dd);
+}
+
+static inline void msm_spi_set_write_count(struct msm_spi *dd, int val)
+{
+	writel_relaxed(val, dd->base + QUP_MX_WRITE_COUNT);
+}
+
+static inline void msm_spi_complete(struct msm_spi *dd)
+{
+	dd->tx_done = true;
+	dd->rx_done = true;
+}
+
+static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
+{
+	if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+		writel_relaxed(
+			SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+			dd->base + SPI_ERROR_FLAGS_EN);
+	else
+		writel_relaxed(0x00000078, dd->base + SPI_ERROR_FLAGS_EN);
+}
+
+static inline void msm_spi_clear_error_flags(struct msm_spi *dd)
+{
+	if (dd->qup_ver == SPI_QUP_VERSION_BFAM)
+		writel_relaxed(
+			SPI_ERR_CLK_UNDER_RUN_ERR | SPI_ERR_CLK_OVER_RUN_ERR,
+			dd->base + SPI_ERROR_FLAGS);
+	else
+		writel_relaxed(0x0000007C, dd->base + SPI_ERROR_FLAGS);
+}
+
+#endif
+#endif
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index bda5af5..8e0cfd7 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -201,7 +201,7 @@
 		chip->temp = mili_celsius;
 	}
 
-	*temp = chip->temp < 0 ? 0 : chip->temp;
+	*temp = chip->temp;
 
 	return 0;
 }
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 941f7f4..d04ea03 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -83,12 +83,70 @@
 	*temp = last_temp * TSENS_TM_SCALE_DECI_MILLIDEG;
 }
 
+static int __tsens2xxx_hw_init(struct tsens_device *tmdev)
+{
+	void __iomem *srot_addr;
+	void __iomem *sensor_int_mask_addr;
+	unsigned int srot_val, crit_mask, crit_val;
+	void __iomem *int_mask_addr;
+
+	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
+	srot_val = readl_relaxed(srot_addr);
+	if (!(srot_val & TSENS_EN)) {
+		pr_err("TSENS device is not enabled\n");
+		return -ENODEV;
+	}
+
+	if (tmdev->ctrl_data->cycle_monitor) {
+		sensor_int_mask_addr =
+			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
+		crit_mask = readl_relaxed(sensor_int_mask_addr);
+		crit_val = TSENS_TM_CRITICAL_CYCLE_MONITOR;
+		if (tmdev->ctrl_data->cycle_compltn_monitor_mask)
+			writel_relaxed((crit_mask | crit_val),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_tm_addr)));
+		else
+			writel_relaxed((crit_mask & ~crit_val),
+				(TSENS_TM_CRITICAL_INT_MASK
+				(tmdev->tsens_tm_addr)));
+		/*Update critical cycle monitoring*/
+		mb();
+	}
+
+	if (tmdev->ctrl_data->wd_bark) {
+		sensor_int_mask_addr =
+			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
+		crit_mask = readl_relaxed(sensor_int_mask_addr);
+		crit_val = TSENS_TM_CRITICAL_WD_BARK;
+		if (tmdev->ctrl_data->wd_bark_mask)
+			writel_relaxed((crit_mask | crit_val),
+			(TSENS_TM_CRITICAL_INT_MASK
+			(tmdev->tsens_tm_addr)));
+		else
+			writel_relaxed((crit_mask & ~crit_val),
+			(TSENS_TM_CRITICAL_INT_MASK
+			(tmdev->tsens_tm_addr)));
+		/*Update watchdog monitoring*/
+		mb();
+	}
+
+	int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_tm_addr);
+	writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, int_mask_addr);
+
+	writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
+		TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
+		TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
+
+	return 0;
+}
+
 static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
 {
 	struct tsens_device *tmdev = NULL, *tmdev_itr;
 	unsigned int code, ret, tsens_ret;
 	void __iomem *sensor_addr, *trdy;
-	int last_temp = 0, last_temp2 = 0, last_temp3 = 0, count = 0;
+	int rc = 0, last_temp = 0, last_temp2 = 0, last_temp3 = 0, count = 0;
 	static atomic_t in_tsens_reinit;
 
 	if (!sensor)
@@ -172,6 +230,13 @@
 			/* Notify thermal fwk */
 			list_for_each_entry(tmdev_itr,
 						&tsens_device_list, list) {
+				rc = __tsens2xxx_hw_init(tmdev_itr);
+				if (rc) {
+					pr_err(
+					"%s: Failed to re-initialize TSENS controller\n",
+						__func__);
+					BUG();
+				}
 				queue_work(tmdev_itr->tsens_reinit_work,
 					&tmdev_itr->therm_fwk_notify);
 			}
@@ -713,58 +778,11 @@
 
 static int tsens2xxx_hw_init(struct tsens_device *tmdev)
 {
-	void __iomem *srot_addr;
-	void __iomem *sensor_int_mask_addr;
-	unsigned int srot_val, crit_mask, crit_val;
-	void __iomem *int_mask_addr;
+	int rc = 0;
 
-	srot_addr = TSENS_CTRL_ADDR(tmdev->tsens_srot_addr + 0x4);
-	srot_val = readl_relaxed(srot_addr);
-	if (!(srot_val & TSENS_EN)) {
-		pr_err("TSENS device is not enabled\n");
-		return -ENODEV;
-	}
-
-	if (tmdev->ctrl_data->cycle_monitor) {
-		sensor_int_mask_addr =
-			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
-		crit_mask = readl_relaxed(sensor_int_mask_addr);
-		crit_val = TSENS_TM_CRITICAL_CYCLE_MONITOR;
-		if (tmdev->ctrl_data->cycle_compltn_monitor_mask)
-			writel_relaxed((crit_mask | crit_val),
-				(TSENS_TM_CRITICAL_INT_MASK
-				(tmdev->tsens_tm_addr)));
-		else
-			writel_relaxed((crit_mask & ~crit_val),
-				(TSENS_TM_CRITICAL_INT_MASK
-				(tmdev->tsens_tm_addr)));
-		/*Update critical cycle monitoring*/
-		mb();
-	}
-
-	if (tmdev->ctrl_data->wd_bark) {
-		sensor_int_mask_addr =
-			TSENS_TM_CRITICAL_INT_MASK(tmdev->tsens_tm_addr);
-		crit_mask = readl_relaxed(sensor_int_mask_addr);
-		crit_val = TSENS_TM_CRITICAL_WD_BARK;
-		if (tmdev->ctrl_data->wd_bark_mask)
-			writel_relaxed((crit_mask | crit_val),
-			(TSENS_TM_CRITICAL_INT_MASK
-			(tmdev->tsens_tm_addr)));
-		else
-			writel_relaxed((crit_mask & ~crit_val),
-			(TSENS_TM_CRITICAL_INT_MASK
-			(tmdev->tsens_tm_addr)));
-		/*Update watchdog monitoring*/
-		mb();
-	}
-
-	int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK(tmdev->tsens_tm_addr);
-	writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, int_mask_addr);
-
-	writel_relaxed(TSENS_TM_CRITICAL_INT_EN |
-		TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN,
-		TSENS_TM_INT_EN(tmdev->tsens_tm_addr));
+	rc = __tsens2xxx_hw_init(tmdev);
+	if (rc)
+		return rc;
 
 	spin_lock_init(&tmdev->tsens_crit_lock);
 	spin_lock_init(&tmdev->tsens_upp_low_lock);
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index d118de7..71a0c20 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1013,6 +1013,18 @@
 	  As earlycon can't have HW version awareness, decision is taken
 	  based on the configuration.
 
+config SERIAL_MSM_HS
+	tristate "MSM UART High Speed: Serial Driver"
+	depends on ARCH_QCOM
+	select SERIAL_CORE
+	help
+	  If you have a machine based on the MSM family of SoCs, you
+	  can enable its onboard high speed serial port by enabling
+	  this option.
+
+	  Choose M here to compile it as a module. The module will be
+	  called msm_serial_hs.
+
 config SERIAL_VT8500
 	bool "VIA VT8500 on-chip serial port support"
 	depends on ARCH_VT8500
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index ee1906b..7c422f3 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -59,6 +59,7 @@
 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
 obj-$(CONFIG_SERIAL_MSM_GENI) += msm_geni_serial.o
+obj-$(CONFIG_SERIAL_MSM_HS) += msm_serial_hs.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
 obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
 obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
new file mode 100644
index 0000000..debf5f9
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -0,0 +1,3790 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* drivers/serial/msm_serial_hs.c
+ *
+ * MSM 7k High speed uart driver
+ *
+ * Copyright (c) 2008 Google Inc.
+ * Copyright (c) 2007-2018, 2020, The Linux Foundation. All rights reserved.
+ * Modified: Nick Pelly <npelly@google.com>
+ *
+ * Has optional support for uart power management independent of linux
+ * suspend/resume:
+ *
+ * RX wakeup.
+ * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
+ * UART RX pin). This should only be used if there is not a wakeup
+ * GPIO on the UART CTS, and the first RX byte is known (for example, with the
+ * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
+ * always be lost. RTS will be asserted even while the UART is off in this mode
+ * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
+ */
+
+#include <linux/module.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/tty_flip.h>
+#include <linux/wait.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/ipc_logging.h>
+#include <asm/irq.h>
+#include <linux/kthread.h>
+
+#include <linux/msm-sps.h>
+#include <linux/platform_data/msm_serial_hs.h>
+#include <linux/msm-bus.h>
+
+#include "msm_serial_hs_hwreg.h"
+#define UART_SPS_CONS_PERIPHERAL 0
+#define UART_SPS_PROD_PERIPHERAL 1
+
+#define IPC_MSM_HS_LOG_STATE_PAGES 2
+#define IPC_MSM_HS_LOG_USER_PAGES 2
+#define IPC_MSM_HS_LOG_DATA_PAGES 3
+#define UART_DMA_DESC_NR 8
+#define BUF_DUMP_SIZE 32
+
+/* If the debug_mask gets set to FATAL_LEV,
+ * a fatal error has happened and further IPC logging
+ * is disabled so that this problem can be detected
+ */
+enum {
+	FATAL_LEV = 0U,
+	ERR_LEV = 1U,
+	WARN_LEV = 2U,
+	INFO_LEV = 3U,
+	DBG_LEV = 4U,
+};
+
+#define MSM_HS_DBG(x...) do { \
+	if (msm_uport->ipc_debug_mask >= DBG_LEV) { \
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+	} \
+} while (0)
+
+#define MSM_HS_INFO(x...) do { \
+	if (msm_uport->ipc_debug_mask >= INFO_LEV) {\
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+	} \
+} while (0)
+
+/* warnings and errors show up on console always */
+#define MSM_HS_WARN(x...) do { \
+	pr_warn(x); \
+	if (msm_uport->ipc_msm_hs_log_ctxt && \
+			msm_uport->ipc_debug_mask >= WARN_LEV) \
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+} while (0)
+
+/* An ERROR condition in the driver sets ipc_debug_mask to
+ * FATAL_LEV, which disables further IPC logging while errors
+ * continue to be logged on the console.
+ */
+#define MSM_HS_ERR(x...) do { \
+	pr_err(x); \
+	if (msm_uport->ipc_msm_hs_log_ctxt && \
+			msm_uport->ipc_debug_mask >= ERR_LEV) { \
+		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
+		msm_uport->ipc_debug_mask = FATAL_LEV; \
+	} \
+} while (0)
+
+#define LOG_USR_MSG(ctx, x...) ipc_log_string(ctx, x)
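+
+/*
+ * Usage sketch for the logging macros above: they expect a local
+ * 'msm_uport' pointer in scope at the call site, e.g.
+ *	MSM_HS_DBG("%s(): tx_count %d\n", __func__, tx_count);
+ *	MSM_HS_ERR("%s(): BAM connect failed %d\n", __func__, ret);
+ */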
+
+/*
+ * There are three different kinds of UART core available on MSM:
+ * High Speed UART (i.e. legacy HSUART), GSBI-based HSUART,
+ * and BLSP-based HSUART.
+ */
+enum uart_core_type {
+	LEGACY_HSUART,
+	GSBI_HSUART,
+	BLSP_HSUART,
+};
+
+enum flush_reason {
+	FLUSH_NONE,
+	FLUSH_DATA_READY,
+	FLUSH_DATA_INVALID,  /* values after this indicate invalid data */
+	FLUSH_IGNORE,
+	FLUSH_STOP,
+	FLUSH_SHUTDOWN,
+};
+
+/*
+ * SPS data structures to support HSUART with BAM
+ * @sps_pipe - This struct defines a BAM pipe descriptor
+ * @sps_connect - This struct defines a connection's end point
+ * @sps_register - This struct defines event registration parameters
+ */
+struct msm_hs_sps_ep_conn_data {
+	struct sps_pipe *pipe_handle;
+	struct sps_connect config;
+	struct sps_register_event event;
+};
+
+struct msm_hs_tx {
+	bool dma_in_flight;    /* tx dma in progress */
+	enum flush_reason flush;
+	wait_queue_head_t wait;
+	int tx_count;
+	dma_addr_t dma_base;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct msm_hs_sps_ep_conn_data cons;
+	struct timer_list tx_timeout_timer;
+	void *ipc_tx_ctxt;
+};
+
+struct msm_hs_rx {
+	enum flush_reason flush;
+	wait_queue_head_t wait;
+	dma_addr_t rbuffer;
+	unsigned char *buffer;
+	unsigned int buffer_pending;
+	struct delayed_work flip_insert_work;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct msm_hs_sps_ep_conn_data prod;
+	unsigned long queued_flag;
+	unsigned long pending_flag;
+	int rx_inx;
+	struct sps_iovec iovec[UART_DMA_DESC_NR]; /* track descriptors */
+	void *ipc_rx_ctxt;
+};
+enum buffer_states {
+	NONE_PENDING = 0x0,
+	FIFO_OVERRUN = 0x1,
+	PARITY_ERROR = 0x2,
+	CHARS_NORMAL = 0x4,
+};
+
+enum msm_hs_pm_state {
+	MSM_HS_PM_ACTIVE,
+	MSM_HS_PM_SUSPENDED,
+	MSM_HS_PM_SYS_SUSPENDED,
+};
+
+/* optional low power wakeup, typically on a GPIO RX irq */
+struct msm_hs_wakeup {
+	int irq;  /* < 0 indicates low power wakeup disabled */
+	unsigned char ignore;  /* bool */
+
+	/* bool: inject char into rx tty on wakeup */
+	bool inject_rx;
+	unsigned char rx_to_inject;
+	bool enabled;
+	bool freed;
+};
+
+struct msm_hs_port {
+	struct uart_port uport;
+	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
+	struct clk *clk;
+	struct clk *pclk;
+	struct msm_hs_tx tx;
+	struct msm_hs_rx rx;
+	atomic_t resource_count;
+	struct msm_hs_wakeup wakeup;
+
+	struct dentry *loopback_dir;
+	struct work_struct clock_off_w; /* work for actual clock off */
+	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
+	struct mutex mtx; /* resource access mutex */
+	enum uart_core_type uart_type;
+	unsigned long bam_handle;
+	resource_size_t bam_mem;
+	int bam_irq;
+	unsigned char __iomem *bam_base;
+	unsigned int bam_tx_ep_pipe_index;
+	unsigned int bam_rx_ep_pipe_index;
+	/* struct sps_event_notify is an argument passed when triggering a
+	 * callback event object registered for an SPS connection end point.
+	 */
+	struct sps_event_notify notify;
+	/* bus client handler */
+	u32 bus_perf_client;
+	/* BLSP UART required BUS Scaling data */
+	struct msm_bus_scale_pdata *bus_scale_table;
+	bool rx_bam_inprogress;
+	wait_queue_head_t bam_disconnect_wait;
+	bool use_pinctrl;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+	bool flow_control;
+	enum msm_hs_pm_state pm_state;
+	atomic_t client_count;
+	bool obs; /* out of band sleep flag */
+	atomic_t client_req_state;
+	void *ipc_msm_hs_log_ctxt;
+	void *ipc_msm_hs_pwr_ctxt;
+	int ipc_debug_mask;
+};
+
+static const struct of_device_id msm_hs_match_table[] = {
+	{ .compatible = "qcom,msm-hsuart-v14"},
+	{}
+};
+
+#define MSM_UARTDM_BURST_SIZE 16   /* DM burst size (in bytes) */
+#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
+#define UARTDM_RX_BUF_SIZE 512
+#define RETRY_TIMEOUT 5
+#define UARTDM_NR 256
+#define BAM_PIPE_MIN 0
+#define BAM_PIPE_MAX 11
+#define BUS_SCALING 1
+#define BUS_RESET 0
+#define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
+#define BLSP_UART_CLK_FMAX 63160000
+
+static struct dentry *debug_base;
+static struct platform_driver msm_serial_hs_platform_driver;
+static struct uart_driver msm_hs_driver;
+static const struct uart_ops msm_hs_ops;
+static void msm_hs_start_rx_locked(struct uart_port *uport);
+static void msm_serial_hs_rx_work(struct kthread_work *work);
+static void flip_insert_work(struct work_struct *work);
+static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote);
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
+static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport);
+static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport);
+static int msm_hs_pm_resume(struct device *dev);
+
+#define UARTDM_TO_MSM(uart_port) \
+	container_of((uart_port), struct msm_hs_port, uport)
+
+static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
+						unsigned long arg)
+{
+	int ret = 0, state = 1;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (!msm_uport)
+		return -ENODEV;
+
+	switch (cmd) {
+	case MSM_ENABLE_UART_CLOCK: {
+		ret = msm_hs_request_clock_on(&msm_uport->uport);
+		break;
+	}
+	case MSM_DISABLE_UART_CLOCK: {
+		ret = msm_hs_request_clock_off(&msm_uport->uport);
+		break;
+	}
+	case MSM_GET_UART_CLOCK_STATUS: {
+		/* Return value 0 - UART CLOCK is OFF
+		 * Return value 1 - UART CLOCK is ON
+		 */
+
+		if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+			state = 0;
+		ret = state;
+		MSM_HS_INFO("%s():GET UART CLOCK STATUS: cmd=%d state=%d\n",
+			__func__, cmd, state);
+		break;
+	}
+	default: {
+		MSM_HS_INFO("%s():Unknown cmd specified: cmd=%d\n", __func__,
+			   cmd);
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	}
+
+	return ret;
+}
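+
+/*
+ * Illustrative userspace sequence for the ioctls handled above (a
+ * sketch; the device node name is an assumption and the
+ * MSM_*_UART_CLOCK numbers come from the msm_serial_hs platform
+ * header):
+ *
+ *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
+ *	ioctl(fd, MSM_ENABLE_UART_CLOCK);	// vote the UART clock on
+ *	int on = ioctl(fd, MSM_GET_UART_CLOCK_STATUS); // 1 = ON, 0 = OFF
+ *	ioctl(fd, MSM_DISABLE_UART_CLOCK);	// drop the vote
+ */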
+
+/*
+ * This function is called initially during probe and then
+ * through the runtime PM framework. The function directly calls
+ * resource APIs to enable them.
+ */
+
+static int msm_hs_clk_bus_vote(struct msm_hs_port *msm_uport)
+{
+	int rc = 0;
+
+	msm_hs_bus_voting(msm_uport, BUS_SCALING);
+	/* Turn on core clk and iface clk */
+	if (msm_uport->pclk) {
+		rc = clk_prepare_enable(msm_uport->pclk);
+		if (rc) {
+			dev_err(msm_uport->uport.dev,
+				"%s(): Could not turn on pclk [%d]\n",
+				__func__, rc);
+			goto busreset;
+		}
+	}
+	rc = clk_prepare_enable(msm_uport->clk);
+	if (rc) {
+		dev_err(msm_uport->uport.dev,
+			"%s(): Could not turn on core clk [%d]\n",
+			__func__, rc);
+		goto core_unprepare;
+	}
+	MSM_HS_DBG("%s(): Clock ON successful\n", __func__);
+	return rc;
+core_unprepare:
+	clk_disable_unprepare(msm_uport->pclk);
+busreset:
+	msm_hs_bus_voting(msm_uport, BUS_RESET);
+	return rc;
+}
+
+/*
+ * This function is called initially during probe and then
+ * through the runtime PM framework. The function directly calls
+ * resource APIs to disable them.
+ */
+static void msm_hs_clk_bus_unvote(struct msm_hs_port *msm_uport)
+{
+	clk_disable_unprepare(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_disable_unprepare(msm_uport->pclk);
+	msm_hs_bus_voting(msm_uport, BUS_RESET);
+	MSM_HS_DBG("%s(): Clock OFF successful\n", __func__);
+}
+
+/* Remove vote for resources when done */
+static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	int rc = atomic_read(&msm_uport->resource_count);
+
+	MSM_HS_DBG("%s(): power usage count %d\n", __func__, rc);
+	if (rc <= 0) {
+		MSM_HS_WARN("%s(): rc zero, bailing\n", __func__);
+		WARN_ON(1);
+		return;
+	}
+	atomic_dec(&msm_uport->resource_count);
+	pm_runtime_mark_last_busy(uport->dev);
+	pm_runtime_put_autosuspend(uport->dev);
+}
+
+/* Vote for resources before accessing them */
+static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
+{
+	int ret;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	ret = pm_runtime_get_sync(uport->dev);
+	if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s():%s runtime PM CB not invoked ret:%d st:%d\n",
+			__func__, dev_name(uport->dev), ret,
+					msm_uport->pm_state);
+		msm_hs_pm_resume(uport->dev);
+	}
+	atomic_inc(&msm_uport->resource_count);
+}
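+
+/*
+ * Typical pattern around register access (illustrative): vote before
+ * touching UART/BAM registers and unvote when done, e.g.
+ *	msm_hs_resource_vote(msm_uport);
+ *	... read/write UART_DM registers ...
+ *	msm_hs_resource_unvote(msm_uport);
+ */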
+
+/* Check if the uport line number matches the user id stored in pdata.
+ * User id information is stored during initialization. This function
+ * ensures that the same device is selected.
+ */
+
+static struct msm_hs_port *get_matching_hs_port(struct platform_device *pdev)
+{
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+	struct msm_hs_port *msm_uport = msm_hs_get_hs_port(pdev->id);
+
+	if ((!msm_uport) || (msm_uport->uport.line != pdev->id
+	   && msm_uport->uport.line != pdata->userid)) {
+		pr_err("uport line number mismatch\n");
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return msm_uport;
+}
+
+static ssize_t clock_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	int state = 1;
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+			state = 0;
+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", state);
+	}
+	return ret;
+}
+
+static ssize_t clock_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	int state;
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		state = buf[0] - '0';
+		switch (state) {
+		case 0:
+			MSM_HS_DBG("%s(): Request clock OFF\n", __func__);
+			msm_hs_request_clock_off(&msm_uport->uport);
+			ret = count;
+			break;
+		case 1:
+			MSM_HS_DBG("%s(): Request clock ON\n", __func__);
+			msm_hs_request_clock_on(&msm_uport->uport);
+			ret = count;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+	}
+	return ret;
+}
+
+static DEVICE_ATTR_RW(clock);
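+
+/*
+ * Illustrative shell usage of the 'clock' attribute (the platform
+ * device path is an assumption and varies by target):
+ *	echo 1 > /sys/devices/platform/msm_serial_hs.0/clock   # clock on
+ *	echo 0 > /sys/devices/platform/msm_serial_hs.0/clock   # clock off
+ *	cat /sys/devices/platform/msm_serial_hs.0/clock        # 1=ON 0=OFF
+ */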
+
+static ssize_t debug_mask_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport)
+		ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+					msm_uport->ipc_debug_mask);
+	return ret;
+}
+
+static ssize_t debug_mask_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct platform_device *pdev = container_of(dev, struct
+						    platform_device, dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	/* This check should not fail */
+	if (msm_uport) {
+		msm_uport->ipc_debug_mask = buf[0] - '0';
+		if (msm_uport->ipc_debug_mask < FATAL_LEV ||
+				msm_uport->ipc_debug_mask > DBG_LEV) {
+			/* set to default level */
+			msm_uport->ipc_debug_mask = INFO_LEV;
+			MSM_HS_ERR("Range is 0 to 4;Set to default level 3\n");
+			return -EINVAL;
+		}
+	}
+	return count;
+}
+
+static DEVICE_ATTR_RW(debug_mask);
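+
+/*
+ * Example (shell; path illustrative): raise the IPC log verbosity to
+ * the maximum level DBG_LEV (4):
+ *
+ *	echo 4 > /sys/devices/platform/msm_serial_hs.0/debug_mask
+ */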
+
+static inline bool is_use_low_power_wakeup(struct msm_hs_port *msm_uport)
+{
+	return msm_uport->wakeup.irq > 0;
+}
+
+static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
+{
+	int ret;
+
+	if (msm_uport->bus_perf_client) {
+		MSM_HS_DBG("Bus voting:%d\n", vote);
+		ret = msm_bus_scale_client_update_request(
+				msm_uport->bus_perf_client, vote);
+		if (ret)
+			MSM_HS_ERR("%s(): Failed for Bus voting: %d\n",
+							__func__, vote);
+	}
+}
+
+static inline unsigned int msm_hs_read(struct uart_port *uport,
+				       unsigned int index)
+{
+	return readl_relaxed(uport->membase + index);
+}
+
+static inline void msm_hs_write(struct uart_port *uport, unsigned int index,
+				 unsigned int value)
+{
+	writel_relaxed(value, uport->membase + index);
+}
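+
+/*
+ * These accessors are used in read-modify-write sequences such as
+ * (taken from the flow-control helpers below):
+ *
+ *	data = msm_hs_read(uport, UART_DM_MR1);
+ *	data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+ *	msm_hs_write(uport, UART_DM_MR1, data);
+ */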
+
+static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
+{
+	struct sps_connect config;
+	int ret;
+
+	ret = sps_get_config(sps_pipe_handler, &config);
+	if (ret) {
+		pr_err("%s(): sps_get_config() failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	config.options |= SPS_O_POLL;
+	ret = sps_set_config(sps_pipe_handler, &config);
+	if (ret) {
+		pr_err("%s(): sps_set_config() failed ret %d\n", __func__, ret);
+		return ret;
+	}
+	return sps_disconnect(sps_pipe_handler);
+}
+
+static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
+			char *prefix, char *string, u64 addr, int size)
+{
+	char buf[(BUF_DUMP_SIZE * 3) + 2];
+	int len = 0;
+
+	len = min(size, BUF_DUMP_SIZE);
+	/*
+	 * Print up to 32 data bytes, 32 bytes per line, 1 byte at a time,
+	 * and don't include the ASCII text at the end of the buffer.
+	 */
+	hex_dump_to_buffer(string, len, 32, 1, buf, sizeof(buf), false);
+	ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
+					(unsigned int)addr, size, buf);
+}
+
+/*
+ * Read and log the UART core register contents.
+ */
+static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_INFO("%s():Failed clocks are off, resource_count %d\n",
+			__func__, atomic_read(&msm_uport->resource_count));
+		return;
+	}
+
+	MSM_HS_DBG(
+	"MR1:%x MR2:%x TFWR:%x RFWR:%x DMEN:%x IMR:%x MISR:%x NCF_TX:%x\n",
+	msm_hs_read(uport, UART_DM_MR1),
+	msm_hs_read(uport, UART_DM_MR2),
+	msm_hs_read(uport, UART_DM_TFWR),
+	msm_hs_read(uport, UART_DM_RFWR),
+	msm_hs_read(uport, UART_DM_DMEN),
+	msm_hs_read(uport, UART_DM_IMR),
+	msm_hs_read(uport, UART_DM_MISR),
+	msm_hs_read(uport, UART_DM_NCF_TX));
+	MSM_HS_INFO("SR:%x ISR:%x DMRX:%x RX_SNAP:%x TXFS:%x RXFS:%x\n",
+	msm_hs_read(uport, UART_DM_SR),
+	msm_hs_read(uport, UART_DM_ISR),
+	msm_hs_read(uport, UART_DM_DMRX),
+	msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP),
+	msm_hs_read(uport, UART_DM_TXFS),
+	msm_hs_read(uport, UART_DM_RXFS));
+	MSM_HS_DBG("rx.flush:%u\n", msm_uport->rx.flush);
+}
+
+static int msm_serial_loopback_enable_set(void *data, u64 val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	msm_hs_resource_vote(msm_uport);
+
+	if (val) {
+		spin_lock_irqsave(&uport->lock, flags);
+		ret = msm_hs_read(uport, UART_DM_MR2);
+		ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
+			UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
+		msm_hs_write(uport, UART_DM_MR2, ret);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	} else {
+		spin_lock_irqsave(&uport->lock, flags);
+		ret = msm_hs_read(uport, UART_DM_MR2);
+		ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
+			UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
+		msm_hs_write(uport, UART_DM_MR2, ret);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
+	/* Calling CLOCK API. Hence mb() is required here. */
+	mb();
+
+	msm_hs_resource_unvote(msm_uport);
+	return 0;
+}
+
+static int msm_serial_loopback_enable_get(void *data, u64 *val)
+{
+	struct msm_hs_port *msm_uport = data;
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned long flags;
+	int ret = 0;
+
+	msm_hs_resource_vote(msm_uport);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	msm_hs_resource_unvote(msm_uport);
+
+	*val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
+
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
+			msm_serial_loopback_enable_set, "%llu\n");
+
+/*
+ * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
+ * Writing 1 turns on the internal HW loopback mode; useful for
+ * automated test scripts.
+ * Writing 0 disables the internal loopback mode. Default is disabled.
+ */
+static void msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
+					   int id)
+{
+	char node_name[15];
+
+	scnprintf(node_name, sizeof(node_name), "loopback.%d", id);
+	msm_uport->loopback_dir = debugfs_create_file(node_name,
+						0644,
+						debug_base,
+						msm_uport,
+						&loopback_enable_fops);
+
+	if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
+		MSM_HS_ERR("%s(): Cannot create loopback.%d debug entry\n",
+							__func__, id);
+}
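+
+/*
+ * Example (shell): enable loopback on port 0 for an automated test,
+ * assuming debugfs is mounted at /sys/kernel/debug:
+ *
+ *	echo 1 > /sys/kernel/debug/msm_serial_hs/loopback.0
+ */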
+
+static int msm_hs_remove(struct platform_device *pdev)
+{
+	struct msm_hs_port *msm_uport;
+	struct device *dev;
+
+	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+		pr_err("Invalid plaform device ID = %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	msm_uport = get_matching_hs_port(pdev);
+	if (!msm_uport)
+		return -EINVAL;
+
+	dev = msm_uport->uport.dev;
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_debug_mask.attr);
+	debugfs_remove(msm_uport->loopback_dir);
+
+	dma_free_coherent(msm_uport->uport.dev,
+			UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
+			msm_uport->rx.buffer, msm_uport->rx.rbuffer);
+
+	msm_uport->rx.buffer = NULL;
+	msm_uport->rx.rbuffer = 0;
+
+	destroy_workqueue(msm_uport->hsuart_wq);
+	mutex_destroy(&msm_uport->mtx);
+
+	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
+	clk_put(msm_uport->clk);
+	if (msm_uport->pclk)
+		clk_put(msm_uport->pclk);
+
+	iounmap(msm_uport->uport.membase);
+
+	return 0;
+}
+
+/* Connect a UART peripheral's SPS endpoint (consumer endpoint)
+ *
+ * Also registers an SPS callback function for the consumer
+ * pipe with the SPS driver
+ *
+ * @msm_uport - Pointer to msm_hs_port structure
+ *
+ * @return - 0 if successful, else a negative value.
+ */
+
+static int msm_hs_spsconnect_tx(struct msm_hs_port *msm_uport)
+{
+	int ret;
+	struct uart_port *uport = &msm_uport->uport;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
+	struct sps_connect *sps_config = &tx->cons.config;
+	struct sps_register_event *sps_event = &tx->cons.event;
+	unsigned long flags;
+	unsigned int data;
+
+	if (tx->flush != FLUSH_SHUTDOWN) {
+		MSM_HS_ERR("%s():Invalid flush state:%d\n",
+			__func__, tx->flush);
+		return 0;
+	}
+
+	/* Establish connection between peripheral and memory endpoint */
+	ret = sps_connect(sps_pipe_handle, sps_config);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx\n"
+		"pipe_handle=0x%pK ret=%d", sps_pipe_handle, ret);
+		return ret;
+	}
+	/* Register callback event for EOT (End of transfer) event. */
+	ret = sps_register_event(sps_pipe_handle, sps_event);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx\n"
+		"pipe_handle=0x%pK ret=%d", sps_pipe_handle, ret);
+		goto reg_event_err;
+	}
+
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+	msm_uport->tx.flush = FLUSH_STOP;
+	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Enable UARTDM Tx BAM Interface */
+	data |= UARTDM_TX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
+
+	MSM_HS_DBG("%s(): TX Connect\n", __func__);
+	return 0;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+	return ret;
+}
+
+/* Connect a UART peripheral's SPS endpoint (producer endpoint)
+ *
+ * Also registers an SPS callback function for the producer
+ * pipe with the SPS driver
+ *
+ * @uport - Pointer to uart_port structure
+ *
+ * @return - 0 if successful, else a negative value.
+ */
+
+static int msm_hs_spsconnect_rx(struct uart_port *uport)
+{
+	int ret;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+	struct sps_connect *sps_config = &rx->prod.config;
+	struct sps_register_event *sps_event = &rx->prod.event;
+	unsigned long flags;
+
+	/* Establish connection between peripheral and memory endpoint */
+	ret = sps_connect(sps_pipe_handle, sps_config);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx\n"
+		"pipe_handle=0x%pK ret=%d", sps_pipe_handle, ret);
+		return ret;
+	}
+	/* Register callback event for DESC_DONE event. */
+	ret = sps_register_event(sps_pipe_handle, sps_event);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx\n"
+		"pipe_handle=0x%pK ret=%d", sps_pipe_handle, ret);
+		goto reg_event_err;
+	}
+	spin_lock_irqsave(&uport->lock, flags);
+	if (msm_uport->rx.pending_flag)
+		MSM_HS_WARN("%s(): Buffers may be pending 0x%lx\n",
+		__func__, msm_uport->rx.pending_flag);
+	msm_uport->rx.queued_flag = 0;
+	msm_uport->rx.pending_flag = 0;
+	msm_uport->rx.rx_inx = 0;
+	msm_uport->rx.flush = FLUSH_STOP;
+	spin_unlock_irqrestore(&uport->lock, flags);
+	MSM_HS_DBG("%s(): RX Connect\n", __func__);
+	return 0;
+
+reg_event_err:
+	sps_disconnect(sps_pipe_handle);
+	return ret;
+}
+
+/*
+ * Program the UARTDM_CSR register with the correct bit rate.
+ *
+ * Interrupts should be disabled before we are called, as
+ * we modify the baud rate here.
+ * Also set the receive stale interrupt level, which depends on the bit
+ * rate. The goal is to have around 8 ms before indicating stale:
+ * roundup(((Bit Rate * .008) / 10) + 1)
+ */
+static void msm_hs_set_bps_locked(struct uart_port *uport,
+			       unsigned int bps)
+{
+	unsigned long rxstale;
+	unsigned long data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	switch (bps) {
+	case 300:
+		msm_hs_write(uport, UART_DM_CSR, 0x00);
+		rxstale = 1;
+		break;
+	case 600:
+		msm_hs_write(uport, UART_DM_CSR, 0x11);
+		rxstale = 1;
+		break;
+	case 1200:
+		msm_hs_write(uport, UART_DM_CSR, 0x22);
+		rxstale = 1;
+		break;
+	case 2400:
+		msm_hs_write(uport, UART_DM_CSR, 0x33);
+		rxstale = 1;
+		break;
+	case 4800:
+		msm_hs_write(uport, UART_DM_CSR, 0x44);
+		rxstale = 1;
+		break;
+	case 9600:
+		msm_hs_write(uport, UART_DM_CSR, 0x55);
+		rxstale = 2;
+		break;
+	case 14400:
+		msm_hs_write(uport, UART_DM_CSR, 0x66);
+		rxstale = 3;
+		break;
+	case 19200:
+		msm_hs_write(uport, UART_DM_CSR, 0x77);
+		rxstale = 4;
+		break;
+	case 28800:
+		msm_hs_write(uport, UART_DM_CSR, 0x88);
+		rxstale = 6;
+		break;
+	case 38400:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		rxstale = 8;
+		break;
+	case 57600:
+		msm_hs_write(uport, UART_DM_CSR, 0xaa);
+		rxstale = 16;
+		break;
+	case 76800:
+		msm_hs_write(uport, UART_DM_CSR, 0xbb);
+		rxstale = 16;
+		break;
+	case 115200:
+		msm_hs_write(uport, UART_DM_CSR, 0xcc);
+		rxstale = 31;
+		break;
+	case 230400:
+		msm_hs_write(uport, UART_DM_CSR, 0xee);
+		rxstale = 31;
+		break;
+	case 460800:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	case 4000000:
+	case 3686400:
+	case 3200000:
+	case 3500000:
+	case 3000000:
+	case 2500000:
+	case 2000000:
+	case 1500000:
+	case 1152000:
+	case 1000000:
+	case 921600:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	default:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		/* default to 9600 */
+		bps = 9600;
+		rxstale = 2;
+		break;
+	}
+	/*
+	 * uart baud rate depends on CSR and MND Values
+	 * we are updating CSR before and then calling
+	 * clk_set_rate which updates MND Values. Hence
+	 * dsb requires here.
+	 */
+	mb();
+	if (bps > 460800) {
+		uport->uartclk = bps * 16;
+		/* BLSP based UART supports a maximum clock frequency
+		 * of 63.16 MHz. With this (63.16 MHz) clock frequency
+		 * the UART can support a baud rate of 3.94 Mbps, which
+		 * is close enough to 4 Mbps.
+		 * The UART hardware is robust enough to handle this
+		 * deviation to achieve a baud rate of ~4 Mbps.
+		 */
+		if (bps == 4000000)
+			uport->uartclk = BLSP_UART_CLK_FMAX;
+	} else {
+		uport->uartclk = 7372800;
+	}
+
+	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
+		MSM_HS_WARN("Error setting clock rate on UART\n");
+		WARN_ON(1);
+	}
+
+	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+	msm_hs_write(uport, UART_DM_IPR, data);
+	/*
+	 * It is suggested to do reset of transmitter and receiver after
+	 * changing any protocol configuration. Here Baud rate and stale
+	 * timeout are getting updated. Hence reset transmitter and receiver.
+	 */
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+}
+
+static void msm_hs_set_std_bps_locked(struct uart_port *uport,
+			       unsigned int bps)
+{
+	unsigned long rxstale;
+	unsigned long data;
+
+	switch (bps) {
+	case 9600:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		rxstale = 2;
+		break;
+	case 14400:
+		msm_hs_write(uport, UART_DM_CSR, 0xaa);
+		rxstale = 3;
+		break;
+	case 19200:
+		msm_hs_write(uport, UART_DM_CSR, 0xbb);
+		rxstale = 4;
+		break;
+	case 28800:
+		msm_hs_write(uport, UART_DM_CSR, 0xcc);
+		rxstale = 6;
+		break;
+	case 38400:
+		msm_hs_write(uport, UART_DM_CSR, 0xdd);
+		rxstale = 8;
+		break;
+	case 57600:
+		msm_hs_write(uport, UART_DM_CSR, 0xee);
+		rxstale = 16;
+		break;
+	case 115200:
+		msm_hs_write(uport, UART_DM_CSR, 0xff);
+		rxstale = 31;
+		break;
+	default:
+		msm_hs_write(uport, UART_DM_CSR, 0x99);
+		/* default to 9600 */
+		bps = 9600;
+		rxstale = 2;
+		break;
+	}
+
+	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
+	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
+
+	msm_hs_write(uport, UART_DM_IPR, data);
+}
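+
+/*
+ * Worked example of the IPR packing above, assuming the usual UARTDM
+ * layout where UARTDM_IPR_STALE_LSB_BMSK covers the low 5 bits: for
+ * rxstale = 31 (0x1f) the whole value fits in the LSB field, so the
+ * MSB field (rxstale << 2, masked) contributes nothing extra.
+ */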
+
+static void msm_hs_enable_flow_control(struct uart_port *uport, bool override)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	unsigned int data;
+
+	if (msm_uport->flow_control || override) {
+		/* Enable RFR line */
+		msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+		/* Enable auto RFR */
+		data = msm_hs_read(uport, UART_DM_MR1);
+		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UART_DM_MR1, data);
+		/* Ensure register IO completion */
+		mb();
+	}
+}
+
+static void msm_hs_disable_flow_control(struct uart_port *uport, bool override)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	unsigned int data;
+
+	/*
+	 * Clear the Rx Ready Ctl bit - This ensures that
+	 * flow control lines stop the other side from sending
+	 * data while we change the parameters
+	 */
+
+	if (msm_uport->flow_control || override) {
+		data = msm_hs_read(uport, UART_DM_MR1);
+		/* disable auto ready-for-receiving */
+		data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_hs_write(uport, UART_DM_MR1, data);
+		/* Disable RFR line */
+		msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
+		/* Ensure register IO completion */
+		mb();
+	}
+}
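+
+/*
+ * The two helpers above bracket reconfiguration and teardown sequences;
+ * msm_hs_resource_off() below, for example, disables flow control
+ * before disconnecting the BAM pipes and re-enables it afterwards:
+ *
+ *	msm_hs_disable_flow_control(uport, false);
+ *	... tear down / reconfigure ...
+ *	msm_hs_enable_flow_control(uport, false);
+ */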
+
+/*
+ * termios :  new ktermios
+ * oldtermios:  old ktermios previous setting
+ *
+ * Configure the serial port
+ */
+static void msm_hs_set_termios(struct uart_port *uport,
+				   struct ktermios *termios,
+				   struct ktermios *oldtermios)
+{
+	unsigned int bps;
+	unsigned long data;
+	unsigned int c_cflag = termios->c_cflag;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	/*
+	 * set_termios can be invoked from the framework when
+	 * the clocks are off and the client has not had a chance
+	 * to turn them on. Make sure that they are on.
+	 */
+	msm_hs_resource_vote(msm_uport);
+	mutex_lock(&msm_uport->mtx);
+	msm_hs_write(uport, UART_DM_IMR, 0);
+
+	msm_hs_disable_flow_control(uport, true);
+
+	/*
+	 * Disable Rx channel of UARTDM
+	 * DMA Rx Stall happens if enqueue and flush of Rx command happens
+	 * concurrently. Hence before changing the baud rate/protocol
+	 * configuration and sending flush command to ADM, disable the Rx
+	 * channel of UARTDM.
+	 * Note: should not reset the receiver here immediately as it is not
+	 * suggested to do disable/reset or reset/disable at the same time.
+	 */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Disable UARTDM RX BAM Interface */
+	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+
+	/*
+	 * Reset RX and TX.
+	 * Resetting the RX enables it, therefore we must reset and disable.
+	 */
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+
+	/* 300 is the minimum baud rate supported by the driver */
+	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);
+
+	/* Temporarily remap 200 baud to 3.2 Mbps */
+	if (bps == 200)
+		bps = 3200000;
+
+	uport->uartclk = clk_get_rate(msm_uport->clk);
+	if (!uport->uartclk)
+		msm_hs_set_std_bps_locked(uport, bps);
+	else
+		msm_hs_set_bps_locked(uport, bps);
+
+	data = msm_hs_read(uport, UART_DM_MR2);
+	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
+	/* set parity */
+	if (c_cflag & PARENB) {
+		if (c_cflag & PARODD)
+			data |= ODD_PARITY;
+		else if (c_cflag & CMSPAR)
+			data |= SPACE_PARITY;
+		else
+			data |= EVEN_PARITY;
+	}
+
+	/* Set bits per char */
+	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;
+
+	switch (c_cflag & CSIZE) {
+	case CS5:
+		data |= FIVE_BPC;
+		break;
+	case CS6:
+		data |= SIX_BPC;
+		break;
+	case CS7:
+		data |= SEVEN_BPC;
+		break;
+	default:
+		data |= EIGHT_BPC;
+		break;
+	}
+	/* stop bits */
+	if (c_cflag & CSTOPB) {
+		data |= STOP_BIT_TWO;
+	} else {
+		/* otherwise 1 stop bit */
+		data |= STOP_BIT_ONE;
+	}
+	data |= UARTDM_MR2_ERROR_MODE_BMSK;
+	/* write parity/bits per char/stop bit configuration */
+	msm_hs_write(uport, UART_DM_MR2, data);
+
+	uport->ignore_status_mask = termios->c_iflag & INPCK;
+	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
+	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;
+
+	uport->read_status_mask = (termios->c_cflag & CREAD);
+
+	/* Set Transmit software time out */
+	uart_update_timeout(uport, c_cflag, bps);
+
+	/* Enable UARTDM Rx BAM Interface */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data |= UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
+	/* Issue TX,RX BAM Start IFC command */
+	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
+	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+	/* Ensure Register Writes Complete */
+	mb();
+
+	/* Configure HW flow control.
+	 * The UART core samples the CTS line while sending data to the
+	 * remote UART to confirm whether it can receive.
+	 * The UART core asserts RFR when it has no space left in the
+	 * RX FIFO.
+	 */
+	/* Pull RFR line low (ready to receive) */
+	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+	data = msm_hs_read(uport, UART_DM_MR1);
+	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
+	if (c_cflag & CRTSCTS) {
+		data |= UARTDM_MR1_CTS_CTL_BMSK;
+		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
+		msm_uport->flow_control = true;
+	}
+	msm_hs_write(uport, UART_DM_MR1, data);
+	MSM_HS_INFO("%s(): Cflags 0x%x Baud %u\n", __func__, c_cflag, bps);
+
+	mutex_unlock(&msm_uport->mtx);
+
+	msm_hs_resource_unvote(msm_uport);
+}
+
+/*
+ *  Standard API, Transmitter
+ *  Any character in the transmit shift register is sent
+ */
+unsigned int msm_hs_tx_empty(struct uart_port *uport)
+{
+	unsigned int data;
+	unsigned int isr;
+	unsigned int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	data = msm_hs_read(uport, UART_DM_SR);
+	isr = msm_hs_read(uport, UART_DM_ISR);
+	msm_hs_resource_unvote(msm_uport);
+	MSM_HS_INFO("%s(): SR:0x%x ISR:0x%x\n", __func__, data, isr);
+
+	if (data & UARTDM_SR_TXEMT_BMSK) {
+		ret = TIOCSER_TEMT;
+	} else
+		/*
+		 * Add an extra sleep here because sometimes the framework's
+		 * delay (based on baud rate) isn't good enough.
+		 * Note that this won't happen during every port close, only
+		 * on select occassions when the userspace does back to back
+		 * write() and close().
+		 */
+		usleep_range(5000, 7000);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_tx_empty);
+
+/*
+ *  Standard API, Stop transmitter.
+ *  Any character in the transmit shift register is sent, as
+ *  well as the current data mover transfer.
+ */
+static void msm_hs_stop_tx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	tx->flush = FLUSH_STOP;
+}
+
+static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+	int ret = 0;
+
+	ret = sps_rx_disconnect(sps_pipe_handle);
+
+	if (msm_uport->rx.pending_flag)
+		MSM_HS_WARN("%s(): Buffers may be pending 0x%lx\n",
+		__func__, msm_uport->rx.pending_flag);
+	MSM_HS_DBG("%s(): clearing desc usage flag\n", __func__);
+	msm_uport->rx.queued_flag = 0;
+	msm_uport->rx.pending_flag = 0;
+	msm_uport->rx.rx_inx = 0;
+
+	if (ret)
+		MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
+	msm_uport->rx.flush = FLUSH_SHUTDOWN;
+	MSM_HS_DBG("%s(): Calling Completion\n", __func__);
+	wake_up(&msm_uport->bam_disconnect_wait);
+	MSM_HS_DBG("%s(): Done Completion\n", __func__);
+	wake_up(&msm_uport->rx.wait);
+	return ret;
+}
+
+static int sps_tx_disconnect(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &msm_uport->uport;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct sps_pipe *tx_pipe = tx->cons.pipe_handle;
+	unsigned long flags;
+	int ret = 0;
+
+	if (msm_uport->tx.flush == FLUSH_SHUTDOWN) {
+		MSM_HS_DBG("%s(): pipe already disonnected\n", __func__);
+		return ret;
+	}
+
+	ret = sps_disconnect(tx_pipe);
+
+	if (ret) {
+		MSM_HS_ERR("%s(): sps_disconnect failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	MSM_HS_DBG("%s(): TX Disconnect\n", __func__);
+	return ret;
+}
+
+static void msm_hs_disable_rx(struct uart_port *uport)
+{
+	unsigned int data;
+
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+}
+
+/*
+ *  Standard API, Stop receiver as soon as possible.
+ *
+ *  Function immediately terminates the operation of the
+ *  channel receiver and any incoming characters are lost. None
+ *  of the receiver status bits are affected by this command, and
+ *  characters that are already in the receive FIFO remain there.
+ */
+static void msm_hs_stop_rx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+	else
+		msm_hs_disable_rx(uport);
+
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_uport->rx.flush = FLUSH_STOP;
+}
+
+static void msm_hs_disconnect_rx(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_disable_rx(uport);
+	/* Disconnect the BAM RX pipe */
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_uport->rx.flush = FLUSH_STOP;
+	disconnect_rx_endpoint(msm_uport);
+	MSM_HS_DBG("%s(): rx->flush %d\n", __func__, msm_uport->rx.flush);
+}
+
+/* Tx timeout callback function */
+void tx_timeout_handler(struct timer_list *arg)
+{
+	struct msm_hs_port *msm_uport = from_timer(msm_uport, arg,
+		tx.tx_timeout_timer);
+	struct uart_port *uport = &msm_uport->uport;
+	int isr;
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): clocks are off\n", __func__);
+		return;
+	}
+
+	isr = msm_hs_read(uport, UART_DM_ISR);
+	if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
+		MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x\n", __func__, isr);
+	dump_uart_hs_registers(msm_uport);
+}
+
+/*  Transmit the next chunk of data */
+static void msm_hs_submit_tx_locked(struct uart_port *uport)
+{
+	int left;
+	int tx_count;
+	int aligned_tx_count;
+	dma_addr_t src_addr;
+	dma_addr_t aligned_src_addr;
+	u32 flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
+	struct sps_pipe *sps_pipe_handle;
+	int ret;
+
+	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
+		tx->dma_in_flight = false;
+		msm_hs_stop_tx_locked(uport);
+		return;
+	}
+
+	tx_count = uart_circ_chars_pending(tx_buf);
+
+	if (tx_count > UARTDM_TX_BUF_SIZE)
+		tx_count = UARTDM_TX_BUF_SIZE;
+
+	left = UART_XMIT_SIZE - tx_buf->tail;
+
+	if (tx_count > left)
+		tx_count = left;
+
+	src_addr = tx->dma_base + tx_buf->tail;
+	/* Mask src_addr to align it on a cache line boundary
+	 * and add the masked-off bytes to tx_count
+	 */
+	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
+	aligned_tx_count = tx_count + src_addr - aligned_src_addr;
+
+	dma_sync_single_for_device(uport->dev, aligned_src_addr,
+			aligned_tx_count, DMA_TO_DEVICE);
+
+	tx->tx_count = tx_count;
+
+	hex_dump_ipc(msm_uport, tx->ipc_tx_ctxt, "Tx",
+			&tx_buf->buf[tx_buf->tail], (u64)src_addr, tx_count);
+	sps_pipe_handle = tx->cons.pipe_handle;
+
+	/* Set 1 second timeout */
+	mod_timer(&tx->tx_timeout_timer,
+		jiffies + msecs_to_jiffies(MSEC_PER_SEC));
+	/* Queue transfer request to SPS */
+	ret = sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
+				msm_uport, flags);
+
+	MSM_HS_DBG("%s():Enqueue Tx Cmd, ret %d\n", __func__, ret);
+}
+
+/* This function queues the rx descriptor for BAM transfer */
+static void msm_hs_post_rx_desc(struct msm_hs_port *msm_uport, int inx)
+{
+	u32 flags = SPS_IOVEC_FLAG_INT;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int ret;
+
+	phys_addr_t rbuff_addr = rx->rbuffer + (UARTDM_RX_BUF_SIZE * inx);
+	u8 *virt_addr = rx->buffer + (UARTDM_RX_BUF_SIZE * inx);
+
+	MSM_HS_DBG("%s(): %d:Queue desc %d, 0x%llx, base 0x%llx virtaddr %pK\n",
+		__func__, msm_uport->uport.line, inx,
+		(u64)rbuff_addr, (u64)rx->rbuffer, virt_addr);
+
+	rx->iovec[inx].size = 0;
+	ret = sps_transfer_one(rx->prod.pipe_handle, rbuff_addr,
+		UARTDM_RX_BUF_SIZE, msm_uport, flags);
+
+	if (ret)
+		MSM_HS_ERR("Error processing descriptor %d\n", ret);
+}
+
+/* Update the rx descriptor index to specify the next one to be processed */
+static void msm_hs_mark_next(struct msm_hs_port *msm_uport, int inx)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int prev;
+
+	inx %= UART_DMA_DESC_NR;
+	MSM_HS_DBG("%s(): inx %d, pending 0x%lx\n", __func__, inx,
+		rx->pending_flag);
+
+	if (!inx)
+		prev = UART_DMA_DESC_NR - 1;
+	else
+		prev = inx - 1;
+
+	if (!test_bit(prev, &rx->pending_flag))
+		msm_uport->rx.rx_inx = inx;
+	MSM_HS_DBG("%s(): prev %d pending flag 0x%lx, next %d\n", __func__,
+		prev, rx->pending_flag, msm_uport->rx.rx_inx);
+}
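+
+/*
+ * Example: with a ring of UART_DMA_DESC_NR descriptors, inx wraps
+ * modulo the ring size, so marking "next" after the last descriptor
+ * advances rx_inx back to 0, provided the previous descriptor is no
+ * longer pending.
+ */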
+
+/*
+ *	Queue the rx descriptor that has just been processed or
+ *	all of them if queueing for the first time
+ */
+static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	int i, flag = 0;
+
+	/* On the first call, queue all descriptors; afterwards, queue
+	 * only the one just processed
+	 */
+	if (rx->queued_flag || rx->pending_flag) {
+		if (!test_bit(rx->rx_inx, &rx->queued_flag) &&
+		    !test_bit(rx->rx_inx, &rx->pending_flag)) {
+			msm_hs_post_rx_desc(msm_uport, rx->rx_inx);
+			set_bit(rx->rx_inx, &rx->queued_flag);
+			MSM_HS_DBG("%s(): Set Queued Bit %d\n",
+				__func__, rx->rx_inx);
+		} else
+			MSM_HS_ERR("%s(): rx_inx pending or queued\n",
+				 __func__);
+		return;
+	}
+
+	for (i = 0; i < UART_DMA_DESC_NR; i++) {
+		if (!test_bit(i, &rx->queued_flag) &&
+		!test_bit(i, &rx->pending_flag)) {
+			MSM_HS_DBG("%s(): Calling post rx %d\n", __func__, i);
+			msm_hs_post_rx_desc(msm_uport, i);
+			set_bit(i, &rx->queued_flag);
+			flag = 1;
+		}
+	}
+
+	if (!flag)
+		MSM_HS_ERR("%s(): error queueing descriptor\n", __func__);
+}
+
+/* Start to receive the next chunk of data */
+static void msm_hs_start_rx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
+	unsigned int data;
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+	if (rx->pending_flag) {
+		MSM_HS_INFO("%s(): Rx Cmd got executed, wait for rx_tlet\n",
+								 __func__);
+		rx->flush = FLUSH_IGNORE;
+		return;
+	}
+	if (buffer_pending)
+		MSM_HS_ERR("Error: rx started in buffer state =%x\n",
+			buffer_pending);
+
+	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+	msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
+	msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
+	/*
+	 * Enable UARTDM Rx Interface as previously it has been
+	 * disable in set_termios before configuring baud rate.
+	 */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	/* Enable UARTDM Rx BAM Interface */
+	data |= UARTDM_RX_BAM_ENABLE_BMSK;
+
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/* Calling next DMOV API. Hence mb() here. */
+	mb();
+
+	/*
+	 * RX-transfer will be automatically re-activated
+	 * after last data of previous transfer was read.
+	 */
+	data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
+				RX_DMRX_CYCLIC_EN);
+	msm_hs_write(uport, UART_DM_RX_TRANS_CTRL, data);
+	/* Issue RX BAM Start IFC command */
+	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+	/* Ensure register IO completion */
+	mb();
+
+	msm_uport->rx.flush = FLUSH_NONE;
+	msm_uport->rx_bam_inprogress = true;
+	msm_hs_queue_rx_desc(msm_uport);
+	msm_uport->rx_bam_inprogress = false;
+	wake_up(&msm_uport->rx.wait);
+	MSM_HS_DBG("%s():Enqueue Rx Cmd\n", __func__);
+}
+
+static void flip_insert_work(struct work_struct *work)
+{
+	unsigned long flags;
+	int retval;
+	struct msm_hs_port *msm_uport =
+		container_of(work, struct msm_hs_port,
+			     rx.flip_insert_work.work);
+	struct tty_struct *tty = msm_uport->uport.state->port.tty;
+
+	spin_lock_irqsave(&msm_uport->uport.lock, flags);
+	if (!tty || msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+		MSM_HS_ERR("%s() :Invalid driver state flush %d\n",
+				__func__, msm_uport->rx.flush);
+		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+		return;
+	}
+
+	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
+		MSM_HS_ERR("%s():Error: No buffer pending\n", __func__);
+		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+		return;
+	}
+	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
+		retval = tty_insert_flip_char(tty->port, 0, TTY_OVERRUN);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
+	}
+	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
+		retval = tty_insert_flip_char(tty->port, 0, TTY_PARITY);
+		if (retval)
+			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
+	}
+	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
+		int rx_count, rx_offset;
+
+		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
+		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
+		retval = tty_insert_flip_string(tty->port,
+			msm_uport->rx.buffer +
+			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)
+			+ rx_offset, rx_count);
+		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
+						 PARITY_ERROR);
+		if (retval != rx_count)
+			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
+				retval << 8 | (rx_count - retval) << 16;
+	}
+	if (msm_uport->rx.buffer_pending) {
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
+				      msecs_to_jiffies(RETRY_TIMEOUT));
+	} else if (msm_uport->rx.flush <= FLUSH_IGNORE) {
+		MSM_HS_WARN("Pending buffers cleared, restarting\n");
+		clear_bit(msm_uport->rx.rx_inx,
+			&msm_uport->rx.pending_flag);
+		msm_hs_start_rx_locked(&msm_uport->uport);
+		msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
+	}
+	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+	tty_flip_buffer_push(tty->port);
+}
+
+static void msm_serial_hs_rx_work(struct kthread_work *work)
+{
+	int retval;
+	int rx_count = 0;
+	unsigned long status;
+	unsigned long flags;
+	unsigned int error_f = 0;
+	struct uart_port *uport;
+	struct msm_hs_port *msm_uport;
+	unsigned int flush = FLUSH_DATA_INVALID;
+	struct tty_struct *tty;
+	struct sps_event_notify *notify;
+	struct msm_hs_rx *rx;
+	struct sps_pipe *sps_pipe_handle;
+	struct platform_device *pdev;
+	const struct msm_serial_hs_platform_data *pdata;
+
+	msm_uport = container_of((struct kthread_work *) work,
+				 struct msm_hs_port, rx.kwork);
+	msm_hs_resource_vote(msm_uport);
+	uport = &msm_uport->uport;
+	tty = uport->state->port.tty;
+	notify = &msm_uport->notify;
+	rx = &msm_uport->rx;
+	pdev = to_platform_device(uport->dev);
+	pdata = pdev->dev.platform_data;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	if (!tty || rx->flush == FLUSH_SHUTDOWN) {
+		MSM_HS_ERR("%s():Invalid driver state flush %d\n",
+				__func__, rx->flush);
+		spin_unlock_irqrestore(&uport->lock, flags);
+		msm_hs_resource_unvote(msm_uport);
+		return;
+	}
+
+	/*
+	 * Process all pending descriptors, or run once if nothing is
+	 * queued (e.g. when called from set_termios)
+	 */
+	while (!rx->buffer_pending &&
+		(rx->pending_flag || !rx->queued_flag)) {
+		MSM_HS_DBG("%s(): Loop P 0x%lx Q 0x%lx\n", __func__,
+			rx->pending_flag, rx->queued_flag);
+
+		status = msm_hs_read(uport, UART_DM_SR);
+
+		MSM_HS_DBG("In %s\n", __func__);
+
+		/* overrun is not connected to data in the FIFO */
+		if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
+			     (uport->read_status_mask & CREAD))) {
+			retval = tty_insert_flip_char(tty->port,
+							0, TTY_OVERRUN);
+			MSM_HS_WARN("%s(): RX Buffer Overrun Detected\n",
+				__func__);
+			if (!retval)
+				msm_uport->rx.buffer_pending |= TTY_OVERRUN;
+			uport->icount.buf_overrun++;
+			error_f = 1;
+		}
+
+		if (!(uport->ignore_status_mask & INPCK))
+			status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
+
+		if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
+			/* Cannot distinguish parity errors from frame errors */
+			MSM_HS_WARN("msm_serial_hs: parity error\n");
+			uport->icount.parity++;
+			error_f = 1;
+			if (!(uport->ignore_status_mask & IGNPAR)) {
+				retval = tty_insert_flip_char(tty->port,
+							0, TTY_PARITY);
+				if (!retval)
+					msm_uport->rx.buffer_pending
+								|= TTY_PARITY;
+			}
+		}
+
+		if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
+			MSM_HS_DBG("msm_serial_hs: Rx break\n");
+			uport->icount.brk++;
+			error_f = 1;
+			if (!(uport->ignore_status_mask & IGNBRK)) {
+				retval = tty_insert_flip_char(tty->port,
+								0, TTY_BREAK);
+				if (!retval)
+					msm_uport->rx.buffer_pending
+								|= TTY_BREAK;
+			}
+		}
+
+		if (error_f)
+			msm_hs_write(uport, UART_DM_CR,	RESET_ERROR_STATUS);
+		flush = msm_uport->rx.flush;
+		if (flush == FLUSH_IGNORE && !msm_uport->rx.buffer_pending) {
+			MSM_HS_DBG("%s(): calling start_rx_locked\n",
+				__func__);
+			msm_hs_start_rx_locked(uport);
+		}
+		if (flush >= FLUSH_DATA_INVALID)
+			goto out;
+
+		rx_count = msm_uport->rx.iovec[msm_uport->rx.rx_inx].size;
+		hex_dump_ipc(msm_uport, rx->ipc_rx_ctxt, "Rx",
+			(msm_uport->rx.buffer +
+			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
+			msm_uport->rx.iovec[msm_uport->rx.rx_inx].addr,
+			rx_count);
+
+		/*
+		 * We are in a spinlocked context; the spin lock is taken at
+		 * the other places where these flags are updated
+		 */
+		if (uport->read_status_mask & CREAD) {
+			if (!test_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.pending_flag) &&
+			    !test_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.queued_flag))
+				MSM_HS_ERR("%s(): RX INX not set\n", __func__);
+			else if (test_bit(msm_uport->rx.rx_inx,
+					&msm_uport->rx.pending_flag) &&
+				!test_bit(msm_uport->rx.rx_inx,
+					&msm_uport->rx.queued_flag)) {
+				MSM_HS_DBG("%s(): Clear Pending Bit %d\n",
+					__func__, msm_uport->rx.rx_inx);
+
+				retval = tty_insert_flip_string(tty->port,
+					msm_uport->rx.buffer +
+					(msm_uport->rx.rx_inx *
+					UARTDM_RX_BUF_SIZE),
+					rx_count);
+
+				if (retval != rx_count) {
+					MSM_HS_INFO("%s():ret %d rx_count %d\n",
+						__func__, retval, rx_count);
+					msm_uport->rx.buffer_pending |=
+					CHARS_NORMAL | retval << 5 |
+					(rx_count - retval) << 16;
+				}
+			} else
+				MSM_HS_ERR("%s(): Error in inx %d\n", __func__,
+					msm_uport->rx.rx_inx);
+		}
+
+		if (!msm_uport->rx.buffer_pending) {
+			msm_uport->rx.flush = FLUSH_NONE;
+			msm_uport->rx_bam_inprogress = true;
+			sps_pipe_handle = rx->prod.pipe_handle;
+			MSM_HS_DBG("Queing bam descriptor\n");
+			/* Queue transfer request to SPS */
+			clear_bit(msm_uport->rx.rx_inx,
+				&msm_uport->rx.pending_flag);
+			msm_hs_queue_rx_desc(msm_uport);
+			msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
+			msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
+			msm_uport->rx_bam_inprogress = false;
+			wake_up(&msm_uport->rx.wait);
+		} else {
+			break;
+		}
+
+	}
+out:
+	if (msm_uport->rx.buffer_pending) {
+		MSM_HS_WARN("%s(): tty buffer exhausted,Stalling\n", __func__);
+		schedule_delayed_work(&msm_uport->rx.flip_insert_work
+				      , msecs_to_jiffies(RETRY_TIMEOUT));
+	}
+	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
+	spin_unlock_irqrestore(&uport->lock, flags);
+	if (flush < FLUSH_DATA_INVALID)
+		tty_flip_buffer_push(tty->port);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static void msm_hs_start_tx_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	/* Bail if transfer in progress */
+	if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
+		MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
+			__func__, tx->flush, tx->dma_in_flight);
+		return;
+	}
+
+	if (!tx->dma_in_flight) {
+		tx->dma_in_flight = true;
+		kthread_queue_work(&msm_uport->tx.kworker,
+			&msm_uport->tx.kwork);
+	}
+}
+
+/**
+ * Callback notification from SPS driver
+ *
+ * This callback function gets called from the
+ * SPS driver when the requested SPS data transfer is
+ * completed.
+ *
+ */
+
+static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
+{
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)notify->user;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+
+	msm_uport->notify = *notify;
+	MSM_HS_INFO("tx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+		&addr, notify->data.transfer.iovec.size,
+		notify->data.transfer.iovec.flags);
+
+	del_timer(&msm_uport->tx.tx_timeout_timer);
+	MSM_HS_DBG("%s(): Queue kthread work\n", __func__);
+	kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
+}
+
+static void msm_serial_hs_tx_work(struct kthread_work *work)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport =
+			container_of((struct kthread_work *)work,
+			struct msm_hs_port, tx.kwork);
+	struct uart_port *uport = &msm_uport->uport;
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	/*
+	 * Do the transmit-buffer bookkeeping in BAM mode,
+	 * equivalent to what legacy mode does
+	 */
+	msm_hs_resource_vote(msm_uport);
+	if (tx->flush >= FLUSH_STOP) {
+		spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+		tx->flush = FLUSH_NONE;
+		MSM_HS_DBG("%s(): calling submit_tx\n", __func__);
+		msm_hs_submit_tx_locked(uport);
+		spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+		msm_hs_resource_unvote(msm_uport);
+		return;
+	}
+
+	spin_lock_irqsave(&(msm_uport->uport.lock), flags);
+	if (!uart_circ_empty(tx_buf))
+		tx_buf->tail = (tx_buf->tail +
+		tx->tx_count) & ~UART_XMIT_SIZE;
+	else
+		MSM_HS_DBG("%s():circ buffer is empty\n", __func__);
+
+	wake_up(&msm_uport->tx.wait);
+
+	uport->icount.tx += tx->tx_count;
+
+	/*
+	 * Calling to send next chunk of data
+	 * If the circ buffer is empty, we stop
+	 * If the clock off was requested, the clock
+	 * off sequence is kicked off
+	 */
+	MSM_HS_DBG("%s(): calling submit_tx\n", __func__);
+	msm_hs_submit_tx_locked(uport);
+
+	if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+		uart_write_wakeup(uport);
+
+	spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static void
+msm_hs_mark_proc_rx_desc(struct msm_hs_port *msm_uport,
+			struct sps_event_notify *notify)
+{
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+	/* shift by 9 == divide by UARTDM_RX_BUF_SIZE (512 bytes) */
+	int inx = (addr - rx->rbuffer) >> 9;
+
+	set_bit(inx, &rx->pending_flag);
+	clear_bit(inx, &rx->queued_flag);
+	rx->iovec[inx] = notify->data.transfer.iovec;
+	MSM_HS_DBG("Clear Q, Set P Bit %d, Q 0x%lx P 0x%lx\n",
+		inx, rx->queued_flag, rx->pending_flag);
+}
+
+/**
+ * Callback notification from SPS driver
+ *
+ * This callback function gets called from the
+ * SPS driver when the requested SPS data transfer is
+ * completed.
+ *
+ */
+
+static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
+{
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)notify->user;
+	struct uart_port *uport;
+	unsigned long flags;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
+		notify->data.transfer.iovec.addr);
+	/* shift by 9 == divide by UARTDM_RX_BUF_SIZE (512 bytes) */
+	int inx = (addr - rx->rbuffer) >> 9;
+
+	uport = &(msm_uport->uport);
+	msm_uport->notify = *notify;
+	MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+		&addr, notify->data.transfer.iovec.size,
+		notify->data.transfer.iovec.flags);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_mark_proc_rx_desc(msm_uport, notify);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	if (msm_uport->rx.flush == FLUSH_NONE) {
+		/* Test if others are queued */
+		if (msm_uport->rx.pending_flag & ~(1 << inx)) {
+			MSM_HS_DBG("%s(): inx 0x%x, 0x%lx not processed\n",
+			__func__, inx,
+			msm_uport->rx.pending_flag & ~(1<<inx));
+		}
+		kthread_queue_work(&msm_uport->rx.kworker,
+				&msm_uport->rx.kwork);
+		MSM_HS_DBG("%s(): Scheduled rx_tlet\n", __func__);
+	}
+}
+
+/*
+ *  Standard API, Current states of modem control inputs
+ *
+ * Since CTS can be handled entirely by HARDWARE we always
+ * indicate clear to send and count on the TX FIFO to block when
+ * it fills up.
+ *
+ * - TIOCM_DCD
+ * - TIOCM_CTS
+ * - TIOCM_DSR
+ * - TIOCM_RI
+ *  DCD and DSR are unsupported and always report high; RI reports low.
+ */
+static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
+{
+	return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
+/*
+ *  Standard API, Set or clear the RFR signal
+ *
+ * To set RFR high (indicating we are not ready for data), disable auto
+ * ready-for-receiving and then drive RFR_N high. To set RFR low, just
+ * turn auto ready-for-receiving back on; the hardware lowers the RFR
+ * signal when it is ready.
+ */
+void msm_hs_set_mctrl_locked(struct uart_port *uport,
+				    unsigned int mctrl)
+{
+	unsigned int set_rts;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+	/* RTS is active low */
+	set_rts = TIOCM_RTS & mctrl ? 0 : 1;
+	MSM_HS_INFO("%s(): set_rts %d\n", __func__, set_rts);
+
+	if (set_rts)
+		msm_hs_disable_flow_control(uport, false);
+	else
+		msm_hs_enable_flow_control(uport, false);
+}
+
+void msm_hs_set_mctrl(struct uart_port *uport,
+				    unsigned int mctrl)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_set_mctrl_locked(uport, mctrl);
+	spin_unlock_irqrestore(&uport->lock, flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+EXPORT_SYMBOL(msm_hs_set_mctrl);
+
+/* Standard API, Enable modem status (CTS) interrupt  */
+static void msm_hs_enable_ms_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+		return;
+	}
+
+	/* Enable DELTA_CTS Interrupt */
+	msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/* Ensure register IO completion */
+	mb();
+}
+
+/*
+ *  Standard API, Break Signal
+ *
+ * Control the transmission of a break signal. ctl eq 0 => break
+ * signal terminate ctl ne 0 => start break signal
+ */
+static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
+{
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
+	/* Ensure register IO completion */
+	mb();
+	spin_unlock_irqrestore(&uport->lock, flags);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
+{
+	if (cfg_flags & UART_CONFIG_TYPE)
+		uport->type = PORT_MSM;
+}
+
+/*  Handle CTS changes (Called from interrupt handler) */
+static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_hs_resource_vote(msm_uport);
+	/* clear interrupt */
+	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
+	/* Calling CLOCK API. Hence mb() is required here. */
+	mb();
+	uport->icount.cts++;
+
+	/* clear the IOCTL TIOCMIWAIT if called */
+	wake_up_interruptible(&uport->state->port.delta_msr_wait);
+	msm_hs_resource_unvote(msm_uport);
+}
+
+static irqreturn_t msm_hs_isr(int irq, void *dev)
+{
+	unsigned long flags;
+	unsigned int isr_status;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
+	struct uart_port *uport = &msm_uport->uport;
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	isr_status = msm_hs_read(uport, UART_DM_MISR);
+	MSM_HS_INFO("%s(): DM_ISR: 0x%x\n", __func__, isr_status);
+	dump_uart_hs_registers(msm_uport);
+
+	/* Uart RX starting */
+	if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
+		MSM_HS_DBG("%s():UARTDM_ISR_RXLEV_BMSK\n", __func__);
+		msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
+		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+		/* Complete device write for IMR. Hence mb() is required. */
+		mb();
+	}
+	/* Stale rx interrupt */
+	if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
+		msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
+		msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+		/*
+		 * Complete device write before calling DMOV API. Hence
+		 * mb() is required here.
+		 */
+		mb();
+		MSM_HS_DBG("%s():Stal Interrupt\n", __func__);
+	}
+	/* tx ready interrupt */
+	if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
+		MSM_HS_DBG("%s(): ISR_TX_READY Interrupt\n", __func__);
+		/* Clear  TX Ready */
+		msm_hs_write(uport, UART_DM_CR, CLEAR_TX_READY);
+
+		/*
+		 * Complete both writes before starting new TX.
+		 * Hence mb() is required here.
+		 */
+		mb();
+		/* Complete DMA TX transactions and submit new transactions */
+
+		/* Do not update tx_buf.tail if uart_flush_buffer already
+		 * called in serial core
+		 */
+		if (!uart_circ_empty(tx_buf))
+			tx_buf->tail = (tx_buf->tail +
+					tx->tx_count) & ~UART_XMIT_SIZE;
+
+		tx->dma_in_flight = false;
+
+		uport->icount.tx += tx->tx_count;
+
+		if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
+			uart_write_wakeup(uport);
+	}
+	if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
+		/* TX FIFO is empty */
+		msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
+		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+		MSM_HS_DBG("%s(): TXLEV Interrupt\n", __func__);
+		/*
+		 * Complete device write before starting clock_off request.
+		 * Hence mb() is required here.
+		 */
+		mb();
+		queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
+	}
+
+	/* Change in CTS interrupt */
+	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
+		msm_hs_handle_delta_cts_locked(uport);
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+/* The following two functions provide interfaces to get the underlying
+ * port structure (struct uart_port or struct msm_hs_port) given
+ * the port index. msm_hs_get_uart_port() is called by clients.
+ * The function msm_hs_get_hs_port() is for internal use.
+ */
+
+struct uart_port *msm_hs_get_uart_port(int port_index)
+{
+	struct uart_state *state = msm_hs_driver.state + port_index;
+
+	/* The uart_driver structure stores the states in an array.
+	 * Thus the corresponding offset from the drv->state returns
+	 * the state for the uart_port that is requested
+	 */
+	if (port_index == state->uart_port->line)
+		return state->uart_port;
+
+	return NULL;
+}
+EXPORT_SYMBOL(msm_hs_get_uart_port);
+
+static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
+{
+	struct uart_port *uport = msm_hs_get_uart_port(port_index);
+
+	if (uport)
+		return UARTDM_TO_MSM(uport);
+	return NULL;
+}
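+
+/*
+ * Client usage sketch (illustrative): look up the port by index before
+ * issuing clock requests:
+ *
+ *	struct uart_port *uport = msm_hs_get_uart_port(0);
+ *
+ *	if (uport)
+ *		msm_hs_request_clock_on(uport);
+ */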
+
+void enable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+	unsigned long flags;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (!is_use_low_power_wakeup(msm_uport))
+		return;
+	if (msm_uport->wakeup.freed)
+		return;
+
+	if (!(msm_uport->wakeup.enabled)) {
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_uport->wakeup.ignore = 1;
+		msm_uport->wakeup.enabled = true;
+		spin_unlock_irqrestore(&uport->lock, flags);
+		disable_irq(uport->irq);
+		enable_irq(msm_uport->wakeup.irq);
+	} else {
+		MSM_HS_WARN("%s():Wake up IRQ already enabled\n", __func__);
+	}
+}
+
+void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+	unsigned long flags;
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (!is_use_low_power_wakeup(msm_uport))
+		return;
+	if (msm_uport->wakeup.freed)
+		return;
+
+	if (msm_uport->wakeup.enabled) {
+		disable_irq_nosync(msm_uport->wakeup.irq);
+		enable_irq(uport->irq);
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_uport->wakeup.enabled = false;
+		spin_unlock_irqrestore(&uport->lock, flags);
+	} else {
+		MSM_HS_WARN("%s():Wake up IRQ already disabled\n", __func__);
+	}
+}
+
+void msm_hs_resource_off(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned int data;
+
+	MSM_HS_DBG("%s(): begin", __func__);
+	msm_hs_disable_flow_control(uport, false);
+	if (msm_uport->rx.flush == FLUSH_NONE)
+		msm_hs_disconnect_rx(uport);
+
+	/* disable dlink */
+	if (msm_uport->tx.flush == FLUSH_NONE)
+		wait_event_timeout(msm_uport->tx.wait,
+			msm_uport->tx.flush == FLUSH_STOP, 500);
+
+	if (msm_uport->tx.flush != FLUSH_SHUTDOWN) {
+		data = msm_hs_read(uport, UART_DM_DMEN);
+		data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
+		msm_hs_write(uport, UART_DM_DMEN, data);
+		sps_tx_disconnect(msm_uport);
+	}
+	if (!atomic_read(&msm_uport->client_req_state))
+		msm_hs_enable_flow_control(uport, false);
+}
+
+void msm_hs_resource_on(struct msm_hs_port *msm_uport)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+	unsigned int data;
+	unsigned long flags;
+
+	if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
+	msm_uport->rx.flush == FLUSH_STOP) {
+		msm_hs_write(uport, UART_DM_CR, RESET_RX);
+		data = msm_hs_read(uport, UART_DM_DMEN);
+		data |= UARTDM_RX_BAM_ENABLE_BMSK;
+		msm_hs_write(uport, UART_DM_DMEN, data);
+	}
+
+	msm_hs_spsconnect_tx(msm_uport);
+	if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+		msm_hs_spsconnect_rx(uport);
+		spin_lock_irqsave(&uport->lock, flags);
+		msm_hs_start_rx_locked(uport);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
+}
+
+/* Request to turn off uart clock once pending TX is flushed */
+int msm_hs_request_clock_off(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int ret = 0;
+	int client_count = 0;
+
+	mutex_lock(&msm_uport->mtx);
+	/*
+	 * If we're in the middle of a system suspend, don't process these
+	 * userspace/kernel API commands.
+	 */
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
+		MSM_HS_WARN("%s():Can't process clk request during suspend\n",
+			__func__);
+		ret = -EIO;
+	}
+	mutex_unlock(&msm_uport->mtx);
+	if (ret)
+		goto exit_request_clock_off;
+
+	if (atomic_read(&msm_uport->client_count) <= 0) {
+		MSM_HS_WARN("%s(): ioctl count -ve, client check voting\n",
+			__func__);
+		ret = -EPERM;
+		goto exit_request_clock_off;
+	}
+	/* Set the flag to disable flow control and wakeup irq */
+	if (msm_uport->obs)
+		atomic_set(&msm_uport->client_req_state, 1);
+	msm_hs_resource_unvote(msm_uport);
+	atomic_dec(&msm_uport->client_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s(): Client_Count %d\n", __func__,
+			client_count);
+exit_request_clock_off:
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_request_clock_off);
+
+int msm_hs_request_clock_on(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int client_count;
+	int ret = 0;
+
+	mutex_lock(&msm_uport->mtx);
+	/*
+	 * If we're in the middle of a system suspend, don't process these
+	 * userspace/kernel API commands.
+	 */
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
+		MSM_HS_WARN("%s(): Can't process clk request during suspend\n",
+			__func__);
+		ret = -EIO;
+	}
+	mutex_unlock(&msm_uport->mtx);
+	if (ret)
+		goto exit_request_clock_on;
+
+	msm_hs_resource_vote(msm_uport);
+	atomic_inc(&msm_uport->client_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s(): Client_Count %d\n", __func__,
+			client_count);
+
+	/* Clear the flag */
+	if (msm_uport->obs)
+		atomic_set(&msm_uport->client_req_state, 0);
+exit_request_clock_on:
+	return ret;
+}
+EXPORT_SYMBOL(msm_hs_request_clock_on);
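+
+/*
+ * Illustrative pairing: clients bracket a burst of traffic with
+ * balanced on/off requests, since the calls are reference counted via
+ * client_count:
+ *
+ *	msm_hs_request_clock_on(uport);
+ *	... transfer data ...
+ *	msm_hs_request_clock_off(uport);
+ */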
+
+static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
+{
+	unsigned int wakeup = 0;
+	unsigned long flags;
+	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
+	struct uart_port *uport = &msm_uport->uport;
+	struct tty_struct *tty = NULL;
+
+	spin_lock_irqsave(&uport->lock, flags);
+
+	if (msm_uport->wakeup.ignore)
+		msm_uport->wakeup.ignore = 0;
+	else
+		wakeup = 1;
+
+	if (wakeup) {
+		/*
+		 * Port was clocked off during rx, wake up and
+		 * optionally inject char into tty rx
+		 */
+		if (msm_uport->wakeup.inject_rx) {
+			tty = uport->state->port.tty;
+			tty_insert_flip_char(tty->port,
+					     msm_uport->wakeup.rx_to_inject,
+					     TTY_NORMAL);
+			hex_dump_ipc(msm_uport, msm_uport->rx.ipc_rx_ctxt,
+				"Rx Inject",
+				&msm_uport->wakeup.rx_to_inject, 0, 1);
+			MSM_HS_INFO("Wakeup ISR.Ignore%d\n",
+						msm_uport->wakeup.ignore);
+		}
+	}
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	if (wakeup && msm_uport->wakeup.inject_rx)
+		tty_flip_buffer_push(tty->port);
+	return IRQ_HANDLED;
+}
+
+static const char *msm_hs_type(struct uart_port *port)
+{
+	return "MSM HS UART";
+}
+
+/**
+ * msm_hs_unconfig_uart_gpios - Unconfigures UART GPIOs
+ * @uport: uart port
+ */
+static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
+{
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+					pdev->dev.platform_data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	int ret;
+
+	if (msm_uport->use_pinctrl) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_suspend);
+		if (ret)
+			MSM_HS_ERR("%s():Failed to pinctrl set_state\n",
+				__func__);
+	} else if (pdata) {
+		if (gpio_is_valid(pdata->uart_tx_gpio))
+			gpio_free(pdata->uart_tx_gpio);
+		if (gpio_is_valid(pdata->uart_rx_gpio))
+			gpio_free(pdata->uart_rx_gpio);
+		if (gpio_is_valid(pdata->uart_cts_gpio))
+			gpio_free(pdata->uart_cts_gpio);
+		if (gpio_is_valid(pdata->uart_rfr_gpio))
+			gpio_free(pdata->uart_rfr_gpio);
+	} else {
+		MSM_HS_ERR("%s(): Error: pdata is NULL\n", __func__);
+	}
+}
+
+/**
+ * msm_hs_config_uart_gpios - Configures UART GPIOs
+ * @uport: uart port
+ */
+static int msm_hs_config_uart_gpios(struct uart_port *uport)
+{
+	struct platform_device *pdev = to_platform_device(uport->dev);
+	const struct msm_serial_hs_platform_data *pdata =
+					pdev->dev.platform_data;
+	int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	if (!IS_ERR_OR_NULL(msm_uport->pinctrl)) {
+		MSM_HS_DBG("%s(): Using Pinctrl\n", __func__);
+		msm_uport->use_pinctrl = true;
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_active);
+		if (ret)
+			MSM_HS_ERR("%s(): Failed to pinctrl set_state\n",
+				__func__);
+		return ret;
+	} else if (pdata) {
+		/* Fall back to using gpio lib */
+		if (gpio_is_valid(pdata->uart_tx_gpio)) {
+			ret = gpio_request(pdata->uart_tx_gpio,
+							"UART_TX_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_tx_gpio);
+				goto exit_uart_config;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_rx_gpio)) {
+			ret = gpio_request(pdata->uart_rx_gpio,
+							"UART_RX_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_rx_gpio);
+				goto uart_tx_unconfig;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_cts_gpio)) {
+			ret = gpio_request(pdata->uart_cts_gpio,
+							"UART_CTS_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_cts_gpio);
+				goto uart_rx_unconfig;
+			}
+		}
+
+		if (gpio_is_valid(pdata->uart_rfr_gpio)) {
+			ret = gpio_request(pdata->uart_rfr_gpio,
+							"UART_RFR_GPIO");
+			if (unlikely(ret)) {
+				MSM_HS_ERR("gpio request failed for:%d\n",
+					pdata->uart_rfr_gpio);
+				goto uart_cts_unconfig;
+			}
+		}
+	} else {
+		MSM_HS_ERR("%s(): Pdata is NULL\n", __func__);
+		ret = -EINVAL;
+	}
+	return ret;
+
+uart_cts_unconfig:
+	if (gpio_is_valid(pdata->uart_cts_gpio))
+		gpio_free(pdata->uart_cts_gpio);
+uart_rx_unconfig:
+	if (gpio_is_valid(pdata->uart_rx_gpio))
+		gpio_free(pdata->uart_rx_gpio);
+uart_tx_unconfig:
+	if (gpio_is_valid(pdata->uart_tx_gpio))
+		gpio_free(pdata->uart_tx_gpio);
+exit_uart_config:
+	return ret;
+}
+
+static void msm_hs_get_pinctrl_configs(struct uart_port *uport)
+{
+	struct pinctrl_state *set_state;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	msm_uport->pinctrl = devm_pinctrl_get(uport->dev);
+	if (IS_ERR_OR_NULL(msm_uport->pinctrl)) {
+		MSM_HS_DBG("%s(): Pinctrl not defined\n", __func__);
+	} else {
+		MSM_HS_DBG("%s(): Using Pinctrl\n", __func__);
+		msm_uport->use_pinctrl = true;
+
+		set_state = pinctrl_lookup_state(msm_uport->pinctrl,
+						PINCTRL_STATE_DEFAULT);
+		if (IS_ERR_OR_NULL(set_state)) {
+			dev_err(uport->dev,
+				"pinctrl lookup failed for default state\n");
+			goto pinctrl_fail;
+		}
+
+		MSM_HS_DBG("%s(): Pinctrl state active %pK\n", __func__,
+			set_state);
+		msm_uport->gpio_state_active = set_state;
+
+		set_state = pinctrl_lookup_state(msm_uport->pinctrl,
+						PINCTRL_STATE_SLEEP);
+		if (IS_ERR_OR_NULL(set_state)) {
+			dev_err(uport->dev,
+				"pinctrl lookup failed for sleep state\n");
+			goto pinctrl_fail;
+		}
+
+		MSM_HS_DBG("%s(): Pinctrl state sleep %pK\n", __func__,
+			set_state);
+		msm_uport->gpio_state_suspend = set_state;
+		return;
+	}
+pinctrl_fail:
+	msm_uport->pinctrl = NULL;
+}
+
+/* Called when port is opened */
+static int msm_hs_startup(struct uart_port *uport)
+{
+	int ret;
+	int rfr_level;
+	unsigned long flags;
+	unsigned int data;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+	struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
+	struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
+
+	rfr_level = uport->fifosize;
+	if (rfr_level > 16)
+		rfr_level -= 16;
+
+	tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
+				      DMA_TO_DEVICE);
+
+	/* turn on uart clk */
+	msm_hs_resource_vote(msm_uport);
+
+	if (is_use_low_power_wakeup(msm_uport)) {
+		ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
+					msm_hs_wakeup_isr,
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					"msm_hs_wakeup", msm_uport);
+		if (unlikely(ret)) {
+			MSM_HS_ERR("%s():Err getting uart wakeup_irq %d\n",
+				  __func__, ret);
+			goto unvote_exit;
+		}
+
+		msm_uport->wakeup.freed = false;
+		disable_irq(msm_uport->wakeup.irq);
+		msm_uport->wakeup.enabled = false;
+
+		ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
+		if (unlikely(ret)) {
+			MSM_HS_ERR("%s():Err setting wakeup irq\n", __func__);
+			goto free_uart_irq;
+		}
+	}
+
+	ret = msm_hs_config_uart_gpios(uport);
+	if (ret) {
+		MSM_HS_ERR("%s(): Uart GPIO request failed\n", __func__);
+		goto free_uart_irq;
+	}
+
+	msm_hs_write(uport, UART_DM_DMEN, 0);
+
+	/* Connect TX */
+	sps_tx_disconnect(msm_uport);
+	ret = msm_hs_spsconnect_tx(msm_uport);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: SPS connect failed for TX\n");
+		goto unconfig_uart_gpios;
+	}
+
+	/* Connect RX */
+	kthread_flush_worker(&msm_uport->rx.kworker);
+	if (rx->flush != FLUSH_SHUTDOWN)
+		disconnect_rx_endpoint(msm_uport);
+	ret = msm_hs_spsconnect_rx(uport);
+	if (ret) {
+		MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX\n");
+		goto sps_disconnect_tx;
+	}
+
+	data = (UARTDM_BCR_TX_BREAK_DISABLE | UARTDM_BCR_STALE_IRQ_EMPTY |
+		UARTDM_BCR_RX_DMRX_LOW_EN | UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
+		UARTDM_BCR_RX_DMRX_1BYTE_RES_EN);
+	msm_hs_write(uport, UART_DM_BCR, data);
+
+	/* Set auto RFR Level */
+	data = msm_hs_read(uport, UART_DM_MR1);
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
+	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
+	data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
+	data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
+	msm_hs_write(uport, UART_DM_MR1, data);
+
+	/* Make sure RXSTALE count is non-zero */
+	data = msm_hs_read(uport, UART_DM_IPR);
+	if (!data) {
+		data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
+		msm_hs_write(uport, UART_DM_IPR, data);
+	}
+
+	/* Assume no flow control, unless termios sets it */
+	msm_uport->flow_control = false;
+	msm_hs_disable_flow_control(uport, true);
+
+	/* Reset TX/RX state machines and clear error/status interrupts */
+	msm_hs_write(uport, UART_DM_CR, RESET_TX);
+	msm_hs_write(uport, UART_DM_CR, RESET_RX);
+	msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
+	msm_hs_write(uport, UART_DM_CR, RESET_BREAK_INT);
+	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
+	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
+	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
+	/* Turn on Uart Receiver */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
+
+	/* Turn on Uart Transmitter */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
+
+	tx->dma_in_flight = false;
+	MSM_HS_DBG("%s():desc usage flag 0x%lx\n", __func__, rx->queued_flag);
+	timer_setup(&(tx->tx_timeout_timer),
+			tx_timeout_handler,
+			(unsigned long) msm_uport);
+
+	/* Enable reading the current CTS, no harm even if CTS is ignored */
+	msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
+
+	/* TXLEV on empty TX fifo */
+	msm_hs_write(uport, UART_DM_TFWR, 4);
+	/*
+	 * Complete all device-write-related configuration before
+	 * queuing the RX request. Hence an mb() is required here.
+	 */
+	mb();
+
+	ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
+			  "msm_hs_uart", msm_uport);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("%s():Error %d getting uart irq\n", __func__, ret);
+		goto sps_disconnect_rx;
+	}
+
+	spin_lock_irqsave(&uport->lock, flags);
+	atomic_set(&msm_uport->client_count, 0);
+	atomic_set(&msm_uport->client_req_state, 0);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s(): Client_Count 0\n", __func__);
+	msm_hs_start_rx_locked(uport);
+
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	msm_hs_resource_unvote(msm_uport);
+	return 0;
+
+sps_disconnect_rx:
+	sps_disconnect(sps_pipe_handle_rx);
+sps_disconnect_tx:
+	sps_disconnect(sps_pipe_handle_tx);
+unconfig_uart_gpios:
+	msm_hs_unconfig_uart_gpios(uport);
+free_uart_irq:
+	free_irq(uport->irq, msm_uport);
+unvote_exit:
+	msm_hs_resource_unvote(msm_uport);
+	MSM_HS_ERR("%s(): Error return\n", __func__);
+	return ret;
+}
+
+/* Initialize tx and rx data structures */
+static int uartdm_init_port(struct uart_port *uport)
+{
+	int ret = 0;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct msm_hs_tx *tx = &msm_uport->tx;
+	struct msm_hs_rx *rx = &msm_uport->rx;
+
+	init_waitqueue_head(&rx->wait);
+	init_waitqueue_head(&tx->wait);
+	init_waitqueue_head(&msm_uport->bam_disconnect_wait);
+
+	/* Init kernel threads for tx and rx */
+
+	kthread_init_worker(&rx->kworker);
+	rx->task = kthread_run(kthread_worker_fn,
+			&rx->kworker, "msm_serial_hs_%d_rx_work", uport->line);
+	if (IS_ERR(rx->task)) {
+		MSM_HS_ERR("%s(): error creating task\n", __func__);
+		goto exit_lh_init;
+	}
+	kthread_init_work(&rx->kwork, msm_serial_hs_rx_work);
+
+	kthread_init_worker(&tx->kworker);
+	tx->task = kthread_run(kthread_worker_fn,
+			&tx->kworker, "msm_serial_hs_%d_tx_work", uport->line);
+	if (IS_ERR(rx->task)) {
+		MSM_HS_ERR("%s(): error creating task\n", __func__);
+		goto exit_lh_init;
+	}
+
+	kthread_init_work(&tx->kwork, msm_serial_hs_tx_work);
+
+	rx->buffer = dma_zalloc_coherent(uport->dev,
+				UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
+				 &rx->rbuffer, GFP_KERNEL);
+	if (!rx->buffer) {
+		MSM_HS_ERR("%s(): cannot allocate rx->buffer\n", __func__);
+		ret = -ENOMEM;
+		goto exit_lh_init;
+	}
+
+	/* Set up Uart Receive */
+	msm_hs_write(uport, UART_DM_RFWR, 32);
+	/* Write to BADR explicitly to set up FIFO sizes */
+	msm_hs_write(uport, UARTDM_BADR_ADDR, 64);
+
+	INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
+
+	return ret;
+exit_lh_init:
+	if (!IS_ERR_OR_NULL(rx->task)) {
+		kthread_stop(rx->task);
+		rx->task = NULL;
+	}
+	if (!IS_ERR_OR_NULL(tx->task)) {
+		kthread_stop(tx->task);
+		tx->task = NULL;
+	}
+	return ret;
+}
+
+struct msm_serial_hs_platform_data
+	*msm_hs_dt_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct msm_serial_hs_platform_data *pdata;
+	u32 rx_to_inject;
+	int ret;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+	/* UART TX GPIO */
+	pdata->uart_tx_gpio = of_get_named_gpio(node,
+					"qcom,tx-gpio", 0);
+	if (pdata->uart_tx_gpio < 0)
+		pr_err("uart_tx_gpio is not available\n");
+
+	/* UART RX GPIO */
+	pdata->uart_rx_gpio = of_get_named_gpio(node,
+					"qcom,rx-gpio", 0);
+	if (pdata->uart_rx_gpio < 0)
+		pr_err("uart_rx_gpio is not available\n");
+
+	/* UART CTS GPIO */
+	pdata->uart_cts_gpio = of_get_named_gpio(node,
+					"qcom,cts-gpio", 0);
+	if (pdata->uart_cts_gpio < 0)
+		pr_err("uart_cts_gpio is not available\n");
+
+	/* UART RFR GPIO */
+	pdata->uart_rfr_gpio = of_get_named_gpio(node,
+					"qcom,rfr-gpio", 0);
+	if (pdata->uart_rfr_gpio < 0)
+		pr_err("uart_rfr_gpio is not available\n");
+
+	pdata->no_suspend_delay = of_property_read_bool(node,
+				"qcom,no-suspend-delay");
+
+	pdata->obs = of_property_read_bool(node,
+				"qcom,msm-obs");
+	if (pdata->obs)
+		pr_err("%s():Out of Band sleep flag is set\n", __func__);
+
+	pdata->inject_rx_on_wakeup = of_property_read_bool(node,
+				"qcom,inject-rx-on-wakeup");
+
+	if (pdata->inject_rx_on_wakeup) {
+		ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
+						&rx_to_inject);
+		if (ret < 0) {
+			pr_err("Error: Rx_char_to_inject not specified\n");
+			return ERR_PTR(ret);
+		}
+		pdata->rx_to_inject = (u8)rx_to_inject;
+	}
+
+	ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
+				&pdata->bam_tx_ep_pipe_index);
+	if (ret < 0) {
+		pr_err("Error: Getting UART BAM TX EP Pipe Index\n");
+		return ERR_PTR(ret);
+	}
+
+	if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
+		pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
+		pr_err("Error: Invalid UART BAM TX EP Pipe Index\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
+					&pdata->bam_rx_ep_pipe_index);
+	if (ret < 0) {
+		pr_err("Error: Getting UART BAM RX EP Pipe Index\n");
+		return ERR_PTR(ret);
+	}
+
+	if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
+		pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
+		pr_err("Error: Invalid UART BAM RX EP Pipe Index\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
+		"tx_gpio:%d rx_gpio:%d rfr_gpio:%d cts_gpio:%d\n",
+		pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
+		pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
+		pdata->uart_rfr_gpio);
+
+	return pdata;
+}
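+
+/*
+ * Illustrative devicetree fragment consumed by msm_hs_dt_to_pdata() above.
+ * This is a sketch: the node name, unit address, compatible string and the
+ * GPIO/pipe numbers are assumptions; only the qcom,* property names come
+ * from the parsing code above.
+ *
+ *	uart_hs: serial@a90000 {
+ *		compatible = "qcom,msm-hsuart-v14";
+ *		qcom,tx-gpio = <&tlmm 4 0x00>;
+ *		qcom,rx-gpio = <&tlmm 5 0x00>;
+ *		qcom,cts-gpio = <&tlmm 6 0x00>;
+ *		qcom,rfr-gpio = <&tlmm 7 0x00>;
+ *		qcom,bam-tx-ep-pipe-index = <2>;
+ *		qcom,bam-rx-ep-pipe-index = <3>;
+ *	};
+ */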
+
+/**
+ * Deallocate UART peripheral's SPS endpoint
+ * @msm_uport - Pointer to msm_hs_port structure
+ * @ep - Pointer to sps endpoint data structure
+ */
+static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
+				struct msm_hs_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+
+	dma_free_coherent(msm_uport->uport.dev,
+			sps_config->desc.size,
+			&sps_config->desc.phys_base,
+			GFP_KERNEL);
+	sps_free_endpoint(sps_pipe_handle);
+}
+
+/**
+ * Allocate UART peripheral's SPS endpoint
+ *
+ * This function allocates endpoint context
+ * by calling appropriate SPS driver APIs.
+ *
+ * @msm_uport - Pointer to msm_hs_port structure
+ * @ep - Pointer to sps endpoint data structure
+ * @is_producer - 1 means Producer endpoint
+ *              - 0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value
+ */
+static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
+				struct msm_hs_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_handle;
+	struct sps_connect *sps_config = &ep->config;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_handle = sps_alloc_endpoint();
+	if (!sps_pipe_handle) {
+		MSM_HS_ERR("%s(): sps_alloc_endpoint, failed,is_producer=%d\n",
+			 __func__, is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_handle, sps_config);
+	if (rc) {
+		MSM_HS_ERR("%s(): failed,pipe_handle=0x%pK rc=%d\n",
+			__func__, sps_pipe_handle, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/* For a UART producer transfer, the source is the UART
+		 * peripheral, whereas the destination is system memory
+		 */
+		sps_config->source = msm_uport->bam_handle;
+		sps_config->destination = SPS_DEV_HANDLE_MEM;
+		sps_config->mode = SPS_MODE_SRC;
+		sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
+		sps_config->dest_pipe_index = 0;
+		sps_event->callback = msm_hs_sps_rx_callback;
+	} else {
+		/* For a UART consumer transfer, the source is system memory,
+		 * whereas the destination is the UART peripheral
+		 */
+		sps_config->source = SPS_DEV_HANDLE_MEM;
+		sps_config->destination = msm_uport->bam_handle;
+		sps_config->mode = SPS_MODE_DEST;
+		sps_config->src_pipe_index = 0;
+		sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
+		sps_event->callback = msm_hs_sps_tx_callback;
+	}
+
+	sps_config->options = SPS_O_EOT | SPS_O_DESC_DONE | SPS_O_AUTO_ENABLE;
+	sps_config->event_thresh = 0x10;
+
+	/* Allocate maximum descriptor fifo size */
+	sps_config->desc.size =
+		(1 + UART_DMA_DESC_NR) * sizeof(struct sps_iovec);
+	sps_config->desc.base = dma_zalloc_coherent(msm_uport->uport.dev,
+						sps_config->desc.size,
+						&sps_config->desc.phys_base,
+						GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		rc = -ENOMEM;
+		MSM_HS_ERR("msm_serial_hs: dma_zalloc_coherent() failed\n");
+		goto get_config_err;
+	}
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+
+	sps_event->options = SPS_O_DESC_DONE | SPS_O_EOT;
+	sps_event->user = (void *)msm_uport;
+
+	/* Now save the sps pipe handle */
+	ep->pipe_handle = sps_pipe_handle;
+	MSM_HS_DBG("%s(): %s: pipe_handle=0x%pK, desc_phys_base=0x%pa\n",
+		 __func__, is_producer ? "READ" : "WRITE",
+		sps_pipe_handle, &sps_config->desc.phys_base);
+	return 0;
+
+get_config_err:
+	sps_free_endpoint(sps_pipe_handle);
+out:
+	return rc;
+}
+
+/**
+ * Initialize SPS HW connected with UART core
+ *
+ * This function registers BAM HW resources with the
+ * SPS driver and then initializes the two SPS endpoints.
+ *
+ * @msm_uport - Pointer to msm_hs_port structure
+ *
+ * @return - 0 if successful else negative value
+ */
+static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	unsigned long bam_handle;
+
+	rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
+	if (rc || !bam_handle) {
+		bam.phys_addr = msm_uport->bam_mem;
+		bam.virt_addr = msm_uport->bam_base;
+		/*
+		 * This event threshold is only significant for BAM-to-BAM
+		 * transfer. It's ignored for BAM-to-System mode transfer.
+		 */
+		bam.event_threshold = 0x10;	/* Pipe event threshold */
+		bam.summing_threshold = 1;	/* BAM event threshold */
+
+		/* The SPS driver will handle the UART BAM IRQ */
+		bam.irq = (u32)msm_uport->bam_irq;
+		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+
+		MSM_HS_DBG("msm_serial_hs: bam physical base=0x%pa\n",
+							&bam.phys_addr);
+		MSM_HS_DBG("msm_serial_hs: bam virtual base=0x%pa\n",
+							bam.virt_addr);
+
+		/* Register UART Peripheral BAM device to SPS driver */
+		rc = sps_register_bam_device(&bam, &bam_handle);
+		if (rc) {
+			MSM_HS_ERR("%s(): BAM device register failed\n",
+				  __func__);
+			return rc;
+		}
+		MSM_HS_DBG("%s():BAM device registered. bam_handle=0x%lx\n",
+			   __func__, msm_uport->bam_handle);
+	}
+	msm_uport->bam_handle = bam_handle;
+
+	rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
+				UART_SPS_PROD_PERIPHERAL);
+	if (rc) {
+		MSM_HS_ERR("%s(): Failed to Init Producer BAM-pipe\n",
+				__func__);
+		goto deregister_bam;
+	}
+
+	rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
+				UART_SPS_CONS_PERIPHERAL);
+	if (rc) {
+		MSM_HS_ERR("%s(): Failed to Init Consumer BAM-pipe\n",
+				__func__);
+		goto deinit_ep_conn_prod;
+	}
+	return 0;
+
+deinit_ep_conn_prod:
+	msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
+deregister_bam:
+	sps_deregister_bam_device(msm_uport->bam_handle);
+	return rc;
+}
+
+static bool deviceid[UARTDM_NR] = {0};
+/*
+ * The mutex synchronizes grabbing the next free device number,
+ * whether or not an alias is used. When an alias is used,
+ * msm_hs_dt_to_pdata() gets it and the boolean array is updated
+ * with device_id_set_used(); if no alias is used,
+ * device_id_grab_next_free() sets the array entry instead.
+ */
+static DEFINE_MUTEX(mutex_next_device_id);
+
+static int device_id_grab_next_free(void)
+{
+	int i;
+	int ret = -ENODEV;
+
+	mutex_lock(&mutex_next_device_id);
+	for (i = 0; i < UARTDM_NR; i++)
+		if (!deviceid[i]) {
+			ret = i;
+			deviceid[i] = true;
+			break;
+		}
+	mutex_unlock(&mutex_next_device_id);
+	return ret;
+}
+
+static int device_id_set_used(int index)
+{
+	int ret = 0;
+
+	mutex_lock(&mutex_next_device_id);
+	if (deviceid[index])
+		ret = -ENODEV;
+	else
+		deviceid[index] = true;
+	mutex_unlock(&mutex_next_device_id);
+	return ret;
+}
+
+static void obs_manage_irq(struct msm_hs_port *msm_uport, bool en)
+{
+	struct uart_port *uport = &(msm_uport->uport);
+
+	if (msm_uport->obs) {
+		if (en)
+			enable_irq(uport->irq);
+		else
+			disable_irq(uport->irq);
+	}
+}
+
+static void msm_hs_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int ret;
+	int client_count = 0;
+
+	if (!msm_uport)
+		goto err_suspend;
+	mutex_lock(&msm_uport->mtx);
+
+	client_count = atomic_read(&msm_uport->client_count);
+	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	msm_hs_resource_off(msm_uport);
+	obs_manage_irq(msm_uport, false);
+	msm_hs_clk_bus_unvote(msm_uport);
+
+	/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
+	if (msm_uport->obs) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+			msm_uport->gpio_state_suspend);
+		if (ret)
+			MSM_HS_ERR("%s():Error secting pctrl suspend state\n",
+				__func__);
+	}
+
+	if (!atomic_read(&msm_uport->client_req_state))
+		enable_wakeup_interrupt(msm_uport);
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s(): PM State Suspended client_count %d\n", __func__,
+								client_count);
+	mutex_unlock(&msm_uport->mtx);
+	return;
+err_suspend:
+	pr_err("%s(): invalid uport\n", __func__);
+}
+
+static int msm_hs_pm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	int ret = 0;
+	int client_count = 0;
+
+	if (!msm_uport) {
+		dev_err(dev, "%s():Invalid uport\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&msm_uport->mtx);
+	client_count = atomic_read(&msm_uport->client_count);
+	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE)
+		goto exit_pm_resume;
+	if (!atomic_read(&msm_uport->client_req_state))
+		disable_wakeup_interrupt(msm_uport);
+
+	/* For OBS, don't use wakeup interrupt, set gpio to active state */
+	if (msm_uport->obs) {
+		ret = pinctrl_select_state(msm_uport->pinctrl,
+				msm_uport->gpio_state_active);
+		if (ret)
+			MSM_HS_ERR("%s():Error selecting active state\n",
+				 __func__);
+	}
+
+	ret = msm_hs_clk_bus_vote(msm_uport);
+	if (ret) {
+		MSM_HS_ERR("%s():Failed clock vote %d\n", __func__, ret);
+		goto exit_pm_resume;
+	}
+	obs_manage_irq(msm_uport, true);
+	msm_uport->pm_state = MSM_HS_PM_ACTIVE;
+	msm_hs_resource_on(msm_uport);
+
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s():PM State:Active client_count %d\n",
+		 __func__, client_count);
+exit_pm_resume:
+	mutex_unlock(&msm_uport->mtx);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+	enum msm_hs_pm_state prev_pwr_state;
+	int clk_cnt, client_count, ret = 0;
+
+	if (IS_ERR_OR_NULL(msm_uport))
+		return -ENODEV;
+
+	mutex_lock(&msm_uport->mtx);
+
+	/*
+	 * If there is an active clk request or an impending userspace request
+	 * fail the suspend callback.
+	 */
+	clk_cnt = atomic_read(&msm_uport->resource_count);
+	client_count = atomic_read(&msm_uport->client_count);
+	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
+		MSM_HS_WARN("%s():Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
+				 __func__, clk_cnt, client_count);
+		ret = -EBUSY;
+		goto exit_suspend_noirq;
+	}
+
+	prev_pwr_state = msm_uport->pm_state;
+	msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s():PM State:Sys-Suspended client_count %d\n", __func__,
+								client_count);
+exit_suspend_noirq:
+	mutex_unlock(&msm_uport->mtx);
+	return ret;
+}
+
+static int msm_hs_pm_sys_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
+
+	if (IS_ERR_OR_NULL(msm_uport))
+		return -ENODEV;
+	/*
+	 * Note system-pm resume and update the state
+	 * variable. Resource activation will be done
+	 * when transfer is requested.
+	 */
+
+	mutex_lock(&msm_uport->mtx);
+	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED)
+		msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+		"%s():PM State: Suspended\n", __func__);
+	mutex_unlock(&msm_uport->mtx);
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static void msm_serial_hs_rt_init(struct uart_port *uport)
+{
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+
+	MSM_HS_DBG("%s(): Enabling runtime pm\n", __func__);
+	pm_runtime_set_suspended(uport->dev);
+	pm_runtime_set_autosuspend_delay(uport->dev, 100);
+	pm_runtime_use_autosuspend(uport->dev);
+	mutex_lock(&msm_uport->mtx);
+	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+	mutex_unlock(&msm_uport->mtx);
+	pm_runtime_enable(uport->dev);
+}
+
+static int msm_hs_runtime_suspend(struct device *dev)
+{
+	msm_hs_pm_suspend(dev);
+	return 0;
+}
+
+static int msm_hs_runtime_resume(struct device *dev)
+{
+	return msm_hs_pm_resume(dev);
+}
+#else
+static void msm_serial_hs_rt_init(struct uart_port *uport) {}
+static int msm_hs_runtime_suspend(struct device *dev) { return 0; }
+static int msm_hs_runtime_resume(struct device *dev) { return 0; }
+#endif
+
+static int msm_hs_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct uart_port *uport;
+	struct msm_hs_port *msm_uport;
+	struct resource *core_resource;
+	struct resource *bam_resource;
+	int core_irqres, bam_irqres, wakeup_irqres;
+	struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
+	unsigned long data;
+	char name[30];
+
+	if (pdev->dev.of_node) {
+		dev_dbg(&pdev->dev, "device tree enabled\n");
+		pdata = msm_hs_dt_to_pdata(pdev);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+
+		if (pdev->id < 0) {
+			pdev->id = device_id_grab_next_free();
+			if (pdev->id < 0) {
+				dev_err(&pdev->dev,
+					"Error grabbing next free device id\n");
+				return pdev->id;
+			}
+		} else {
+			ret = device_id_set_used(pdev->id);
+			if (ret < 0) {
+				dev_warn(&pdev->dev, "%d alias taken\n",
+					pdev->id);
+				return ret;
+			}
+		}
+		pdev->dev.platform_data = pdata;
+	}
+
+	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
+		dev_err(&pdev->dev, "Invalid plaform device ID = %d\n",
+								pdev->id);
+		return -EINVAL;
+	}
+
+	msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
+			GFP_KERNEL);
+	if (!msm_uport)
+		return -ENOMEM;
+
+	msm_uport->uport.type = PORT_UNKNOWN;
+	uport = &msm_uport->uport;
+	uport->dev = &pdev->dev;
+
+	if (pdev->dev.of_node)
+		msm_uport->uart_type = BLSP_HSUART;
+
+	msm_hs_get_pinctrl_configs(uport);
+	/* Get required resources for BAM HSUART */
+	core_resource = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "core_mem");
+	if (!core_resource) {
+		dev_err(&pdev->dev, "Invalid core HSUART Resources\n");
+		return -ENXIO;
+	}
+	bam_resource = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "bam_mem");
+	if (!bam_resource) {
+		dev_err(&pdev->dev, "Invalid BAM HSUART Resources\n");
+		return -ENXIO;
+	}
+	core_irqres = platform_get_irq_byname(pdev, "core_irq");
+	if (core_irqres < 0) {
+		dev_err(&pdev->dev, "Error %d, invalid core irq resources\n",
+			core_irqres);
+		return -ENXIO;
+	}
+	bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
+	if (bam_irqres < 0) {
+		dev_err(&pdev->dev, "Error %d, invalid bam irq resources\n",
+			bam_irqres);
+		return -ENXIO;
+	}
+	wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
+	if (wakeup_irqres < 0) {
+		wakeup_irqres = -1;
+		pr_info("Wakeup irq not specified\n");
+	}
+
+	uport->mapbase = core_resource->start;
+
+	uport->membase = ioremap(uport->mapbase,
+				resource_size(core_resource));
+	if (unlikely(!uport->membase)) {
+		dev_err(&pdev->dev, "UART Resource ioremap Failed\n");
+		return -ENOMEM;
+	}
+	msm_uport->bam_mem = bam_resource->start;
+	msm_uport->bam_base = ioremap(msm_uport->bam_mem,
+				resource_size(bam_resource));
+	if (unlikely(!msm_uport->bam_base)) {
+		dev_err(&pdev->dev, "UART BAM Resource ioremap Failed\n");
+		iounmap(uport->membase);
+		return -ENOMEM;
+	}
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_state");
+	msm_uport->ipc_msm_hs_log_ctxt =
+			ipc_log_context_create(IPC_MSM_HS_LOG_STATE_PAGES,
+								name, 0);
+	if (!msm_uport->ipc_msm_hs_log_ctxt) {
+		dev_err(&pdev->dev, "%s(): error creating logging context\n",
+								__func__);
+	} else {
+		msm_uport->ipc_debug_mask = INFO_LEV;
+		ret = sysfs_create_file(&pdev->dev.kobj,
+				&dev_attr_debug_mask.attr);
+		if (unlikely(ret))
+			MSM_HS_WARN("%s(): Failed create dev. attr\n",
+			 __func__);
+	}
+
+	uport->irq = core_irqres;
+	msm_uport->bam_irq = bam_irqres;
+	pdata->wakeup_irq = wakeup_irqres;
+
+	msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (!msm_uport->bus_scale_table) {
+		MSM_HS_ERR("BLSP UART: Bus scaling is disabled\n");
+	} else {
+		msm_uport->bus_perf_client =
+			msm_bus_scale_register_client
+				(msm_uport->bus_scale_table);
+		if (IS_ERR(&msm_uport->bus_perf_client)) {
+			MSM_HS_ERR("%s():Bus client register failed\n",
+				   __func__);
+			ret = -EINVAL;
+			goto unmap_memory;
+		}
+	}
+
+	msm_uport->wakeup.irq = pdata->wakeup_irq;
+	msm_uport->wakeup.ignore = 1;
+	msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
+	msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
+	msm_uport->obs = pdata->obs;
+
+	msm_uport->bam_tx_ep_pipe_index =
+			pdata->bam_tx_ep_pipe_index;
+	msm_uport->bam_rx_ep_pipe_index =
+			pdata->bam_rx_ep_pipe_index;
+	msm_uport->wakeup.enabled = true;
+
+	uport->iotype = UPIO_MEM;
+	uport->fifosize = 64;
+	uport->ops = &msm_hs_ops;
+	uport->flags = UPF_BOOT_AUTOCONF;
+	uport->uartclk = 7372800;
+	msm_uport->imr_reg = 0x0;
+
+	msm_uport->clk = clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(msm_uport->clk)) {
+		ret = PTR_ERR(msm_uport->clk);
+		goto deregister_bus_client;
+	}
+
+	msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
+	/*
+	 * Some configurations do not require explicit pclk control so
+	 * do not flag error on pclk get failure.
+	 */
+	if (IS_ERR(msm_uport->pclk))
+		msm_uport->pclk = NULL;
+
+	msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
+					WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+	if (!msm_uport->hsuart_wq) {
+		MSM_HS_ERR("%s(): Unable to create workqueue hsuart_wq\n",
+								__func__);
+		ret =  -ENOMEM;
+		goto put_clk;
+	}
+
+	mutex_init(&msm_uport->mtx);
+
+	/* Initialize SPS HW connected with UART core */
+	ret = msm_hs_sps_init(msm_uport);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("SPS Initialization failed, err=%d\n", ret);
+		goto destroy_mutex;
+	}
+
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	msm_uport->rx.flush = FLUSH_SHUTDOWN;
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_tx");
+	msm_uport->tx.ipc_tx_ctxt =
+		ipc_log_context_create(IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+	if (!msm_uport->tx.ipc_tx_ctxt)
+		dev_err(&pdev->dev, "%s(): error creating tx log context\n",
+								__func__);
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_rx");
+	msm_uport->rx.ipc_rx_ctxt = ipc_log_context_create(
+					IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+	if (!msm_uport->rx.ipc_rx_ctxt)
+		dev_err(&pdev->dev, "%s(): error creating rx log context\n",
+								__func__);
+
+	memset(name, 0, sizeof(name));
+	scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+									"_pwr");
+	msm_uport->ipc_msm_hs_pwr_ctxt = ipc_log_context_create(
+					IPC_MSM_HS_LOG_USER_PAGES, name, 0);
+	if (!msm_uport->ipc_msm_hs_pwr_ctxt)
+		dev_err(&pdev->dev, "%s(): error creating usr log context\n",
+								__func__);
+
+	uport->irq = core_irqres;
+	msm_uport->bam_irq = bam_irqres;
+
+	clk_set_rate(msm_uport->clk, msm_uport->uport.uartclk);
+	msm_hs_clk_bus_vote(msm_uport);
+	ret = uartdm_init_port(uport);
+	if (unlikely(ret))
+		goto err_clock;
+
+	/* configure the CR Protection to Enable */
+	msm_hs_write(uport, UART_DM_CR, CR_PROTECTION_EN);
+
+	/*
+	 * Enable command register protection before going ahead, as this hw
+	 * configuration makes sure that a cmd issued to the CR register
+	 * completes before the next cmd starts. Hence an mb() is required
+	 * here.
+	 */
+	mb();
+
+	/*
+	 * Set RX_BREAK_ZERO_CHAR_OFF and RX_ERROR_CHAR_OFF
+	 * so that any rx_break or character with a parity or framing
+	 * error does not enter the UART RX FIFO.
+	 */
+	data = msm_hs_read(uport, UART_DM_MR2);
+	data |= (UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF |
+			UARTDM_MR2_RX_ERROR_CHAR_OFF);
+	msm_hs_write(uport, UART_DM_MR2, data);
+	/* Ensure register IO completion */
+	mb();
+
+	ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
+	if (unlikely(ret)) {
+		MSM_HS_ERR("Probe Failed as sysfs failed\n");
+		goto err_clock;
+	}
+
+	msm_serial_debugfs_init(msm_uport, pdev->id);
+	msm_hs_unconfig_uart_gpios(uport);
+
+	uport->line = pdev->id;
+	if (pdata->userid && pdata->userid <= UARTDM_NR)
+		uport->line = pdata->userid;
+	ret = uart_add_one_port(&msm_hs_driver, uport);
+	if (!ret) {
+		msm_hs_clk_bus_unvote(msm_uport);
+		msm_serial_hs_rt_init(uport);
+		return ret;
+	}
+
+err_clock:
+	msm_hs_clk_bus_unvote(msm_uport);
+
+destroy_mutex:
+	mutex_destroy(&msm_uport->mtx);
+	destroy_workqueue(msm_uport->hsuart_wq);
+
+put_clk:
+	if (msm_uport->pclk)
+		clk_put(msm_uport->pclk);
+
+	if (msm_uport->clk)
+		clk_put(msm_uport->clk);
+
+deregister_bus_client:
+	msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
+unmap_memory:
+	iounmap(uport->membase);
+	iounmap(msm_uport->bam_base);
+
+	return ret;
+}
+
+static int __init msm_serial_hs_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&msm_hs_driver);
+	if (unlikely(ret)) {
+		pr_err("%s failed to load\n", __func__);
+		return ret;
+	}
+	debug_base = debugfs_create_dir("msm_serial_hs", NULL);
+	if (IS_ERR_OR_NULL(debug_base))
+		pr_err("msm_serial_hs: Cannot create debugfs dir\n");
+
+	ret = platform_driver_register(&msm_serial_hs_platform_driver);
+	if (ret) {
+		pr_err("%s failed to load\n", __func__);
+		debugfs_remove_recursive(debug_base);
+		uart_unregister_driver(&msm_hs_driver);
+		return ret;
+	}
+
+	pr_debug("msm_serial_hs module loaded\n");
+	return ret;
+}
+
+/*
+ *  Called by the upper layer when the port is closed.
+ *     - Disables the port
+ *     - Unhooks the ISR
+ */
+static void msm_hs_shutdown(struct uart_port *uport)
+{
+	int ret, rc;
+	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+	struct circ_buf *tx_buf = &uport->state->xmit;
+	unsigned int data;
+	unsigned long flags;
+
+	if (is_use_low_power_wakeup(msm_uport))
+		irq_set_irq_wake(msm_uport->wakeup.irq, 0);
+
+	if (msm_uport->wakeup.enabled)
+		disable_irq(msm_uport->wakeup.irq);
+	else
+		disable_irq(uport->irq);
+
+	spin_lock_irqsave(&uport->lock, flags);
+	msm_uport->wakeup.enabled = false;
+	msm_uport->wakeup.ignore = 1;
+	spin_unlock_irqrestore(&uport->lock, flags);
+
+	/* Free the interrupt */
+	free_irq(uport->irq, msm_uport);
+	if (is_use_low_power_wakeup(msm_uport)) {
+		free_irq(msm_uport->wakeup.irq, msm_uport);
+		MSM_HS_DBG("%s(): wakeup irq freed\n", __func__);
+	}
+	msm_uport->wakeup.freed = true;
+
+	/* make sure tx lh finishes */
+	kthread_flush_worker(&msm_uport->tx.kworker);
+	ret = wait_event_timeout(msm_uport->tx.wait,
+			uart_circ_empty(tx_buf), 500);
+	if (!ret)
+		MSM_HS_WARN("Shutdown called when tx buff not empty\n");
+
+	msm_hs_resource_vote(msm_uport);
+	/* Stop remote side from sending data */
+	msm_hs_disable_flow_control(uport, false);
+	/* make sure rx lh finishes */
+	kthread_flush_worker(&msm_uport->rx.kworker);
+
+	if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
+		/* disable and disconnect rx */
+		ret = wait_event_timeout(msm_uport->rx.wait,
+				!msm_uport->rx.pending_flag, 500);
+		if (!ret)
+			MSM_HS_WARN("%s(): rx disconnect not complete\n",
+				__func__);
+		msm_hs_disconnect_rx(uport);
+	}
+
+	cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
+	flush_workqueue(msm_uport->hsuart_wq);
+
+	/* BAM Disconnect for TX */
+	data = msm_hs_read(uport, UART_DM_DMEN);
+	data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
+	msm_hs_write(uport, UART_DM_DMEN, data);
+	ret = sps_tx_disconnect(msm_uport);
+	if (ret)
+		MSM_HS_ERR("%s(): sps_disconnect failed\n",
+					__func__);
+	msm_uport->tx.flush = FLUSH_SHUTDOWN;
+	/* Disable the transmitter */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_DISABLE_BMSK);
+	/* Disable the receiver */
+	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
+
+	msm_uport->imr_reg = 0;
+	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
+	/*
+	 * Complete all device writes before actually disabling uartclk.
+	 * Hence an mb() is required here.
+	 */
+	mb();
+
+	msm_uport->rx.buffer_pending = NONE_PENDING;
+	MSM_HS_DBG("%s(): tx, rx events complete\n", __func__);
+
+	dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
+			 UART_XMIT_SIZE, DMA_TO_DEVICE);
+
+	msm_hs_resource_unvote(msm_uport);
+	rc = atomic_read(&msm_uport->resource_count);
+	if (rc) {
+		atomic_set(&msm_uport->resource_count, 1);
+		MSM_HS_WARN("%s(): removing extra vote\n", __func__);
+		msm_hs_resource_unvote(msm_uport);
+	}
+	if (atomic_read(&msm_uport->client_req_state)) {
+		MSM_HS_WARN("%s(): Client clock vote imbalance\n", __func__);
+		atomic_set(&msm_uport->client_req_state, 0);
+	}
+	if (atomic_read(&msm_uport->client_count)) {
+		MSM_HS_WARN("%s(): Client vote on, forcing to 0\n", __func__);
+		atomic_set(&msm_uport->client_count, 0);
+		LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+			"%s(): Client_Count 0\n", __func__);
+	}
+	msm_hs_unconfig_uart_gpios(uport);
+	MSM_HS_INFO("%s():UART port closed successfully\n", __func__);
+}
+
+static void __exit msm_serial_hs_exit(void)
+{
+	pr_debug("msm_serial_hs module removed\n");
+	debugfs_remove_recursive(debug_base);
+	platform_driver_unregister(&msm_serial_hs_platform_driver);
+	uart_unregister_driver(&msm_hs_driver);
+}
+
+static const struct dev_pm_ops msm_hs_dev_pm_ops = {
+	.runtime_suspend = msm_hs_runtime_suspend,
+	.runtime_resume = msm_hs_runtime_resume,
+	.runtime_idle = NULL,
+	.suspend_noirq = msm_hs_pm_sys_suspend_noirq,
+	.resume_noirq = msm_hs_pm_sys_resume_noirq,
+};
+
+static struct platform_driver msm_serial_hs_platform_driver = {
+	.probe	= msm_hs_probe,
+	.remove = msm_hs_remove,
+	.driver = {
+		.name = "msm_serial_hs",
+		.pm   = &msm_hs_dev_pm_ops,
+		.of_match_table = msm_hs_match_table,
+	},
+};
+
+static struct uart_driver msm_hs_driver = {
+	.owner = THIS_MODULE,
+	.driver_name = "msm_serial_hs",
+	.dev_name = "ttyHS",
+	.nr = UARTDM_NR,
+	.cons = 0,
+};
+
+static const struct uart_ops msm_hs_ops = {
+	.tx_empty = msm_hs_tx_empty,
+	.set_mctrl = msm_hs_set_mctrl_locked,
+	.get_mctrl = msm_hs_get_mctrl_locked,
+	.stop_tx = msm_hs_stop_tx_locked,
+	.start_tx = msm_hs_start_tx_locked,
+	.stop_rx = msm_hs_stop_rx_locked,
+	.enable_ms = msm_hs_enable_ms_locked,
+	.break_ctl = msm_hs_break_ctl,
+	.startup = msm_hs_startup,
+	.shutdown = msm_hs_shutdown,
+	.set_termios = msm_hs_set_termios,
+	.type = msm_hs_type,
+	.config_port = msm_hs_config_port,
+	.flush_buffer = NULL,
+	.ioctl = msm_hs_ioctl,
+};
+
+module_init(msm_serial_hs_init);
+module_exit(msm_serial_hs_exit);
+MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/msm_serial_hs_hwreg.h b/drivers/tty/serial/msm_serial_hs_hwreg.h
new file mode 100644
index 0000000..008ad40
--- /dev/null
+++ b/drivers/tty/serial/msm_serial_hs_hwreg.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* drivers/serial/msm_serial_hs_hwreg.h
+ *
+ * Copyright (c) 2007-2009, 2012-2018, 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef MSM_SERIAL_HS_HWREG_H
+#define MSM_SERIAL_HS_HWREG_H
+
+#define GSBI_CONTROL_ADDR              0x0
+#define GSBI_PROTOCOL_CODE_MASK        0x30
+#define GSBI_PROTOCOL_I2C_UART         0x60
+#define GSBI_PROTOCOL_UART             0x40
+#define GSBI_PROTOCOL_IDLE             0x0
+
+#define TCSR_ADM_1_A_CRCI_MUX_SEL      0x78
+#define TCSR_ADM_1_B_CRCI_MUX_SEL      0x7C
+#define ADM1_CRCI_GSBI6_RX_SEL         0x800
+#define ADM1_CRCI_GSBI6_TX_SEL         0x400
+
+#define MSM_ENABLE_UART_CLOCK TIOCPMGET
+#define MSM_DISABLE_UART_CLOCK TIOCPMPUT
+#define MSM_GET_UART_CLOCK_STATUS TIOCPMACT
+
+enum msm_hsl_regs {
+	UARTDM_MR1,
+	UARTDM_MR2,
+	UARTDM_IMR,
+	UARTDM_SR,
+	UARTDM_CR,
+	UARTDM_CSR,
+	UARTDM_IPR,
+	UARTDM_ISR,
+	UARTDM_RX_TOTAL_SNAP,
+	UARTDM_RFWR,
+	UARTDM_TFWR,
+	UARTDM_RF,
+	UARTDM_TF,
+	UARTDM_MISR,
+	UARTDM_DMRX,
+	UARTDM_NCF_TX,
+	UARTDM_DMEN,
+	UARTDM_BCR,
+	UARTDM_TXFS,
+	UARTDM_RXFS,
+	UARTDM_LAST,
+};
+
+enum msm_hs_regs {
+	UART_DM_MR1 = 0x0,
+	UART_DM_MR2 = 0x4,
+	UART_DM_IMR = 0xb0,
+	UART_DM_SR = 0xa4,
+	UART_DM_CR = 0xa8,
+	UART_DM_CSR = 0xa0,
+	UART_DM_IPR = 0x18,
+	UART_DM_ISR = 0xb4,
+	UART_DM_RX_TOTAL_SNAP = 0xbc,
+	UART_DM_TFWR = 0x1c,
+	UART_DM_RFWR = 0x20,
+	UART_DM_RF = 0x140,
+	UART_DM_TF = 0x100,
+	UART_DM_MISR = 0xac,
+	UART_DM_DMRX = 0x34,
+	UART_DM_NCF_TX = 0x40,
+	UART_DM_DMEN = 0x3c,
+	UART_DM_TXFS = 0x4c,
+	UART_DM_RXFS = 0x50,
+	UART_DM_RX_TRANS_CTRL = 0xcc,
+	UART_DM_BCR = 0xc8,
+};
+
+#define UARTDM_MR1_ADDR 0x0
+#define UARTDM_MR2_ADDR 0x4
+
+/* Backward Compatibility Register for UARTDM Core v1.4 */
+#define UARTDM_BCR_ADDR	0xc8
+
+/*
+ * UARTDM Core v1.4 STALE_IRQ_EMPTY bit definition.
+ * Stale interrupt will fire if bit is set when RX-FIFO is empty
+ */
+#define UARTDM_BCR_TX_BREAK_DISABLE	0x1
+#define UARTDM_BCR_STALE_IRQ_EMPTY	0x2
+#define UARTDM_BCR_RX_DMRX_LOW_EN	0x4
+#define UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL	0x10
+#define UARTDM_BCR_RX_DMRX_1BYTE_RES_EN	0x20
+
+/* TRANSFER_CONTROL Register for UARTDM Core v1.4 */
+#define UARTDM_RX_TRANS_CTRL_ADDR      0xcc
+
+/* TRANSFER_CONTROL Register bits */
+#define RX_STALE_AUTO_RE_EN		0x1
+#define RX_TRANS_AUTO_RE_ACTIVATE	0x2
+#define RX_DMRX_CYCLIC_EN		0x4
+
+/* write only register */
+#define UARTDM_CSR_115200 0xFF
+#define UARTDM_CSR_57600  0xEE
+#define UARTDM_CSR_38400  0xDD
+#define UARTDM_CSR_28800  0xCC
+#define UARTDM_CSR_19200  0xBB
+#define UARTDM_CSR_14400  0xAA
+#define UARTDM_CSR_9600   0x99
+#define UARTDM_CSR_7200   0x88
+#define UARTDM_CSR_4800   0x77
+#define UARTDM_CSR_3600   0x66
+#define UARTDM_CSR_2400   0x55
+#define UARTDM_CSR_1200   0x44
+#define UARTDM_CSR_600    0x33
+#define UARTDM_CSR_300    0x22
+#define UARTDM_CSR_150    0x11
+#define UARTDM_CSR_75     0x00
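+
+/*
+ * Each code above packs the RX clock select in the upper nibble and the
+ * TX clock select in the lower nibble (hence the doubled hex digits).
+ * Illustrative use, a sketch of what the set_termios path is expected
+ * to do for 115200bps:
+ *
+ *	msm_hs_write(uport, UART_DM_CSR, UARTDM_CSR_115200);
+ */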
+
+/* write only register */
+#define UARTDM_IPR_ADDR 0x18
+#define UARTDM_TFWR_ADDR 0x1c
+#define UARTDM_RFWR_ADDR 0x20
+#define UARTDM_HCR_ADDR 0x24
+#define UARTDM_DMRX_ADDR 0x34
+#define UARTDM_DMEN_ADDR 0x3c
+
+/* UART_DM_NO_CHARS_FOR_TX */
+#define UARTDM_NCF_TX_ADDR 0x40
+
+#define UARTDM_BADR_ADDR 0x44
+
+#define UARTDM_SIM_CFG_ADDR 0x80
+
+/* Read Only register */
+#define UARTDM_TXFS_ADDR 0x4C
+#define UARTDM_RXFS_ADDR 0x50
+
+/* Register field Mask Mapping */
+#define UARTDM_SR_RX_BREAK_BMSK	        BIT(6)
+#define UARTDM_SR_PAR_FRAME_BMSK	BIT(5)
+#define UARTDM_SR_OVERRUN_BMSK		BIT(4)
+#define UARTDM_SR_TXEMT_BMSK		BIT(3)
+#define UARTDM_SR_TXRDY_BMSK		BIT(2)
+#define UARTDM_SR_RXRDY_BMSK		BIT(0)
+
+#define UARTDM_CR_TX_DISABLE_BMSK	BIT(3)
+#define UARTDM_CR_RX_DISABLE_BMSK	BIT(1)
+#define UARTDM_CR_TX_EN_BMSK		BIT(2)
+#define UARTDM_CR_RX_EN_BMSK		BIT(0)
+
+/* UARTDM_CR channel_command bit value (register field is bits 8:4) */
+#define RESET_RX		0x10
+#define RESET_TX		0x20
+#define RESET_ERROR_STATUS	0x30
+#define RESET_BREAK_INT		0x40
+#define START_BREAK		0x50
+#define STOP_BREAK		0x60
+#define RESET_CTS		0x70
+#define RESET_STALE_INT		0x80
+#define RFR_LOW			0xD0
+#define RFR_HIGH		0xE0
+#define CR_PROTECTION_EN	0x100
+#define STALE_EVENT_ENABLE	0x500
+#define STALE_EVENT_DISABLE	0x600
+#define FORCE_STALE_EVENT	0x400
+#define CLEAR_TX_READY		0x300
+#define RESET_TX_ERROR		0x800
+#define RESET_TX_DONE		0x810
+
+/*
+ * UARTDM_CR BAM IFC command bit value
+ * for UARTDM Core v1.4
+ */
+#define START_RX_BAM_IFC	0x850
+#define START_TX_BAM_IFC	0x860
+
+#define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00
+#define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f
+#define UARTDM_MR1_CTS_CTL_BMSK 0x40
+#define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80
+
+/*
+ * UARTDM Core v1.4 MR2_RFR_CTS_LOOP bitmask
+ * Enables internal loopback between RFR_N of
+ * RX channel and CTS_N of TX channel.
+ */
+#define UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK	0x400
+
+#define UARTDM_MR2_LOOP_MODE_BMSK		0x80
+#define UARTDM_MR2_ERROR_MODE_BMSK		0x40
+#define UARTDM_MR2_BITS_PER_CHAR_BMSK		0x30
+#define UARTDM_MR2_RX_ZERO_CHAR_OFF		0x100
+#define UARTDM_MR2_RX_ERROR_CHAR_OFF		0x200
+#define UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF	0x100
+
+#define UARTDM_MR2_BITS_PER_CHAR_8	(0x3 << 4)
+
+/* bits per character configuration */
+#define FIVE_BPC  (0 << 4)
+#define SIX_BPC   (1 << 4)
+#define SEVEN_BPC (2 << 4)
+#define EIGHT_BPC (3 << 4)
+
+#define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc
+#define STOP_BIT_ONE (1 << 2)
+#define STOP_BIT_TWO (3 << 2)
+
+#define UARTDM_MR2_PARITY_MODE_BMSK 0x3
+
+/* Parity configuration */
+#define NO_PARITY 0x0
+#define EVEN_PARITY 0x2
+#define ODD_PARITY 0x1
+#define SPACE_PARITY 0x3
+
+#define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80
+#define UARTDM_IPR_STALE_LSB_BMSK 0x1f
+
+/* These can be used for both ISR and IMR register */
+#define UARTDM_ISR_TX_READY_BMSK	BIT(7)
+#define UARTDM_ISR_CURRENT_CTS_BMSK	BIT(6)
+#define UARTDM_ISR_DELTA_CTS_BMSK	BIT(5)
+#define UARTDM_ISR_RXLEV_BMSK		BIT(4)
+#define UARTDM_ISR_RXSTALE_BMSK		BIT(3)
+#define UARTDM_ISR_RXBREAK_BMSK		BIT(2)
+#define UARTDM_ISR_RXHUNT_BMSK		BIT(1)
+#define UARTDM_ISR_TXLEV_BMSK		BIT(0)
+
+/* Field definitions for UART_DM_DMEN*/
+#define UARTDM_TX_DM_EN_BMSK 0x1
+#define UARTDM_RX_DM_EN_BMSK 0x2
+
+/*
+ * UARTDM Core v1.4 bitmask
+ * Bitmasks for enabling Rx and Tx BAM Interface
+ */
+#define UARTDM_TX_BAM_ENABLE_BMSK 0x4
+#define UARTDM_RX_BAM_ENABLE_BMSK 0x8
+
+/* Register offsets for UART Core v1.3 */
+
+/* write only register */
+#define UARTDM_CSR_ADDR    0x8
+
+/* write only register */
+#define UARTDM_TF_ADDR   0x70
+#define UARTDM_TF2_ADDR  0x74
+#define UARTDM_TF3_ADDR  0x78
+#define UARTDM_TF4_ADDR  0x7c
+
+/* write only register */
+#define UARTDM_CR_ADDR 0x10
+/* write only register */
+#define UARTDM_IMR_ADDR 0x14
+#define UARTDM_IRDA_ADDR 0x38
+
+/* Read Only register */
+#define UARTDM_SR_ADDR 0x8
+
+/* Read Only register */
+#define UARTDM_RF_ADDR   0x70
+#define UARTDM_RF2_ADDR  0x74
+#define UARTDM_RF3_ADDR  0x78
+#define UARTDM_RF4_ADDR  0x7c
+
+/* Read Only register */
+#define UARTDM_MISR_ADDR 0x10
+
+/* Read Only register */
+#define UARTDM_ISR_ADDR 0x14
+#define UARTDM_RX_TOTAL_SNAP_ADDR 0x38
+
+#endif /* MSM_SERIAL_HS_HWREG_H */
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index fff99dd..3a5c417 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2924,10 +2924,12 @@
 	union extcon_property_value val;
 	unsigned int extcon_id;
 	struct extcon_dev *edev = NULL;
+	const char *edev_name;
+	char *eud_str;
+	bool eud_connected = false;
 	int ret = 0;
 
 	dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
-
 	if (mdwc->extcon && mdwc->vbus_active && !mdwc->in_restart) {
 		extcon_id = EXTCON_USB;
 		edev = mdwc->extcon[mdwc->ext_idx].edev;
@@ -2936,8 +2938,17 @@
 		edev = mdwc->extcon[mdwc->ext_idx].edev;
 	}
 
+	if (edev) {
+		edev_name = extcon_get_edev_name(edev);
+		dbg_log_string("edev:%s\n", edev_name);
+		/* Skip querying speed and cc_state for EUD edev */
+		eud_str = strnstr(edev_name, "eud", strlen(edev_name));
+		if (eud_str)
+			eud_connected = true;
+	}
+
 	/* Check speed and Type-C polarity values in order to configure PHY */
-	if (edev && extcon_get_state(edev, extcon_id)) {
+	if (!eud_connected && edev && extcon_get_state(edev, extcon_id)) {
 		dwc->maximum_speed = dwc->max_hw_supp_speed;
 
 		ret = extcon_get_property(edev, extcon_id,
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 5216bc7..8e73f4e 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -640,11 +640,22 @@
 	u32 linestate = 0, intr_mask = 0;
 
 	if (qphy->suspended == suspend) {
+		/*
+		 * PHY_SUS_OVERRIDE is set when there is a cable
+		 * disconnect and the previous suspend call was because
+		 * of EUD spoof disconnect. Override this check and
+		 * ensure that the PHY is properly put in low power
+		 * mode.
+		 */
+		if (qphy->phy.flags & PHY_SUS_OVERRIDE)
+			goto suspend;
+
 		dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
 			__func__);
 		return 0;
 	}
 
+suspend:
 	if (suspend) {
 		/* Bus suspend case */
 		if (qphy->cable_connected) {
@@ -697,8 +708,7 @@
 			writel_relaxed(0x00,
 				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
 
-			if (!qphy->eud_enable_reg ||
-					!readl_relaxed(qphy->eud_enable_reg)) {
+			if (!(qphy->phy.flags & EUD_SPOOF_DISCONNECT)) {
 				/* Disable PHY */
 				writel_relaxed(POWER_DOWN |
 					readl_relaxed(qphy->base +
@@ -710,10 +720,11 @@
 				if (qphy->tcsr_clamp_dig_n)
 					writel_relaxed(0x0,
 						qphy->tcsr_clamp_dig_n);
+
+				qusb_phy_enable_clocks(qphy, false);
+				qusb_phy_enable_power(qphy, false);
 			}
 
-			qusb_phy_enable_clocks(qphy, false);
-			qusb_phy_enable_power(qphy, false);
 			mutex_unlock(&qphy->phy_lock);
 
 			/*
@@ -1695,6 +1706,14 @@
 
 	qphy->suspended = true;
 
+	/*
+	 * EUD may be enabled in the boot loader; to keep the EUD session
+	 * alive across kernel boot until the USB PHY driver is initialized
+	 * based on cable status, keep the LDOs on here.
+	 */
+	if (qphy->eud_enable_reg && readl_relaxed(qphy->eud_enable_reg))
+		qusb_phy_enable_power(qphy, true);
+
 	if (of_property_read_bool(dev->of_node, "extcon")) {
 		qphy->id_state = true;
 		qphy->vbus_active = false;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index fa32ce92..110f380 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1100,7 +1100,6 @@
 	} else if (discard_type == DPOLICY_FSTRIM) {
 		dpolicy->io_aware = false;
 	} else if (discard_type == DPOLICY_UMOUNT) {
-		dpolicy->max_requests = UINT_MAX;
 		dpolicy->io_aware = false;
 		/* we need to issue all to keep CP_TRIMMED_FLAG */
 		dpolicy->granularity = 1;
@@ -1461,6 +1460,8 @@
 
 	return issued;
 }
+
+static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+					struct discard_policy *dpolicy);
 
 static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
 					struct discard_policy *dpolicy)
@@ -1469,12 +1470,14 @@
 	struct list_head *pend_list;
 	struct discard_cmd *dc, *tmp;
 	struct blk_plug plug;
-	int i, issued = 0;
+	int i, issued;
 	bool io_interrupted = false;
 
 	if (dpolicy->timeout != 0)
 		f2fs_update_time(sbi, dpolicy->timeout);
 
+retry:
+	issued = 0;
 	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
 		if (dpolicy->timeout != 0 &&
 				f2fs_time_over(sbi, dpolicy->timeout))
@@ -1521,6 +1524,11 @@
 			break;
 	}
 
+	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
+		__wait_all_discard_cmd(sbi, dpolicy);
+		goto retry;
+	}
+
 	if (!issued && io_interrupted)
 		issued = -1;
 
diff --git a/include/dt-bindings/clock/mdss-7nm-pll-clk.h b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
index bb146d7..d414a8a 100644
--- a/include/dt-bindings/clock/mdss-7nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-7nm-pll-clk.h
@@ -30,30 +30,38 @@
 #define POST_VCO_DIV3_5_0_CLK	19
 #define CPHY_PCLK_SRC_MUX_0_CLK	20
 #define CPHY_PCLK_SRC_0_CLK	21
+#define SHADOW_CPHY_BYTECLK_SRC_0_CLK   22
+#define SHADOW_POST_VCO_DIV3_5_0_CLK    23
+#define SHADOW_CPHY_PCLK_SRC_MUX_0_CLK  24
+#define SHADOW_CPHY_PCLK_SRC_0_CLK      25
 
-#define VCO_CLK_1		22
-#define PLL_OUT_DIV_1_CLK	23
-#define BITCLK_SRC_1_CLK	24
-#define BYTECLK_SRC_1_CLK	25
-#define POST_BIT_DIV_1_CLK	26
-#define POST_VCO_DIV_1_CLK	27
-#define BYTECLK_MUX_1_CLK	28
-#define PCLK_SRC_MUX_1_CLK	29
-#define PCLK_SRC_1_CLK		30
-#define PCLK_MUX_1_CLK		31
-#define SHADOW_VCO_CLK_1		32
-#define SHADOW_PLL_OUT_DIV_1_CLK	33
-#define SHADOW_BITCLK_SRC_1_CLK		34
-#define SHADOW_BYTECLK_SRC_1_CLK	35
-#define SHADOW_POST_BIT_DIV_1_CLK	36
-#define SHADOW_POST_VCO_DIV_1_CLK	37
-#define SHADOW_PCLK_SRC_MUX_1_CLK	38
-#define SHADOW_PCLK_SRC_1_CLK		39
+#define VCO_CLK_1		26
+#define PLL_OUT_DIV_1_CLK	27
+#define BITCLK_SRC_1_CLK	28
+#define BYTECLK_SRC_1_CLK	29
+#define POST_BIT_DIV_1_CLK	30
+#define POST_VCO_DIV_1_CLK	31
+#define BYTECLK_MUX_1_CLK	32
+#define PCLK_SRC_MUX_1_CLK	33
+#define PCLK_SRC_1_CLK		34
+#define PCLK_MUX_1_CLK		35
+#define SHADOW_VCO_CLK_1		36
+#define SHADOW_PLL_OUT_DIV_1_CLK	37
+#define SHADOW_BITCLK_SRC_1_CLK		38
+#define SHADOW_BYTECLK_SRC_1_CLK	39
+#define SHADOW_POST_BIT_DIV_1_CLK	40
+#define SHADOW_POST_VCO_DIV_1_CLK	41
+#define SHADOW_PCLK_SRC_MUX_1_CLK	42
+#define SHADOW_PCLK_SRC_1_CLK		43
 /* CPHY clocks for DSI-1 PLL */
-#define CPHY_BYTECLK_SRC_1_CLK	40
-#define POST_VCO_DIV3_5_1_CLK	41
-#define CPHY_PCLK_SRC_MUX_1_CLK	42
-#define CPHY_PCLK_SRC_1_CLK	43
+#define CPHY_BYTECLK_SRC_1_CLK	44
+#define POST_VCO_DIV3_5_1_CLK	45
+#define CPHY_PCLK_SRC_MUX_1_CLK	46
+#define CPHY_PCLK_SRC_1_CLK	47
+#define SHADOW_CPHY_BYTECLK_SRC_1_CLK   48
+#define SHADOW_POST_VCO_DIV3_5_1_CLK    49
+#define SHADOW_CPHY_PCLK_SRC_MUX_1_CLK  50
+#define SHADOW_CPHY_PCLK_SRC_1_CLK      51
 
 
 /* DP PLL clocks */
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index 835fb0c..4aff0c9 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __MSM_BUS_IDS_H
@@ -699,6 +699,8 @@
 #define	MSM_BUS_SLAVE_ANOC_SNOC 834
 #define	MSM_BUS_SLAVE_GPU_CDSP_BIMC 835
 #define	MSM_BUS_SLAVE_AHB2PHY_2 836
+#define	MSM_BUS_SLAVE_HWKM 837
+#define	MSM_BUS_SLAVE_PKA_WRAPPER 838
 
 #define	MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
 #define	MSM_BUS_SLAVE_LLCC_DISPLAY 20513
@@ -1175,4 +1177,6 @@
 #define	ICBID_SLAVE_MAPSS 277
 #define	ICBID_SLAVE_MDSP_MPU_CFG 278
 #define	ICBID_SLAVE_CAMERA_RT_THROTTLE_CFG 279
+#define	ICBID_SLAVE_HWKM 280
+#define	ICBID_SLAVE_PKA_WRAPPER 281
 #endif
diff --git a/include/dt-bindings/msm/power-on.h b/include/dt-bindings/msm/power-on.h
new file mode 100644
index 0000000..c43b89b
--- /dev/null
+++ b/include/dt-bindings/msm/power-on.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015, 2017, 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index c65aa57..3aef2d1 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -201,6 +201,14 @@
 				unsigned int id, unsigned int prop);
 
 /*
+ * The following API sets the array of mutually exclusive cables.
+ * The 'exclusive' argument indicates the array of mutually exclusive sets
+ * of cables that cannot be attached simultaneously.
+ */
+extern int extcon_set_mutually_exclusive(struct extcon_dev *edev,
+				const u32 *exclusive);
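+
+/*
+ * Illustrative call of the API above (a sketch: the zero-terminated array
+ * encoding, where each entry ORs together the indexes of cables that
+ * exclude each other, is an assumption rather than something documented
+ * here):
+ *
+ *	static const u32 exclusive[] = { BIT(0) | BIT(1), 0 };
+ *
+ *	ret = extcon_set_mutually_exclusive(edev, exclusive);
+ */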
+
+/*
  * Following APIs register the notifier block in order to detect
  * the change of both state and property value for each external connector.
  *
diff --git a/include/linux/hwkm.h b/include/linux/hwkm.h
new file mode 100644
index 0000000..4af06b9
--- /dev/null
+++ b/include/linux/hwkm.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __HWKM_H_
+#define __HWKM_H_
+
+#include <stdbool.h>
+#include <stddef.h>
+
+/* Maximum number of bytes in a key used in a KEY_SLOT_RDWR operation */
+#define HWKM_MAX_KEY_SIZE 32
+/* Maximum number of bytes in a SW ctx used in a SYSTEM_KDF operation */
+#define HWKM_MAX_CTX_SIZE 64
+/* Maximum number of bytes in a WKB used in a key wrap or unwrap operation */
+#define HWKM_MAX_BLOB_SIZE 68
+
+/* Opcodes to be set in the op field of a command */
+enum hwkm_op {
+	/* Opcode to generate a random key */
+	NIST_KEYGEN = 0,
+	/* Opcode to derive a key */
+	SYSTEM_KDF,
+	/* Used only by HW */
+	QFPROM_KEY_RDWR,
+	/* Opcode to wrap a key and export the wrapped key */
+	KEY_WRAP_EXPORT,
+	/*
+	 * Opcode to import a wrapped key and unwrap it in the
+	 * specified key slot
+	 */
+	KEY_UNWRAP_IMPORT,
+	/* Opcode to clear a slot */
+	KEY_SLOT_CLEAR,
+	/* Opcode to read or write a key from/to a slot */
+	KEY_SLOT_RDWR,
+	/*
+	 * Opcode to broadcast a TPKEY to all slaves configured
+	 * to receive a TPKEY.
+	 */
+	SET_TPKEY,
+
+	HWKM_MAX_OP,
+	HWKM_UNDEF_OP = 0xFF
+};
+
+/*
+ * Algorithm values which can be used in the alg_allowed field of the
+ * key policy.
+ */
+enum hwkm_alg {
+	AES128_ECB = 0,
+	AES256_ECB = 1,
+	DES_ECB = 2,
+	TDES_ECB = 3,
+	AES128_CBC = 4,
+	AES256_CBC = 5,
+	DES_CBC = 6,
+	TDES_CBC = 7,
+	AES128_CCM_TC = 8,
+	AES128_CCM_NTC = 9,
+	AES256_CCM_TC = 10,
+	AES256_CCM_NTC = 11,
+	AES256_SIV = 12,
+	AES128_CTR = 13,
+	AES256_CTR = 14,
+	AES128_XTS = 15,
+	AES256_XTS = 16,
+	SHA1_HMAC = 17,
+	SHA256_HMAC = 18,
+	AES128_CMAC = 19,
+	AES256_CMAC = 20,
+	SHA384_HMAC = 21,
+	SHA512_HMAC = 22,
+	AES128_GCM = 23,
+	AES256_GCM = 24,
+	KASUMI = 25,
+	SNOW3G = 26,
+	ZUC = 27,
+	PRINCE = 28,
+	SIPHASH = 29,
+	QARMA64 = 30,
+	QARMA128 = 31,
+
+	HWKM_ALG_MAX,
+
+	HWKM_UNDEF_ALG = 0xFF
+};
+
+/* Key type values which can be used in the key_type field of the key policy */
+enum hwkm_type {
+	KEY_DERIVATION_KEY = 0,
+	KEY_WRAPPING_KEY = 1,
+	KEY_SWAPPING_KEY = 2,
+	TRANSPORT_KEY = 3,
+	GENERIC_KEY = 4,
+
+	HWKM_TYPE_MAX,
+
+	HWKM_UNDEF_KEY_TYPE = 0xFF
+};
+
+/* Destinations which a context can use */
+enum hwkm_destination {
+	KM_MASTER = 0,
+	GPCE_SLAVE = 1,
+	MCE_SLAVE = 2,
+	PIMEM_SLAVE = 3,
+	ICE0_SLAVE = 4,
+	ICE1_SLAVE = 5,
+	ICE2_SLAVE = 6,
+	ICE3_SLAVE = 7,
+	DP0_HDCP_SLAVE = 8,
+	DP1_HDCP_SLAVE = 9,
+	ICEMEM_SLAVE = 10,
+
+	HWKM_DESTINATION_MAX,
+
+	HWKM_UNDEF_DESTINATION = 0xFF
+};
+
+/*
+ * Key security levels which can be set in the security_lvl field of
+ * key policy.
+ */
+enum hwkm_security_level {
+	/* Can be read by SW in plaintext using KEY_SLOT_RDWR cmd. */
+	SW_KEY = 0,
+	/* Usable by SW, but not readable in plaintext. */
+	MANAGED_KEY = 1,
+	/* Not usable by SW. */
+	HW_KEY = 2,
+
+	HWKM_SECURITY_LEVEL_MAX,
+
+	HWKM_UNDEF_SECURITY_LEVEL = 0xFF
+};
+
+struct hwkm_key_policy {
+	bool km_by_spu_allowed;
+	bool km_by_modem_allowed;
+	bool km_by_nsec_allowed;
+	bool km_by_tz_allowed;
+
+	enum hwkm_alg alg_allowed;
+
+	bool enc_allowed;
+	bool dec_allowed;
+
+	enum hwkm_type key_type;
+	u8 kdf_depth;
+
+	bool wrap_export_allowed;
+	bool swap_export_allowed;
+
+	enum hwkm_security_level security_lvl;
+
+	enum hwkm_destination hw_destination;
+
+	bool wrap_with_tpk_allowed;
+};
+
+struct hwkm_bsve {
+	bool enabled;
+	bool km_key_policy_ver_en;
+	bool km_apps_secure_en;
+	bool km_msa_secure_en;
+	bool km_lcm_fuse_en;
+	bool km_boot_stage_otp_en;
+	bool km_swc_en;
+	bool km_child_key_policy_en;
+	bool km_mks_en;
+	u64 km_fuse_region_sha_digest_en;
+};
+
+struct hwkm_keygen_cmd {
+	u8 dks;					/* Destination Key Slot */
+	struct hwkm_key_policy policy;		/* Key policy */
+};
+
+struct hwkm_rdwr_cmd {
+	uint8_t slot;			/* Key Slot */
+	bool is_write;			/* Write or read op */
+	struct hwkm_key_policy policy;	/* Key policy for write */
+	uint8_t key[HWKM_MAX_KEY_SIZE];	/* Key for write */
+	size_t sz;			/* Length of key in bytes */
+};
+
+struct hwkm_kdf_cmd {
+	uint8_t dks;			/* Destination Key Slot */
+	uint8_t kdk;			/* Key Derivation Key Slot */
+	uint8_t mks;			/* Mixing key slot (bsve controlled) */
+	struct hwkm_key_policy policy;	/* Key policy. */
+	struct hwkm_bsve bsve;		/* Binding state vector */
+	uint8_t ctx[HWKM_MAX_CTX_SIZE];	/* Context */
+	size_t sz;			/* Length of context in bytes */
+};
+
+struct hwkm_set_tpkey_cmd {
+	uint8_t sks;			/* The slot to use as the TPKEY */
+};
+
+struct hwkm_unwrap_cmd {
+	uint8_t dks;			/* Destination Key Slot */
+	uint8_t kwk;			/* Key Wrapping Key Slot */
+	uint8_t wkb[HWKM_MAX_BLOB_SIZE];/* Wrapped Key Blob */
+	size_t sz;			/* Length of WKB in bytes */
+};
+
+struct hwkm_wrap_cmd {
+	uint8_t sks;			/* Source Key Slot (key to be wrapped) */
+	uint8_t kwk;			/* Key Wrapping Key Slot */
+	struct hwkm_bsve bsve;		/* Binding state vector */
+};
+
+struct hwkm_clear_cmd {
+	uint8_t dks;			/* Destination key slot */
+	bool is_double_key;		/* Whether this is a double key */
+};
+
+struct hwkm_cmd {
+	enum hwkm_op op;		/* Operation */
+	union {		/* Structs with opcode-specific parameters */
+		struct hwkm_keygen_cmd keygen;
+		struct hwkm_rdwr_cmd rdwr;
+		struct hwkm_kdf_cmd kdf;
+		struct hwkm_set_tpkey_cmd set_tpkey;
+		struct hwkm_unwrap_cmd unwrap;
+		struct hwkm_wrap_cmd wrap;
+		struct hwkm_clear_cmd clear;
+	};
+};
+
+struct hwkm_rdwr_rsp {
+	struct hwkm_key_policy policy;	/* Key policy for read */
+	uint8_t key[HWKM_MAX_KEY_SIZE];	/* Only available for read op */
+	size_t sz;			/* Length of the key (bytes) */
+};
+
+struct hwkm_wrap_rsp {
+	uint8_t wkb[HWKM_MAX_BLOB_SIZE];	/* Wrapping key blob */
+	size_t sz;				/* key blob len (bytes) */
+};
+
+struct hwkm_rsp {
+	u32 status;
+	union {		/* Structs with opcode-specific outputs */
+		struct hwkm_rdwr_rsp rdwr;
+		struct hwkm_wrap_rsp wrap;
+	};
+};
+
+enum hwkm_master_key_slots {
+	/* L1 KDKs. Not usable by SW; used by HW to derive the L2 KDKs */
+	NKDK_L1 = 0,
+	PKDK_L1 = 1,
+	SKDK_L1 = 2,
+	UKDK_L1 = 3,
+
+	/*
+	 * L2 KDKs, used to derive keys by SW.
+	 * Cannot be used for crypto, only key derivation
+	 */
+	TZ_NKDK_L2 = 4,
+	TZ_PKDK_L2 = 5,
+	TZ_SKDK_L2 = 6,
+	MODEM_PKDK_L2 = 7,
+	MODEM_SKDK_L2 = 8,
+	TZ_UKDK_L2 = 9,
+
+	/* Slots reserved for the TPKEY */
+	TPKEY_EVEN_SLOT = 10,
+	TPKEY_KEY_ODD_SLOT = 11,
+
+	/* First key slot available for general purpose use cases */
+	MASTER_GENERIC_SLOTS_START,
+
+	UNDEF_SLOT = 0xFF
+};
+
+#if IS_ENABLED(CONFIG_QTI_HW_KEY_MANAGER)
+int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd, struct hwkm_rsp *rsp);
+int qti_hwkm_clocks(bool on);
+int qti_hwkm_init(void);
+#else
+static inline int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd,
+				      struct hwkm_rsp *rsp)
+{
+	return -EOPNOTSUPP;
+}
+static inline int qti_hwkm_clocks(bool on)
+{
+	return -EOPNOTSUPP;
+}
+static inline int qti_hwkm_init(void)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_QTI_HW_KEY_MANAGER */
+#endif /* __HWKM_H_ */
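
To make the command/response interface concrete, a sketch of deriving a managed AES-256 key into the first generic slot with SYSTEM_KDF. The slot choices, policy fields, and context string are illustrative only, not prescribed by this header.

#include <linux/errno.h>
#include <linux/hwkm.h>
#include <linux/string.h>

static int demo_derive_key(void)
{
	static const u8 ctx[] = "demo context";	/* illustrative context */
	struct hwkm_cmd cmd = {0};
	struct hwkm_rsp rsp = {0};
	int ret;

	cmd.op = SYSTEM_KDF;
	cmd.kdf.dks = MASTER_GENERIC_SLOTS_START;	/* destination slot */
	cmd.kdf.kdk = TZ_NKDK_L2;			/* derivation key */
	cmd.kdf.policy.alg_allowed = AES256_CBC;
	cmd.kdf.policy.enc_allowed = true;
	cmd.kdf.policy.dec_allowed = true;
	cmd.kdf.policy.key_type = GENERIC_KEY;
	cmd.kdf.policy.security_lvl = MANAGED_KEY;
	cmd.kdf.policy.hw_destination = ICEMEM_SLAVE;
	memcpy(cmd.kdf.ctx, ctx, sizeof(ctx));
	cmd.kdf.sz = sizeof(ctx);

	ret = qti_hwkm_clocks(true);
	if (ret)
		return ret;
	ret = qti_hwkm_handle_cmd(&cmd, &rsp);
	qti_hwkm_clocks(false);

	if (ret)
		return ret;
	return rsp.status ? -EIO : 0;
}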
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 9887f4f..11e95d9 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -289,6 +289,15 @@
  */
 int iio_read_avail_channel_raw(struct iio_channel *chan,
 			       const int **vals, int *length);
+/**
+ * iio_write_channel_processed() - write to a given channel
+ * @chan:		The channel being queried.
+ * @val:		Value being written.
+ *
+ * Note that processed values are converted to raw values before
+ * being written to the channel.
+ */
+int iio_write_channel_processed(struct iio_channel *chan, int val);
 
 /**
  * iio_get_channel_type() - get the type of a channel
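
Hypothetical consumer usage of the new write helper (the channel name is made up; error handling follows the existing iio consumer pattern).

#include <linux/err.h>
#include <linux/iio/consumer.h>

/* Sketch: program 1800 (processed units, e.g. mV) on an output channel. */
static int demo_set_output(struct device *dev)
{
	struct iio_channel *chan;
	int ret;

	chan = iio_channel_get(dev, "demo_dac");	/* hypothetical name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_write_channel_processed(chan, 1800);
	iio_channel_release(chan);
	return ret;
}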
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index a527a0a..c84db40 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018, 2020 The Linux Foundation. All rights reserved.
  */
 #ifndef _MSM_KGSL_H
 #define _MSM_KGSL_H
@@ -11,6 +11,7 @@
 void *kgsl_pwr_limits_add(u32 id);
 void kgsl_pwr_limits_del(void *limit);
 int kgsl_pwr_limits_set_freq(void *limit, unsigned int freq);
+int kgsl_pwr_limits_set_gpu_fmax(void *limit, unsigned int freq);
 void kgsl_pwr_limits_set_default(void *limit);
 unsigned int kgsl_pwr_limits_get_freq(u32 id);
 
diff --git a/include/linux/platform_data/msm_serial_hs.h b/include/linux/platform_data/msm_serial_hs.h
new file mode 100644
index 0000000..b2150a7
--- /dev/null
+++ b/include/linux/platform_data/msm_serial_hs.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2010-2018, 2020, The Linux Foundation. All rights reserved.
+ * Author: Nick Pelly <npelly@google.com>
+ */
+
+#ifndef __ASM_ARCH_MSM_SERIAL_HS_H
+#define __ASM_ARCH_MSM_SERIAL_HS_H
+
+#include <linux/serial_core.h>
+
+/**
+ * struct msm_serial_hs_platform_data - platform device data
+ *					for msm hsuart device
+ * @wakeup_irq : IRQ line to be configured as Wakeup source.
+ * @inject_rx_on_wakeup : Set to true if a character must be injected on wakeup
+ * @rx_to_inject : Character to be inserted on wakeup
+ * @gpio_config : Configure gpios that are used for uart communication
+ * @userid : User-defined number to be used to enumerate device as tty<userid>
+ * @uart_tx_gpio: GPIO number for UART Tx Line.
+ * @uart_rx_gpio: GPIO number for UART Rx Line.
+ * @uart_cts_gpio: GPIO number for UART CTS Line.
+ * @uart_rfr_gpio: GPIO number for UART RFR Line.
+ * @bam_tx_ep_pipe_index : BAM TX Endpoint Pipe Index for HSUART
+ * @bam_rx_ep_pipe_index : BAM RX Endpoint Pipe Index for HSUART
+ * @no_suspend_delay : When true, the system suspends immediately instead of
+ * delaying the suspend
+ * @obs: Flag to enable out of band sleep feature support
+ */
+struct msm_serial_hs_platform_data {
+	int wakeup_irq;  /* wakeup irq */
+	bool inject_rx_on_wakeup;
+	u8 rx_to_inject;
+	int (*gpio_config)(int gpio_config);
+	int userid;
+
+	int uart_tx_gpio;
+	int uart_rx_gpio;
+	int uart_cts_gpio;
+	int uart_rfr_gpio;
+	unsigned int bam_tx_ep_pipe_index;
+	unsigned int bam_rx_ep_pipe_index;
+	bool no_suspend_delay;
+	bool obs;
+};
+
+/* return true when tx is empty */
+unsigned int msm_hs_tx_empty(struct uart_port *uport);
+int msm_hs_request_clock_off(struct uart_port *uport);
+int msm_hs_request_clock_on(struct uart_port *uport);
+struct uart_port *msm_hs_get_uart_port(int port_index);
+void msm_hs_set_mctrl(struct uart_port *uport,
+				    unsigned int mctrl);
+#endif
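
A sketch of a client driving the exported clock helpers around a transfer (the port index is illustrative; the on/off pairing is assumed from the function names).

#include <linux/errno.h>
#include <linux/platform_data/msm_serial_hs.h>

static int demo_hs_uart_io(void)
{
	struct uart_port *uport = msm_hs_get_uart_port(0);	/* illustrative */
	int ret;

	if (!uport)
		return -ENODEV;

	ret = msm_hs_request_clock_on(uport);
	if (ret)
		return ret;

	/* ... perform the transfer while the clocks are held on ... */

	return msm_hs_request_clock_off(uport);
}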
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f3f7605..775e63e 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -244,6 +244,7 @@
 
 int regulator_count_voltages(struct regulator *regulator);
 int regulator_list_voltage(struct regulator *regulator, unsigned selector);
+int regulator_list_corner_voltage(struct regulator *regulator, int corner);
 int regulator_is_supported_voltage(struct regulator *regulator,
 				   int min_uV, int max_uV);
 unsigned int regulator_get_linear_step(struct regulator *regulator);
@@ -579,6 +580,11 @@
 	return -EINVAL;
 }
 
+static inline int regulator_list_corner_voltage(struct regulator *regulator,
+	int corner)
+{
+	return -EINVAL;
+}
 #endif
 
 static inline int regulator_set_voltage_triplet(struct regulator *regulator,
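
Consumer-side sketch of the new corner lookup; corner numbering is platform specific, so the value here is illustrative.

#include <linux/regulator/consumer.h>

/* Sketch: pin the regulator to the physical ceiling of corner 3. */
static int demo_set_to_corner_ceiling(struct regulator *reg)
{
	int uv = regulator_list_corner_voltage(reg, 3);

	if (uv < 0)
		return uv;	/* corner not usable on this system */

	return regulator_set_voltage(reg, uv, uv);
}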
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 71756e6..7ae7dc3 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -94,6 +94,10 @@
  *	if the selector indicates a voltage that is unusable on this system;
  *	or negative errno.  Selectors range from zero to one less than
  *	regulator_desc.n_voltages.  Voltages may be reported in any order.
+ * @list_corner_voltage: Return the maximum voltage in microvolts that
+ *	can be physically configured for the regulator when operating at
+ *	the specified voltage corner, or a negative errno if the corner
+ *	value can't be used on this system.
  *
  * @set_current_limit: Configure a limit for a current-limited regulator.
  *                     The driver should select the current closest to max_uA.
@@ -150,6 +154,7 @@
 
 	/* enumerate supported voltages */
 	int (*list_voltage) (struct regulator_dev *, unsigned selector);
+	int (*list_corner_voltage)(struct regulator_dev *list_reg, int corner);
 
 	/* get/set regulator voltage */
 	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
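
Provider-side sketch of wiring the new callback into a regulator_ops table; the corner-to-voltage table is hypothetical.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/regulator/driver.h>

/* Hypothetical corner -> microvolt ceiling table. */
static const int demo_corner_uv[] = { 0, 400000, 600000, 800000, 1000000 };

static int demo_list_corner_voltage(struct regulator_dev *rdev, int corner)
{
	if (corner < 0 || corner >= ARRAY_SIZE(demo_corner_uv))
		return -EINVAL;
	return demo_corner_uv[corner];
}

static const struct regulator_ops demo_reg_ops = {
	.list_corner_voltage	= demo_list_corner_voltage,
	/* other callbacks elided */
};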
diff --git a/include/linux/regulator/spm-regulator.h b/include/linux/regulator/spm-regulator.h
new file mode 100644
index 0000000..c1eaee6
--- /dev/null
+++ b/include/linux/regulator/spm-regulator.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2014, 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LINUX_REGULATOR_SPM_H
+#define _LINUX_REGULATOR_SPM_H
+
+#include <linux/err.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_REGULATOR_SPM
+int __init spm_regulator_init(void);
+#else
+static inline int __init spm_regulator_init(void) { return -ENODEV; }
+#endif
+
+#endif
diff --git a/include/linux/spi/qcom-spi.h b/include/linux/spi/qcom-spi.h
new file mode 100644
index 0000000..1888fe5
--- /dev/null
+++ b/include/linux/spi/qcom-spi.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. */
+/*
+ * SPI driver for Qualcomm Technologies, Inc. MSM platforms.
+ */
+
+/**
+ * struct msm_spi_platform_data - MSM SPI controller's configuration data
+ *
+ * @max_clock_speed max spi clock speed
+ * @active_only when set, votes when the system is active and removes the vote
+ *       when the system goes idle (optimizes for performance). When unset,
+ *       votes using runtime pm (optimizes for power).
+ * @master_id master id number of the controller's wrapper (BLSP or GSBI).
+ *       When zero, clock path voting is disabled.
+ * @gpio_config pointer to function for configuring gpio
+ * @gpio_release pointer to function for releasing gpio pins
+ * @dma_config function pointer for configuring the dma engine
+ * @pm_lat power management latency
+ * @infinite_mode use FIFO mode in infinite mode
+ * @ver_reg_exists if the version register exists
+ * @use_bam true if BAM is available
+ * @bam_consumer_pipe_index BAM consumer pipe
+ * @bam_producer_pipe_index BAM producer pipe
+ * @rt_priority true if RT thread
+ * @use_pinctrl true if pinctrl library is used
+ * @is_shared true when the qup is shared between execution environments
+ */
+struct msm_spi_platform_data {
+	u32 max_clock_speed;
+	u32  master_id;
+	u32 bus_width;
+	int (*gpio_config)(void);
+	void (*gpio_release)(void);
+	int (*dma_config)(void);
+	const char *rsl_id;
+	u32  pm_lat;
+	u32  infinite_mode;
+	bool ver_reg_exists;
+	bool use_bam;
+	u32  bam_consumer_pipe_index;
+	u32  bam_producer_pipe_index;
+	bool rt_priority;
+	bool use_pinctrl;
+	bool is_shared;
+};
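
A board-file style sketch populating the platform data; every value below is illustrative.

#include <linux/spi/qcom-spi.h>

static struct msm_spi_platform_data demo_spi_pdata = {
	.max_clock_speed	 = 19200000,	/* 19.2 MHz */
	.master_id		 = 86,		/* BLSP wrapper id */
	.use_bam		 = true,
	.bam_consumer_pipe_index = 12,
	.bam_producer_pipe_index = 13,
	.rt_priority		 = true,
	.is_shared		 = false,
};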
diff --git a/include/soc/qcom/icnss2.h b/include/soc/qcom/icnss2.h
index 64128de..bb75490 100644
--- a/include/soc/qcom/icnss2.h
+++ b/include/soc/qcom/icnss2.h
@@ -167,4 +167,7 @@
 extern int icnss_qmi_send(struct device *dev, int type, void *cmd,
 			  int cmd_len, void *cb_ctx,
 			  int (*cb)(void *ctx, void *event, int event_len));
+extern int icnss_force_wake_request(struct device *dev);
+extern int icnss_force_wake_release(struct device *dev);
+extern int icnss_is_device_awake(struct device *dev);
 #endif /* _ICNSS_WLAN_H_ */
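
Expected call pattern for the new force-wake hooks, as a sketch; the request/release pairing and the boolean sense of icnss_is_device_awake() are assumed from the names.

#include <linux/types.h>
#include <soc/qcom/icnss2.h>

static int demo_access_wlan_regs(struct device *dev)
{
	bool requested = false;
	int ret;

	if (!icnss_is_device_awake(dev)) {
		ret = icnss_force_wake_request(dev);
		if (ret)
			return ret;
		requested = true;
	}

	/* ... register accesses while the device is held awake ... */

	if (requested)
		icnss_force_wake_release(dev);
	return 0;
}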
diff --git a/include/soc/qcom/mpm.h b/include/soc/qcom/mpm.h
index 43bed05..2360335 100644
--- a/include/soc/qcom/mpm.h
+++ b/include/soc/qcom/mpm.h
@@ -16,4 +16,5 @@
 
 extern const struct mpm_pin mpm_bengal_gic_chip_data[];
 extern const struct mpm_pin mpm_scuba_gic_chip_data[];
+extern const struct mpm_pin mpm_sdm660_gic_chip_data[];
 #endif /* __QCOM_MPM_H__ */
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
index b85a3f2..a4b30bd 100644
--- a/include/soc/qcom/qseecomi.h
+++ b/include/soc/qcom/qseecomi.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QSEECOMI_H_
@@ -23,6 +23,8 @@
 #define QSEOS_RESULT_FAIL_MAX_ATTEMPT         -72
 #define QSEOS_RESULT_FAIL_PENDING_OPERATION   -73
 
+#define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED	3
+
 enum qseecom_command_scm_resp_type {
 	QSEOS_APP_ID = 0xEE01,
 	QSEOS_LISTENER_ID
@@ -77,6 +79,7 @@
 	QSEOS_RESULT_SUCCESS = 0,
 	QSEOS_RESULT_INCOMPLETE,
 	QSEOS_RESULT_BLOCKED_ON_LISTENER,
+	QSEOS_RESULT_CBACK_REQUEST,
 	QSEOS_RESULT_FAILURE  = 0xFFFFFFFF
 };
 
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
new file mode 100644
index 0000000..9978bba
--- /dev/null
+++ b/include/soc/qcom/spm.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010-2017, 2020, The Linux Foundation. All rights reserved. */
+
+#ifndef __ARCH_ARM_MACH_MSM_SPM_H
+#define __ARCH_ARM_MACH_MSM_SPM_H
+
+enum {
+	MSM_SPM_MODE_DISABLED,
+	MSM_SPM_MODE_CLOCK_GATING,
+	MSM_SPM_MODE_RETENTION,
+	MSM_SPM_MODE_GDHS,
+	MSM_SPM_MODE_POWER_COLLAPSE,
+	MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE,
+	MSM_SPM_MODE_FASTPC,
+	MSM_SPM_MODE_NR
+};
+
+enum msm_spm_avs_irq {
+	MSM_SPM_AVS_IRQ_MIN,
+	MSM_SPM_AVS_IRQ_MAX,
+};
+
+struct msm_spm_device;
+struct device_node;
+
+#if defined(CONFIG_MSM_SPM)
+
+int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm);
+void msm_spm_set_rpm_hs(bool allow_rpm_hs);
+int msm_spm_probe_done(void);
+int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel);
+int msm_spm_get_vdd(unsigned int cpu);
+int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node,
+		unsigned int val, int cpu, int vctl_offset);
+struct msm_spm_device *msm_spm_get_device_by_name(const char *name);
+int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm);
+int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm);
+int msm_spm_device_init(void);
+bool msm_spm_is_mode_avail(unsigned int mode);
+void msm_spm_dump_regs(unsigned int cpu);
+int msm_spm_is_avs_enabled(unsigned int cpu);
+int msm_spm_avs_enable(unsigned int cpu);
+int msm_spm_avs_disable(unsigned int cpu);
+int msm_spm_avs_set_limit(unsigned int cpu, uint32_t min_lvl,
+		uint32_t max_lvl);
+int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+
+#if defined(CONFIG_MSM_L2_SPM)
+
+/* Public functions */
+
+int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt);
+int msm_spm_enable_fts_lpm(int cpu, uint32_t mode);
+
+#else
+
+static inline int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_enable_fts_lpm(int cpu, uint32_t mode)
+{
+	return -ENODEV;
+}
+#endif /* defined(CONFIG_MSM_L2_SPM) */
+#else /* defined(CONFIG_MSM_SPM) */
+static inline int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+	return -ENODEV;
+}
+
+static inline void msm_spm_set_rpm_hs(bool allow_rpm_hs) {}
+
+static inline int msm_spm_probe_done(void)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_get_vdd(unsigned int cpu)
+{
+	return 0;
+}
+
+static inline int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node,
+		unsigned int val, int cpu, int vctl_offset)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_device_init(void)
+{
+	return -ENODEV;
+}
+
+static inline void msm_spm_dump_regs(unsigned int cpu)
+{ }
+
+static inline int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_config_low_power_mode_addr(
+	struct msm_spm_device *dev, unsigned int mode, bool notify_rpm)
+{
+	return -ENODEV;
+}
+
+static inline struct msm_spm_device *msm_spm_get_device_by_name(
+				const char *name)
+{
+	return NULL;
+}
+
+static inline bool msm_spm_is_mode_avail(unsigned int mode)
+{
+	return false;
+}
+
+static inline int msm_spm_is_avs_enabled(unsigned int cpu)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_enable(unsigned int cpu)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_disable(unsigned int cpu)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_set_limit(unsigned int cpu, uint32_t min_lvl,
+		uint32_t max_lvl)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_enable_irq(unsigned int cpu,
+		enum msm_spm_avs_irq irq)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_disable_irq(unsigned int cpu,
+		enum msm_spm_avs_irq irq)
+{
+	return -ENODEV;
+}
+
+static inline int msm_spm_avs_clear_irq(unsigned int cpu,
+		enum msm_spm_avs_irq irq)
+{
+	return -ENODEV;
+}
+
+#endif  /* defined (CONFIG_MSM_SPM) */
+#endif  /* __ARCH_ARM_MACH_MSM_SPM_H */
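
A cpuidle-style sketch of the SPM API above; the mode choice and RPM notification flag are illustrative, and msm_spm_probe_done() is assumed to return 0 once the driver has probed.

#include <linux/errno.h>
#include <soc/qcom/spm.h>

static int demo_enter_power_collapse(void)
{
	int ret = msm_spm_probe_done();

	if (ret)
		return ret;	/* SPM not ready yet */

	if (!msm_spm_is_mode_avail(MSM_SPM_MODE_POWER_COLLAPSE))
		return -ENODEV;

	/* Program the SPM for power collapse and notify the RPM. */
	return msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_COLLAPSE, true);
}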
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 125b5a9..ac1e188 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -35,7 +35,7 @@
 
 
 #define TASKSTATS_VERSION	9
-#define TASKSTATS2_VERSION	1
+#define TASKSTATS2_VERSION	2
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
 
@@ -181,6 +181,19 @@
 	__u64 shmem_rss;	/* KB */
 	__u64 unreclaimable;	/* KB */
 	/* version 1 ends here */
+
+	/* version 2 begins here */
+	__u64	utime;		/* User CPU time [usec] */
+	__u64	stime;		/* System CPU time [usec] */
+	__u64	cutime;		/* Cumulative User CPU time [usec] */
+	__u64	cstime;		/* Cumulative System CPU time [usec] */
+
+	__u32	uid __attribute__((aligned(8)));
+					/* User ID */
+	__u32	ppid;  /* Parent process ID */
+	char	name[TS_COMM_LEN];  /* Command name */
+	char    state[TS_COMM_LEN]; /* Process state */
+	/* version 2 ends here */
 };
 
 /*
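
Userspace readers should gate on the reported version before touching the new fields; a hedged sketch (the version comes from the TASKSTATS2 netlink reply, and the helper name is illustrative).

#include <stdio.h>
#include <linux/taskstats.h>

/* Sketch: print v2 CPU accounting only when the kernel provides it. */
static void demo_print_cpu(const struct taskstats2 *ts, unsigned int version)
{
	if (version < 2)
		return;	/* v1 kernel: fields after 'unreclaimable' are absent */

	printf("%s (uid %u, ppid %u): utime=%llu us, stime=%llu us\n",
	       ts->name, ts->uid, ts->ppid,
	       (unsigned long long)ts->utime,
	       (unsigned long long)ts->stime);
}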
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index f605007..7369a22 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1003,6 +1003,9 @@
 	V4L2_CID_MPEG_VIDC_VIDEO_ROI_TYPE_2BYTE = 2,
 };
 
+#define V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_HINT \
+	(V4L2_CID_MPEG_MSM_VIDC_BASE + 133)
+
 /*  Camera class control IDs */
 
 #define V4L2_CID_CAMERA_CLASS_BASE	(V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index d5b5f75..4b0c22d 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -650,6 +650,11 @@
 	size_t size;
 	u32 pid;
 	int rc;
+	u64 utime, stime;
+	const struct cred *tcred;
+	struct cgroup_subsys_state *css;
+	unsigned long flags;
+	struct signal_struct *sig;
 
 	size = nla_total_size_64bit(sizeof(struct taskstats2));
 
@@ -691,6 +696,35 @@
 #undef K
 		task_unlock(p);
 	}
+
+	/* version 2 fields begin here */
+	task_cputime(tsk, &utime, &stime);
+	stats->utime = div_u64(utime, NSEC_PER_USEC);
+	stats->stime = div_u64(stime, NSEC_PER_USEC);
+
+	if (lock_task_sighand(tsk, &flags)) {
+		sig = tsk->signal;
+		stats->cutime = div_u64(sig->cutime, NSEC_PER_USEC);
+		stats->cstime = div_u64(sig->cstime, NSEC_PER_USEC);
+		unlock_task_sighand(tsk, &flags);
+	}
+
+	rcu_read_lock();
+	tcred = __task_cred(tsk);
+	stats->uid = from_kuid_munged(current_user_ns(), tcred->uid);
+	stats->ppid = pid_alive(tsk) ?
+		task_tgid_nr_ns(rcu_dereference(tsk->real_parent),
+			task_active_pid_ns(current)) : 0;
+	rcu_read_unlock();
+
+	strlcpy(stats->name, tsk->comm, sizeof(stats->name));
+
+	css = task_get_css(tsk, cpuset_cgrp_id);
+	cgroup_path_ns(css->cgroup, stats->state, sizeof(stats->state),
+				current->nsproxy->cgroup_ns);
+	css_put(css);
+	/* version 2 fields end here */
+
 	put_task_struct(tsk);
 
 	return send_reply(rep_skb, info);
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 5d4758c..5b38bca 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -31,6 +31,7 @@
 #define BUS_INTERVAL_FULL_SPEED 1000 /* in us */
 #define BUS_INTERVAL_HIGHSPEED_AND_ABOVE 125 /* in us */
 #define MAX_BINTERVAL_ISOC_EP 16
+#define DEV_RELEASE_WAIT_TIMEOUT 10000 /* in ms */
 
 #define SND_PCM_CARD_NUM_MASK 0xffff0000
 #define SND_PCM_DEV_NUM_MASK 0xff00
@@ -890,12 +891,14 @@
 		if (ret < 0)
 			uaudio_err("qmi send failed with err: %d\n", ret);
 
-		ret = wait_event_interruptible(dev->disconnect_wq,
-				!atomic_read(&dev->in_use));
-		if (ret < 0) {
-			uaudio_dbg("failed with ret %d\n", ret);
-			return;
-		}
+		ret = wait_event_interruptible_timeout(dev->disconnect_wq,
+				!atomic_read(&dev->in_use),
+				msecs_to_jiffies(DEV_RELEASE_WAIT_TIMEOUT));
+		if (!ret)
+			uaudio_err("timeout while waiting for dev_release\n");
+		else if (ret < 0)
+			uaudio_err("failed with ret %d\n", ret);
+
 		mutex_lock(&chip->dev_lock);
 	}
 
@@ -1154,13 +1157,17 @@
 
 response:
 	if (!req_msg->enable && ret != -EINVAL) {
-		if (info_idx >= 0) {
-			mutex_lock(&chip->dev_lock);
-			info = &uadev[pcm_card_num].info[info_idx];
-			uaudio_dev_intf_cleanup(uadev[pcm_card_num].udev, info);
-			uaudio_dbg("release resources: intf# %d card# %d\n",
-					subs->interface, pcm_card_num);
-			mutex_unlock(&chip->dev_lock);
+		if (ret != -ENODEV) {
+			if (info_idx >= 0) {
+				mutex_lock(&chip->dev_lock);
+				info = &uadev[pcm_card_num].info[info_idx];
+				uaudio_dev_intf_cleanup(
+						uadev[pcm_card_num].udev,
+						info);
+				uaudio_dbg("release resources: intf# %d card# %d\n",
+						subs->interface, pcm_card_num);
+				mutex_unlock(&chip->dev_lock);
+			}
 		}
 		if (atomic_read(&uadev[pcm_card_num].in_use))
 			kref_put(&uadev[pcm_card_num].kref,